├── lambda ├── check │ ├── requirements.txt │ ├── package.sh │ └── lambda_function.py └── parser │ ├── requirements.txt │ ├── package.sh │ ├── ai_parser.py │ └── lambda_function.py ├── infra ├── backend.tfvars.template ├── provider.tf ├── outputs.tf ├── shared │ └── .terraform.lock.hcl ├── QUICK_START.md ├── variables.tf ├── .terraform.lock.hcl └── main.tf ├── LICENSE ├── prisma └── schema.prisma ├── .github └── workflows │ ├── deploy-shared.yml │ ├── destroy-shared.yml │ ├── destroy.yml │ └── deploy.yml ├── test-email-receiving.sh ├── .gitignore ├── destroy.sh ├── README.md ├── deploy.sh ├── destroy-old-infrastructure.sh ├── setup-github-secrets.sh ├── debug-email-receiving.sh ├── MIGRATION_SUMMARY.md └── ENVIRONMENTS.md /lambda/check/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3>=1.26.0 2 | requests 3 | pymongo>=4.0,<5.0 -------------------------------------------------------------------------------- /lambda/parser/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | requests 3 | pymongo>=4.0,<5.0 4 | pystache 5 | google-genai 6 | daytona-sdk 7 | -------------------------------------------------------------------------------- /infra/backend.tfvars.template: -------------------------------------------------------------------------------- 1 | # This file is a template for backend configuration 2 | # The actual key will be generated dynamically based on environment 3 | # Usage: terraform init -backend-config="key=terraform/${ENVIRONMENT}/state.tfstate" 4 | 5 | # Example state paths for different environments: 6 | # main: terraform/main/state.tfstate 7 | # preview: terraform/preview/state.tfstate 8 | # dev: terraform/dev/state.tfstate 9 | 10 | -------------------------------------------------------------------------------- /infra/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.aws_region 3 | 4 | default_tags { 5 | tags = { 6 | Environment = var.environment 7 | Project = "email-to-webhook" 8 | ManagedBy = "terraform" 9 | } 10 | } 11 | } 12 | 13 | terraform { 14 | backend "s3" { 15 | # Backend configuration is set dynamically during terraform init 16 | # Use -backend-config flags to specify: 17 | # - bucket: The S3 bucket in the target AWS account 18 | # - key: The state file path (e.g., "terraform.tfstate" or "terraform/${environment}/state.tfstate") 19 | # - region: The AWS region where the state bucket exists 20 | # Example: terraform init -backend-config="bucket=terraform-state-main" \ 21 | # -backend-config="key=terraform.tfstate" \ 22 | # -backend-config="region=us-east-1" 23 | encrypt = true 24 | } 25 | } 26 | 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 [Yakir Perlin] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 
| copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | generator client { 2 | provider = "prisma-client-js" 3 | } 4 | 5 | datasource db { 6 | provider = "postgresql" 7 | url = env("DATABASE_URL") 8 | } 9 | 10 | model ParsedEmail { 11 | id String @id @default(uuid()) 12 | domain String // The domain part of the recipient address 13 | local_part String // The local part of the recipient address 14 | email_id String // Full email address identifier 15 | iFs_function_called Boolean @default(false) 16 | function_run_id String? // The ID of the function that was called 17 | function_url String? // The URL of the function to call 18 | function_response String? // The response from the function 19 | function_status_code Int? // The status code from the function 20 | is_webhook_sent Boolean @default(false) 21 | webhook_url String? // The webhook URL to send the email data to 22 | webhook_payload Json? // The payload to send to the webhook 23 | webhook_response String? // The response from the webhook 24 | webhook_status_code Int? // The status code from the webhook 25 | email_data Json // JSON string storing additional email data 26 | createdAt DateTime @default(now()) 27 | } -------------------------------------------------------------------------------- /infra/outputs.tf: -------------------------------------------------------------------------------- 1 | # Output the API Gateway endpoint 2 | output "api_gateway_url" { 3 | value = "${aws_apigatewayv2_api.lambda_api.api_endpoint}/prod/v1/domain" 4 | description = "API Gateway endpoint URL" 5 | } 6 | 7 | output "api_gateway_id" { 8 | value = aws_apigatewayv2_api.lambda_api.id 9 | description = "API Gateway ID" 10 | } 11 | 12 | output "api_gateway_name" { 13 | value = aws_apigatewayv2_api.lambda_api.name 14 | description = "API Gateway name (includes environment suffix)" 15 | } 16 | 17 | output "environment" { 18 | value = var.environment 19 | description = "Current deployment environment" 20 | } 21 | 22 | output "email_bucket_name" { 23 | value = aws_s3_bucket.emails_bucket.id 24 | description = "Name of the per-environment email S3 bucket" 25 | } 26 | 27 | output "email_bucket_arn" { 28 | value = aws_s3_bucket.emails_bucket.arn 29 | description = "ARN of the per-environment email S3 bucket" 30 | } 31 | 32 | output "parser_lambda_arn" { 33 | value = aws_lambda_function.parsing_lambda.arn 34 | description = "ARN of the email parser Lambda function" 35 | } 36 | 37 | output "ses_receipt_rule_name" { 38 | value = aws_ses_receipt_rule.env_catch_rule.name 39 | description = "Name of the SES receipt rule for this environment" 40 | } -------------------------------------------------------------------------------- /infra/shared/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by 
"terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "6.19.0" 6 | hashes = [ 7 | "h1:5oDrH7uIKjvBIDd1YKaZwbB5NiJnSafMaiNaKMTy80k=", 8 | "zh:221061660f519f09e9fcd3bbe1fc5c63e81d997e8e9e759984c80095403d7fd6", 9 | "zh:2436e7f7de4492998d7badfae37f88b042ce993f3fdb411ba7f7a47ff4cc66a2", 10 | "zh:49e78e889bf5f9378dfacb08040553bf1529171222eda931e31fcdeac223e802", 11 | "zh:5a07c255ac8694aebe3e166cc3d0ae5f64e0502d47610fd42be22fd907cb81fa", 12 | "zh:68180e2839faba80b64a5e9eb03cfcc50c75dcf0adb24c6763f97dade8311835", 13 | "zh:6c7ae7fb8d51fecdd000bdcfec60222c1f0aeac41dacf1c33aa16609e6ccaf43", 14 | "zh:6ebea9b2eb48fc44ee5674797a5f3b093640b054803495c10a1e558ccd8fee2b", 15 | "zh:8010d1ca1ab0f89732da3c56351779b6728707270c935bf5fd7d99fdf69bc1da", 16 | "zh:8ca7544dbe3b2499d0179fd289e536aedac25115855434d76a4dc342409d335a", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:c6ed10fb06f561d6785c10ff0f0134b7bfcb9964f1bc38ed8b263480bc3cebc0", 19 | "zh:d011d703a3b22f7e296baa8ddfd4d550875daa3f551a133988f843d6c8e6ec38", 20 | "zh:eceb5a8e929b4b0f26e437d1181aeebfb81f376902e0677ead9b886bb41e7c08", 21 | "zh:eda96ae2f993df469cf5dfeecd842e922de97b8a8600e7d197d884ca5179ad2f", 22 | "zh:fb229392236c0c76214d157bb1c7734ded4fa1221e9ef7831d67258950246ff3", 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/deploy-shared.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Shared Infrastructure 2 | 3 | # Manual trigger only - shared infrastructure is deployed once and used by all environments 4 | on: 5 | workflow_dispatch: 6 | 7 | jobs: 8 | deploy-shared: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Checkout code 13 | uses: actions/checkout@v3 14 | 15 | - name: Set up AWS credentials 16 | uses: aws-actions/configure-aws-credentials@v2 17 | with: 18 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 19 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 20 | aws-region: "us-east-1" 21 | 22 | - name: Set up Terraform 23 | uses: hashicorp/setup-terraform@v2 24 | 25 | - name: Terraform Init 26 | run: | 27 | cd infra/shared 28 | terraform init 29 | 30 | - name: Terraform Plan 31 | run: | 32 | cd infra/shared 33 | terraform plan 34 | 35 | - name: Terraform Apply 36 | run: | 37 | cd infra/shared 38 | terraform apply -auto-approve 39 | 40 | - name: Output Results 41 | run: | 42 | cd infra/shared 43 | echo "✅ Shared infrastructure deployed successfully!" 44 | echo "" 45 | echo "Shared Email Bucket: $(terraform output -raw shared_email_bucket_name)" 46 | echo "SES Receipt Rule Set: $(terraform output -raw ses_receipt_rule_set_name)" 47 | echo "" 48 | echo "You can now deploy per-environment infrastructure using the Deploy Infrastructure workflow." 
49 | 50 | -------------------------------------------------------------------------------- /test-email-receiving.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Real-time email receiving test for h.kidox.ai 4 | AWS_PROFILE="main" 5 | BUCKET="email-to-webhook-emails-main-302835751737" 6 | TEST_EMAIL="test@h.kidox.ai" 7 | 8 | echo "==========================================" 9 | echo "Real-Time Email Receiving Test" 10 | echo "==========================================" 11 | echo "" 12 | echo "Send a test email to: $TEST_EMAIL" 13 | echo "" 14 | echo "Monitoring bucket for new emails..." 15 | echo "Press Ctrl+C to stop" 16 | echo "" 17 | 18 | # Get initial count 19 | INITIAL_COUNT=$(aws s3 ls "s3://$BUCKET" --profile "$AWS_PROFILE" --recursive | wc -l | tr -d ' ') 20 | echo "Current email count: $INITIAL_COUNT" 21 | echo "" 22 | 23 | # Monitor for changes 24 | while true; do 25 | CURRENT_COUNT=$(aws s3 ls "s3://$BUCKET" --profile "$AWS_PROFILE" --recursive | wc -l | tr -d ' ') 26 | 27 | if [ "$CURRENT_COUNT" -gt "$INITIAL_COUNT" ]; then 28 | echo "" 29 | echo "✅ NEW EMAIL DETECTED!" 30 | echo "" 31 | echo "Latest emails in bucket:" 32 | aws s3 ls "s3://$BUCKET" --profile "$AWS_PROFILE" --recursive --human-readable | tail -5 33 | echo "" 34 | 35 | # Show the newest email 36 | NEWEST_EMAIL=$(aws s3 ls "s3://$BUCKET" --profile "$AWS_PROFILE" --recursive | sort -k1,2 | tail -1 | awk '{print $4}') 37 | echo "Downloading newest email: $NEWEST_EMAIL" 38 | echo "" 39 | aws s3 cp "s3://$BUCKET/$NEWEST_EMAIL" - --profile "$AWS_PROFILE" | head -30 40 | 41 | break 42 | fi 43 | 44 | echo -n "." 45 | sleep 2 46 | done 47 | 48 | echo "" 49 | echo "Test complete!" 50 | -------------------------------------------------------------------------------- /lambda/check/package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the current directory name (function name) 4 | FUNCTION_NAME=$(basename "$PWD") 5 | ROOT_DIR=$(git rev-parse --show-toplevel 2>/dev/null || echo "$(cd ../ && pwd)") 6 | ZIP_FILE="${ROOT_DIR}/lambda_packages/${FUNCTION_NAME}.zip" 7 | TEMP_DIR=$(mktemp -d) 8 | 9 | echo "Packaging Lambda function: $FUNCTION_NAME" 10 | echo "Root directory for zip: $ROOT_DIR" 11 | 12 | # Step 1: Clean up old zip file 13 | if [ -f "$ZIP_FILE" ]; then 14 | echo "Removing old package at $ZIP_FILE..." 15 | rm "$ZIP_FILE" 16 | fi 17 | 18 | # Step 2: Install dependencies in a temporary directory 19 | if [ -f "requirements.txt" ]; then 20 | echo "Installing dependencies from requirements.txt..." 21 | pip install -r "requirements.txt" -t "$TEMP_DIR" || { 22 | echo "Error installing dependencies for $FUNCTION_NAME." 23 | rm -rf "$TEMP_DIR" 24 | exit 1 25 | } 26 | else 27 | echo "No requirements.txt found. Skipping dependency installation." 28 | fi 29 | 30 | # Step 3: Copy Lambda function code into the temporary directory 31 | echo "Copying Lambda function code..." 32 | cp ./*.py "$TEMP_DIR/" || { 33 | echo "Error copying code for $FUNCTION_NAME." 34 | rm -rf "$TEMP_DIR" 35 | exit 1 36 | } 37 | 38 | # Step 4: Create the ZIP package in the root directory 39 | echo "Creating ZIP package..." 40 | cd "$TEMP_DIR" || { echo "Failed to change directory to $TEMP_DIR"; exit 1; } 41 | zip -r "$ZIP_FILE" ./* || { 42 | echo "Error creating ZIP package for $FUNCTION_NAME." 
43 | rm -rf "$TEMP_DIR" 44 | exit 1 45 | } 46 | cd - > /dev/null || exit 47 | 48 | # Step 5: Clean up the temporary directory 49 | rm -rf "$TEMP_DIR" 50 | 51 | echo "Package created at $ZIP_FILE." 52 | -------------------------------------------------------------------------------- /lambda/parser/package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the current directory name (function name) 4 | FUNCTION_NAME=$(basename "$PWD") 5 | ROOT_DIR=$(git rev-parse --show-toplevel 2>/dev/null || echo "$(cd ../ && pwd)") 6 | ZIP_FILE="${ROOT_DIR}/lambda_packages/${FUNCTION_NAME}.zip" 7 | TEMP_DIR=$(mktemp -d) 8 | 9 | echo "Packaging Lambda function: $FUNCTION_NAME" 10 | echo "Root directory for zip: $ROOT_DIR" 11 | 12 | # Step 1: Clean up old zip file 13 | if [ -f "$ZIP_FILE" ]; then 14 | echo "Removing old package at $ZIP_FILE..." 15 | rm "$ZIP_FILE" 16 | fi 17 | 18 | # Step 2: Install dependencies in a temporary directory 19 | if [ -f "requirements.txt" ]; then 20 | echo "Installing dependencies from requirements.txt..." 21 | pip install -r "requirements.txt" -t "$TEMP_DIR" || { 22 | echo "Error installing dependencies for $FUNCTION_NAME." 23 | rm -rf "$TEMP_DIR" 24 | exit 1 25 | } 26 | else 27 | echo "No requirements.txt found. Skipping dependency installation." 28 | fi 29 | 30 | # Step 3: Copy Lambda function code into the temporary directory 31 | echo "Copying Lambda function code..." 32 | cp ./*.py "$TEMP_DIR/" || { 33 | echo "Error copying code for $FUNCTION_NAME." 34 | rm -rf "$TEMP_DIR" 35 | exit 1 36 | } 37 | 38 | # Step 4: Create the ZIP package in the root directory 39 | echo "Creating ZIP package..." 40 | cd "$TEMP_DIR" || { echo "Failed to change directory to $TEMP_DIR"; exit 1; } 41 | zip -r "$ZIP_FILE" ./* || { 42 | echo "Error creating ZIP package for $FUNCTION_NAME." 43 | rm -rf "$TEMP_DIR" 44 | exit 1 45 | } 46 | cd - > /dev/null || exit 47 | 48 | # Step 5: Clean up the temporary directory 49 | rm -rf "$TEMP_DIR" 50 | 51 | echo "Package created at $ZIP_FILE." 52 | -------------------------------------------------------------------------------- /infra/QUICK_START.md: -------------------------------------------------------------------------------- 1 | # Quick Start - Multi-Environment Setup 2 | 3 | ## First Time Setup 4 | 5 | ### Step 1: Deploy Shared Infrastructure (one-time) 6 | ```bash 7 | ./deploy-shared.sh 8 | ``` 9 | 10 | This creates the shared SES infrastructure used by all environments: 11 | - SES receipt rule set 12 | - Shared email S3 bucket 13 | - SES email routing rules 14 | 15 | ### Step 2: Deploy Your First Environment 16 | ```bash 17 | ./deploy.sh 18 | ``` 19 | 20 | ## Daily Usage 21 | 22 | ### Deploy 23 | ```bash 24 | # Main (production) 25 | ./deploy.sh 26 | 27 | # Other environments 28 | ENVIRONMENT=preview ./deploy.sh 29 | ENVIRONMENT=dev ./deploy.sh 30 | ``` 31 | 32 | ### Destroy 33 | ```bash 34 | # Main (production) 35 | ./destroy.sh 36 | 37 | # Other environments 38 | ENVIRONMENT=preview ./destroy.sh 39 | ENVIRONMENT=dev ./destroy.sh 40 | ``` 41 | 42 | ## Environment Names 43 | - `main` - Production (default) 44 | - `preview` - Staging/Preview 45 | - `dev` - Development 46 | - Any branch name for feature deployments 47 | 48 | ## Architecture 49 | 50 | ### Two-Tier Infrastructure 51 | 1. 
**Shared Infrastructure** (`infra/shared/`) 52 | - SES receipt rule set (account-level, only one can be active) 53 | - Shared email S3 bucket (all environments store emails here) 54 | - State file: `s3://terraform-tregfd/terraform/shared/state.tfstate` 55 | 56 | 2. **Per-Environment Infrastructure** (`infra/`) 57 | - Lambda functions (unique per environment) 58 | - API Gateway endpoints (unique per environment) 59 | - IAM roles and policies (namespaced per environment) 60 | - State files: `s3://terraform-tregfd/terraform/${ENVIRONMENT}/state.tfstate` 61 | 62 | ### What Changed? 63 | - ✅ SES resources are now shared (avoiding conflicts) 64 | - ✅ Email storage uses one shared bucket with prefix-based organization 65 | - ✅ Each environment has isolated Lambda functions and API Gateway 66 | - ✅ Separate state files for shared vs per-environment resources 67 | 68 | ## Need More Info? 69 | See [ENVIRONMENTS.md](../ENVIRONMENTS.md) for complete documentation. 70 | 71 | -------------------------------------------------------------------------------- /infra/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "AWS region where resources will be deployed" 3 | default = "us-east-1" 4 | type = string 5 | } 6 | 7 | variable "aws_account_id" { 8 | description = "The AWS account ID for the target environment (required, no default)" 9 | type = string 10 | } 11 | 12 | 13 | variable "verify_lambda_file_path" { 14 | description = "The path to the DNS lambda file" 15 | default = "../lambda_packages/check.zip" 16 | type = string 17 | } 18 | 19 | variable "parser_lambda_file_path" { 20 | description = "The path to the parser lambda file" 21 | default = "../lambda_packages/parser.zip" 22 | type = string 23 | } 24 | 25 | # Note: email_bucket_name is now managed in infra/shared/ as a shared resource 26 | 27 | variable "attachments_bucket_name" { 28 | description = "The name of the S3 bucket for email attachments" 29 | default = "email-to-webhook-attachments" 30 | type = string 31 | } 32 | 33 | variable "mongodb_uri" { 34 | description = "The MongoDB connection URI for email and domain storage" 35 | default = "" 36 | type = string 37 | sensitive = true 38 | } 39 | 40 | variable "environment" { 41 | description = "Environment name (main, preview, dev, etc.) 
- each environment deploys to its own AWS account" 42 | default = "main" 43 | type = string 44 | } 45 | 46 | variable "state_bucket_name" { 47 | description = "S3 bucket name for Terraform state storage in the target AWS account" 48 | type = string 49 | default = "" 50 | } 51 | 52 | variable "gemini_api_key" { 53 | description = "API Key for Google Gemini" 54 | type = string 55 | sensitive = true 56 | default = "" 57 | } 58 | 59 | variable "gemini_model" { 60 | description = "Model name for Google Gemini" 61 | type = string 62 | default = "gemini-3-pro-preview" 63 | } 64 | 65 | variable "daytona_api_key" { 66 | description = "API Key for Daytona" 67 | type = string 68 | sensitive = true 69 | default = "" 70 | } 71 | -------------------------------------------------------------------------------- /.github/workflows/destroy-shared.yml: -------------------------------------------------------------------------------- 1 | name: Destroy Shared Infrastructure 2 | 3 | # Manual trigger only - DANGEROUS: destroys SES infrastructure used by ALL environments 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | confirm: 8 | description: 'Type "destroy-all" to confirm destruction of shared infrastructure' 9 | required: true 10 | default: '' 11 | 12 | jobs: 13 | destroy-shared: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Verify confirmation 18 | run: | 19 | if [ "${{ github.event.inputs.confirm }}" != "destroy-all" ]; then 20 | echo "❌ Confirmation failed. You must type 'destroy-all' to proceed." 21 | exit 1 22 | fi 23 | 24 | - name: Checkout code 25 | uses: actions/checkout@v3 26 | 27 | - name: Set up AWS credentials 28 | uses: aws-actions/configure-aws-credentials@v2 29 | with: 30 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 31 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 32 | aws-region: "us-east-1" 33 | 34 | - name: Set up Terraform 35 | uses: hashicorp/setup-terraform@v2 36 | 37 | - name: Terraform Init 38 | run: | 39 | cd infra/shared 40 | terraform init 41 | 42 | - name: Deactivate SES Receipt Rule Set 43 | run: | 44 | aws ses set-active-receipt-rule-set --region us-east-1 || echo "No active rule set" 45 | 46 | - name: Empty Shared Email Bucket 47 | run: | 48 | cd infra/shared 49 | BUCKET_NAME=$(terraform output -raw shared_email_bucket_name 2>/dev/null || echo "email-to-webhook-emails-shared") 50 | echo "Emptying bucket: $BUCKET_NAME" 51 | aws s3 rm "s3://${BUCKET_NAME}" --recursive || echo "Bucket doesn't exist or already empty" 52 | 53 | - name: Terraform Destroy 54 | run: | 55 | cd infra/shared 56 | terraform destroy -auto-approve 57 | 58 | - name: Completion Message 59 | run: | 60 | echo "✅ Shared infrastructure destroyed." 61 | echo "⚠️ All SES email routing has been removed." 
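# Usage note: this workflow must be triggered manually and requires the confirmation input;
# assuming the GitHub CLI is installed and authenticated, one way to start it is:
#   gh workflow run destroy-shared.yml -f confirm=destroy-all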
62 | 63 | -------------------------------------------------------------------------------- /.github/workflows/destroy.yml: -------------------------------------------------------------------------------- 1 | name: Destroy Infrastructure 2 | 3 | # Workflow for manually destroying all infrastructure resources 4 | on: 5 | workflow_dispatch: 6 | 7 | jobs: 8 | destroy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout code 12 | uses: actions/checkout@v3 13 | 14 | - name: Set up AWS credentials 15 | uses: aws-actions/configure-aws-credentials@v2 16 | with: 17 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 18 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 19 | aws-region: "us-east-1" 20 | 21 | - name: Set up Terraform 22 | uses: hashicorp/setup-terraform@v2 23 | 24 | - name: Extract branch name 25 | shell: bash 26 | run: echo "BRANCH_NAME=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV 27 | 28 | - name: Package Check Lambda function 29 | run: | 30 | mkdir -p lambda_packages 31 | cd lambda/check && ./package.sh 32 | 33 | - name: Package Parser Lambda function 34 | run: | 35 | cd lambda/parser && ./package.sh 36 | 37 | - name: Terraform Init 38 | run: | 39 | cd infra 40 | terraform init -reconfigure \ 41 | -backend-config="key=terraform/${BRANCH_NAME}/state.tfstate" 42 | 43 | - name: Empty S3 Buckets 44 | run: | 45 | # Get bucket names with environment suffix 46 | ENVIRONMENT=${BRANCH_NAME} 47 | aws s3 rb s3://email-to-webhook-kv-database-${ENVIRONMENT} --force || echo "Bucket doesn't exist or already empty" 48 | aws s3 rb s3://email-to-webhook-emails-${ENVIRONMENT} --force || echo "Bucket doesn't exist or already empty" 49 | aws s3 rb s3://email-to-webhook-attachments-${ENVIRONMENT} --force || echo "Bucket doesn't exist or already empty" 50 | 51 | - name: Terraform Destroy 52 | run: | 53 | cd infra 54 | terraform destroy -var="aws_account_id=${{ secrets.AWS_ACCOUNT_ID }}" -var="mongodb_uri=${{ secrets.MONGODB_URI }}" -var="environment=${BRANCH_NAME}" --auto-approve 55 | 56 | - name: Clean up Lambda packages 57 | run: | 58 | rm -rf lambda_packages 59 | -------------------------------------------------------------------------------- /infra/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.92.0" 6 | hashes = [ 7 | "h1:Hm5w8euRSm6tZyc60+nVPQheCikB7P0NhFI/dSFK0IM=", 8 | "zh:1d3a0b40831360e8e988aee74a9ff3d69d95cb541c2eae5cb843c64303a091ba", 9 | "zh:3d29cbced6c708be2041a708d25c7c0fc22d09e4d0b174360ed113bfae786137", 10 | "zh:4341a203cf5820a0ca18bb514ae10a6c113bc6a728fb432acbf817d232e8eff4", 11 | "zh:4a49e2d91e4d92b6b93ccbcbdcfa2d67935ce62e33b939656766bb81b3fd9a2c", 12 | "zh:54c7189358b37fd895dedbabf84e509c1980a8c404a1ee5b29b06e40497b8655", 13 | "zh:5d8bb1ff089c37cb65c83b4647f1981fded993e87d8132915d92d79f29e2fcd8", 14 | "zh:618f2eb87cd65b245aefba03991ad714a51ff3b841016ef68e2da2b85d0b2325", 15 | "zh:7bce07bc542d0588ca42bac5098dd4f8af715417cd30166b4fb97cedd44ab109", 16 | "zh:81419eab2d8810beb114b1ff5cbb592d21edc21b809dc12bb066e4b88fdd184a", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:9dea39d4748eeeebe2e76ca59bca4ccd161c2687050878c47289a98407a23372", 19 | "zh:d692fc33b67ac89e916c8f9233d39eacab8c438fe10172990ee9d94fba5ca372", 20 | "zh:d9075c7da48947c029ba47d5985e1e8e3bf92367bfee8ca1ff0e747765e779a1", 21 | "zh:e81c62db317f3b640b2e04eba0ada8aa606bcbae0152c09f6242e86b86ef5889", 22 | "zh:f68562e073722c378d2f3529eb80ad463f12c44aa5523d558ae3b69f4de5ca1f", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/null" { 27 | version = "3.2.3" 28 | hashes = [ 29 | "h1:I0Um8UkrMUb81Fxq/dxbr3HLP2cecTH2WMJiwKSrwQY=", 30 | "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", 31 | "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", 32 | "zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", 33 | "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", 34 | "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", 35 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 36 | "zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", 37 | "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", 38 | "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", 39 | "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", 40 | "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", 41 | "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
162 | #.idea/ 163 | 164 | # Ignore Terraform working directory 165 | .terraform/ 166 | 167 | # Ignore Terraform state files (very important for security) 168 | terraform.tfstate 169 | terraform.tfstate.backup 170 | 171 | # Ignore crash logs 172 | crash.log 173 | 174 | # Ignore Terraform plan output 175 | *.tfplan 176 | # .tfstate files 177 | *.tfstate 178 | *.tfstate.* 179 | # Ignore provider-specific override files 180 | override.tf 181 | override.tf.json 182 | *_override.tf 183 | *_override.tf.json 184 | 185 | # Ignore variable files with secrets 186 | *.auto.tfvars 187 | *.auto.tfvars.json 188 | terraform.tfvars 189 | terraform.tfvars.json 190 | 191 | # Optional: If you do NOT want to commit .terraform.lock.hcl, uncomment the line below 192 | # .terraform.lock.hcl 193 | 194 | 195 | *.zip 196 | 197 | 198 | # .idea 199 | .idea/ -------------------------------------------------------------------------------- /destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # MIT License 3 | # Copyright (c) 2023 [Your Name or Organization] 4 | # See LICENSE file for details 5 | 6 | set -e 7 | 8 | # Multi-Account Destroy Script 9 | # Each environment exists in its own AWS account 10 | 11 | # Get environment name from ENV variable or default to "main" 12 | ENVIRONMENT=${ENVIRONMENT:-main} 13 | echo "🌍 Destroying environment: $ENVIRONMENT" 14 | echo "🧹 Starting cleanup process..." 15 | echo "" 16 | 17 | # AWS Profile validation (required for multi-account setup) 18 | if [ -z "$AWS_PROFILE" ]; then 19 | echo "" 20 | echo "❌ ERROR: AWS_PROFILE is not set!" 21 | echo "" 22 | echo "In multi-account setup, you must specify which AWS account to destroy from." 23 | echo "Set the AWS_PROFILE environment variable to target the correct account." 24 | echo "" 25 | echo "Examples:" 26 | echo " AWS_PROFILE=main ENVIRONMENT=main ./destroy.sh" 27 | echo " AWS_PROFILE=preview ENVIRONMENT=preview ./destroy.sh" 28 | echo " AWS_PROFILE=dev ENVIRONMENT=dev ./destroy.sh" 29 | echo "" 30 | exit 1 31 | fi 32 | 33 | echo "📋 Using AWS Profile: $AWS_PROFILE" 34 | 35 | # Check if AWS CLI is installed 36 | if ! command -v aws &> /dev/null; then 37 | echo "❌ AWS CLI is not installed. Please install it before running this script." 38 | exit 1 39 | fi 40 | 41 | # Verify AWS credentials 42 | echo "🔐 Verifying AWS credentials for profile: $AWS_PROFILE..." 43 | if ! aws sts get-caller-identity --profile "$AWS_PROFILE" &> /dev/null; then 44 | echo "" 45 | echo "❌ ERROR: Failed to authenticate with AWS using profile: $AWS_PROFILE" 46 | echo "" 47 | exit 1 48 | fi 49 | 50 | # Get AWS account ID and region 51 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --profile "$AWS_PROFILE" --query Account --output text) 52 | AWS_REGION=$(aws configure get region --profile "$AWS_PROFILE" || echo "us-east-1") 53 | 54 | echo "✅ Authenticated successfully" 55 | echo " Account ID: $AWS_ACCOUNT_ID" 56 | echo " Region: $AWS_REGION" 57 | echo "" 58 | 59 | # State bucket configuration 60 | STATE_BUCKET="terraform-state-${ENVIRONMENT}-${AWS_ACCOUNT_ID}" 61 | echo "📦 Terraform state bucket: $STATE_BUCKET" 62 | echo "" 63 | 64 | # Check if Terraform is installed 65 | if ! command -v terraform &> /dev/null; then 66 | echo "❌ Terraform is not installed. Please install it before running this script." 67 | exit 1 68 | fi 69 | 70 | # Create placeholder Lambda packages for destroy 71 | echo "📦 Creating placeholder Lambda packages for destroy operation..." 72 | mkdir -p lambda_packages 73 | 74 | if [ ! 
-f "lambda_packages/check.zip" ]; then 75 | echo " Creating placeholder check.zip..." 76 | touch dummy_file 77 | zip -q lambda_packages/check.zip dummy_file 78 | rm dummy_file 79 | fi 80 | 81 | if [ ! -f "lambda_packages/parser.zip" ]; then 82 | echo " Creating placeholder parser.zip..." 83 | touch dummy_file 84 | zip -q lambda_packages/parser.zip dummy_file 85 | rm dummy_file 86 | fi 87 | 88 | echo "" 89 | 90 | # Change to the infrastructure directory 91 | cd infra 92 | 93 | # Initialize Terraform with account-specific backend 94 | echo "🔧 Initializing Terraform..." 95 | terraform init -reconfigure \ 96 | -backend-config="bucket=$STATE_BUCKET" \ 97 | -backend-config="key=terraform.tfstate" \ 98 | -backend-config="region=$AWS_REGION" 99 | 100 | echo "" 101 | 102 | # Get bucket names from Terraform configuration 103 | PARSER_BUCKET="email-to-webhook-kv-database-${ENVIRONMENT}-${AWS_ACCOUNT_ID}" 104 | ATTACHMENTS_BUCKET="email-to-webhook-attachments-${ENVIRONMENT}-${AWS_ACCOUNT_ID}" 105 | EMAIL_BUCKET="email-to-webhook-emails-${ENVIRONMENT}-${AWS_ACCOUNT_ID}" 106 | 107 | # Function to empty an S3 bucket safely 108 | empty_bucket() { 109 | local bucket_name=$1 110 | if [[ -z "$bucket_name" ]]; then 111 | echo "⚠️ Skipping bucket cleanup - no bucket name provided." 112 | return 113 | fi 114 | 115 | echo "🗑️ Emptying bucket: $bucket_name" 116 | if aws s3 ls "s3://$bucket_name" --profile "$AWS_PROFILE" 2>/dev/null; then 117 | aws s3 rm "s3://$bucket_name" --recursive --profile "$AWS_PROFILE" || echo "⚠️ Warning: Failed to empty bucket $bucket_name" 118 | else 119 | echo " Bucket does not exist or already emptied." 120 | fi 121 | } 122 | 123 | # Empty buckets before destroying 124 | echo "🗑️ Emptying S3 buckets..." 125 | empty_bucket "$PARSER_BUCKET" 126 | empty_bucket "$ATTACHMENTS_BUCKET" 127 | empty_bucket "$EMAIL_BUCKET" 128 | 129 | echo "" 130 | echo "💥 Running terraform destroy for ${ENVIRONMENT}..." 131 | echo "" 132 | 133 | # Run terraform destroy 134 | terraform destroy -auto-approve \ 135 | -var="environment=$ENVIRONMENT" \ 136 | -var="aws_account_id=$AWS_ACCOUNT_ID" \ 137 | -var="aws_region=$AWS_REGION" \ 138 | -var="state_bucket_name=$STATE_BUCKET" 139 | 140 | echo "" 141 | echo "🧹 Cleaning up placeholder files..." 142 | cd .. 143 | rm -rf lambda_packages 144 | 145 | echo "" 146 | echo "✅ Cleanup complete! All resources have been destroyed." 147 | echo "🎉 Environment '$ENVIRONMENT' has been removed from AWS account $AWS_ACCOUNT_ID" 148 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Email to Webhook Service 2 | 3 | Transforms emails into webhook notifications with attachment handling via AWS. 4 | 5 | ## Cloud Version 6 | 7 | A hosted version of this service is available at [emailtowebhook.com](https://emailtowebhook.com/dashboard). 
8 | 9 | ## Features 10 | 11 | - Domain registration with webhook endpoints 12 | - Email forwarding to webhooks 13 | - S3 attachment storage 14 | - Automated DNS verification 15 | - Serverless architecture 16 | 17 | ## Deployment 18 | 19 | ### Multi-Account Architecture 20 | 21 | This project uses a **multi-account architecture** where each environment deploys to its own isolated AWS account: 22 | 23 | - **main**: Production environment (dedicated AWS account) 24 | - **preview**: Staging environment (dedicated AWS account) 25 | - **dev**: Development environment (dedicated AWS account) 26 | 27 | **Benefits:** 28 | - Complete resource isolation between environments 29 | - Enhanced security with account-level boundaries 30 | - Independent cost tracking per environment 31 | - No shared infrastructure dependencies 32 | 33 | **Prerequisites:** 34 | 1. Three separate AWS accounts (or one account for testing) 35 | 2. AWS CLI configured with profiles for each account 36 | 3. Terraform installed 37 | 4. S3 bucket for Terraform state in each account 38 | 39 | **Quick Start:** 40 | 41 | ```bash 42 | # Deploy to main environment 43 | AWS_PROFILE=main ENVIRONMENT=main ./deploy.sh 44 | 45 | # Deploy to preview environment 46 | AWS_PROFILE=preview ENVIRONMENT=preview ./deploy.sh 47 | 48 | # Deploy to dev environment 49 | AWS_PROFILE=dev ENVIRONMENT=dev ./deploy.sh 50 | ``` 51 | 52 | 📖 **See [ENVIRONMENTS.md](ENVIRONMENTS.md)** for complete setup guide including: 53 | - AWS account creation 54 | - AWS CLI profile configuration 55 | - Terraform state bucket setup 56 | - IAM permissions required 57 | - GitHub Actions configuration 58 | 59 | ### GitHub Actions 60 | 61 | 1. Fork/clone this repository 62 | 2. Set repository secrets for each environment: 63 | 64 | **Main Environment (Production):** 65 | - `AWS_ACCESS_KEY_ID_MAIN`: AWS access key for main account 66 | - `AWS_SECRET_ACCESS_KEY_MAIN`: AWS secret key for main account 67 | - `AWS_ACCOUNT_ID_MAIN`: Main AWS account ID 68 | - `AWS_REGION_MAIN`: AWS region (e.g., `us-east-1`) 69 | 70 | **Preview Environment (Staging):** 71 | - `AWS_ACCESS_KEY_ID_PREVIEW`: AWS access key for preview account 72 | - `AWS_SECRET_ACCESS_KEY_PREVIEW`: AWS secret key for preview account 73 | - `AWS_ACCOUNT_ID_PREVIEW`: Preview AWS account ID 74 | - `AWS_REGION_PREVIEW`: AWS region (e.g., `us-east-1`) 75 | 76 | **Dev Environment:** 77 | - `AWS_ACCESS_KEY_ID_DEV`: AWS access key for dev account 78 | - `AWS_SECRET_ACCESS_KEY_DEV`: AWS secret key for dev account 79 | - `AWS_ACCOUNT_ID_DEV`: Dev AWS account ID 80 | - `AWS_REGION_DEV`: AWS region (e.g., `us-east-1`) 81 | 82 | **Shared Secrets:** 83 | - `MONGODB_URI`: (optional) MongoDB connection string if using external database 84 | 85 | Deployment runs automatically on pushes to `main`, `preview`, or `dev` branches. Each branch deploys to its dedicated AWS account. 
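The repository also ships a `setup-github-secrets.sh` helper that creates these secrets interactively through the GitHub CLI. A minimal sketch of using it, or of setting a single secret by hand, assuming `gh` is installed and authenticated against your fork:

```bash
# Interactive helper (run from the repository root)
./setup-github-secrets.sh

# Or set an individual secret manually; gh prompts for the value
gh secret set AWS_ACCESS_KEY_ID_MAIN
```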
86 | 87 | ## Using the API 88 | 89 | After successful deployment, you will see the API Gateway URL: 90 | ![API Gateway URL Example](https://res.cloudinary.com/dhwxfvlrn/image/upload/f_auto,q_auto/9fd400e4-af82-4f0f-b0eb-ac9036dcede3.png) 91 | 92 | ### Register Domain 93 | 94 | ``` 95 | curl -X POST '/v1/domain/yourdomain.com' -H 'Content-Type: application/json' -d '{"webhook": "https://your-webhook-endpoint.com/path"}' 96 | ``` 97 | 98 | ### Get Domain Status 99 | 100 | ``` 101 | curl -X GET '/v1/domain/yourdomain.com' 102 | ``` 103 | 104 | ### Update Domain 105 | 106 | ``` 107 | curl -X PUT '/v1/domain/yourdomain.com' -H 'Content-Type: application/json' -d '{"webhook": "https://your-new-webhook-endpoint.com/path"}' 108 | ``` 109 | 110 | ### Delete Domain 111 | 112 | ``` 113 | curl -X DELETE '/v1/domain/yourdomain.com' 114 | ``` 115 | 116 | Once verified, emails to `anything@yourdomain.com` will be sent to your webhook as JSON with S3 attachment links. 117 | 118 | ## Contributing and Support 119 | 120 | ### How to Contribute 121 | 122 | 1. Fork the repository 123 | 2. Create a new branch (`git checkout -b feature/your-feature`) 124 | 3. Make your changes 125 | 4. Commit your changes (`git commit -m 'Add some feature'`) 126 | 5. Push to the branch (`git push origin feature/your-feature`) 127 | 6. Open a Pull Request 128 | 129 | ### Getting Support 130 | 131 | If you encounter issues or have questions: 132 | 133 | 1. Check existing [GitHub Issues](https://github.com/emailtowebhook/emailtowebhook/issues) first 134 | 2. Open a new Issue with: 135 | - Clear description of the problem 136 | - Steps to reproduce 137 | - Expected vs actual behavior 138 | - System information (AWS region, etc.) 139 | 140 | For security concerns, please report them directly to maintainers rather than opening public issues. 141 | 142 | ## Connect 143 | 144 | - **LinkedIn**: [Yakir Perlin](https://www.linkedin.com/in/yakirperlin/) 145 | - **Twitter**: [@yakirbipbip](https://x.com/yakirbipbip) 146 | 147 | Licensed under MIT. 148 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # MIT License 3 | # Copyright (c) 2023 [Your Name or Organization] 4 | # See LICENSE file for details 5 | 6 | set -e 7 | 8 | # Multi-Account Deployment Script 9 | # Each environment deploys to its own AWS account 10 | 11 | # Get environment name from ENV variable or default to "main" 12 | ENVIRONMENT=${ENVIRONMENT:-main} 13 | echo "🌍 Deploying to environment: $ENVIRONMENT" 14 | 15 | # AWS Profile validation (required for multi-account setup) 16 | if [ -z "$AWS_PROFILE" ]; then 17 | echo "" 18 | echo "❌ ERROR: AWS_PROFILE is not set!" 19 | echo "" 20 | echo "In multi-account setup, you must specify which AWS account to deploy to." 21 | echo "Set the AWS_PROFILE environment variable to target the correct account." 22 | echo "" 23 | echo "Examples:" 24 | echo " AWS_PROFILE=main ENVIRONMENT=main ./deploy.sh" 25 | echo " AWS_PROFILE=preview ENVIRONMENT=preview ./deploy.sh" 26 | echo " AWS_PROFILE=dev ENVIRONMENT=dev ./deploy.sh" 27 | echo "" 28 | echo "To configure AWS profiles, see: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html" 29 | exit 1 30 | fi 31 | 32 | echo "📋 Using AWS Profile: $AWS_PROFILE" 33 | 34 | # Check if AWS CLI is installed 35 | if ! command -v aws &> /dev/null; then 36 | echo "❌ AWS CLI is not installed. Please install it before running this script." 
37 | echo "Installation instructions: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" 38 | exit 1 39 | fi 40 | 41 | echo "✅ AWS CLI is installed." 42 | 43 | # Verify AWS credentials are valid for the selected profile 44 | echo "🔐 Verifying AWS credentials for profile: $AWS_PROFILE..." 45 | if ! aws sts get-caller-identity --profile "$AWS_PROFILE" &> /dev/null; then 46 | echo "" 47 | echo "❌ ERROR: Failed to authenticate with AWS using profile: $AWS_PROFILE" 48 | echo "" 49 | echo "Please ensure:" 50 | echo " 1. The profile exists in ~/.aws/credentials or ~/.aws/config" 51 | echo " 2. The credentials are valid and not expired" 52 | echo " 3. You have network connectivity to AWS" 53 | echo "" 54 | exit 1 55 | fi 56 | 57 | # Get AWS account ID and region from the profile 58 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --profile "$AWS_PROFILE" --query Account --output text) 59 | AWS_REGION=$(aws configure get region --profile "$AWS_PROFILE" || echo "us-east-1") 60 | 61 | echo "✅ Authenticated successfully" 62 | echo " Account ID: $AWS_ACCOUNT_ID" 63 | echo " Region: $AWS_REGION" 64 | echo "" 65 | 66 | # State bucket configuration 67 | # Best practice: Each AWS account has its own state bucket 68 | STATE_BUCKET="terraform-state-${ENVIRONMENT}-${AWS_ACCOUNT_ID}" 69 | echo "📦 Terraform state bucket: $STATE_BUCKET" 70 | echo "" 71 | 72 | # Check if Terraform is installed 73 | if ! command -v terraform &> /dev/null; then 74 | echo "❌ Terraform is not installed. Please install it before running this script." 75 | echo "Installation instructions: https://learn.hashicorp.com/tutorials/terraform/install-cli" 76 | exit 1 77 | fi 78 | 79 | # Verify Terraform version 80 | TERRAFORM_VERSION=$(terraform version -json 2>/dev/null | jq -r '.terraform_version' 2>/dev/null || echo "unknown") 81 | echo "✅ Terraform version $TERRAFORM_VERSION is installed." 82 | 83 | # Check if jq is installed 84 | if ! command -v jq &> /dev/null; then 85 | echo "⚠️ Warning: jq is not installed. This script uses jq for parsing JSON." 86 | echo " The script will continue, but for full functionality, please install jq." 87 | fi 88 | 89 | echo "" 90 | echo "📦 Packaging Lambda functions..." 91 | 92 | mkdir -p lambda_packages 93 | 94 | echo " 📦 Packaging Check Lambda..." 95 | (cd lambda/check && ./package.sh) || { 96 | echo "❌ Check Lambda packaging failed." 97 | exit 1 98 | } 99 | 100 | echo " 📦 Packaging Parser Lambda..." 101 | (cd lambda/parser && ./package.sh) || { 102 | echo "❌ Parser Lambda packaging failed." 103 | exit 1 104 | } 105 | 106 | echo "✅ Packaging complete." 107 | echo "" 108 | 109 | # Change directory to the infra folder 110 | cd infra 111 | 112 | # Initialize Terraform with account-specific backend 113 | echo "🔧 Initializing Terraform..." 114 | echo " Backend bucket: $STATE_BUCKET" 115 | echo " Backend key: terraform.tfstate" 116 | echo " Backend region: $AWS_REGION" 117 | echo "" 118 | 119 | terraform init -reconfigure \ 120 | -backend-config="bucket=$STATE_BUCKET" \ 121 | -backend-config="key=terraform.tfstate" \ 122 | -backend-config="region=$AWS_REGION" 123 | 124 | echo "" 125 | echo "🚀 Deploying infrastructure to $ENVIRONMENT environment..." 
126 | echo "" 127 | 128 | # Apply Terraform configuration with environment-specific variables 129 | terraform apply -auto-approve \ 130 | -var="environment=$ENVIRONMENT" \ 131 | -var="aws_account_id=$AWS_ACCOUNT_ID" \ 132 | -var="aws_region=$AWS_REGION" \ 133 | -var="state_bucket_name=$STATE_BUCKET" 134 | 135 | echo "" 136 | echo "✅ Deployment complete!" 137 | echo "" 138 | 139 | # Clean up zip files after deployment 140 | echo "🧹 Cleaning up Lambda function zip files..." 141 | cd .. 142 | rm -rf lambda_packages 143 | 144 | echo "✅ Cleanup complete." 145 | echo "" 146 | echo "🎉 Environment '$ENVIRONMENT' is now deployed to AWS account $AWS_ACCOUNT_ID" 147 | -------------------------------------------------------------------------------- /destroy-old-infrastructure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Migration Script: Destroy Old Infrastructure 3 | # This script destroys infrastructure from the OLD shared state file location 4 | # Use this ONCE to clean up before deploying with the new multi-account setup 5 | 6 | set -e 7 | 8 | echo "🔄 Migration Cleanup Script" 9 | echo "This will destroy infrastructure from the OLD state file location" 10 | echo "" 11 | 12 | # Get environment name from ENV variable or default to "main" 13 | ENVIRONMENT=${ENVIRONMENT:-main} 14 | echo "🌍 Destroying old environment: $ENVIRONMENT" 15 | echo "🧹 Starting cleanup process..." 16 | echo "" 17 | 18 | # AWS Profile validation 19 | if [ -z "$AWS_PROFILE" ]; then 20 | echo "" 21 | echo "❌ ERROR: AWS_PROFILE is not set!" 22 | echo "" 23 | echo "Examples:" 24 | echo " AWS_PROFILE=main ENVIRONMENT=main ./destroy-old-infrastructure.sh" 25 | echo " AWS_PROFILE=preview ENVIRONMENT=preview ./destroy-old-infrastructure.sh" 26 | echo " AWS_PROFILE=dev ENVIRONMENT=dev ./destroy-old-infrastructure.sh" 27 | echo "" 28 | exit 1 29 | fi 30 | 31 | echo "📋 Using AWS Profile: $AWS_PROFILE" 32 | 33 | # Check if AWS CLI is installed 34 | if ! command -v aws &> /dev/null; then 35 | echo "❌ AWS CLI is not installed." 36 | exit 1 37 | fi 38 | 39 | # Verify AWS credentials 40 | echo "🔐 Verifying AWS credentials for profile: $AWS_PROFILE..." 41 | if ! aws sts get-caller-identity --profile "$AWS_PROFILE" &> /dev/null; then 42 | echo "" 43 | echo "❌ ERROR: Failed to authenticate with AWS using profile: $AWS_PROFILE" 44 | echo "" 45 | exit 1 46 | fi 47 | 48 | # Get AWS account ID and region 49 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --profile "$AWS_PROFILE" --query Account --output text) 50 | AWS_REGION=$(aws configure get region --profile "$AWS_PROFILE" || echo "us-east-1") 51 | 52 | echo "✅ Authenticated successfully" 53 | echo " Account ID: $AWS_ACCOUNT_ID" 54 | echo " Region: $AWS_REGION" 55 | echo "" 56 | 57 | # OLD state bucket configuration (before migration) 58 | OLD_STATE_BUCKET="terraform-tregfd" 59 | OLD_STATE_KEY="terraform/${ENVIRONMENT}/state.tfstate" 60 | echo "📦 OLD Terraform state location:" 61 | echo " Bucket: $OLD_STATE_BUCKET" 62 | echo " Key: $OLD_STATE_KEY" 63 | echo "" 64 | 65 | # Check if Terraform is installed 66 | if ! command -v terraform &> /dev/null; then 67 | echo "❌ Terraform is not installed." 68 | exit 1 69 | fi 70 | 71 | # Create placeholder Lambda packages for destroy 72 | echo "📦 Creating placeholder Lambda packages for destroy operation..." 73 | mkdir -p lambda_packages 74 | 75 | if [ ! -f "lambda_packages/check.zip" ]; then 76 | echo " Creating placeholder check.zip..." 
77 | touch dummy_file 78 | zip -q lambda_packages/check.zip dummy_file 79 | rm dummy_file 80 | fi 81 | 82 | if [ ! -f "lambda_packages/parser.zip" ]; then 83 | echo " Creating placeholder parser.zip..." 84 | touch dummy_file 85 | zip -q lambda_packages/parser.zip dummy_file 86 | rm dummy_file 87 | fi 88 | 89 | echo "" 90 | 91 | # Change to the infrastructure directory 92 | cd infra 93 | 94 | # Initialize Terraform with OLD state file location 95 | echo "🔧 Initializing Terraform with OLD state file location..." 96 | terraform init -reconfigure \ 97 | -backend-config="bucket=$OLD_STATE_BUCKET" \ 98 | -backend-config="key=$OLD_STATE_KEY" \ 99 | -backend-config="region=us-east-1" 100 | 101 | echo "" 102 | 103 | # Get bucket names from Terraform configuration (OLD naming without account ID) 104 | PARSER_BUCKET="email-to-webhook-kv-database-${ENVIRONMENT}" 105 | ATTACHMENTS_BUCKET="email-to-webhook-attachments-${ENVIRONMENT}" 106 | EMAIL_BUCKET="email-to-webhook-emails-${ENVIRONMENT}" 107 | 108 | # Function to empty an S3 bucket safely 109 | empty_bucket() { 110 | local bucket_name=$1 111 | if [[ -z "$bucket_name" ]]; then 112 | echo "⚠️ Skipping bucket cleanup - no bucket name provided." 113 | return 114 | fi 115 | 116 | echo "🗑️ Emptying bucket: $bucket_name" 117 | if aws s3 ls "s3://$bucket_name" --profile "$AWS_PROFILE" 2>/dev/null; then 118 | aws s3 rm "s3://$bucket_name" --recursive --profile "$AWS_PROFILE" || echo "⚠️ Warning: Failed to empty bucket $bucket_name" 119 | else 120 | echo " Bucket does not exist or already emptied." 121 | fi 122 | } 123 | 124 | # Empty buckets before destroying 125 | echo "🗑️ Emptying S3 buckets..." 126 | empty_bucket "$PARSER_BUCKET" 127 | empty_bucket "$ATTACHMENTS_BUCKET" 128 | empty_bucket "$EMAIL_BUCKET" 129 | 130 | echo "" 131 | echo "💥 Running terraform destroy for ${ENVIRONMENT}..." 132 | echo "" 133 | 134 | # Run terraform destroy 135 | terraform destroy -auto-approve \ 136 | -var="environment=$ENVIRONMENT" \ 137 | -var="aws_account_id=$AWS_ACCOUNT_ID" \ 138 | -var="aws_region=$AWS_REGION" 139 | 140 | echo "" 141 | echo "🧹 Cleaning up placeholder files..." 142 | cd .. 143 | rm -rf lambda_packages 144 | 145 | echo "" 146 | echo "✅ Old infrastructure cleanup complete!" 147 | echo "" 148 | echo "📌 Next Steps:" 149 | echo " 1. Create the new state bucket:" 150 | echo " aws s3 mb s3://terraform-state-${ENVIRONMENT}-${AWS_ACCOUNT_ID} --region $AWS_REGION --profile $AWS_PROFILE" 151 | echo " aws s3api put-bucket-versioning --bucket terraform-state-${ENVIRONMENT}-${AWS_ACCOUNT_ID} \\" 152 | echo " --versioning-configuration Status=Enabled --profile $AWS_PROFILE" 153 | echo "" 154 | echo " 2. Deploy with new multi-account setup:" 155 | echo " AWS_PROFILE=$AWS_PROFILE ENVIRONMENT=$ENVIRONMENT ./deploy.sh" 156 | echo "" 157 | 158 | -------------------------------------------------------------------------------- /setup-github-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to set up GitHub secrets for multi-account deployment 3 | # Requires GitHub CLI (gh) to be installed and authenticated 4 | 5 | set -e 6 | 7 | echo "🔐 GitHub Secrets Setup Script" 8 | echo "================================" 9 | echo "" 10 | 11 | # Check if GitHub CLI is installed 12 | if ! command -v gh &> /dev/null; then 13 | echo "❌ GitHub CLI (gh) is not installed." 
14 | echo "" 15 | echo "Install it with:" 16 | echo " macOS: brew install gh" 17 | echo " Linux: See https://github.com/cli/cli/blob/trunk/docs/install_linux.md" 18 | echo " Windows: See https://github.com/cli/cli#windows" 19 | echo "" 20 | exit 1 21 | fi 22 | 23 | # Check if authenticated 24 | if ! gh auth status &> /dev/null; then 25 | echo "❌ Not authenticated with GitHub CLI." 26 | echo "" 27 | echo "Run: gh auth login" 28 | echo "" 29 | exit 1 30 | fi 31 | 32 | echo "✅ GitHub CLI is installed and authenticated" 33 | echo "" 34 | 35 | # Prompt for AWS credentials 36 | echo "📋 Enter AWS credentials for MAIN environment:" 37 | read -p "AWS Access Key ID (MAIN): " AWS_ACCESS_KEY_ID_MAIN 38 | read -sp "AWS Secret Access Key (MAIN): " AWS_SECRET_ACCESS_KEY_MAIN 39 | echo "" 40 | read -p "AWS Account ID (MAIN) [302835751737]: " AWS_ACCOUNT_ID_MAIN 41 | AWS_ACCOUNT_ID_MAIN=${AWS_ACCOUNT_ID_MAIN:-302835751737} 42 | read -p "AWS Region (MAIN) [us-east-1]: " AWS_REGION_MAIN 43 | AWS_REGION_MAIN=${AWS_REGION_MAIN:-us-east-1} 44 | echo "" 45 | 46 | # Ask if using same credentials for all environments 47 | read -p "Use same AWS account for preview and dev? (y/n) [y]: " USE_SAME 48 | USE_SAME=${USE_SAME:-y} 49 | echo "" 50 | 51 | if [[ "$USE_SAME" =~ ^[Yy]$ ]]; then 52 | AWS_ACCESS_KEY_ID_PREVIEW="$AWS_ACCESS_KEY_ID_MAIN" 53 | AWS_SECRET_ACCESS_KEY_PREVIEW="$AWS_SECRET_ACCESS_KEY_MAIN" 54 | AWS_ACCOUNT_ID_PREVIEW="$AWS_ACCOUNT_ID_MAIN" 55 | AWS_REGION_PREVIEW="$AWS_REGION_MAIN" 56 | 57 | AWS_ACCESS_KEY_ID_DEV="$AWS_ACCESS_KEY_ID_MAIN" 58 | AWS_SECRET_ACCESS_KEY_DEV="$AWS_SECRET_ACCESS_KEY_MAIN" 59 | AWS_ACCOUNT_ID_DEV="$AWS_ACCOUNT_ID_MAIN" 60 | AWS_REGION_DEV="$AWS_REGION_MAIN" 61 | else 62 | echo "📋 Enter AWS credentials for PREVIEW environment:" 63 | read -p "AWS Access Key ID (PREVIEW): " AWS_ACCESS_KEY_ID_PREVIEW 64 | read -sp "AWS Secret Access Key (PREVIEW): " AWS_SECRET_ACCESS_KEY_PREVIEW 65 | echo "" 66 | read -p "AWS Account ID (PREVIEW): " AWS_ACCOUNT_ID_PREVIEW 67 | read -p "AWS Region (PREVIEW) [us-east-1]: " AWS_REGION_PREVIEW 68 | AWS_REGION_PREVIEW=${AWS_REGION_PREVIEW:-us-east-1} 69 | echo "" 70 | 71 | echo "📋 Enter AWS credentials for DEV environment:" 72 | read -p "AWS Access Key ID (DEV): " AWS_ACCESS_KEY_ID_DEV 73 | read -sp "AWS Secret Access Key (DEV): " AWS_SECRET_ACCESS_KEY_DEV 74 | echo "" 75 | read -p "AWS Account ID (DEV): " AWS_ACCOUNT_ID_DEV 76 | read -p "AWS Region (DEV) [us-east-1]: " AWS_REGION_DEV 77 | AWS_REGION_DEV=${AWS_REGION_DEV:-us-east-1} 78 | echo "" 79 | fi 80 | 81 | # Optional MongoDB URI 82 | read -p "Enter MongoDB URI (optional, press Enter to skip): " MONGODB_URI 83 | echo "" 84 | 85 | # Confirm before setting secrets 86 | echo "🔍 Summary:" 87 | echo " Main Account ID: $AWS_ACCOUNT_ID_MAIN" 88 | echo " Main Region: $AWS_REGION_MAIN" 89 | echo " Preview Account ID: $AWS_ACCOUNT_ID_PREVIEW" 90 | echo " Preview Region: $AWS_REGION_PREVIEW" 91 | echo " Dev Account ID: $AWS_ACCOUNT_ID_DEV" 92 | echo " Dev Region: $AWS_REGION_DEV" 93 | if [ -n "$MONGODB_URI" ]; then 94 | echo " MongoDB URI: " 95 | fi 96 | echo "" 97 | 98 | read -p "Create these secrets in GitHub? (y/n): " CONFIRM 99 | if [[ ! "$CONFIRM" =~ ^[Yy]$ ]]; then 100 | echo "❌ Cancelled" 101 | exit 0 102 | fi 103 | 104 | echo "" 105 | echo "🚀 Creating GitHub secrets..." 106 | echo "" 107 | 108 | # Create secrets for MAIN environment 109 | echo " Creating MAIN environment secrets..." 
110 | echo "$AWS_ACCESS_KEY_ID_MAIN" | gh secret set AWS_ACCESS_KEY_ID_MAIN 111 | echo "$AWS_SECRET_ACCESS_KEY_MAIN" | gh secret set AWS_SECRET_ACCESS_KEY_MAIN 112 | echo "$AWS_ACCOUNT_ID_MAIN" | gh secret set AWS_ACCOUNT_ID_MAIN 113 | echo "$AWS_REGION_MAIN" | gh secret set AWS_REGION_MAIN 114 | 115 | # Create secrets for PREVIEW environment 116 | echo " Creating PREVIEW environment secrets..." 117 | echo "$AWS_ACCESS_KEY_ID_PREVIEW" | gh secret set AWS_ACCESS_KEY_ID_PREVIEW 118 | echo "$AWS_SECRET_ACCESS_KEY_PREVIEW" | gh secret set AWS_SECRET_ACCESS_KEY_PREVIEW 119 | echo "$AWS_ACCOUNT_ID_PREVIEW" | gh secret set AWS_ACCOUNT_ID_PREVIEW 120 | echo "$AWS_REGION_PREVIEW" | gh secret set AWS_REGION_PREVIEW 121 | 122 | # Create secrets for DEV environment 123 | echo " Creating DEV environment secrets..." 124 | echo "$AWS_ACCESS_KEY_ID_DEV" | gh secret set AWS_ACCESS_KEY_ID_DEV 125 | echo "$AWS_SECRET_ACCESS_KEY_DEV" | gh secret set AWS_SECRET_ACCESS_KEY_DEV 126 | echo "$AWS_ACCOUNT_ID_DEV" | gh secret set AWS_ACCOUNT_ID_DEV 127 | echo "$AWS_REGION_DEV" | gh secret set AWS_REGION_DEV 128 | 129 | # Create optional MongoDB URI 130 | if [ -n "$MONGODB_URI" ]; then 131 | echo " Creating MONGODB_URI secret..." 132 | echo "$MONGODB_URI" | gh secret set MONGODB_URI 133 | fi 134 | 135 | echo "" 136 | echo "✅ All secrets created successfully!" 137 | echo "" 138 | echo "📋 View your secrets at:" 139 | gh repo view --web 140 | echo " → Settings → Secrets and variables → Actions" 141 | echo "" 142 | echo "🎉 You can now push to main/preview/dev branches to trigger deployments!" 143 | 144 | -------------------------------------------------------------------------------- /debug-email-receiving.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Email Receiving Debug Script for h.kidox.ai 4 | # This script checks all the components needed for email receiving to work 5 | 6 | set -e 7 | 8 | DOMAIN="h.kidox.ai" 9 | ENVIRONMENT=${1:-"main"} 10 | AWS_PROFILE="main" 11 | 12 | echo "========================================" 13 | echo "Debugging Email Receiving for $DOMAIN" 14 | echo "Environment: $ENVIRONMENT" 15 | echo "========================================" 16 | echo "" 17 | 18 | # Check AWS credentials 19 | echo "1. Checking AWS Credentials..." 20 | echo " Using AWS Profile: $AWS_PROFILE" 21 | if ! aws sts get-caller-identity --profile "$AWS_PROFILE" &> /dev/null; then 22 | echo "❌ AWS credentials not configured or invalid for profile '$AWS_PROFILE'" 23 | echo " Run: aws configure --profile $AWS_PROFILE" 24 | exit 1 25 | fi 26 | ACCOUNT_ID=$(aws sts get-caller-identity --profile "$AWS_PROFILE" --query Account --output text) 27 | echo "✅ AWS Account ID: $ACCOUNT_ID" 28 | echo "" 29 | 30 | # Get Terraform outputs 31 | echo "2. Getting Infrastructure Details..." 32 | cd infra 33 | export AWS_PROFILE="$AWS_PROFILE" 34 | EMAIL_BUCKET=$(terraform output -raw email_bucket_name 2>/dev/null || echo "") 35 | RULE_SET_NAME="$ENVIRONMENT-rule-set" 36 | if [ -z "$EMAIL_BUCKET" ]; then 37 | echo "❌ Could not get email bucket name from Terraform" 38 | echo " Trying to construct bucket name from account ID..." 39 | EMAIL_BUCKET="email-to-webhook-emails-$ENVIRONMENT-$ACCOUNT_ID" 40 | echo " Using: $EMAIL_BUCKET" 41 | fi 42 | echo "✅ Email Bucket: $EMAIL_BUCKET" 43 | echo "✅ Rule Set: $RULE_SET_NAME" 44 | cd .. 45 | echo "" 46 | 47 | # Check domain verification in SES 48 | echo "3. Checking SES Domain Verification..." 
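# GetIdentityVerificationAttributes returns a VerificationAttributes map keyed by identity;
# jq's // operator falls back to "NotFound" when the domain is absent from that map.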
49 | VERIFICATION=$(aws ses get-identity-verification-attributes --profile "$AWS_PROFILE" --identities "$DOMAIN" --output json) 50 | VERIFICATION_STATUS=$(echo "$VERIFICATION" | jq -r ".VerificationAttributes.\"$DOMAIN\".VerificationStatus // \"NotFound\"") 51 | echo " Domain Verification Status: $VERIFICATION_STATUS" 52 | if [ "$VERIFICATION_STATUS" != "Success" ]; then 53 | echo "❌ Domain is not verified in SES!" 54 | echo " Current status: $VERIFICATION_STATUS" 55 | echo "" 56 | echo " To verify the domain, you need to:" 57 | echo " 1. POST to your API: /v1/domain/$DOMAIN" 58 | echo " 2. Wait for verification to complete" 59 | else 60 | echo "✅ Domain is verified in SES" 61 | fi 62 | echo "" 63 | 64 | # Check active receipt rule set 65 | echo "4. Checking Active Receipt Rule Set..." 66 | ACTIVE_RULE_SET=$(aws ses describe-active-receipt-rule-set --profile "$AWS_PROFILE" --output json 2>/dev/null || echo "{}") 67 | ACTIVE_NAME=$(echo "$ACTIVE_RULE_SET" | jq -r '.Metadata.Name // "None"') 68 | echo " Active Rule Set: $ACTIVE_NAME" 69 | if [ "$ACTIVE_NAME" != "$RULE_SET_NAME" ]; then 70 | echo "⚠️ Warning: Active rule set ($ACTIVE_NAME) doesn't match expected ($RULE_SET_NAME)" 71 | else 72 | echo "✅ Correct rule set is active" 73 | fi 74 | echo "" 75 | 76 | # Check receipt rules in the rule set 77 | echo "5. Checking Receipt Rules..." 78 | RECEIPT_RULE=$(aws ses describe-receipt-rule-set --profile "$AWS_PROFILE" --rule-set-name "$RULE_SET_NAME" --output json 2>/dev/null || echo "{}") 79 | RULE_COUNT=$(echo "$RECEIPT_RULE" | jq '.Rules | length') 80 | echo " Number of rules: $RULE_COUNT" 81 | if [ "$RULE_COUNT" -gt 0 ]; then 82 | echo "$RECEIPT_RULE" | jq -r '.Rules[] | " - Rule: \(.Name) | Enabled: \(.Enabled) | Recipients: \(if .Recipients then (.Recipients | join(", ")) else "ALL" end)"' 83 | echo "✅ Receipt rules configured" 84 | else 85 | echo "❌ No receipt rules found!" 86 | fi 87 | echo "" 88 | 89 | # Check S3 bucket 90 | echo "6. Checking S3 Email Bucket..." 91 | if aws s3 ls "s3://$EMAIL_BUCKET" --profile "$AWS_PROFILE" &> /dev/null; then 92 | OBJECT_COUNT=$(aws s3 ls "s3://$EMAIL_BUCKET" --profile "$AWS_PROFILE" --recursive | wc -l | tr -d ' ') 93 | echo "✅ Bucket exists" 94 | echo " Objects in bucket: $OBJECT_COUNT" 95 | if [ "$OBJECT_COUNT" -gt 0 ]; then 96 | echo "" 97 | echo " Recent objects:" 98 | aws s3 ls "s3://$EMAIL_BUCKET" --profile "$AWS_PROFILE" --recursive | tail -5 99 | else 100 | echo " ⚠️ Bucket is empty - no emails received yet" 101 | fi 102 | else 103 | echo "❌ Cannot access bucket" 104 | fi 105 | echo "" 106 | 107 | # Check S3 bucket policy 108 | echo "7. Checking S3 Bucket Policy..." 109 | BUCKET_POLICY=$(aws s3api get-bucket-policy --profile "$AWS_PROFILE" --bucket "$EMAIL_BUCKET" 2>/dev/null || echo "") 110 | if [ -n "$BUCKET_POLICY" ]; then 111 | SES_PERMISSION=$(echo "$BUCKET_POLICY" | jq -r '.Policy | fromjson | .Statement[] | select(.Principal.Service == "ses.amazonaws.com") | .Action') 112 | if [ -n "$SES_PERMISSION" ] && [ "$SES_PERMISSION" != "null" ]; then 113 | echo "✅ SES has permission to write to bucket" 114 | else 115 | echo "❌ SES does not have PutObject permission on bucket" 116 | fi 117 | else 118 | echo "❌ No bucket policy found" 119 | fi 120 | echo "" 121 | 122 | # Check CloudWatch Logs for parsing lambda 123 | echo "8. Checking Recent Lambda Logs..." 
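# Lambda log groups follow the /aws/lambda/<function-name> convention, so the name below
# must match the parser function defined in infra/main.tf ("email-parser-lambda-<env>").
# The `aws logs tail` command used here is available in AWS CLI v2.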
124 | PARSER_LAMBDA_LOG_GROUP="/aws/lambda/email-parser-lambda-$ENVIRONMENT" 125 | echo " Log Group: $PARSER_LAMBDA_LOG_GROUP" 126 | if aws logs describe-log-groups --profile "$AWS_PROFILE" --log-group-name-prefix "$PARSER_LAMBDA_LOG_GROUP" --output json | jq -e '.logGroups[0]' > /dev/null 2>&1; then 127 | echo "✅ Lambda log group exists" 128 | echo "" 129 | echo " Recent log events (last 10 minutes):" 130 | aws logs tail "$PARSER_LAMBDA_LOG_GROUP" --profile "$AWS_PROFILE" --since 10m --format short 2>/dev/null || echo " No recent logs" 131 | else 132 | echo "⚠️ Lambda log group not found (Lambda may not have run yet)" 133 | fi 134 | echo "" 135 | 136 | echo "========================================" 137 | echo "Summary" 138 | echo "========================================" 139 | echo "" 140 | echo "Test email receiving by sending an email to: test@$DOMAIN" 141 | echo "" 142 | echo "If emails still don't appear in S3:" 143 | echo "1. Check if domain verification is 'Success'" 144 | echo "2. Verify the receipt rule set is active" 145 | echo "3. Ensure receipt rules exist and are enabled" 146 | echo "4. Check S3 bucket policy allows SES to write" 147 | echo "5. Try sending from a verified external email (Gmail, etc.)" 148 | echo "" 149 | 150 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Infrastructure 2 | 3 | on: 4 | push: 5 | branches: ["main", "preview", "dev"] 6 | pull_request: 7 | branches: ["main", "preview", "dev"] 8 | workflow_dispatch: 9 | 10 | jobs: 11 | deploy: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@v3 17 | 18 | - name: Determine environment from branch 19 | id: determine-env 20 | run: | 21 | BRANCH_NAME="${GITHUB_REF#refs/heads/}" 22 | echo "branch=$BRANCH_NAME" >> $GITHUB_OUTPUT 23 | 24 | # Map branch to environment (can be customized) 25 | case "$BRANCH_NAME" in 26 | main) 27 | echo "environment=main" >> $GITHUB_OUTPUT 28 | ;; 29 | preview) 30 | echo "environment=preview" >> $GITHUB_OUTPUT 31 | ;; 32 | dev) 33 | echo "environment=dev" >> $GITHUB_OUTPUT 34 | ;; 35 | *) 36 | echo "environment=dev" >> $GITHUB_OUTPUT 37 | ;; 38 | esac 39 | 40 | - name: Set AWS credentials for environment 41 | id: aws-creds 42 | run: | 43 | ENV="${{ steps.determine-env.outputs.environment }}" 44 | 45 | case "$ENV" in 46 | main) 47 | echo "aws_access_key_id=${{ secrets.AWS_ACCESS_KEY_ID_MAIN }}" >> $GITHUB_OUTPUT 48 | echo "aws_secret_access_key=${{ secrets.AWS_SECRET_ACCESS_KEY_MAIN }}" >> $GITHUB_OUTPUT 49 | echo "aws_account_id=${{ secrets.AWS_ACCOUNT_ID_MAIN }}" >> $GITHUB_OUTPUT 50 | echo "aws_region=${{ secrets.AWS_REGION_MAIN || 'us-east-1' }}" >> $GITHUB_OUTPUT 51 | ;; 52 | preview) 53 | echo "aws_access_key_id=${{ secrets.AWS_ACCESS_KEY_ID_PREVIEW }}" >> $GITHUB_OUTPUT 54 | echo "aws_secret_access_key=${{ secrets.AWS_SECRET_ACCESS_KEY_PREVIEW }}" >> $GITHUB_OUTPUT 55 | echo "aws_account_id=${{ secrets.AWS_ACCOUNT_ID_PREVIEW }}" >> $GITHUB_OUTPUT 56 | echo "aws_region=${{ secrets.AWS_REGION_PREVIEW || 'us-east-1' }}" >> $GITHUB_OUTPUT 57 | ;; 58 | dev) 59 | echo "aws_access_key_id=${{ secrets.AWS_ACCESS_KEY_ID_DEV }}" >> $GITHUB_OUTPUT 60 | echo "aws_secret_access_key=${{ secrets.AWS_SECRET_ACCESS_KEY_DEV }}" >> $GITHUB_OUTPUT 61 | echo "aws_account_id=${{ secrets.AWS_ACCOUNT_ID_DEV }}" >> $GITHUB_OUTPUT 62 | echo "aws_region=${{ secrets.AWS_REGION_DEV || 'us-east-1' }}" 
>> $GITHUB_OUTPUT 63 | ;; 64 | *) 65 | echo "::error::Unknown environment: $ENV" 66 | exit 1 67 | ;; 68 | esac 69 | 70 | - name: Configure AWS credentials 71 | uses: aws-actions/configure-aws-credentials@v2 72 | with: 73 | aws-access-key-id: ${{ steps.aws-creds.outputs.aws_access_key_id }} 74 | aws-secret-access-key: ${{ steps.aws-creds.outputs.aws_secret_access_key }} 75 | aws-region: ${{ steps.aws-creds.outputs.aws_region }} 76 | 77 | - name: Verify AWS credentials 78 | run: | 79 | echo "Verifying AWS credentials..." 80 | aws sts get-caller-identity 81 | echo "Account ID: ${{ steps.aws-creds.outputs.aws_account_id }}" 82 | echo "Region: ${{ steps.aws-creds.outputs.aws_region }}" 83 | echo "Environment: ${{ steps.determine-env.outputs.environment }}" 84 | 85 | - name: Set up Terraform 86 | uses: hashicorp/setup-terraform@v2 87 | 88 | - name: Package Check Lambda function 89 | run: | 90 | mkdir -p lambda_packages 91 | cd lambda/check && ./package.sh 92 | 93 | - name: Package Parser Lambda function 94 | run: | 95 | cd lambda/parser && ./package.sh 96 | 97 | - name: Terraform Init 98 | env: 99 | ENV: ${{ steps.determine-env.outputs.environment }} 100 | AWS_REGION: ${{ steps.aws-creds.outputs.aws_region }} 101 | run: | 102 | cd infra 103 | STATE_BUCKET="terraform-state-${ENV}-${{ steps.aws-creds.outputs.aws_account_id }}" 104 | echo "Initializing Terraform with state bucket: $STATE_BUCKET" 105 | terraform init -reconfigure \ 106 | -backend-config="bucket=$STATE_BUCKET" \ 107 | -backend-config="key=terraform.tfstate" \ 108 | -backend-config="region=$AWS_REGION" 109 | 110 | - name: Terraform Plan 111 | env: 112 | ENV: ${{ steps.determine-env.outputs.environment }} 113 | AWS_ACCOUNT_ID: ${{ steps.aws-creds.outputs.aws_account_id }} 114 | AWS_REGION: ${{ steps.aws-creds.outputs.aws_region }} 115 | run: | 116 | cd infra 117 | STATE_BUCKET="terraform-state-${ENV}-${AWS_ACCOUNT_ID}" 118 | terraform plan \ 119 | -var="environment=$ENV" \ 120 | -var="aws_account_id=$AWS_ACCOUNT_ID" \ 121 | -var="aws_region=$AWS_REGION" \ 122 | -var="state_bucket_name=$STATE_BUCKET" \ 123 | -var="mongodb_uri=${{ secrets.MONGODB_URI }}" \ 124 | -var="gemini_api_key=${{ secrets.GEMINI_API_KEY }}" \ 125 | -var="daytona_api_key=${{ secrets.DAYTONA_API_KEY }}" \ 126 | -var="daytona_api_key=${{ secrets.DAYTONA_API_KEY }}" 127 | 128 | - name: Terraform Apply 129 | if: always() 130 | env: 131 | ENV: ${{ steps.determine-env.outputs.environment }} 132 | AWS_ACCOUNT_ID: ${{ steps.aws-creds.outputs.aws_account_id }} 133 | AWS_REGION: ${{ steps.aws-creds.outputs.aws_region }} 134 | run: | 135 | cd infra 136 | STATE_BUCKET="terraform-state-${ENV}-${AWS_ACCOUNT_ID}" 137 | terraform apply -auto-approve \ 138 | -var="environment=$ENV" \ 139 | -var="aws_account_id=$AWS_ACCOUNT_ID" \ 140 | -var="aws_region=$AWS_REGION" \ 141 | -var="state_bucket_name=$STATE_BUCKET" \ 142 | -var="mongodb_uri=${{ secrets.MONGODB_URI }}" \ 143 | -var="gemini_api_key=${{ secrets.GEMINI_API_KEY }}" \ 144 | -var="daytona_api_key=${{ secrets.DAYTONA_API_KEY }}" 145 | 146 | - name: Clean up Lambda packages 147 | if: always() 148 | run: | 149 | rm -rf lambda_packages 150 | -------------------------------------------------------------------------------- /MIGRATION_SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Multi-Account Migration Summary 2 | 3 | ## ✅ Migration Complete 4 | 5 | Your infrastructure has been successfully migrated from a shared infrastructure model to a multi-account architecture. 
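
As a quick orientation before the detailed changes below: the Terraform backend is no longer hardcoded, so every environment is initialized against a state bucket that lives in its own AWS account. A minimal sketch of the new pattern (the account ID is a placeholder; `deploy.sh` and the GitHub Actions workflow run this for you with the correct values):

```bash
# Initialize the backend for the main environment (placeholder account ID)
cd infra
terraform init -reconfigure \
  -backend-config="bucket=terraform-state-main-111111111111" \
  -backend-config="key=terraform.tfstate" \
  -backend-config="region=us-east-1"
```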
6 | 7 | ## What Changed 8 | 9 | ### 🗑️ Deleted Files 10 | 11 | The following shared infrastructure files were removed: 12 | - `infra/shared/main.tf` 13 | - `infra/shared/provider.tf` 14 | - `infra/shared/variables.tf` 15 | - `infra/shared/outputs.tf` 16 | - `deploy-shared.sh` 17 | - `destroy-shared.sh` 18 | - `infra/destroy_routes.tf` 19 | - `infra/route_cleanup.sh` 20 | 21 | ### 📝 Modified Files 22 | 23 | #### Infrastructure Configuration 24 | 25 | **`infra/main.tf`** 26 | - Added per-environment SES receipt rule set (no longer shared) 27 | - Added SES active receipt rule set activation 28 | - Updated SES receipt rule to reference local rule set 29 | - Removed dependency on shared infrastructure 30 | 31 | **`infra/provider.tf`** 32 | - Made Terraform backend fully configurable 33 | - Removed hardcoded state bucket 34 | - Backend now accepts bucket, key, and region via `-backend-config` flags 35 | - Updated default tags to use environment variable 36 | 37 | **`infra/variables.tf`** 38 | - Improved documentation for all variables 39 | - Added `state_bucket_name` variable 40 | - Clarified that `aws_account_id` is required (no default) 41 | - Updated `environment` description for multi-account context 42 | 43 | #### Deployment Scripts 44 | 45 | **`deploy.sh`** 46 | - Complete rewrite for multi-account support 47 | - Added `AWS_PROFILE` requirement and validation 48 | - Added AWS credential verification 49 | - Automatic account ID and region detection from profile 50 | - Dynamic state bucket configuration per environment 51 | - Removed shared infrastructure dependency checks 52 | - Enhanced error messages and user guidance 53 | 54 | **`destroy.sh`** 55 | - Complete rewrite for multi-account support 56 | - Added `AWS_PROFILE` requirement and validation 57 | - Dynamic state bucket configuration 58 | - Improved S3 bucket emptying logic 59 | - Enhanced error handling 60 | 61 | #### CI/CD 62 | 63 | **`.github/workflows/deploy.yml`** 64 | - Branch-to-environment mapping (main → main, preview → preview, dev → dev) 65 | - Conditional AWS credential selection per branch 66 | - Separate secrets for each environment: 67 | - `AWS_ACCESS_KEY_ID_MAIN/PREVIEW/DEV` 68 | - `AWS_SECRET_ACCESS_KEY_MAIN/PREVIEW/DEV` 69 | - `AWS_ACCOUNT_ID_MAIN/PREVIEW/DEV` 70 | - `AWS_REGION_MAIN/PREVIEW/DEV` 71 | - Dynamic state bucket configuration 72 | - Added Terraform plan step for visibility 73 | 74 | #### Documentation 75 | 76 | **`ENVIRONMENTS.md`** 77 | - Complete rewrite for multi-account architecture 78 | - Comprehensive AWS account setup guide 79 | - AWS CLI profile configuration instructions 80 | - Required IAM permissions documentation 81 | - Terraform state bucket creation guide 82 | - Local deployment examples 83 | - GitHub Actions secrets configuration 84 | - Security best practices 85 | - Troubleshooting guide 86 | - Cost management guidance 87 | 88 | **`README.md`** 89 | - Updated deployment section for multi-account architecture 90 | - Updated prerequisites 91 | - New GitHub Actions secrets list 92 | - Clarified branch-based deployments 93 | 94 | ## Architecture Changes 95 | 96 | ### Before (Shared Infrastructure) 97 | ``` 98 | Single AWS Account 99 | ├── Shared SES Receipt Rule Set (all environments) 100 | ├── Environment: main 101 | │ ├── Lambda Functions 102 | │ ├── API Gateway 103 | │ └── S3 Buckets 104 | ├── Environment: preview 105 | │ ├── Lambda Functions 106 | │ ├── API Gateway 107 | │ └── S3 Buckets 108 | └── Environment: dev 109 | ├── Lambda Functions 110 | ├── API Gateway 111 | └── S3 Buckets 112 | 
``` 113 | 114 | ### After (Multi-Account) 115 | ``` 116 | AWS Account 111111111111 (main) 117 | ├── SES Receipt Rule Set 118 | ├── Lambda Functions 119 | ├── API Gateway 120 | ├── S3 Buckets 121 | └── Terraform State Bucket 122 | 123 | AWS Account 222222222222 (preview) 124 | ├── SES Receipt Rule Set 125 | ├── Lambda Functions 126 | ├── API Gateway 127 | ├── S3 Buckets 128 | └── Terraform State Bucket 129 | 130 | AWS Account 333333333333 (dev) 131 | ├── SES Receipt Rule Set 132 | ├── Lambda Functions 133 | ├── API Gateway 134 | ├── S3 Buckets 135 | └── Terraform State Bucket 136 | ``` 137 | 138 | ### Terraform State Buckets 139 | 140 | Terraform keeps remote state in a dedicated S3 bucket per account, using the pattern `terraform-state-${environment}-${account_id}`. For example: 141 | 142 | ``` 143 | terraform-state-main-111111111111 144 | terraform-state-preview-222222222222 145 | terraform-state-dev-333333333333 146 | ``` 147 | 148 | State files live at `s3://{bucket}/terraform.tfstate`. 149 | 150 | ## Next Steps 151 | 152 | ### 1. Set Up AWS Accounts 153 | 154 | You need three separate AWS accounts (or one for testing). Options: 155 | 156 | **Option A: AWS Organizations (Recommended)** 157 | 1. Create an AWS Organization 158 | 2. Create member accounts for main, preview, and dev 159 | 3. Note the account IDs 160 | 161 | **Option B: Standalone Accounts** 162 | 1. Create three separate AWS accounts manually 163 | 2. Note the account IDs 164 | 165 | ### 2. Create IAM Users/Roles 166 | 167 | For each AWS account: 168 | 1. Create an IAM user named `terraform-deployer` 169 | 2. Attach `AdministratorAccess` policy (or custom policy with required permissions) 170 | 3. Create access keys 171 | 4. Save credentials securely 172 | 173 | ### 3. Configure AWS CLI Profiles 174 | 175 | ```bash 176 | # Configure main environment 177 | aws configure --profile main 178 | # Enter access key, secret key, region (us-east-1), output format (json) 179 | 180 | # Configure preview environment 181 | aws configure --profile preview 182 | # Enter access key, secret key, region (us-east-1), output format (json) 183 | 184 | # Configure dev environment 185 | aws configure --profile dev 186 | # Enter access key, secret key, region (us-east-1), output format (json) 187 | ``` 188 | 189 | Verify: 190 | ```bash 191 | aws sts get-caller-identity --profile main 192 | aws sts get-caller-identity --profile preview 193 | aws sts get-caller-identity --profile dev 194 | ``` 195 | 196 | ### 4. 
Create Terraform State Buckets 197 | 198 | For each account (bucket names automatically include the AWS account ID for uniqueness): 199 | 200 | ```bash 201 | # Main environment 202 | ACCOUNT_ID_MAIN=$(aws sts get-caller-identity --profile main --query Account --output text) 203 | aws s3 mb s3://terraform-state-main-${ACCOUNT_ID_MAIN} --region us-east-1 --profile main 204 | aws s3api put-bucket-versioning --bucket terraform-state-main-${ACCOUNT_ID_MAIN} \ 205 | --versioning-configuration Status=Enabled --profile main 206 | 207 | # Preview environment 208 | ACCOUNT_ID_PREVIEW=$(aws sts get-caller-identity --profile preview --query Account --output text) 209 | aws s3 mb s3://terraform-state-preview-${ACCOUNT_ID_PREVIEW} --region us-east-1 --profile preview 210 | aws s3api put-bucket-versioning --bucket terraform-state-preview-${ACCOUNT_ID_PREVIEW} \ 211 | --versioning-configuration Status=Enabled --profile preview 212 | 213 | # Dev environment 214 | ACCOUNT_ID_DEV=$(aws sts get-caller-identity --profile dev --query Account --output text) 215 | aws s3 mb s3://terraform-state-dev-${ACCOUNT_ID_DEV} --region us-east-1 --profile dev 216 | aws s3api put-bucket-versioning --bucket terraform-state-dev-${ACCOUNT_ID_DEV} \ 217 | --versioning-configuration Status=Enabled --profile dev 218 | ``` 219 | 220 | ### 5. Deploy Infrastructure 221 | 222 | ```bash 223 | # Deploy to main 224 | AWS_PROFILE=main ENVIRONMENT=main ./deploy.sh 225 | 226 | # Deploy to preview 227 | AWS_PROFILE=preview ENVIRONMENT=preview ./deploy.sh 228 | 229 | # Deploy to dev 230 | AWS_PROFILE=dev ENVIRONMENT=dev ./deploy.sh 231 | ``` 232 | 233 | ### 6. Configure GitHub Actions Secrets 234 | 235 | In your GitHub repository, go to Settings → Secrets and variables → Actions, and add: 236 | 237 | **Main Environment:** 238 | - `AWS_ACCESS_KEY_ID_MAIN` 239 | - `AWS_SECRET_ACCESS_KEY_MAIN` 240 | - `AWS_ACCOUNT_ID_MAIN` 241 | - `AWS_REGION_MAIN` (e.g., `us-east-1`) 242 | 243 | **Preview Environment:** 244 | - `AWS_ACCESS_KEY_ID_PREVIEW` 245 | - `AWS_SECRET_ACCESS_KEY_PREVIEW` 246 | - `AWS_ACCOUNT_ID_PREVIEW` 247 | - `AWS_REGION_PREVIEW` (e.g., `us-east-1`) 248 | 249 | **Dev Environment:** 250 | - `AWS_ACCESS_KEY_ID_DEV` 251 | - `AWS_SECRET_ACCESS_KEY_DEV` 252 | - `AWS_ACCOUNT_ID_DEV` 253 | - `AWS_REGION_DEV` (e.g., `us-east-1`) 254 | 255 | **Shared:** 256 | - `MONGODB_URI` (optional) 257 | 258 | ### 7. Test Deployments 259 | 260 | Push to each branch to trigger automatic deployments: 261 | - Push to `main` branch → deploys to main AWS account 262 | - Push to `preview` branch → deploys to preview AWS account 263 | - Push to `dev` branch → deploys to dev AWS account 264 | 265 | ## Benefits of Multi-Account Architecture 266 | 267 | ✅ **Complete Isolation**: No resource conflicts between environments 268 | ✅ **Enhanced Security**: Account-level security boundaries 269 | ✅ **Independent Scaling**: Each environment can scale independently 270 | ✅ **Cost Tracking**: Clear cost separation per environment 271 | ✅ **Compliance**: Easier to meet security and compliance requirements 272 | ✅ **Blast Radius**: Issues in one environment don't affect others 273 | 274 | ## Support 275 | 276 | For detailed setup instructions, see: 277 | - `ENVIRONMENTS.md` - Complete multi-account setup guide 278 | - `README.md` - Quick start and API usage 279 | 280 | ## Rollback (If Needed) 281 | 282 | If you need to rollback to the old shared infrastructure model: 283 | 284 | 1. Checkout the previous commit: `git checkout ` 285 | 2. 
Redeploy shared infrastructure: `./deploy-shared.sh` 286 | 3. Redeploy environments: `./deploy.sh` 287 | 288 | **Note**: The codebase has been updated and the old model is no longer recommended. 289 | 290 | -------------------------------------------------------------------------------- /lambda/parser/ai_parser.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from google import genai 4 | from google.genai import types 5 | from typing import Dict, Any, Optional 6 | import traceback 7 | import boto3 8 | import uuid 9 | import requests 10 | 11 | # Try importing Daytona, handle if not installed/configured to avoid crash on load 12 | try: 13 | from daytona_sdk import DaytonaConfig, Daytona, CreateSandboxFromSnapshotParams 14 | DAYTONA_AVAILABLE = True 15 | except ImportError: 16 | DAYTONA_AVAILABLE = False 17 | print("Daytona SDK not available") 18 | 19 | class AIParser: 20 | def __init__(self): 21 | self.api_key = os.environ.get('GEMINI_API_KEY') 22 | self.daytona_api_key = os.environ.get('DAYTONA_API_KEY') 23 | # Default to Gemini 3 (preview) as requested, fallback to 1.5 if needed 24 | self.model_name = os.environ.get('GEMINI_MODEL', 'gemini-3-pro-preview') 25 | self.active_sandboxes = {} 26 | 27 | self.s3_client = boto3.client('s3') 28 | self.attachments_bucket_name = os.environ.get('ATTACHMENTS_BUCKET_NAME', 'email-attachments-bucket-3rfrd') 29 | 30 | if self.api_key: 31 | self.client = genai.Client(api_key=self.api_key) 32 | else: 33 | print("Warning: GEMINI_API_KEY not set") 34 | 35 | def create_sandbox(self) -> str: 36 | """ 37 | Creates a new Daytona sandbox and returns its ID. 38 | """ 39 | if not DAYTONA_AVAILABLE: 40 | return "Daytona SDK not installed." 41 | if not self.daytona_api_key: 42 | return "DAYTONA_API_KEY not set." 43 | 44 | print("Creating Daytona sandbox...") 45 | try: 46 | config = DaytonaConfig(api_key=self.daytona_api_key) 47 | daytona = Daytona(config=config) 48 | 49 | # Create a sandbox 50 | params = CreateSandboxFromSnapshotParams( 51 | ephemeral=True, 52 | auto_stop_interval=1 # the ephemeral sandbox will be deleted after 5 minutes of inactivity 53 | ) 54 | sandbox = daytona.create(params) 55 | self.active_sandboxes[sandbox.id] = sandbox 56 | print(f"Sandbox created: {sandbox.id}") 57 | return sandbox.id 58 | except Exception as e: 59 | print(f"Error creating sandbox: {e}") 60 | return f"Error creating sandbox: {str(e)}" 61 | 62 | def download_file_to_tmp(self, url: str) -> str: 63 | """ 64 | Downloads a file from a URL to the local /tmp directory in Lambda. 65 | Returns the local file path. 66 | """ 67 | try: 68 | # Create a local filename from URL or generate unique name 69 | filename = url.split('/')[-1].split('?')[0] or f"file_{uuid.uuid4().hex}" 70 | local_path = f"/tmp/{filename}" 71 | 72 | print(f"Downloading {url} to {local_path}") 73 | 74 | response = requests.get(url, stream=True, timeout=30) 75 | response.raise_for_status() 76 | 77 | with open(local_path, 'wb') as f: 78 | for chunk in response.iter_content(chunk_size=8192): 79 | f.write(chunk) 80 | 81 | return local_path 82 | except Exception as e: 83 | print(f"Error downloading from URL: {e}") 84 | return f"Error downloading file: {str(e)}" 85 | 86 | def upload_file(self, sandbox_id: str, destination_path: str, local_file_path: str) -> str: 87 | """ 88 | Uploads a file to the specified sandbox. 89 | Can upload ONLY from 'local_file_path' (e.g. /tmp/...). 
90 | """ 91 | sandbox = self.active_sandboxes.get(sandbox_id) 92 | if not sandbox: 93 | return f"Sandbox {sandbox_id} not found" 94 | 95 | try: 96 | if local_file_path: 97 | print(f"Uploading local file {local_file_path} to sandbox {sandbox_id} at {destination_path}") 98 | 99 | if not os.path.exists(local_file_path): 100 | return f"Error: Local file not found at {local_file_path}" 101 | 102 | sandbox.fs.upload_file(local_file_path,destination_path) 103 | 104 | else: 105 | return "Error: local_file_path is required" 106 | 107 | return f"File uploaded successfully to {destination_path}" 108 | except Exception as e: 109 | print(f"Error uploading file: {e}") 110 | return f"Error uploading file: {str(e)}" 111 | 112 | def run_code(self, sandbox_id: str, code: str) -> str: 113 | """ 114 | Runs Python code in the specified sandbox. 115 | """ 116 | sandbox = self.active_sandboxes.get(sandbox_id) 117 | if not sandbox: 118 | return f"Sandbox {sandbox_id} not found" 119 | 120 | print(f"Executing code in sandbox {sandbox_id}:\n{code}") 121 | try: 122 | execution = sandbox.process.code_run(code) 123 | 124 | output = "" 125 | if execution.result: 126 | output += f"Result: {execution.result}\n" 127 | 128 | if execution.exit_code != 0: 129 | output += f"Exit Code: {execution.exit_code}\n" 130 | 131 | return output.strip() 132 | except Exception as e: 133 | print(f"Error executing code in Daytona: {e}") 134 | return f"Error: {str(e)}" 135 | 136 | def download_from_sandbox_to_s3(self, sandbox_id: str, sandbox_file_path: str) -> str: 137 | """ 138 | Downloads a file from the sandbox, uploads it to S3, and returns the public URL. 139 | Useful for retrieving generated files (charts, reports, etc.). 140 | """ 141 | sandbox = self.active_sandboxes.get(sandbox_id) 142 | if not sandbox: 143 | return f"Sandbox {sandbox_id} not found" 144 | 145 | try: 146 | # 1. Download from Sandbox to local /tmp 147 | filename = os.path.basename(sandbox_file_path) or f"output_{uuid.uuid4().hex}.bin" 148 | local_path = f"/tmp/{filename}" 149 | 150 | print(f"Downloading {sandbox_file_path} from sandbox {sandbox_id} to {local_path}") 151 | 152 | # Use Daytona SDK download_file method 153 | # Signature: download_file(remote_path: str, local_path: str, timeout: int = 1800) -> None 154 | try: 155 | sandbox.fs.download_file(sandbox_file_path, local_path) 156 | except Exception as e: 157 | print(f"Error downloading file from sandbox: {e}") 158 | return f"Error downloading file from sandbox: {str(e)}" 159 | 160 | if not os.path.exists(local_path): 161 | return f"Error: Failed to download file from sandbox to {local_path}" 162 | 163 | # 2. Upload from local /tmp to S3 164 | s3_key = f"ai_generated/{uuid.uuid4().hex}/{filename}" 165 | print(f"Uploading {local_path} to S3 bucket {self.attachments_bucket_name} at {s3_key}") 166 | 167 | # Determine content type based on extension? S3 can auto-detect or we default 168 | self.s3_client.upload_file( 169 | local_path, 170 | self.attachments_bucket_name, 171 | s3_key 172 | ) 173 | 174 | # 3. 
Return Public URL 175 | # Construct URL (assuming standard S3 public URL format) 176 | s3_url = f"https://{self.attachments_bucket_name}.s3.amazonaws.com/{s3_key}" 177 | print(f"File uploaded to S3: {s3_url}") 178 | 179 | # Cleanup local tmp file 180 | try: 181 | os.remove(local_path) 182 | except: 183 | pass 184 | 185 | return s3_url 186 | 187 | except Exception as e: 188 | print(f"Error in download_from_sandbox_to_s3: {e}") 189 | return f"Error retrieving file: {str(e)}" 190 | 191 | def parse_email(self, email_data: Dict[str, Any], prompt: str = None) -> Dict[str, Any]: 192 | """ 193 | Send email data to Gemini to extract structured information. 194 | Optionally uses tools if the prompt implies complex analysis. 195 | """ 196 | if not self.api_key: 197 | print("Skipping AI parsing: No API key") 198 | return {} 199 | 200 | if not prompt: 201 | prompt = """ 202 | Analyze the following email data and extract key entities and intent. 203 | Return a JSON object with the following schema: 204 | { 205 | "is_image_contain_a_cat": "boolean", 206 | "summary": "Brief summary of the email", 207 | "intent": "The primary intent of the sender (e.g., 'inquiry', 'complaint', 'purchase')", 208 | "sentiment": "sentiment analysis (positive, neutral, negative)", 209 | "key_entities": ["list of extracted entities like names, companies, dates"], 210 | "action_items": ["list of suggested actions"] 211 | } 212 | """ 213 | 214 | full_prompt = f""" 215 | {prompt} 216 | 217 | Email Data: 218 | {json.dumps(email_data, default=str)} 219 | """ 220 | 221 | # Define tools 222 | # We expose the tool to the model 223 | # Replaced download_file_from_s3 with download_file_to_tmp 224 | tools = [self.create_sandbox, self.download_file_to_tmp, self.upload_file, self.run_code, self.download_from_sandbox_to_s3] 225 | 226 | try: 227 | run_tools = bool(self.daytona_api_key) 228 | 229 | if run_tools: 230 | # Use chat to handle potential tool loops 231 | chat = self.client.chats.create( 232 | model=self.model_name, 233 | config={"tools": tools} 234 | ) 235 | 236 | response = chat.send_message(full_prompt) 237 | 238 | # Simple loop for function calls 239 | while response.function_calls: 240 | for call in response.function_calls: 241 | tool_name = call.name 242 | args = call.args 243 | 244 | print(f"Tool call: {tool_name}") 245 | tool_result = "Unknown tool" 246 | 247 | if tool_name == "create_sandbox": 248 | tool_result = self.create_sandbox() 249 | elif tool_name == "download_file_to_tmp": 250 | tool_result = self.download_file_to_tmp( 251 | args.get("url") 252 | ) 253 | elif tool_name == "upload_file": 254 | tool_result = self.upload_file( 255 | args.get("sandbox_id"), 256 | args.get("destination_path"), 257 | args.get("local_file_path") 258 | ) 259 | elif tool_name == "run_code": 260 | tool_result = self.run_code( 261 | args.get("sandbox_id"), 262 | args.get("code") 263 | ) 264 | elif tool_name == "download_from_sandbox_to_s3": 265 | tool_result = self.download_from_sandbox_to_s3( 266 | args.get("sandbox_id"), 267 | args.get("sandbox_file_path") 268 | ) 269 | 270 | # Send result back 271 | response = chat.send_message( 272 | types.Part.from_function_response( 273 | name=tool_name, 274 | response={"result": tool_result} 275 | ) 276 | ) 277 | 278 | # After tool loop (or if no tools called), get the final text. 
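                # response.text holds the model's final textual turn; it may arrive wrapped in a
                # ```json fenced block, so the code below strips the fences before json.loads and,
                # if parsing still fails, asks the model once more to re-emit plain JSON.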
279 | final_text = response.text 280 | 281 | # Try to parse 282 | try: 283 | # Strip markdown code blocks if present 284 | clean_text = final_text.strip() 285 | if clean_text.startswith("```json"): 286 | clean_text = clean_text[7:] 287 | if clean_text.endswith("```"): 288 | clean_text = clean_text[:-3] 289 | return json.loads(clean_text) 290 | except json.JSONDecodeError: 291 | # Fallback: Ask model to format as JSON 292 | json_response = chat.send_message( 293 | "Format the previous analysis as a valid JSON object matching the requested schema." 294 | ) 295 | clean_text = json_response.text.strip() 296 | if clean_text.startswith("```json"): 297 | clean_text = clean_text[7:] 298 | if clean_text.endswith("```"): 299 | clean_text = clean_text[:-3] 300 | return json.loads(clean_text) 301 | 302 | else: 303 | # Original extraction flow (fast, forced JSON) 304 | response = self.client.models.generate_content( 305 | model=self.model_name, 306 | contents=full_prompt, 307 | config={ 308 | "response_mime_type": "application/json" 309 | } 310 | ) 311 | return json.loads(response.text) 312 | 313 | except Exception as e: 314 | print(f"Error during AI parsing: {traceback.format_exc()}") 315 | return {"error": str(e)} 316 | finally: 317 | # Cleanup sandboxes 318 | if self.active_sandboxes: 319 | print(f"Cleaning up {len(self.active_sandboxes)} sandboxes...") 320 | for sid, sandbox in self.active_sandboxes.items(): 321 | try: 322 | sandbox.delete() 323 | except Exception as e: 324 | print(f"Error deleting sandbox {sid}: {e}") 325 | self.active_sandboxes.clear() 326 | -------------------------------------------------------------------------------- /ENVIRONMENTS.md: -------------------------------------------------------------------------------- 1 | # Multi-Account Environment Management Guide 2 | 3 | This project uses a **multi-account architecture** where each environment (main, preview, dev) is deployed to its own isolated AWS account. 
4 | 5 | ## Architecture Overview 6 | 7 | ### Multi-Account Model 8 | 9 | Each environment runs in complete isolation within its own AWS account: 10 | 11 | **Benefits:** 12 | - **Complete Isolation**: No resource conflicts between environments 13 | - **Security**: Blast radius is limited to a single account 14 | - **Cost Tracking**: Easy to track costs per environment 15 | - **Compliance**: Easier to meet security and compliance requirements 16 | - **Flexibility**: Each environment can have different configurations, regions, and settings 17 | 18 | **Environments:** 19 | - **main**: Production environment (separate AWS account) 20 | - **preview**: Staging/preview environment (separate AWS account) 21 | - **dev**: Development environment (separate AWS account) 22 | 23 | ### Resources Per Environment 24 | 25 | Each AWS account contains its complete infrastructure stack: 26 | 27 | - **SES Configuration:** 28 | - Receipt rule set (unique per account) 29 | - Active rule set activation 30 | - Receipt rules for email routing 31 | - Domain verification 32 | 33 | - **Compute:** 34 | - Lambda functions (check and parser) 35 | - Lambda execution roles and policies 36 | 37 | - **Storage:** 38 | - Email S3 bucket for incoming emails 39 | - Attachments S3 bucket (publicly readable) 40 | - KV database S3 bucket 41 | - Terraform state S3 bucket 42 | 43 | - **API:** 44 | - API Gateway HTTP API 45 | - Routes and integrations 46 | 47 | - **IAM:** 48 | - Lambda execution roles 49 | - Service-specific policies 50 | 51 | ### State Management 52 | 53 | Each AWS account has its own Terraform state bucket, suffixed by the account ID for global uniqueness: 54 | 55 | ``` 56 | Account 111111111111 (main): terraform-state-main-111111111111 57 | Account 222222222222 (preview): terraform-state-preview-222222222222 58 | Account 333333333333 (dev): terraform-state-dev-333333333333 59 | ``` 60 | 61 | State files are stored at: `s3://{bucket}/terraform.tfstate` 62 | 63 | --- 64 | 65 | ## AWS Account Setup 66 | 67 | ### Step 1: Create AWS Accounts 68 | 69 | You'll need three separate AWS accounts. The recommended approach is using **AWS Organizations**: 70 | 71 | 1. **Sign in to AWS Organizations** (or create a new organization) 72 | 2. **Create member accounts:** 73 | - Account name: `email-to-webhook-main` (for production) 74 | - Account name: `email-to-webhook-preview` (for staging) 75 | - Account name: `email-to-webhook-dev` (for development) 76 | 3. **Note the account IDs** - you'll need these for configuration 77 | 78 | **Alternative:** You can use standalone AWS accounts if you don't want to use Organizations. 79 | 80 | ### Step 2: Create IAM Users or Roles 81 | 82 | For each AWS account, create credentials for Terraform deployments: 83 | 84 | #### Option A: IAM User (Simpler for getting started) 85 | 86 | 1. Sign in to each AWS account 87 | 2. Go to **IAM → Users → Create User** 88 | 3. User name: `terraform-deployer` 89 | 4. Enable **Access key - Programmatic access** 90 | 5. Attach policies (see Required IAM Permissions below) 91 | 6. **Save the Access Key ID and Secret Access Key** securely 92 | 93 | #### Option B: IAM Role with Assume Role (Better for production) 94 | 95 | 1. Create a role in each member account 96 | 2. Configure trust relationship to allow assumption from your management account 97 | 3. 
Use AWS SSO or `aws sts assume-role` for authentication 98 | 99 | ### Step 3: Required IAM Permissions 100 | 101 | The IAM user or role needs these AWS managed policies: 102 | - `AdministratorAccess` (for initial setup) 103 | 104 | Or create a custom policy with these permissions: 105 | - SES: Full access 106 | - S3: Full access 107 | - Lambda: Full access 108 | - API Gateway: Full access 109 | - IAM: Full access (for creating Lambda execution roles) 110 | - CloudWatch Logs: Full access 111 | 112 | ### Step 4: Configure AWS CLI Profiles 113 | 114 | Configure AWS CLI profiles for each environment: 115 | 116 | ```bash 117 | # Configure main environment 118 | aws configure --profile main 119 | # Enter Access Key ID for main account 120 | # Enter Secret Access Key for main account 121 | # Enter region: us-east-1 (or your preferred region) 122 | # Enter output format: json 123 | 124 | # Configure preview environment 125 | aws configure --profile preview 126 | # Enter Access Key ID for preview account 127 | # Enter Secret Access Key for preview account 128 | # Enter region: us-east-1 129 | # Enter output format: json 130 | 131 | # Configure dev environment 132 | aws configure --profile dev 133 | # Enter Access Key ID for dev account 134 | # Enter Secret Access Key for dev account 135 | # Enter region: us-east-1 136 | # Enter output format: json 137 | ``` 138 | 139 | Verify your profiles: 140 | 141 | ```bash 142 | aws sts get-caller-identity --profile main 143 | aws sts get-caller-identity --profile preview 144 | aws sts get-caller-identity --profile dev 145 | ``` 146 | 147 | ### Step 5: Create Terraform State Buckets 148 | 149 | For each AWS account, create an S3 bucket for Terraform state. The examples below automatically suffix the bucket name with the AWS account ID to ensure uniqueness: 150 | 151 | ```bash 152 | # Create state bucket for main environment 153 | ACCOUNT_ID_MAIN=$(aws sts get-caller-identity --profile main --query Account --output text) 154 | aws s3 mb s3://terraform-state-main-${ACCOUNT_ID_MAIN} --region us-east-1 --profile main 155 | aws s3api put-bucket-versioning \ 156 | --bucket terraform-state-main-${ACCOUNT_ID_MAIN} \ 157 | --versioning-configuration Status=Enabled \ 158 | --profile main 159 | 160 | # Create state bucket for preview environment 161 | ACCOUNT_ID_PREVIEW=$(aws sts get-caller-identity --profile preview --query Account --output text) 162 | aws s3 mb s3://terraform-state-preview-${ACCOUNT_ID_PREVIEW} --region us-east-1 --profile preview 163 | aws s3api put-bucket-versioning \ 164 | --bucket terraform-state-preview-${ACCOUNT_ID_PREVIEW} \ 165 | --versioning-configuration Status=Enabled \ 166 | --profile preview 167 | 168 | # Create state bucket for dev environment 169 | ACCOUNT_ID_DEV=$(aws sts get-caller-identity --profile dev --query Account --output text) 170 | aws s3 mb s3://terraform-state-dev-${ACCOUNT_ID_DEV} --region us-east-1 --profile dev 171 | aws s3api put-bucket-versioning \ 172 | --bucket terraform-state-dev-${ACCOUNT_ID_DEV} \ 173 | --versioning-configuration Status=Enabled \ 174 | --profile dev 175 | ``` 176 | 177 | --- 178 | 179 | ## Local Deployment 180 | 181 | ### Deploy to an Environment 182 | 183 | Use the `AWS_PROFILE` environment variable to target the correct AWS account: 184 | 185 | **Deploy to Main (Production):** 186 | 187 | ```bash 188 | AWS_PROFILE=main ENVIRONMENT=main ./deploy.sh 189 | ``` 190 | 191 | **Deploy to Preview (Staging):** 192 | 193 | ```bash 194 | AWS_PROFILE=preview ENVIRONMENT=preview ./deploy.sh 195 | ``` 196 | 197 
| **Deploy to Dev (Development):** 198 | 199 | ```bash 200 | AWS_PROFILE=dev ENVIRONMENT=dev ./deploy.sh 201 | ``` 202 | 203 | ### Destroy an Environment 204 | 205 | **Destroy Main Environment:** 206 | 207 | ```bash 208 | AWS_PROFILE=main ENVIRONMENT=main ./destroy.sh 209 | ``` 210 | 211 | **Destroy Preview Environment:** 212 | 213 | ```bash 214 | AWS_PROFILE=preview ENVIRONMENT=preview ./destroy.sh 215 | ``` 216 | 217 | **Destroy Dev Environment:** 218 | 219 | ```bash 220 | AWS_PROFILE=dev ENVIRONMENT=dev ./destroy.sh 221 | ``` 222 | 223 | --- 224 | 225 | ## CI/CD with GitHub Actions 226 | 227 | ### Required GitHub Secrets 228 | 229 | Configure the following secrets in your GitHub repository: 230 | 231 | #### Main Environment Secrets 232 | - `AWS_ACCESS_KEY_ID_MAIN` - Access key for main AWS account 233 | - `AWS_SECRET_ACCESS_KEY_MAIN` - Secret key for main AWS account 234 | - `AWS_ACCOUNT_ID_MAIN` - Main AWS account ID 235 | - `AWS_REGION_MAIN` - AWS region for main (e.g., `us-east-1`) 236 | 237 | #### Preview Environment Secrets 238 | - `AWS_ACCESS_KEY_ID_PREVIEW` - Access key for preview AWS account 239 | - `AWS_SECRET_ACCESS_KEY_PREVIEW` - Secret key for preview AWS account 240 | - `AWS_ACCOUNT_ID_PREVIEW` - Preview AWS account ID 241 | - `AWS_REGION_PREVIEW` - AWS region for preview (e.g., `us-east-1`) 242 | 243 | #### Dev Environment Secrets 244 | - `AWS_ACCESS_KEY_ID_DEV` - Access key for dev AWS account 245 | - `AWS_SECRET_ACCESS_KEY_DEV` - Secret key for dev AWS account 246 | - `AWS_ACCOUNT_ID_DEV` - Dev AWS account ID 247 | - `AWS_REGION_DEV` - AWS region for dev (e.g., `us-east-1`) 248 | 249 | #### Shared Secrets 250 | - `MONGODB_URI` - MongoDB connection string (can be shared or per-environment) 251 | 252 | ### Setting Secrets in GitHub 253 | 254 | 1. Go to your repository on GitHub 255 | 2. Navigate to **Settings → Secrets and variables → Actions** 256 | 3. Click **New repository secret** 257 | 4. Add each secret with the exact name listed above 258 | 259 | ### Automatic Deployments 260 | 261 | The GitHub Actions workflow automatically deploys based on the branch: 262 | - Push to `main` branch → deploys to main AWS account 263 | - Push to `preview` branch → deploys to preview AWS account 264 | - Push to `dev` branch → deploys to dev AWS account 265 | 266 | --- 267 | 268 | ## How It Works 269 | 270 | ### 1. Environment Variable 271 | 272 | The `ENVIRONMENT` variable determines the environment name (used for resource naming): 273 | - Defaults to `main` if not specified 274 | - Used in resource names: `email-to-webhook-emails-${environment}` 275 | 276 | ### 2. AWS Profile 277 | 278 | The `AWS_PROFILE` variable determines which AWS account to deploy to: 279 | - Must be set for all local deployments 280 | - Corresponds to profiles in `~/.aws/credentials` or `~/.aws/config` 281 | 282 | ### 3. State File Isolation 283 | 284 | Each AWS account has its own state bucket and state file: 285 | - State bucket: `terraform-state-${environment}` 286 | - State file: `terraform.tfstate` 287 | - Complete isolation between environments 288 | 289 | ### 4. 
Resource Naming 290 | 291 | All resources are namespaced by environment: 292 | - Email S3 bucket: `email-to-webhook-emails-${environment}` 293 | - Attachments S3 bucket: `email-to-webhook-attachments-${environment}` 294 | - Database S3 bucket: `email-to-webhook-kv-database-${environment}` 295 | - SES receipt rule set: `${environment}-rule-set` 296 | - SES receipt rule: `catch-emails-${environment}` 297 | - API Gateway: `EmailParserAPI-${environment}` 298 | - Lambda functions: `function-name-${environment}` 299 | - IAM roles/policies: `role-name-${environment}` 300 | 301 | --- 302 | 303 | ## Best Practices 304 | 305 | ### ✅ DO 306 | 307 | - Use separate AWS accounts for each environment 308 | - Configure AWS CLI profiles for easy switching 309 | - Always specify both `AWS_PROFILE` and `ENVIRONMENT` 310 | - Keep `main` as your production environment 311 | - Review `terraform plan` before applying changes 312 | - Enable MFA on all AWS accounts 313 | - Use AWS Organizations for centralized account management 314 | - Enable CloudTrail in each account for audit logging 315 | - Set up budget alerts in each account 316 | 317 | ### ❌ DON'T 318 | 319 | - Don't use the same AWS account for multiple environments 320 | - Don't share credentials between environments 321 | - Don't manually edit state files 322 | - Don't deploy to production without testing in preview/dev first 323 | - Don't use root account credentials for deployments 324 | 325 | --- 326 | 327 | ## Troubleshooting 328 | 329 | ### "AWS_PROFILE is not set" Error 330 | 331 | Make sure you set the AWS_PROFILE environment variable: 332 | 333 | ```bash 334 | export AWS_PROFILE=main 335 | ./deploy.sh 336 | ``` 337 | 338 | Or set it inline: 339 | 340 | ```bash 341 | AWS_PROFILE=main ./deploy.sh 342 | ``` 343 | 344 | ### "Failed to authenticate with AWS" Error 345 | 346 | 1. Verify your profile exists: 347 | ```bash 348 | aws configure list --profile main 349 | ``` 350 | 351 | 2. Verify credentials are valid: 352 | ```bash 353 | aws sts get-caller-identity --profile main 354 | ``` 355 | 356 | 3. Check that credentials haven't expired (if using temporary credentials) 357 | 358 | ### State Bucket Not Found 359 | 360 | Create the state bucket in the target AWS account: 361 | 362 | ```bash 363 | aws s3 mb s3://terraform-state-main --profile main 364 | ``` 365 | 366 | ### Wrong Account Deployed 367 | 368 | State files are isolated per account, so you can safely: 369 | 370 | ```bash 371 | # Destroy from wrong account (if needed) 372 | AWS_PROFILE=wrong ENVIRONMENT=wrong ./destroy.sh 373 | 374 | # Deploy to correct account 375 | AWS_PROFILE=correct ENVIRONMENT=correct ./deploy.sh 376 | ``` 377 | 378 | --- 379 | 380 | ## Cost Management 381 | 382 | ### Multiple Accounts 383 | 384 | - Each environment incurs separate AWS costs 385 | - Use AWS Cost Explorer in each account to track spending 386 | - Set up billing alerts in each account 387 | - Consider AWS Organizations for consolidated billing 388 | - Destroy unused environments to save costs 389 | 390 | ### Estimated Monthly Costs (per environment) 391 | 392 | - Lambda: ~$5-20 (depends on usage) 393 | - S3: ~$1-5 (depends on storage) 394 | - API Gateway: ~$3.50 per million requests 395 | - SES: $0.10 per 1,000 emails 396 | 397 | --- 398 | 399 | ## Migration from Shared Infrastructure 400 | 401 | If you're migrating from the old shared infrastructure model: 402 | 403 | 1. **Backup current state** from the shared S3 bucket 404 | 2. **Destroy all old environments** using the old scripts 405 | 3. 
**Destroy shared infrastructure** using `./destroy-shared.sh` 406 | 4. **Set up new AWS accounts** as described above 407 | 5. **Deploy to new accounts** using the new scripts 408 | 6. **Update DNS records** and domain configurations as needed 409 | 410 | --- 411 | 412 | ## Security Considerations 413 | 414 | ### Account-Level Security 415 | 416 | - Enable MFA on all AWS accounts 417 | - Use AWS Organizations SCPs (Service Control Policies) for guardrails 418 | - Enable AWS CloudTrail in all accounts 419 | - Enable AWS Config for compliance monitoring 420 | - Use IAM roles instead of IAM users where possible 421 | - Rotate access keys regularly 422 | - Use AWS Secrets Manager for sensitive data 423 | 424 | ### Network Security 425 | 426 | - Consider VPC deployment for Lambda functions 427 | - Use VPC endpoints for S3 access 428 | - Enable S3 bucket encryption 429 | - Use HTTPS for all API endpoints 430 | 431 | --- 432 | 433 | ## Additional Resources 434 | 435 | - [AWS Organizations Documentation](https://docs.aws.amazon.com/organizations/) 436 | - [AWS Multi-Account Strategy](https://aws.amazon.com/organizations/getting-started/best-practices/) 437 | - [Terraform S3 Backend](https://www.terraform.io/docs/language/settings/backends/s3.html) 438 | - [AWS CLI Configuration](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) 439 | -------------------------------------------------------------------------------- /infra/main.tf: -------------------------------------------------------------------------------- 1 | # SES Receipt Rule Set (per environment in multi-account setup) 2 | # Each AWS account gets its own rule set 3 | resource "aws_ses_receipt_rule_set" "env_rule_set" { 4 | rule_set_name = "${var.environment}-rule-set" 5 | } 6 | 7 | # Activate the Rule Set (only one can be active per AWS account) 8 | resource "aws_ses_active_receipt_rule_set" "activate_rule_set" { 9 | rule_set_name = aws_ses_receipt_rule_set.env_rule_set.rule_set_name 10 | } 11 | 12 | # Create per-environment email bucket for SES to store incoming emails 13 | resource "aws_s3_bucket" "emails_bucket" { 14 | bucket = "email-to-webhook-emails-${var.environment}-${var.aws_account_id}" 15 | force_destroy = true 16 | } 17 | 18 | # Lifecycle policy to expire emails after 14 days 19 | resource "aws_s3_bucket_lifecycle_configuration" "emails_bucket_lifecycle" { 20 | bucket = aws_s3_bucket.emails_bucket.id 21 | 22 | rule { 23 | id = "expire-emails-after-14-days" 24 | status = "Enabled" 25 | 26 | filter { 27 | prefix = "" 28 | } 29 | 30 | expiration { 31 | days = 14 32 | } 33 | } 34 | } 35 | 36 | # S3 Bucket Policy to Allow SES Write Access 37 | resource "aws_s3_bucket_policy" "email_storage_policy" { 38 | bucket = aws_s3_bucket.emails_bucket.id 39 | 40 | policy = jsonencode({ 41 | Version = "2012-10-17", 42 | Statement = [ 43 | { 44 | Effect = "Allow", 45 | Principal = { 46 | Service = "ses.amazonaws.com" 47 | }, 48 | Action = "s3:PutObject", 49 | Resource = "${aws_s3_bucket.emails_bucket.arn}/*" 50 | }, 51 | { 52 | Effect = "Allow", 53 | Principal = { 54 | Service = "lambda.amazonaws.com" 55 | }, 56 | Action = [ 57 | "s3:GetObject", 58 | "s3:ListBucket" 59 | ], 60 | Resource = [ 61 | "${aws_s3_bucket.emails_bucket.arn}", 62 | "${aws_s3_bucket.emails_bucket.arn}/*" 63 | ] 64 | } 65 | ] 66 | }) 67 | } 68 | 69 | # IAM Role for Lambda 70 | resource "aws_iam_role" "lambda_role" { 71 | name = "lambda_ses_dns_role-${var.environment}" 72 | 73 | assume_role_policy = jsonencode({ 74 | Version = "2012-10-17", 75 | 
Statement = [ 76 | { 77 | Effect = "Allow", 78 | Principal = { 79 | Service = "lambda.amazonaws.com" 80 | }, 81 | Action = "sts:AssumeRole" 82 | } 83 | ] 84 | }) 85 | } 86 | 87 | resource "aws_iam_policy" "lambda_policy" { 88 | name = "lambda_ses_policy-${var.environment}" 89 | description = "Policy to allow Lambda to access SES, S3, and CloudWatch" 90 | 91 | policy = jsonencode({ 92 | Version = "2012-10-17", 93 | Statement = [ 94 | { 95 | Effect = "Allow", 96 | Action = [ 97 | "ses:VerifyDomainIdentity", 98 | "ses:VerifyDomainDkim", 99 | "ses:GetIdentityVerificationAttributes", # Add this action 100 | "ses:GetIdentityDkimAttributes" 101 | ], 102 | Resource = "*" 103 | }, 104 | { 105 | Effect = "Allow", 106 | Action = [ 107 | "logs:CreateLogGroup", 108 | "logs:CreateLogStream", 109 | "logs:PutLogEvents" 110 | ], 111 | Resource = "*" 112 | } 113 | ] 114 | }) 115 | } 116 | 117 | resource "aws_iam_role_policy_attachment" "lambda_role_attachment" { 118 | role = aws_iam_role.lambda_role.name 119 | policy_arn = aws_iam_policy.lambda_policy.arn 120 | } 121 | 122 | # Create the IAM Role for the Lambda Function 123 | resource "aws_iam_role" "verify_domain_lambda_role" { 124 | name = "verify-domain-lambda-role-${var.environment}" 125 | 126 | assume_role_policy = jsonencode({ 127 | Version = "2012-10-17", 128 | Statement = [ 129 | { 130 | Effect = "Allow", 131 | Principal = { 132 | Service = "lambda.amazonaws.com" 133 | }, 134 | Action = "sts:AssumeRole" 135 | } 136 | ] 137 | }) 138 | } 139 | 140 | # Attach a Policy to the Lambda Role 141 | resource "aws_iam_policy" "verify_domain_lambda_policy" { 142 | name = "verify-domain-lambda-policy-${var.environment}" 143 | 144 | policy = jsonencode({ 145 | Version = "2012-10-17", 146 | Statement = [ 147 | { 148 | Effect = "Allow", 149 | Action = [ 150 | "ses:VerifyDomainIdentity", 151 | "ses:VerifyDomainDkim", 152 | "ses:GetIdentityVerificationAttributes", 153 | "logs:CreateLogGroup", 154 | "logs:CreateLogStream", 155 | "logs:PutLogEvents", 156 | "s3:PutObject", 157 | "s3:DeleteObject", 158 | "ses:DescribeReceiptRule", 159 | "ses:UpdateReceiptRule", 160 | "ses:CreateReceiptRule", 161 | "ses:DeleteReceiptRule" 162 | ], 163 | Resource = "*" 164 | }, 165 | # Existing S3 Permissions 166 | { 167 | Effect = "Allow" 168 | Action = [ 169 | "s3:ListBucket", 170 | "s3:GetObject", 171 | "s3:PutObject", 172 | "s3:DeleteObject" 173 | ] 174 | Resource = [ 175 | "${aws_s3_bucket.attachments_bucket.arn}", 176 | "${aws_s3_bucket.attachments_bucket.arn}/*" 177 | ] 178 | }, 179 | # CloudWatch Logs Permissions 180 | { 181 | Effect = "Allow" 182 | Action = [ 183 | "logs:CreateLogGroup", 184 | "logs:CreateLogStream", 185 | "logs:PutLogEvents" 186 | ] 187 | Resource = "arn:aws:logs:*:*:*" 188 | }, 189 | # SES Permissions 190 | { 191 | Effect = "Allow" 192 | Action = [ 193 | "ses:VerifyDomainIdentity", 194 | "ses:GetIdentityVerificationAttributes", 195 | "ses:DeleteIdentity", 196 | "ses:GetIdentityDkimAttributes" 197 | ] 198 | Resource = "*" 199 | }, 200 | # SMTP User Creation Permissions 201 | { 202 | Effect = "Allow" 203 | Action = [ 204 | "iam:CreateUser", 205 | "iam:PutUserPolicy", 206 | "iam:CreateAccessKey", 207 | "ses:ListIdentities", 208 | "ses:GetIdentityVerificationAttributes", 209 | "iam:ListAccessKeys" 210 | ] 211 | Resource = [ 212 | "arn:aws:iam::${var.aws_account_id}:user/smtp-*" 213 | ] 214 | }, 215 | # Allow IAM policy attachment 216 | { 217 | Effect = "Allow" 218 | Action = [ 219 | "iam:AttachUserPolicy", 220 | "iam:PutUserPolicy" 221 | ] 222 | Resource = 
"arn:aws:iam::${var.aws_account_id}:user/smtp-*" 223 | }, 224 | # Allow IAM user management 225 | { 226 | Effect = "Allow" 227 | Action = [ 228 | "iam:PassRole" 229 | ] 230 | Resource = "arn:aws:iam::${var.aws_account_id}:role/verify-domain-lambda-role" 231 | }, 232 | # Allow IAM GetUser permission 233 | { 234 | Effect = "Allow" 235 | Action = [ 236 | "iam:GetUser" 237 | ] 238 | Resource = "*" 239 | } 240 | ] 241 | }) 242 | } 243 | 244 | resource "aws_iam_role_policy_attachment" "verify_domain_lambda_role_attachment" { 245 | role = aws_iam_role.verify_domain_lambda_role.name 246 | policy_arn = aws_iam_policy.verify_domain_lambda_policy.arn 247 | } 248 | 249 | # Lambda Function 250 | locals { 251 | verify_lambda_hash = filebase64sha256(var.verify_lambda_file_path) 252 | verify_domain_lambda_name = "verify-domain-lambda-${var.environment}" 253 | } 254 | 255 | # CloudWatch Log Group for verify domain lambda 256 | resource "aws_cloudwatch_log_group" "verify_domain_lambda_logs" { 257 | name = "/aws/lambda/${local.verify_domain_lambda_name}" 258 | retention_in_days = 14 259 | } 260 | 261 | resource "aws_lambda_function" "verify_domain_lambda" { 262 | depends_on = [aws_cloudwatch_log_group.verify_domain_lambda_logs] 263 | function_name = local.verify_domain_lambda_name 264 | filename = var.verify_lambda_file_path 265 | handler = "lambda_function.lambda_handler" 266 | runtime = "python3.12" 267 | role = aws_iam_role.verify_domain_lambda_role.arn 268 | 269 | source_code_hash = local.verify_lambda_hash 270 | 271 | environment { 272 | variables = { 273 | MONGODB_URI = var.mongodb_uri 274 | ENVIRONMENT = var.environment 275 | CODE_VERSION = local.verify_lambda_hash 276 | RECEIPT_RULE_SET = aws_ses_receipt_rule_set.env_rule_set.rule_set_name 277 | } 278 | } 279 | 280 | timeout = 20 281 | 282 | # Prevent Lambda replacement unless specific attributes change 283 | lifecycle { 284 | ignore_changes = [ 285 | # Ignore changes to tags and other metadata that don't affect functionality 286 | tags, 287 | description 288 | ] 289 | } 290 | } 291 | 292 | # API Gateway 293 | resource "aws_apigatewayv2_api" "lambda_api" { 294 | name = "EmailParserAPI-${var.environment}" 295 | protocol_type = "HTTP" 296 | 297 | lifecycle { 298 | prevent_destroy = false 299 | } 300 | } 301 | 302 | # API Gateway Integration with Lambda 303 | resource "aws_apigatewayv2_integration" "lambda_integration" { 304 | api_id = aws_apigatewayv2_api.lambda_api.id 305 | integration_type = "AWS_PROXY" 306 | integration_uri = aws_lambda_function.verify_domain_lambda.arn 307 | payload_format_version = "2.0" 308 | } 309 | 310 | 311 | # API Gateway Stage (per environment/branch) 312 | resource "aws_apigatewayv2_stage" "env_stage" { 313 | api_id = aws_apigatewayv2_api.lambda_api.id 314 | name = "prod" 315 | auto_deploy = true 316 | } 317 | 318 | ########### 319 | # API Gateway Integration with Lambda 320 | resource "aws_apigatewayv2_integration" "verify_lambda_integration" { 321 | api_id = aws_apigatewayv2_api.lambda_api.id 322 | integration_type = "AWS_PROXY" 323 | integration_uri = aws_lambda_function.verify_domain_lambda.arn 324 | payload_format_version = "2.0" 325 | } 326 | 327 | # API Gateway Route 328 | resource "aws_apigatewayv2_route" "verify_lambda_route" { 329 | api_id = aws_apigatewayv2_api.lambda_api.id 330 | route_key = "POST /v1/domain/{domain}" 331 | target = "integrations/${aws_apigatewayv2_integration. 
verify_lambda_integration.id}" 332 | } 333 | 334 | # API Gateway DELETE Route for domain removal 335 | resource "aws_apigatewayv2_route" "delete_domain_route" { 336 | api_id = aws_apigatewayv2_api.lambda_api.id 337 | route_key = "DELETE /v1/domain/{domain}" 338 | target = "integrations/${aws_apigatewayv2_integration.verify_lambda_integration.id}" 339 | } 340 | 341 | # API Gateway PUT Route for updating domain data 342 | resource "aws_apigatewayv2_route" "update_domain_route" { 343 | api_id = aws_apigatewayv2_api.lambda_api.id 344 | route_key = "PUT /v1/domain/{domain}" 345 | target = "integrations/${aws_apigatewayv2_integration.verify_lambda_integration.id}" 346 | } 347 | 348 | # API Gateway GET Route for retrieving domain status and data 349 | resource "aws_apigatewayv2_route" "get_domain_route" { 350 | api_id = aws_apigatewayv2_api.lambda_api.id 351 | route_key = "GET /v1/domain/{domain}" 352 | target = "integrations/${aws_apigatewayv2_integration.verify_lambda_integration.id}" 353 | } 354 | 355 | # Lambda Permission for API Gateway 356 | resource "aws_lambda_permission" "verify_api_gateway_permission" { 357 | statement_id = "AllowAPIGatewayInvoke-${var.environment}" 358 | action = "lambda:InvokeFunction" 359 | function_name = aws_lambda_function.verify_domain_lambda.function_name 360 | principal = "apigateway.amazonaws.com" 361 | source_arn = "${aws_apigatewayv2_api.lambda_api.execution_arn}/prod/*" 362 | } 363 | 364 | resource "aws_s3_bucket" "attachments_bucket" { 365 | bucket = "${var.attachments_bucket_name}-${var.environment}-${var.aws_account_id}" 366 | force_destroy = true 367 | } 368 | 369 | # Lifecycle policy to expire attachments after 14 days 370 | resource "aws_s3_bucket_lifecycle_configuration" "attachments_bucket_lifecycle" { 371 | bucket = aws_s3_bucket.attachments_bucket.id 372 | 373 | rule { 374 | id = "expire-attachments-after-14-days" 375 | status = "Enabled" 376 | 377 | filter { 378 | prefix = "" 379 | } 380 | 381 | expiration { 382 | days = 14 383 | } 384 | } 385 | } 386 | 387 | # Configure public access block to allow public policies 388 | resource "aws_s3_bucket_public_access_block" "public_access_block" { 389 | bucket = aws_s3_bucket.attachments_bucket.id 390 | block_public_acls = false 391 | block_public_policy = false # Allow bucket policies to enable public access 392 | ignore_public_acls = false 393 | restrict_public_buckets = false 394 | } 395 | # Add a bucket policy to allow public read access 396 | resource "aws_s3_bucket_policy" "public_access_policy" { 397 | bucket = aws_s3_bucket.attachments_bucket.id 398 | 399 | policy = jsonencode({ 400 | Version = "2012-10-17" 401 | Statement = [ 402 | { 403 | Sid = "PublicReadGetObject" 404 | Effect = "Allow" 405 | Principal = "*" 406 | Action = "s3:GetObject" 407 | Resource = "${aws_s3_bucket.attachments_bucket.arn}/*" 408 | } 409 | ] 410 | }) 411 | 412 | depends_on = [aws_s3_bucket_public_access_block.public_access_block] 413 | } 414 | 415 | ####3 parse email lambda 416 | locals { 417 | # Calculate hash once to ensure consistency and avoid unnecessary Lambda redeployments 418 | parser_lambda_hash = filebase64sha256(var.parser_lambda_file_path) 419 | parsing_lambda_name = "email-parser-lambda-${var.environment}" 420 | } 421 | 422 | # CloudWatch Log Group for parsing lambda 423 | resource "aws_cloudwatch_log_group" "parsing_lambda_logs" { 424 | name = "/aws/lambda/${local.parsing_lambda_name}" 425 | retention_in_days = 14 426 | } 427 | 428 | resource "aws_lambda_function" "parsing_lambda" { 429 | depends_on = 
[aws_cloudwatch_log_group.parsing_lambda_logs] 430 | function_name = local.parsing_lambda_name 431 | role = aws_iam_role.lambda_exec.arn 432 | handler = "lambda_function.lambda_handler" 433 | runtime = "python3.12" 434 | filename = var.parser_lambda_file_path # Directly reference the ZIP file 435 | 436 | # Use the pre-calculated hash from locals 437 | source_code_hash = local.parser_lambda_hash 438 | timeout = 90 439 | memory_size = 256 440 | 441 | environment { 442 | variables = { 443 | EMAILS_BUCKET_NAME = aws_s3_bucket.emails_bucket.id 444 | ATTACHMENTS_BUCKET_NAME = aws_s3_bucket.attachments_bucket.id 445 | MONGODB_URI = var.mongodb_uri 446 | ENVIRONMENT = var.environment 447 | # Add a marker to track deployments - only changes when code actually changes 448 | CODE_VERSION = local.parser_lambda_hash 449 | GEMINI_API_KEY = var.gemini_api_key 450 | GEMINI_MODEL = var.gemini_model 451 | DAYTONA_API_KEY = var.daytona_api_key 452 | } 453 | } 454 | 455 | # Prevent Lambda replacement unless specific attributes change 456 | lifecycle { 457 | ignore_changes = [ 458 | # Ignore changes to tags and other metadata that don't affect functionality 459 | tags, 460 | description 461 | ] 462 | } 463 | } 464 | 465 | resource "aws_iam_role" "lambda_exec" { 466 | name = "lambda_exec_role-${var.environment}" 467 | 468 | assume_role_policy = jsonencode({ 469 | Version = "2012-10-17" 470 | Statement = [ 471 | { 472 | Effect = "Allow" 473 | Principal = { 474 | Service = "lambda.amazonaws.com" 475 | } 476 | Action = "sts:AssumeRole" 477 | } 478 | ] 479 | }) 480 | } 481 | resource "aws_iam_role_policy" "lambda_ses_smtp_policy" { 482 | name = "lambda_ses_smtp_policy-${var.environment}" 483 | role = aws_iam_role.lambda_exec.name 484 | 485 | policy = jsonencode({ 486 | Version = "2012-10-17" 487 | Statement = [ 488 | # Existing S3 Permissions 489 | { 490 | Effect = "Allow" 491 | Action = [ 492 | "s3:ListBucket", 493 | "s3:GetObject", 494 | "s3:PutObject", 495 | "s3:DeleteObject" 496 | ] 497 | Resource = [ 498 | "${aws_s3_bucket.attachments_bucket.arn}", 499 | "${aws_s3_bucket.attachments_bucket.arn}/*" 500 | ] 501 | }, 502 | # CloudWatch Logs Permissions 503 | { 504 | Effect = "Allow" 505 | Action = [ 506 | "logs:CreateLogGroup", 507 | "logs:CreateLogStream", 508 | "logs:PutLogEvents" 509 | ] 510 | Resource = "arn:aws:logs:*:*:*" 511 | }, 512 | # SES Permissions 513 | { 514 | Effect = "Allow" 515 | Action = [ 516 | "ses:VerifyDomainIdentity", 517 | "ses:GetIdentityVerificationAttributes", 518 | "ses:DeleteIdentity", 519 | "ses:GetIdentityDkimAttributes" 520 | ] 521 | Resource = "*" 522 | }, 523 | # SMTP User Creation Permissions 524 | { 525 | Effect = "Allow" 526 | Action = [ 527 | "iam:CreateUser", 528 | "iam:PutUserPolicy", 529 | "iam:CreateAccessKey", 530 | "ses:ListIdentities", 531 | "ses:GetIdentityVerificationAttributes", 532 | "iam:ListAccessKeys" 533 | ] 534 | Resource = [ 535 | "arn:aws:iam::${var.aws_account_id}:user/smtp-*" 536 | ] 537 | }, 538 | # Allow IAM policy attachment 539 | { 540 | Effect = "Allow" 541 | Action = [ 542 | "iam:AttachUserPolicy", 543 | "iam:PutUserPolicy" 544 | ] 545 | Resource = "arn:aws:iam::${var.aws_account_id}:user/smtp-*" 546 | }, 547 | # Allow IAM user management 548 | { 549 | Effect = "Allow" 550 | Action = [ 551 | "iam:PassRole" 552 | ] 553 | Resource = "arn:aws:iam::${var.aws_account_id}:role/verify-domain-lambda-role" 554 | }, 555 | # Allow IAM GetUser permission 556 | { 557 | Effect = "Allow" 558 | Action = [ 559 | "iam:GetUser" 560 | ] 561 | Resource = "*" 562 | } 563 
| ] 564 | }) 565 | } 566 | 567 | resource "aws_iam_role_policy_attachment" "lambda_policy_attachment" { 568 | role = aws_iam_role.lambda_exec.name 569 | policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" 570 | } 571 | 572 | resource "aws_s3_bucket_notification" "bucket_notification" { 573 | bucket = aws_s3_bucket.emails_bucket.id 574 | 575 | lambda_function { 576 | lambda_function_arn = aws_lambda_function.parsing_lambda.arn 577 | events = ["s3:ObjectCreated:*"] 578 | } 579 | 580 | depends_on = [aws_lambda_permission.allow_s3_to_invoke] 581 | } 582 | 583 | resource "aws_lambda_permission" "allow_s3_to_invoke" { 584 | statement_id = "AllowS3Invoke-${var.environment}" 585 | action = "lambda:InvokeFunction" 586 | function_name = aws_lambda_function.parsing_lambda.function_name 587 | principal = "s3.amazonaws.com" 588 | source_arn = aws_s3_bucket.emails_bucket.arn 589 | } 590 | 591 | # Add this new resource to attach S3 read permissions to the role 592 | resource "aws_iam_role_policy" "lambda_s3_policy" { 593 | name = "lambda_s3_policy-${var.environment}" 594 | role = aws_iam_role.lambda_exec.id 595 | policy = jsonencode({ 596 | Version = "2012-10-17", 597 | Statement = [ 598 | { 599 | Action = [ 600 | "s3:GetObject", 601 | "s3:ListBucket" 602 | ], 603 | Effect = "Allow", 604 | Resource = [ 605 | "${aws_s3_bucket.emails_bucket.arn}", 606 | "${aws_s3_bucket.emails_bucket.arn}/*" 607 | ] 608 | } 609 | ] 610 | }) 611 | } 612 | 613 | # SES Receipt Rule - catch emails for this environment and store in per-environment S3 bucket 614 | # In multi-account setup, each environment has its own rule set 615 | resource "aws_ses_receipt_rule" "env_catch_rule" { 616 | rule_set_name = aws_ses_receipt_rule_set.env_rule_set.rule_set_name 617 | name = "catch-emails-${var.environment}" 618 | enabled = true 619 | 620 | # Match all recipients (empty list means all verified domains) 621 | # Catch-all rule automatically routes emails to all verified domains 622 | recipients = [] 623 | 624 | # Actions for the receipt rule 625 | s3_action { 626 | bucket_name = aws_s3_bucket.emails_bucket.id 627 | position = 1 628 | } 629 | 630 | # Enable email scanning for spam/viruses 631 | scan_enabled = true 632 | 633 | depends_on = [ 634 | aws_s3_bucket_policy.email_storage_policy, 635 | aws_s3_bucket.emails_bucket, 636 | aws_ses_active_receipt_rule_set.activate_rule_set 637 | ] 638 | } 639 | -------------------------------------------------------------------------------- /lambda/parser/lambda_function.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # Copyright (c) 2023 [Your Name or Organization] 3 | # See LICENSE file for details 4 | 5 | import json 6 | import boto3 7 | import email 8 | from email import policy 9 | from email.parser import BytesParser 10 | import requests # For HTTP POST requests 11 | import uuid 12 | import os 13 | import re 14 | from datetime import datetime 15 | import pystache # Python implementation of Mustache.js 16 | import ipaddress 17 | from urllib.parse import urlparse 18 | from pymongo import MongoClient 19 | from pymongo.errors import PyMongoError 20 | try: 21 | from ai_parser import AIParser 22 | except ImportError: 23 | print("Could not import AIParser. 
AI features disabled.") 24 | AIParser = None 25 | 26 | # Initialize clients 27 | s3_client = boto3.client('s3') 28 | 29 | # MongoDB connection 30 | mongodb_uri = os.environ.get('MONGODB_URI', '') 31 | environment = os.environ.get('ENVIRONMENT', 'main') 32 | mongo_client = None 33 | db = None 34 | 35 | if mongodb_uri: 36 | try: 37 | mongo_client = MongoClient(mongodb_uri) 38 | # Use environment-specific database name 39 | db_name = f"email_webhooks_{environment.replace('/', '_')}" # Replace / in branch names 40 | db = mongo_client[db_name] 41 | print(f"MongoDB connection initialized successfully, using database: {db.name}") 42 | except Exception as e: 43 | print(f"Failed to initialize MongoDB connection: {e}") 44 | 45 | attachments_bucket_name = os.environ.get('ATTACHMENTS_BUCKET_NAME', 'email-attachments-bucket-3rfrd') 46 | 47 | 48 | def validate_webhook_url(url): 49 | """ 50 | Strictly validate the webhook URL to prevent SSRF attacks. 51 | - Only allow http(s) schemes. 52 | - Block localhost and private/internal IPs. 53 | """ 54 | try: 55 | parsed = urlparse(url) 56 | if parsed.scheme not in ("http", "https"): 57 | return False 58 | host = parsed.hostname 59 | if not host: 60 | return False 61 | # Block localhost 62 | if host in ("localhost", "127.0.0.1", "0.0.0.0"): 63 | return False 64 | # Block internal IPs 65 | try: 66 | ip = ipaddress.ip_address(host) 67 | if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved or ip.is_multicast: 68 | return False 69 | except ValueError: 70 | # Not an IP, might be a domain 71 | pass 72 | # Optionally, enforce HTTPS only: 73 | # if parsed.scheme != "https": 74 | # return False 75 | return True 76 | except Exception: 77 | return False 78 | 79 | 80 | def process_template(template, data): 81 | """ 82 | Process a mustache-style template string using pystache (Mustache.js for Python). 83 | 84 | Args: 85 | template (str): Template string with {{variable}} placeholders 86 | data (dict): Dictionary containing values to replace placeholders 87 | 88 | Returns: 89 | str: Processed string with variables replaced by their values 90 | """ 91 | if not template or "{{" not in template: 92 | return template 93 | 94 | return pystache.render(template, data) 95 | 96 | def extract_email_body(msg): 97 | """ 98 | Recursively extract body content from an email message. 99 | Handles nested multipart structures and prefers the most complete parts. 
100 | 101 | Returns: 102 | tuple: (body_text, html_body) 103 | """ 104 | plain_candidates = [] 105 | html_candidates = [] 106 | 107 | def walk(part): 108 | try: 109 | if part.is_multipart(): 110 | for sub in part.iter_parts(): 111 | walk(sub) 112 | return 113 | 114 | content_type = part.get_content_type() 115 | content_disposition = str(part.get_content_disposition() or "").lower() 116 | 117 | # Skip attachments 118 | if "attachment" in content_disposition: 119 | return 120 | 121 | payload = part.get_payload(decode=True) 122 | if not payload: 123 | return 124 | 125 | charset = part.get_content_charset() or "utf-8" 126 | text = payload.decode(charset, errors="replace").strip() 127 | 128 | if content_type == "text/plain": 129 | plain_candidates.append(text) 130 | print(f"Found text/plain candidate: {len(text)} chars") 131 | elif content_type == "text/html": 132 | html_candidates.append(text) 133 | print(f"Found text/html candidate: {len(text)} chars") 134 | except Exception as e: 135 | print(f"Error walking part: {e}") 136 | 137 | walk(msg) 138 | 139 | body_text = "" 140 | html_body = None 141 | 142 | if plain_candidates: 143 | # Choose the longest non-empty plain text candidate 144 | body_text = max(plain_candidates, key=len) 145 | if html_candidates: 146 | # Choose the longest non-empty html candidate 147 | html_body = max(html_candidates, key=len) 148 | 149 | # Use HTML as fallback if no plain text 150 | if not body_text and html_body: 151 | body_text = html_body 152 | 153 | return body_text, html_body 154 | 155 | def save_email_to_mongodb(email_data, webhook_url=None, webhook_response=None, webhook_status_code=None): 156 | """ 157 | Save parsed email to MongoDB database if MONGODB_URI environment variable exists 158 | 159 | Args: 160 | email_data (dict): Dictionary containing: 161 | - domain: The domain part of the recipient address 162 | - local_part: The local part of the recipient address 163 | - email_id: Full email address identifier 164 | - attachments: Array of attachment metadata 165 | - All other parsed email fields 166 | webhook_url (str, optional): The webhook URL to send the email data to 167 | webhook_response (str, optional): The response from the webhook 168 | webhook_status_code (int, optional): The status code from the webhook 169 | """ 170 | if db is None or not mongodb_uri: 171 | print("MongoDB connection not available, skipping database save") 172 | return 173 | 174 | try: 175 | # Prepare document for MongoDB 176 | email_document = { 177 | "_id": f"email_{str(uuid.uuid4())}", 178 | "email_id": email_data['email_id'], 179 | "domain": email_data['domain'], 180 | "local_part": email_data['local_part'], 181 | "sender": email_data.get('sender'), 182 | "recipient": email_data.get('recipient'), 183 | "subject": email_data.get('subject'), 184 | "date": email_data.get('date'), 185 | "message_id": email_data.get('message_id'), 186 | "cc": email_data.get('cc'), 187 | "bcc": email_data.get('bcc'), 188 | "reply_to": email_data.get('reply_to'), 189 | "references": email_data.get('references'), 190 | "in_reply_to": email_data.get('in_reply_to'), 191 | "importance": email_data.get('importance'), 192 | "custom_headers": email_data.get('custom_headers', {}), 193 | "body": email_data.get('body'), 194 | "html_body": email_data.get('html_body'), 195 | "attachments": email_data.get('attachments', []), 196 | "email_data": email_data, # Store complete email data 197 | "is_webhook_sent": True, 198 | "webhook_url": webhook_url, 199 | "webhook_response": webhook_response, 200 | 
"webhook_status_code": webhook_status_code, 201 | "created_at": datetime.utcnow() 202 | } 203 | 204 | # Insert into parsed_emails collection 205 | collection = db['parsed_emails'] 206 | result = collection.insert_one(email_document) 207 | 208 | print(f"Email {email_data['email_id']} saved to MongoDB successfully with ID: {result.inserted_id}") 209 | 210 | except PyMongoError as e: 211 | print(f"Failed to save email to MongoDB: {e}") 212 | except Exception as e: 213 | print(f"Unexpected error saving email to MongoDB: {e}") 214 | 215 | def lambda_handler(event, context): 216 | # Parse the S3 event 217 | for record in event['Records']: 218 | email_bucket_name = record['s3']['bucket']['name'] 219 | email_object_key = record['s3']['object']['key'] 220 | 221 | # Get the email object from S3 222 | response = s3_client.get_object(Bucket=email_bucket_name, Key=email_object_key) 223 | raw_email = response['Body'].read() 224 | 225 | print(f"Received email from {email_bucket_name}/{email_object_key}") 226 | print(f"Email content: {raw_email}") 227 | # Parse the email 228 | msg = BytesParser(policy=policy.default).parsebytes(raw_email) 229 | 230 | # Extract email headers first 231 | sender = msg.get('From', '') 232 | recipient = msg.get('To', '') 233 | 234 | # Retrieve webhook URL for the domain from the S3 bucket 235 | # Extract domain from recipient email using regex 236 | pattern = r"by ([\w\.-]+) with SMTP id ([\w\d]+).*?for ([\w@\.-]+);" 237 | received_header = msg.get('Received', '') 238 | match = re.search(pattern, received_header, re.DOTALL) if received_header else None 239 | kv_key = match.group(3).split('@')[1] if match else (recipient.split('@')[-1].strip('>') if recipient else '') 240 | 241 | received_from = match.group(3) if match else None 242 | subject = msg.get('Subject', '') 243 | date = msg.get('Date', '') # Extract email date/timestamp 244 | message_id = msg.get('Message-ID', '') # Extract unique message ID 245 | cc = msg.get('Cc', '') # Extract CC recipients 246 | bcc = msg.get('Bcc', '') # Extract BCC recipients 247 | reply_to = msg.get('Reply-To', '') # Extract Reply-To header 248 | references = msg.get('References', '') # Extract message references 249 | in_reply_to = msg.get('In-Reply-To', '') # Extract In-Reply-To header 250 | importance = msg.get('Importance', '') # Extract importance/priority 251 | 252 | # Extract custom headers if needed 253 | custom_headers = {} 254 | for header in msg.keys(): 255 | if header.lower().startswith('x-'): # Most custom headers start with X- 256 | custom_headers[header] = msg[header] 257 | 258 | body = "" 259 | html_body = None # Store HTML version separately 260 | attachments = [] 261 | 262 | print(f"Sender: {sender}") 263 | print(f"Recipient: {recipient}") 264 | print(f"Subject: {subject}") 265 | print(f"Webhook key: {kv_key}") 266 | print(f"Email is multipart: {msg.is_multipart()}") 267 | print(f"Email content type: {msg.get_content_type()}") 268 | 269 | # Debug: Print all parts structure for multipart emails 270 | if msg.is_multipart(): 271 | print("=== Email Parts Structure ===") 272 | for i, part in enumerate(msg.iter_parts()): 273 | print(f"Part {i}:") 274 | print(f" Content-Type: {part.get_content_type()}") 275 | print(f" Content-Disposition: {part.get_content_disposition()}") 276 | print(f" Has Content-ID: {part.get('Content-ID') is not None}") 277 | print(f" Has filename: {part.get_filename()}") 278 | payload = part.get_payload(decode=True) 279 | print(f" Payload length: {len(payload) if payload else 0} bytes") 280 | else: 281 | 
payload = msg.get_payload(decode=True) 282 | print(f"Non-multipart payload length: {len(payload) if payload else 0} bytes") 283 | 284 | # Retrieve webhook URL from MongoDB 285 | webhook_url = None 286 | ai_prompt = None 287 | try: 288 | if db is not None and mongodb_uri: 289 | # Query MongoDB for domain configuration 290 | domain_configs = db['domain_configs'] 291 | domain_config = domain_configs.find_one({"domain": kv_key}) 292 | 293 | if domain_config and 'webhook' in domain_config: 294 | webhook_url = domain_config['webhook'] 295 | ai_prompt = domain_config.get('ai_analysis') 296 | # SECURITY: Validate webhook URL strictly 297 | if not validate_webhook_url(webhook_url): 298 | print(f"Blocked unsafe webhook URL: {webhook_url}") 299 | return { 300 | 'statusCode': 400, 301 | 'body': "Invalid or unsafe webhook URL." 302 | } 303 | else: 304 | print(f"No webhook configuration found for domain {kv_key}") 305 | return { 306 | 'statusCode': 404, 307 | 'body': f"Webhook configuration for domain {kv_key} not found." 308 | } 309 | else: 310 | print("MongoDB connection not available") 311 | return { 312 | 'statusCode': 500, 313 | 'body': "Database connection not available." 314 | } 315 | except PyMongoError as e: 316 | print(f"MongoDB error retrieving webhook for domain {kv_key}: {e}") 317 | return { 318 | 'statusCode': 500, 319 | 'body': "Error retrieving webhook configuration." 320 | } 321 | except Exception as e: 322 | print(f"Error retrieving webhook for domain {kv_key}: {e}") 323 | return { 324 | 'statusCode': 500, 325 | 'body': "Webhook for domain not found or error occurred." 326 | } 327 | 328 | # Extract email body using dedicated function 329 | print("=== Extracting Email Body ===") 330 | body, html_body = extract_email_body(msg) 331 | print(f"After extraction - body: '{body[:100]}...' (length: {len(body)})") 332 | print(f"After extraction - html_body: {html_body[:100] if html_body else 'None'}... 
(length: {len(html_body) if html_body else 0})") 333 | 334 | # Extract attachments 335 | print("=== Extracting Attachments ===") 336 | if msg.is_multipart(): 337 | for part in msg.iter_parts(): 338 | content_type = part.get_content_type() 339 | content_disposition = str(part.get_content_disposition() or "").lower() 340 | 341 | # Check if this is truly an inline image/attachment 342 | has_content_id = part.get("Content-ID") is not None 343 | is_inline_image = has_content_id and content_type not in ("text/plain", "text/html") 344 | is_attachment = "attachment" in content_disposition 345 | has_filename = part.get_filename() is not None 346 | 347 | # Skip text parts (already processed for body) 348 | if content_type in ("text/plain", "text/html") and not is_attachment: 349 | continue 350 | 351 | # Process attachments and inline images 352 | if is_attachment or is_inline_image or has_filename: 353 | try: 354 | attachment_data = part.get_payload(decode=True) 355 | if not attachment_data: 356 | continue 357 | 358 | attachment_name = part.get_filename() 359 | content_id = part.get("Content-ID", "").strip('<>') 360 | 361 | if is_inline_image and not attachment_name: 362 | # Generate a name for inline images without filenames 363 | extension = content_type.split('/')[-1] if '/' in content_type else 'bin' 364 | attachment_name = f"inline_{content_id or uuid.uuid4().hex}.{extension}" 365 | 366 | if not attachment_name: 367 | attachment_name = f"attachment_{uuid.uuid4().hex}.bin" 368 | 369 | attachment_key = f"{uuid.uuid4().hex}/{attachment_name}" 370 | s3_client.put_object( 371 | Bucket=attachments_bucket_name, 372 | Key=attachment_key, 373 | Body=attachment_data, 374 | ContentType=content_type 375 | ) 376 | 377 | s3_url = f"https://{attachments_bucket_name}.s3.amazonaws.com/{attachment_key}" 378 | print(f"Processed {'inline image' if is_inline_image else 'attachment'}: {attachment_name}, URL: {s3_url}") 379 | 380 | attachments.append({ 381 | "filename": attachment_name, 382 | "public_url": s3_url, 383 | "content_type": content_type, 384 | "inline": is_inline_image, 385 | "content_id": content_id if is_inline_image else None 386 | }) 387 | except Exception as e: 388 | print(f"Error processing attachment: {e}") 389 | 390 | # Replace cid: references in the HTML body with the public URLs 391 | if html_body: 392 | for attachment in attachments: 393 | if attachment.get("inline") and attachment.get("content_id"): 394 | html_body = html_body.replace(f"cid:{attachment['content_id']}", attachment['public_url']) 395 | 396 | # Log final body extraction results 397 | print(f"Final body length: {len(body)} characters") 398 | print(f"Final html_body length: {len(html_body) if html_body else 0} characters") 399 | print(f"Number of attachments: {len(attachments)}") 400 | 401 | # Construct payload 402 | print("=== Constructing Payload ===") 403 | # Determine local_part and domain robustly 404 | domain_part = None 405 | local_part = None 406 | try: 407 | if received_from and '@' in received_from: 408 | local_part, domain_part = received_from.split('@', 1) 409 | else: 410 | from email.utils import getaddresses 411 | addresses = getaddresses([recipient]) if recipient else [] 412 | email_addr = addresses[0][1] if addresses else '' 413 | if email_addr and '@' in email_addr: 414 | local_part, domain_part = email_addr.split('@', 1) 415 | else: 416 | # Fallbacks 417 | domain_part = kv_key or '' 418 | local_part = '' 419 | except Exception as e: 420 | print(f"Error determining local/domain parts: {e}") 421 | domain_part = 
kv_key or '' 422 | local_part = '' 423 | 424 | parsed_email = { 425 | "email_id": email_object_key, 426 | "domain": domain_part, 427 | "local_part": local_part, 428 | "sender": sender, 429 | "recipient": recipient, 430 | "subject": subject, 431 | "date": date, 432 | "message_id": message_id, 433 | "cc": cc, 434 | "bcc": bcc, 435 | "reply_to": reply_to, 436 | "references": references, 437 | "in_reply_to": in_reply_to, 438 | "importance": importance, 439 | "custom_headers": custom_headers, 440 | "body": body, 441 | "html_body": html_body, # Include HTML body if available (already None if empty) 442 | "attachments": attachments 443 | } 444 | 445 | # Integrate AI Parser 446 | if AIParser and ai_prompt: 447 | try: 448 | print("=== Starting AI Parsing ===") 449 | ai_parser = AIParser() 450 | ai_result = ai_parser.parse_email(parsed_email, prompt=ai_prompt) 451 | parsed_email['ai_analysis'] = ai_result 452 | print("AI Parsing completed.") 453 | except Exception as e: 454 | print(f"Error during AI parsing integration: {e}") 455 | parsed_email['ai_analysis'] = {"error": str(e)} 456 | elif not ai_prompt: 457 | print("Skipping AI parsing: No 'ai_analysis' prompt found in domain config.") 458 | else: 459 | print("Skipping AI parsing: AIParser not available.") 460 | 461 | # Process webhook URL templates before sending 462 | webhook_url = process_template(webhook_url, parsed_email) 463 | 464 | # SECURITY: Outbound webhook call with strict timeout and no redirects 465 | try: 466 | response = requests.post( 467 | webhook_url, 468 | json=parsed_email, 469 | timeout=5, # tighter timeout 470 | allow_redirects=False # do not follow redirects 471 | ) 472 | response.raise_for_status() 473 | print(f"Data sent to webhook {webhook_url} successfully.") 474 | # Call the updated function with successful webhook details 475 | save_email_to_mongodb( 476 | parsed_email, 477 | webhook_url=webhook_url, 478 | webhook_response=response.text, 479 | webhook_status_code=response.status_code 480 | ) 481 | except Exception as e: 482 | # SECURITY: Log error internally, but do not leak details to response 483 | print(f"Error sending data to webhook {webhook_url}: {repr(e)}") 484 | save_email_to_mongodb( 485 | parsed_email, 486 | webhook_url=webhook_url, 487 | webhook_response="Webhook error", 488 | webhook_status_code=getattr(e, 'response', None).status_code if hasattr(e, 'response') and e.response else 0 489 | ) 490 | # Continue processing, but do not expose details 491 | print("Continuing despite webhook error.") 492 | 493 | # Always return generic success, regardless of webhook outcome 494 | return { 495 | 'statusCode': 200, 496 | 'body': "Email processed successfully." 
497 | } 498 | -------------------------------------------------------------------------------- /lambda/check/lambda_function.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # Copyright (c) 2023 [Your Name or Organization] 3 | # See LICENSE file for details 4 | 5 | import requests 6 | import boto3 7 | import json 8 | import secrets 9 | import string 10 | import re 11 | import os 12 | import uuid 13 | import datetime 14 | from pymongo import MongoClient 15 | from pymongo.errors import PyMongoError, DuplicateKeyError 16 | 17 | ses_client = boto3.client('ses') 18 | iam_client = boto3.client('iam') 19 | 20 | # MongoDB connection 21 | mongodb_uri = os.environ.get('MONGODB_URI', '') 22 | environment = os.environ.get('ENVIRONMENT', 'main') 23 | s3_bucket = os.environ.get('EMAIL_BUCKET', '') 24 | mongo_client = None 25 | db = None 26 | receipt_rule_set = os.environ.get('RECEIPT_RULE_SET', 'default-rule-set') 27 | 28 | if mongodb_uri: 29 | try: 30 | mongo_client = MongoClient(mongodb_uri) 31 | # Use environment-specific database name 32 | db_name = f"email_webhooks_{environment.replace('/', '_')}" # Replace / in branch names 33 | db = mongo_client[db_name] 34 | # Create unique index on domain field 35 | db['domain_configs'].create_index("domain", unique=True) 36 | print(f"MongoDB connection initialized successfully, using database: {db.name}") 37 | except Exception as e: 38 | print(f"Failed to initialize MongoDB connection: {e}") 39 | 40 | 41 | def generate_password(): 42 | """Generate a secure password for SMTP credentials.""" 43 | alphabet = string.ascii_letters + string.digits + "!@#$%^&*()_+-=[]{}|" 44 | return ''.join(secrets.choice(alphabet) for i in range(16)) 45 | 46 | def get_existing_smtp_user(domain): 47 | """Check if SMTP user already exists for the domain.""" 48 | username = f"smtp-{domain.replace('.', '-')}" 49 | try: 50 | # Try to get the user 51 | iam_client.get_user(UserName=username) 52 | 53 | # If user exists, get their access keys 54 | response = iam_client.list_access_keys(UserName=username) 55 | 56 | if response['AccessKeyMetadata']: 57 | # Return existing access key if available 58 | access_key_id = response['AccessKeyMetadata'][0]['AccessKeyId'] 59 | 60 | return { 61 | "username": access_key_id, 62 | "smtp_server": "email-smtp.us-east-1.amazonaws.com", 63 | "smtp_port": 587, 64 | "smtp_tls": True 65 | } 66 | return None 67 | except iam_client.exceptions.NoSuchEntityException: 68 | return None 69 | 70 | def create_smtp_user(domain): 71 | """Create IAM user with SES SMTP permissions and generate SMTP credentials.""" 72 | # First check if user already exists 73 | existing_user = get_existing_smtp_user(domain) 74 | if existing_user: 75 | return existing_user 76 | 77 | # Create unique username based on domain 78 | username = f"smtp-{domain.replace('.', '-')}" 79 | 80 | try: 81 | # Create IAM user 82 | iam_client.create_user(UserName=username) 83 | 84 | # Attach SES sending policy 85 | policy_document = { 86 | "Version": "2012-10-17", 87 | "Statement": [{ 88 | "Effect": "Allow", 89 | "Action": [ 90 | "ses:SendRawEmail", 91 | "ses:SendEmail" 92 | ], 93 | "Resource": "*" 94 | }] 95 | } 96 | 97 | iam_client.put_user_policy( 98 | UserName=username, 99 | PolicyName=f"{username}-ses-policy", 100 | PolicyDocument=json.dumps(policy_document) 101 | ) 102 | 103 | # Create SMTP credentials 104 | response = iam_client.create_access_key(UserName=username) 105 | 106 | smtp_credentials = { 107 | "username": 
response['AccessKey']['AccessKeyId'], 108 | "password": response['AccessKey']['SecretAccessKey'], 109 | "smtp_server": "email-smtp.us-east-1.amazonaws.com", 110 | "smtp_port": 587, 111 | "smtp_tls": True 112 | } 113 | 114 | return smtp_credentials 115 | 116 | except Exception as e: 117 | # If there's an error, attempt to clean up the IAM user 118 | try: 119 | iam_client.delete_user(UserName=username) 120 | except: 121 | pass 122 | raise e 123 | 124 | def verify_domain(domain): 125 | """Initiate SES domain verification if not already verified.""" 126 | status = check_verification_status(domain) 127 | 128 | # Only verify if not already verified or pending 129 | if status in ['NotStarted', 'Failed']: 130 | response = ses_client.verify_domain_identity(Domain=domain) 131 | return response['VerificationToken'] 132 | 133 | # Get existing verification token 134 | response = ses_client.get_identity_verification_attributes( 135 | Identities=[domain] 136 | ) 137 | return response['VerificationAttributes'][domain].get('VerificationToken', '') 138 | 139 | def check_verification_status(domain): 140 | """Check SES domain verification status.""" 141 | response = ses_client.get_identity_verification_attributes( 142 | Identities=[domain] 143 | ) 144 | verification_status = response['VerificationAttributes'].get(domain, {}).get('VerificationStatus', 'NotStarted') 145 | return verification_status 146 | 147 | def is_valid_domain(domain): 148 | """Check if the domain has a valid format.""" 149 | # Basic domain validation pattern 150 | # Checks for valid characters, proper length, and correct format 151 | pattern = r'^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$' 152 | return bool(re.match(pattern, domain)) 153 | 154 | def is_valid_webhook(webhook): 155 | """Check if the webhook URL has a valid format.""" 156 | pattern = r'^https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+(?::\d+)?(?:/[-\w%!$&\'()*+,;=:@/~]+)*(?:\?[-\w%!$&\'()*+,;=:@/~]*)?(?:#[-\w%!$&\'()*+,;=:@/~]*)?$' 157 | return bool(re.match(pattern, webhook)) 158 | 159 | def delete_domain(domain): 160 | """Delete domain from MongoDB and SES.""" 161 | try: 162 | # Try to delete from SES 163 | try: 164 | ses_client.delete_identity( 165 | Identity=domain 166 | ) 167 | except Exception as ses_error: 168 | print(f"Error deleting domain from SES {domain}: {str(ses_error)}") 169 | # Continue with MongoDB deletion even if SES delete fails 170 | 171 | # Delete from MongoDB 172 | if db is not None and mongodb_uri: 173 | try: 174 | domain_configs = db['domain_configs'] 175 | result = domain_configs.delete_one({"domain": domain}) 176 | if result.deleted_count == 0: 177 | print(f"Domain {domain} not found in MongoDB") 178 | else: 179 | print(f"Domain {domain} deleted from MongoDB successfully") 180 | 181 | except PyMongoError as mongo_error: 182 | print(f"Error deleting domain from MongoDB {domain}: {str(mongo_error)}") 183 | raise mongo_error 184 | else: 185 | print("MongoDB connection not available") 186 | raise Exception("Database connection not available") 187 | 188 | return True 189 | except Exception as e: 190 | print(f"Error in delete_domain operation for {domain}: {str(e)}") 191 | raise e 192 | 193 | def get_dkim_tokens(domain): 194 | """Get DKIM tokens for the domain from SES.""" 195 | try: 196 | # First verify DKIM for the domain 197 | ses_client.verify_domain_dkim(Domain=domain) 198 | 199 | # Then get the DKIM tokens 200 | response = ses_client.get_identity_dkim_attributes( 201 | Identities=[domain] 202 | ) 203 | if domain in 
response['DkimAttributes']: 204 | return response['DkimAttributes'][domain]['DkimTokens'] 205 | except Exception as e: 206 | print(f"Error getting DKIM tokens: {str(e)}") 207 | return [] 208 | 209 | def get_public_key(domain): 210 | """Get or generate public key for the domain.""" 211 | try: 212 | response = ses_client.get_identity_mail_from_domain_attributes( 213 | Identities=[domain] 214 | ) 215 | # Return an empty string for now; a custom public key from the request body is used if provided 216 | return "" 217 | except Exception as e: 218 | print(f"Error getting public key: {str(e)}") 219 | return "" 220 | 221 | def format_dns_records(domain, token, dkim_tokens, public_key=None, return_all=True): 222 | """Format DNS records in a structured way.""" 223 | records = {} 224 | 225 | # MX record 226 | records["MX"] = { 227 | "Type": "MX", 228 | "Name": domain, 229 | "Priority": 10, 230 | "Value": "inbound-smtp.us-east-1.amazonaws.com" 231 | } 232 | # Verification record 233 | if token: 234 | records["Verification"] = { 235 | "Type": "TXT", 236 | "Name": f"_amazonses.{domain}", 237 | "Priority": 0, 238 | "Value": token 239 | } 240 | 241 | # If return_all is False, only return the required records 242 | if not return_all: 243 | return records 244 | 245 | # SPF record 246 | records["SPF"] = { 247 | "Type": "TXT", 248 | "Name": domain, 249 | "Priority": 0, 250 | "Value": "v=spf1 include:amazonses.com -all" 251 | } 252 | 253 | # DMARC record 254 | records["DMARC"] = { 255 | "Type": "TXT", 256 | "Name": f"_dmarc.{domain}", 257 | "Priority": 0, 258 | "Value": f"v=DMARC1; p=quarantine; rua=mailto:dmarc-reports@{domain}" 259 | } 260 | 261 | # DKIM records 262 | for i, dkim_token in enumerate(dkim_tokens): 263 | records[f"DKIM_{i+1}"] = { 264 | "Type": "CNAME", 265 | "Name": f"{dkim_token}._domainkey.{domain}", 266 | "Priority": 0, 267 | "Value": f"{dkim_token}.dkim.amazonses.com" 268 | } 269 | 270 | # Custom DKIM if provided 271 | if public_key: 272 | records["CustomDKIM"] = { 273 | "Type": "TXT", 274 | "Name": f"resend._domainkey.{domain}", 275 | "Priority": 0, 276 | "Value": public_key 277 | } 278 | 279 | return records 280 | 281 | def lambda_handler(event, context): 282 | try: 283 | # Log the incoming event for debugging 284 | print("Received event:", event) 285 | 286 | http_method = event['requestContext']['http']['method'] 287 | 288 | # Handle DELETE request 289 | if http_method == 'DELETE': 290 | # Extract domain from path parameters 291 | path_params = event.get('pathParameters', {}) or {} 292 | domain = path_params.get('domain') 293 | 294 | # If no domain in path, try to get it from body as fallback 295 | if not domain: 296 | body = json.loads(event.get('body') or '{}') 297 | domain = body.get('domain') 298 | 299 | if not domain: 300 | return { 301 | "headers": { 302 | "Content-Type": "application/json" 303 | }, 304 | "statusCode": 400, 305 | "body": json.dumps({"error": "Domain is required in the path"}) 306 | } 307 | 308 | # Delete domain from MongoDB and SES 309 | delete_domain(domain) 310 | 311 | return { 312 | "statusCode": 200, 313 | "headers": { 314 | "Content-Type": "application/json" 315 | }, 316 | "body": json.dumps({ 317 | "message": f"Domain {domain} deleted successfully" 318 | }) 319 | } 320 | 321 | # Handle GET request 322 | elif http_method == 'GET': 323 | # Extract domain from path parameters 324 | domain = None 325 | 326 | # Check if path parameters are present 327 | path_params = event.get('pathParameters', {}) or {} 328 | if path_params and 'domain' in path_params: 329 | domain = path_params.get('domain') 330 | 331 | 
# If no domain in path parameters, try query parameters as fallback 332 | if not domain: 333 | query_params = event.get('queryStringParameters', {}) or {} 334 | domain = query_params.get('domain') 335 | 336 | if not domain: 337 | return { 338 | "headers": { 339 | "Content-Type": "application/json" 340 | }, 341 | "statusCode": 400, 342 | "body": json.dumps({"error": "Domain is required in the path"}) 343 | } 344 | 345 | # Check MongoDB connection 346 | if db is None or not mongodb_uri: 347 | return { 348 | "headers": { 349 | "Content-Type": "application/json" 350 | }, 351 | "statusCode": 500, 352 | "body": json.dumps({"error": "Database connection not available"}) 353 | } 354 | 355 | # Get domain data from MongoDB 356 | try: 357 | domain_configs = db['domain_configs'] 358 | mongo_data = domain_configs.find_one({"domain": domain}) 359 | 360 | if not mongo_data: 361 | return { 362 | "headers": { 363 | "Content-Type": "application/json" 364 | }, 365 | "statusCode": 404, 366 | "body": json.dumps({"error": f"Domain {domain} not found"}) 367 | } 368 | 369 | # Remove MongoDB _id field and convert datetime objects to strings 370 | if '_id' in mongo_data: 371 | del mongo_data['_id'] 372 | 373 | # Convert datetime objects to ISO format strings 374 | for key, value in mongo_data.items(): 375 | if isinstance(value, datetime.datetime): 376 | mongo_data[key] = value.isoformat() 377 | 378 | except PyMongoError as e: 379 | return { 380 | "headers": { 381 | "Content-Type": "application/json" 382 | }, 383 | "statusCode": 500, 384 | "body": json.dumps({"error": f"Error fetching MongoDB data: {str(e)}"}) 385 | } 386 | except Exception as e: 387 | return { 388 | "headers": { 389 | "Content-Type": "application/json" 390 | }, 391 | "statusCode": 500, 392 | "body": json.dumps({"error": f"Error fetching data: {str(e)}"}) 393 | } 394 | 395 | # Get domain verification status from SES 396 | status = "unknown" 397 | token = "" 398 | 399 | # Check if we should ignore SES data 400 | query_params = event.get('queryStringParameters', {}) or {} 401 | ignoreSesData = query_params.get('ignoreSesData') == "true" 402 | 403 | if not ignoreSesData: 404 | try: 405 | status = check_verification_status(domain) 406 | 407 | # Get verification token 408 | response = ses_client.get_identity_verification_attributes( 409 | Identities=[domain] 410 | ) 411 | verification_attrs = response['VerificationAttributes'].get(domain, {}) 412 | token = verification_attrs.get('VerificationToken', '') 413 | 414 | # Update verification status in MongoDB 415 | try: 416 | domain_configs.update_one( 417 | {"domain": domain}, 418 | { 419 | "$set": { 420 | "verification_status": status, 421 | "verification_status_last_checked": datetime.datetime.utcnow() 422 | } 423 | } 424 | ) 425 | print(f"Updated verification status for {domain}: {status}") 426 | except PyMongoError as e: 427 | print(f"Error updating verification status in MongoDB: {str(e)}") 428 | # Continue even if MongoDB update fails 429 | 430 | except Exception as e: 431 | print(f"Error fetching SES data: {str(e)}") 432 | 433 | # Get DKIM tokens 434 | dkim_tokens = [] 435 | if not ignoreSesData: 436 | dkim_tokens = get_dkim_tokens(domain) 437 | 438 | # Prepare DNS records information 439 | dns_records = format_dns_records(domain, token, dkim_tokens) 440 | 441 | # Include status in response only if SES data was queried 442 | response_data = {**mongo_data} 443 | 444 | if not ignoreSesData: 445 | response_data["status"] = status.lower() 446 | response_data["dns_records"] = dns_records 447 | 448 | return 
{ 449 | "statusCode": 200, 450 | "headers": { 451 | "Content-Type": "application/json" 452 | }, 453 | "body": json.dumps(response_data, indent=4) 454 | } 455 | 456 | # Handle PUT request 457 | elif http_method == 'PUT': 458 | # Extract domain from path parameters 459 | path_params = event.get('pathParameters', {}) or {} 460 | domain = path_params.get('domain') 461 | 462 | # Extract data from request body 463 | body = json.loads(event['body']) 464 | 465 | if not domain: 466 | return { 467 | "headers": { 468 | "Content-Type": "application/json" 469 | }, 470 | "statusCode": 400, 471 | "body": json.dumps({"error": "Domain is required in the path"}) 472 | } 473 | 474 | # Check MongoDB connection 475 | if db is None or not mongodb_uri: 476 | return { 477 | "headers": { 478 | "Content-Type": "application/json" 479 | }, 480 | "statusCode": 500, 481 | "body": json.dumps({"error": "Database connection not available"}) 482 | } 483 | 484 | # Update the domain configuration in MongoDB 485 | try: 486 | domain_configs = db['domain_configs'] 487 | 488 | # Prepare update data 489 | update_data = {k: v for k, v in body.items() if k != 'domain'} 490 | update_data['updated_at'] = datetime.datetime.utcnow() 491 | 492 | # Explicitly handle ai_analysis if provided 493 | if 'ai_analysis' in body: 494 | update_data['ai_analysis'] = body['ai_analysis'] 495 | 496 | # Update the document 497 | result = domain_configs.update_one( 498 | {"domain": domain}, 499 | {"$set": update_data} 500 | ) 501 | 502 | if result.matched_count == 0: 503 | return { 504 | "headers": { 505 | "Content-Type": "application/json" 506 | }, 507 | "statusCode": 404, 508 | "body": json.dumps({"error": f"Domain '{domain}' not found"}) 509 | } 510 | 511 | # Fetch the updated document 512 | updated_data = domain_configs.find_one({"domain": domain}) 513 | if '_id' in updated_data: 514 | del updated_data['_id'] 515 | 516 | # Convert datetime objects to ISO format strings 517 | for key, value in updated_data.items(): 518 | if isinstance(value, datetime.datetime): 519 | updated_data[key] = value.isoformat() 520 | 521 | except PyMongoError as e: 522 | return { 523 | "headers": { 524 | "Content-Type": "application/json" 525 | }, 526 | "statusCode": 500, 527 | "body": json.dumps({"error": f"Error updating data: {str(e)}"}) 528 | } 529 | except Exception as e: 530 | return { 531 | "headers": { 532 | "Content-Type": "application/json" 533 | }, 534 | "statusCode": 500, 535 | "body": json.dumps({"error": f"Error updating data: {str(e)}"}) 536 | } 537 | 538 | return { 539 | "statusCode": 200, 540 | "headers": { 541 | "Content-Type": "application/json" 542 | }, 543 | "body": json.dumps(updated_data) 544 | } 545 | 546 | # Handle POST request (existing functionality) 547 | else: # POST request 548 | # Extract domain from path parameters 549 | path_params = event.get('pathParameters', {}) or {} 550 | user_domain = path_params.get('domain') 551 | 552 | # Parse input from the request body 553 | body = json.loads(event['body']) 554 | 555 | # If no domain in path, try to get it from body as fallback 556 | if not user_domain and 'domain' in body: 557 | user_domain = body.get('domain') 558 | 559 | webhook = body.get('webhook') 560 | 561 | if not user_domain: 562 | return { 563 | "headers": { 564 | "Content-Type": "application/json" 565 | }, 566 | "statusCode": 400, 567 | "body": json.dumps({"error": "Domain is required in the path"}) 568 | } 569 | 570 | # Validate domain format 571 | if not is_valid_domain(user_domain): 572 | return { 573 | "headers": { 574 | 
"Content-Type": "application/json" 575 | }, 576 | "statusCode": 400, 577 | "body": json.dumps({"error": "Invalid domain format"}) 578 | } 579 | 580 | # Validate webhook format if provided 581 | if webhook and not is_valid_webhook(webhook): 582 | return { 583 | "headers": { 584 | "Content-Type": "application/json" 585 | }, 586 | "statusCode": 400, 587 | "body": json.dumps({"error": "Invalid webhook URL format"}) 588 | } 589 | 590 | # Check MongoDB connection 591 | if db is None or not mongodb_uri: 592 | return { 593 | "headers": { 594 | "Content-Type": "application/json" 595 | }, 596 | "statusCode": 500, 597 | "body": json.dumps({"error": "Database connection not available"}) 598 | } 599 | 600 | # Insert domain configuration into MongoDB 601 | try: 602 | domain_configs = db['domain_configs'] 603 | 604 | # Prepare domain configuration document 605 | domain_config = { 606 | "domain": user_domain, 607 | "webhook": webhook, # Can be None 608 | "created_at": datetime.datetime.utcnow(), 609 | "updated_at": datetime.datetime.utcnow() 610 | } 611 | 612 | # Try to insert, check for duplicate 613 | try: 614 | domain_configs.insert_one(domain_config) 615 | 616 | except DuplicateKeyError: 617 | return { 618 | "statusCode": 200, 619 | "headers": { 620 | "Content-Type": "application/json" 621 | }, 622 | "body": json.dumps({"message": "Domain already exists"}) 623 | } 624 | except PyMongoError as e: 625 | return { 626 | "headers": { 627 | "Content-Type": "application/json" 628 | }, 629 | "statusCode": 500, 630 | "body": json.dumps({"error": f"Error saving domain configuration: {str(e)}"}) 631 | } 632 | 633 | # Check the current verification status 634 | status = check_verification_status(user_domain) 635 | 636 | # Update verification status in MongoDB 637 | try: 638 | domain_configs.update_one( 639 | {"domain": user_domain}, 640 | { 641 | "$set": { 642 | "verification_status": status, 643 | "verification_status_last_checked": datetime.datetime.utcnow() 644 | } 645 | } 646 | ) 647 | print(f"Saved initial verification status for {user_domain}: {status}") 648 | except PyMongoError as e: 649 | print(f"Error saving verification status to MongoDB: {str(e)}") 650 | # Continue even if MongoDB update fails 651 | 652 | # Get or create SMTP credentials 653 | # smtp_credentials = create_smtp_user(user_domain) 654 | 655 | # Get verification token (will only initiate new verification if needed) 656 | token = verify_domain(user_domain) 657 | 658 | # Get DKIM tokens 659 | dkim_tokens = get_dkim_tokens(user_domain) 660 | 661 | # Get public key if provided 662 | public_key = body.get('public_key') if isinstance(body, dict) else None 663 | 664 | # Format DNS records 665 | records = format_dns_records(user_domain, token, dkim_tokens, public_key) 666 | 667 | response_data = { 668 | "object": "domain", 669 | "id": str(uuid.uuid4()), # Generate a unique ID 670 | "name": user_domain, 671 | "status": status.lower(), 672 | "created_at": datetime.datetime.now(datetime.timezone.utc).isoformat(), 673 | "region": "us-east-1", 674 | "dns_records": records, 675 | "webhook": webhook 676 | } 677 | 678 | return { 679 | "statusCode": 200, 680 | "headers": { 681 | "Content-Type": "application/json" 682 | }, 683 | "body": json.dumps(response_data, indent=4) 684 | } 685 | 686 | except Exception as e: 687 | return { 688 | "headers": { 689 | "Content-Type": "application/json" 690 | }, 691 | "statusCode": 500, 692 | "body": json.dumps({"error": str(e)}) 693 | } --------------------------------------------------------------------------------