├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── CI.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── cloudwatch.tf ├── examples ├── autoscaling-scheduler-terminate-instances │ ├── cloudwatch_alarm.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── autoscaling-scheduler │ ├── cloudwatch_alarm.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── documentdb-scheduler │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-cleanup │ │ ├── main.tf │ │ └── variables.tf │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── wait_documentdb_status.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── ecs-scheduler │ ├── cloudwatch_alarm.tf │ ├── ecs.tf │ ├── iam.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── instance-scheduler │ ├── cloudwatch_alarm.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── wait_instances.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── neptune-scheduler │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-cleanup │ │ ├── main.tf │ │ └── variables.tf │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── wait_neptune_status.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── rds-scheduler │ ├── cloudwatch_alarm.tf │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-cleanup │ │ ├── main.tf │ │ └── variables.tf │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ 
│ ├── variables.tf │ │ ├── versions.tf │ │ ├── wait_rds_cluster.py │ │ └── wait_rds_instance.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── redshift-scheduler │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-execution │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── wait_redshift_status.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── test_fixture │ ├── main.tf │ ├── terraform.tftest.hcl │ └── versions.tf ├── timezone-scheduler │ ├── main.tf │ ├── terraform.tftest.hcl │ └── versions.tf └── transfer-scheduler │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── test-execution │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── wait_transfer_status.py │ ├── variables.tf │ ├── versions.tf │ └── vpc.tf ├── iam.tf ├── main.tf ├── outputs.tf ├── package └── scheduler │ ├── __init__.py │ ├── autoscaling_handler.py │ ├── cloudwatch_handler.py │ ├── documentdb_handler.py │ ├── ecs_handler.py │ ├── exceptions.py │ ├── filter_resources_by_tags.py │ ├── instance_handler.py │ ├── main.py │ ├── rds_handler.py │ ├── redshift_handler.py │ ├── transfer_handler.py │ └── waiters.py ├── requirements.txt ├── tests ├── __init__.py ├── sanity │ ├── .pylintrc │ └── terraform_tests.sh └── unit │ ├── __init__.py │ ├── test_autoscaling_handler.py │ ├── test_filter_resources_by_tags.py │ ├── test_instance_handler.py │ ├── test_rds_handler.py │ └── utils.py ├── tox.ini ├── variables.tf └── versions.tf /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/base:ubuntu-22.04 2 | 3 | RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 4 | && apt-get -y install --no-install-recommends \ 5 | acl \ 6 | ansible \ 7 | curl \ 8 | git \ 9 | gnupg \ 10 | iproute2 \ 11 | iputils-ping \ 12 | jq \ 13 | less \ 14 | libssl-dev \ 15 | lsb-release \ 16 | make \ 17 | nano \ 18 | openssh-client \ 19 | procps \ 20 | python3 \ 
21 | python3-pip \ 22 | python3-venv \ 23 | sudo \ 24 | unzip \ 25 | vim \ 26 | wget \ 27 | zip \ 28 | zsh \ 29 | && apt-get clean \ 30 | && rm -rf /var/lib/apt/lists/* 31 | 32 | RUN ansible-galaxy role install diodonfrost.ohmyzsh && \ 33 | ansible-pull -U https://github.com/diodonfrost/ansible-role-ohmyzsh tests/test.yml -e "ohmyzsh_theme=powerlevel10k/powerlevel10k" -e '{"ohmyzsh_users": [vscode]}' 34 | 35 | RUN ansible-galaxy role install diodonfrost.p10k && \ 36 | ansible-pull -U https://github.com/diodonfrost/ansible-role-p10k tests/test.yml -e "zsh_plugin=ohmyzsh" -e '{"p10k_users": [vscode]}' 37 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { "dockerfile": "Dockerfile", "context": "../" }, 3 | "mounts": [ 4 | "source=${localEnv:HOME}/.aws,target=/home/vscode/.aws,type=bind,consistency=cached" 5 | ], 6 | 7 | "customizations": { 8 | "vscode": { 9 | "extensions": [ 10 | "hashicorp.terraform", 11 | "redhat.vscode-yaml", 12 | "vscode-icons-team.vscode-icons", 13 | "isudox.vscode-jetbrains-keybindings", 14 | "GitHub.vscode-github-actions" 15 | ], 16 | "settings": { 17 | // Fonts MesLGS NF should be install: https://github.com/romkatv/powerlevel10k-media/blob/master/MesloLGS%20NF%20Regular.ttf 18 | "terminal.integrated.fontFamily": "MesloLGS NF", 19 | "redhat.telemetry.enabled": false, 20 | "aws.telemetry": false, 21 | "workbench.iconTheme": "vscode-icons", 22 | "vsicons.dontShowNewVersionMessage": true, 23 | "editor.rulers": [88,120] 24 | } 25 | } 26 | }, 27 | "features": { 28 | "ghcr.io/devcontainers/features/terraform:1": { 29 | "installSentinel": true, 30 | "installTFsec": true, 31 | "installTerraformDocs": true 32 | }, 33 | "ghcr.io/devcontainers/features/aws-cli:1": {} 34 | }, 35 | "postStartCommand": "pip install -r requirements.txt", 36 | "remoteEnv": { "PATH": 
"${containerEnv:PATH}:/home/vscode/.local/bin" }, 37 | "remoteUser": "vscode" 38 | } 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F41B Bug report" 3 | about: Create a report to help us improve 4 | --- 5 | 6 | ##### SUMMARY 7 | 8 | 9 | ##### ISSUE TYPE 10 | - Bug Report 11 | 12 | ##### TERRAFORM VERSION 13 | 14 | ```paste below 15 | 16 | ``` 17 | 18 | ##### STEPS TO REPRODUCE 19 | 20 | 21 | 22 | ```paste below 23 | 24 | ``` 25 | 26 | 27 | 28 | ##### EXPECTED RESULTS 29 | 30 | 31 | 32 | ##### ACTUAL RESULTS 33 | 34 | 35 | 36 | ```paste below 37 | 38 | ``` 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "✨ Feature request" 3 | about: Suggest an idea for this project 4 | --- 5 | 6 | ##### SUMMARY 7 | 8 | 9 | ##### ISSUE TYPE 10 | - Feature Idea 11 | 12 | ##### Additional context 13 | Add any other context or screenshots about the feature request here. 
14 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | pull_request: 5 | schedule: 6 | - cron: '0 18 * * SUN' 7 | 8 | jobs: 9 | lint: 10 | name: Terraform validate ${{ matrix.terraform_version }} 11 | runs-on: ubuntu-latest 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | terraform_version: 16 | - latest 17 | - 1.2.9 18 | - 1.1.9 19 | steps: 20 | - uses: actions/checkout@master 21 | - uses: hashicorp/setup-terraform@v2 22 | with: 23 | terraform_version: "${{ matrix.terraform_version }}" 24 | 25 | - name: Terraform version 26 | id: version 27 | run: terraform version 28 | - name: Terraform fmt 29 | id: fmt 30 | run: terraform fmt -check 31 | continue-on-error: true 32 | - name: Terraform init 33 | id: init 34 | run: terraform init 35 | - name: Terraform Validate 36 | id: validate 37 | run: terraform validate -no-color 38 | 39 | pythontest: 40 | name: ${{ matrix.config.toxenv }} 41 | runs-on: ubuntu-latest 42 | strategy: 43 | fail-fast: false 44 | matrix: 45 | config: 46 | - toxenv: py37 47 | python-version: 3.7 48 | - toxenv: py38 49 | python-version: 3.8 50 | - toxenv: py39 51 | python-version: 3.9 52 | - toxenv: py310 53 | python-version: '3.10' 54 | # - toxenv: py311 55 | # python-version: 3.11 56 | - toxenv: flake8 57 | python-version: 3.8 58 | - toxenv: pylint 59 | python-version: 3.8 60 | - toxenv: black 61 | python-version: 3.8 62 | - toxenv: mypy 63 | python-version: 3.8 64 | - toxenv: pytest 65 | python-version: 3.8 66 | 67 | steps: 68 | - name: Checkout repository 69 | uses: actions/checkout@v2 70 | 71 | - name: Set up Python version 72 | uses: actions/setup-python@v2 73 | with: 74 | python-version: ${{ matrix.config.python-version }} 75 | 76 | - name: Install dependencies 77 | run: python -m pip install tox==3.20.0 78 | 79 | - name: Python test 80 | run: tox 81 | env: 82 | 
TOXENV: "${{ matrix.config.toxenv }}" 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | .terraform.lock.hcl 4 | *.tfstate* 5 | *.zip 6 | terraform.tfstate.d 7 | *.state 8 | 9 | # Python files 10 | .tox 11 | __pycache__ 12 | .pytest_cache 13 | .coverage 14 | .mypy_cache 15 | .venv 16 | 17 | # IDE files 18 | .idea 19 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v3.4.0 5 | hooks: 6 | - id: trailing-whitespace 7 | - id: end-of-file-fixer 8 | - id: check-added-large-files 9 | - id: check-case-conflict 10 | - id: check-merge-conflict 11 | - id: fix-encoding-pragma 12 | - id: check-builtin-literals 13 | - id: check-ast 14 | -------------------------------------------------------------------------------- /cloudwatch.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "this" { 2 | name = "/aws/lambda/${var.name}" 3 | retention_in_days = 14 4 | tags = var.tags 5 | } 6 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_autoscaling_policy" "scheduled" { 2 | count = 3 3 | name = "bar-with-tag-${count.index}" 4 | scaling_adjustment = 1 5 | adjustment_type = "ChangeInCapacity" 6 | cooldown = 300 7 | autoscaling_group_name = aws_autoscaling_group.scheduled[count.index].name 8 | } 9 | 10 | resource "aws_cloudwatch_metric_alarm" "scheduled" { 11 | count = 3 12 | alarm_name = "bar-with-tag-${count.index}" 13 | 
namespace = "AWS/AutoScaling" 14 | comparison_operator = "GreaterThanThreshold" 15 | evaluation_periods = "2" 16 | metric_name = "CPUUtilization" 17 | period = "60" 18 | statistic = "Average" 19 | threshold = "90" 20 | alarm_actions = [aws_autoscaling_policy.scheduled[count.index].arn] 21 | dimensions = { 22 | AutoScalingGroupName = aws_autoscaling_group.scheduled[count.index].id 23 | } 24 | 25 | tags = { 26 | tostop = "true" 27 | } 28 | } 29 | 30 | resource "aws_autoscaling_policy" "not_scheduled" { 31 | count = 2 32 | name = "foo-without-tag-${count.index}" 33 | scaling_adjustment = 1 34 | adjustment_type = "ChangeInCapacity" 35 | cooldown = 300 36 | autoscaling_group_name = aws_autoscaling_group.not_scheduled[count.index].name 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "not_scheduled" { 40 | count = 2 41 | alarm_name = "foo-without-tag-${count.index}" 42 | namespace = "AWS/AutoScaling" 43 | comparison_operator = "GreaterThanThreshold" 44 | evaluation_periods = "2" 45 | metric_name = "CPUUtilization" 46 | period = "60" 47 | statistic = "Average" 48 | threshold = "90" 49 | alarm_actions = [aws_autoscaling_policy.not_scheduled[count.index].arn] 50 | dimensions = { 51 | AutoScalingGroupName = aws_autoscaling_group.not_scheduled[count.index].id 52 | } 53 | 54 | tags = { 55 | tostop = "false" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform autoscaling group with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | data "aws_ami" "ubuntu" { 5 | most_recent = true 6 | 7 | filter { 8 | name = "name" 9 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 10 | } 11 | 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | 17 | owners = ["099720109477"] # Canonical 18 | } 19 | 20 | resource 
"aws_launch_template" "this" { 21 | name_prefix = "web_config" 22 | image_id = data.aws_ami.ubuntu.id 23 | instance_type = "t2.micro" 24 | } 25 | 26 | # Create autoscaling group with tag 27 | resource "aws_autoscaling_group" "scheduled" { 28 | count = 3 29 | name = "test-to-stop-${random_pet.suffix.id}-${count.index}" 30 | max_size = 5 31 | min_size = 1 32 | health_check_grace_period = 300 33 | health_check_type = "EC2" 34 | desired_capacity = 1 35 | force_delete = true 36 | vpc_zone_identifier = [aws_subnet.this.id] 37 | mixed_instances_policy { 38 | instances_distribution { 39 | on_demand_base_capacity = 0 40 | on_demand_percentage_above_base_capacity = 25 41 | spot_allocation_strategy = "capacity-optimized" 42 | } 43 | launch_template { 44 | launch_template_specification { 45 | launch_template_id = aws_launch_template.this.id 46 | } 47 | } 48 | } 49 | 50 | tag { 51 | key = "tostop" 52 | value = "true-${random_pet.suffix.id}" 53 | propagate_at_launch = true 54 | } 55 | } 56 | 57 | # Create autoscaling group without tag 58 | resource "aws_autoscaling_group" "not_scheduled" { 59 | count = 2 60 | name = "test-not-to-stop-${random_pet.suffix.id}-${count.index}" 61 | max_size = 5 62 | min_size = 1 63 | health_check_grace_period = 300 64 | health_check_type = "EC2" 65 | desired_capacity = 1 66 | force_delete = true 67 | vpc_zone_identifier = [aws_subnet.this.id] 68 | mixed_instances_policy { 69 | instances_distribution { 70 | on_demand_base_capacity = 0 71 | on_demand_percentage_above_base_capacity = 25 72 | spot_allocation_strategy = "capacity-optimized" 73 | } 74 | launch_template { 75 | launch_template_specification { 76 | launch_template_id = aws_launch_template.this.id 77 | } 78 | } 79 | } 80 | 81 | tag { 82 | key = "tostop" 83 | value = "false" 84 | propagate_at_launch = true 85 | } 86 | } 87 | 88 | 89 | ### Terraform modules ### 90 | 91 | module "autoscaling-stop-friday" { 92 | source = "../../" 93 | name = "stop-autoscaling-${random_pet.suffix.id}" 94 | 
schedule_expression = "cron(0 23 ? * FRI *)" 95 | schedule_action = "stop" 96 | ec2_schedule = "false" 97 | rds_schedule = "false" 98 | autoscaling_schedule = "true" 99 | autoscaling_terminate_instances = "true" 100 | cloudwatch_alarm_schedule = "true" 101 | 102 | scheduler_tag = { 103 | key = "tostop" 104 | value = "true-${random_pet.suffix.id}" 105 | } 106 | } 107 | 108 | module "autoscaling-start-monday" { 109 | source = "../../" 110 | name = "start-autoscaling-${random_pet.suffix.id}" 111 | schedule_expression = "cron(0 07 ? * MON *)" 112 | schedule_action = "start" 113 | ec2_schedule = "false" 114 | rds_schedule = "false" 115 | autoscaling_schedule = "true" 116 | cloudwatch_alarm_schedule = "true" 117 | 118 | scheduler_tag = { 119 | key = "tostop" 120 | value = "true-${random_pet.suffix.id}" 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform ex2-schedule outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.autoscaling-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.autoscaling-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.autoscaling-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = module.autoscaling-start-monday.scheduler_lambda_arn 17 | } 18 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.autoscaling-stop-friday.scheduler_lambda_name == 
"stop-autoscaling-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.autoscaling-start-monday.scheduler_lambda_name == "start-autoscaling-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/variables.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diodonfrost/terraform-aws-lambda-scheduler-stop-start/28fa0565aaa494994f8a530bc3b5691ce9e3b534/examples/autoscaling-scheduler-terminate-instances/variables.tf -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "this" { 2 | cidr_block = "10.0.0.0/16" 3 | } 4 | 5 | resource "aws_subnet" "this" { 6 | vpc_id = aws_vpc.this.id 7 | cidr_block = "10.0.1.0/24" 8 | } 9 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_autoscaling_policy" "scheduled" { 2 | count = 3 3 | name = "bar-with-tag-${count.index}" 4 | scaling_adjustment = 1 5 | adjustment_type = "ChangeInCapacity" 6 
| cooldown = 300 7 | autoscaling_group_name = aws_autoscaling_group.scheduled[count.index].name 8 | } 9 | 10 | resource "aws_cloudwatch_metric_alarm" "scheduled" { 11 | count = 3 12 | alarm_name = "bar-with-tag-${count.index}" 13 | namespace = "AWS/AutoScaling" 14 | comparison_operator = "GreaterThanThreshold" 15 | evaluation_periods = "2" 16 | metric_name = "CPUUtilization" 17 | period = "60" 18 | statistic = "Average" 19 | threshold = "90" 20 | alarm_actions = [aws_autoscaling_policy.scheduled[count.index].arn] 21 | dimensions = { 22 | AutoScalingGroupName = aws_autoscaling_group.scheduled[count.index].id 23 | } 24 | 25 | tags = { 26 | tostop = "true" 27 | } 28 | } 29 | 30 | resource "aws_autoscaling_policy" "not_scheduled" { 31 | count = 2 32 | name = "foo-without-tag-${count.index}" 33 | scaling_adjustment = 1 34 | adjustment_type = "ChangeInCapacity" 35 | cooldown = 300 36 | autoscaling_group_name = aws_autoscaling_group.not_scheduled[count.index].name 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "not_scheduled" { 40 | count = 2 41 | alarm_name = "foo-without-tag-${count.index}" 42 | namespace = "AWS/AutoScaling" 43 | comparison_operator = "GreaterThanThreshold" 44 | evaluation_periods = "2" 45 | metric_name = "CPUUtilization" 46 | period = "60" 47 | statistic = "Average" 48 | threshold = "90" 49 | alarm_actions = [aws_autoscaling_policy.not_scheduled[count.index].arn] 50 | dimensions = { 51 | AutoScalingGroupName = aws_autoscaling_group.not_scheduled[count.index].id 52 | } 53 | 54 | tags = { 55 | tostop = "false" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform autoscaling group with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | data "aws_ami" "ubuntu" { 5 | most_recent = true 6 | 7 | filter { 8 | name = "name" 9 | values = 
["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 10 | } 11 | 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | 17 | owners = ["099720109477"] # Canonical 18 | } 19 | 20 | resource "aws_launch_template" "this" { 21 | name_prefix = "web_config" 22 | image_id = data.aws_ami.ubuntu.id 23 | instance_type = "t2.micro" 24 | } 25 | 26 | # Create autoscaling group with tag 27 | resource "aws_autoscaling_group" "scheduled" { 28 | count = 3 29 | name = "test-to-stop-${count.index}-${random_pet.suffix.id}" 30 | max_size = 5 31 | min_size = 1 32 | health_check_grace_period = 300 33 | health_check_type = "EC2" 34 | desired_capacity = 1 35 | force_delete = true 36 | vpc_zone_identifier = [aws_subnet.this.id] 37 | 38 | launch_template { 39 | id = aws_launch_template.this.id 40 | version = "$Latest" 41 | } 42 | 43 | tag { 44 | key = "tostop" 45 | value = "true-${random_pet.suffix.id}" 46 | propagate_at_launch = true 47 | } 48 | } 49 | 50 | # Create autoscaling group without tag 51 | resource "aws_autoscaling_group" "not_scheduled" { 52 | count = 2 53 | name = "test-not-to-stop-${count.index}-${random_pet.suffix.id}" 54 | max_size = 5 55 | min_size = 1 56 | health_check_grace_period = 300 57 | health_check_type = "EC2" 58 | desired_capacity = 1 59 | force_delete = true 60 | vpc_zone_identifier = [aws_subnet.this.id] 61 | 62 | launch_template { 63 | id = aws_launch_template.this.id 64 | version = "$Latest" 65 | } 66 | 67 | tag { 68 | key = "tostop" 69 | value = "false" 70 | propagate_at_launch = true 71 | } 72 | } 73 | 74 | 75 | ### Terraform modules ### 76 | 77 | module "autoscaling-stop-friday" { 78 | source = "../../" 79 | name = "stop-autoscaling-${random_pet.suffix.id}" 80 | schedule_expression = "cron(0 23 ? 
* FRI *)" 81 | schedule_action = "stop" 82 | ec2_schedule = "false" 83 | rds_schedule = "false" 84 | autoscaling_schedule = "true" 85 | cloudwatch_alarm_schedule = "true" 86 | 87 | scheduler_tag = { 88 | key = "tostop" 89 | value = "true-${random_pet.suffix.id}" 90 | } 91 | } 92 | 93 | module "autoscaling-start-monday" { 94 | source = "../../" 95 | name = "start-autoscaling-${random_pet.suffix.id}" 96 | schedule_expression = "cron(0 07 ? * MON *)" 97 | schedule_action = "start" 98 | ec2_schedule = "false" 99 | rds_schedule = "false" 100 | autoscaling_schedule = "true" 101 | cloudwatch_alarm_schedule = "true" 102 | 103 | scheduler_tag = { 104 | key = "tostop" 105 | value = "true-${random_pet.suffix.id}" 106 | } 107 | } 108 | 109 | module "test-execution" { 110 | count = var.test_mode ? 1 : 0 111 | source = "./test-execution" 112 | 113 | lambda_stop_name = module.autoscaling-stop-friday.scheduler_lambda_name 114 | asg_scheduled_name = aws_autoscaling_group.scheduled[0].name 115 | asg_not_scheduled_name = aws_autoscaling_group.not_scheduled[0].name 116 | } 117 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform ex2-schedule outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.autoscaling-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.autoscaling-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.autoscaling-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = module.autoscaling-start-monday.scheduler_lambda_arn 17 | } 18 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run 
"create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.autoscaling-stop-friday.scheduler_lambda_name == "stop-autoscaling-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.autoscaling-start-monday.scheduler_lambda_name == "start-autoscaling-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test-execution[0].asg_scheduled_suspended_processes == toset([ 20 | "AZRebalance", 21 | "AddToLoadBalancer", 22 | "AlarmNotification", 23 | "HealthCheck", 24 | "InstanceRefresh", 25 | "Launch", 26 | "RemoveFromLoadBalancerLowPriority", 27 | "ReplaceUnhealthy", 28 | "ScheduledActions", 29 | "Terminate", 30 | ]) 31 | error_message = "Autoscaling group instances should be suspended" 32 | } 33 | 34 | assert { 35 | condition = length(module.test-execution[0].asg_not_scheduled_suspended_processes) == 0 36 | error_message = "Autoscaling group instances should not be suspended" 37 | } 38 | 39 | assert { 40 | condition = module.test-execution[0].asg_instance_scheduled_state == "stopped" || module.test-execution[0].asg_instance_scheduled_state == "stopping" 41 | error_message = "Autoscaling group instance should be stopped" 42 | } 43 | 44 | assert { 45 | condition = module.test-execution[0].asg_instance_not_scheduled_state == "running" 46 | error_message = "Autoscaling group instance should be running" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "time_sleep" "before_stop_wait_60_seconds" { 2 | create_duration = "60s" 3 | } 4 | 5 | resource "aws_lambda_invocation" "this" { 6 | function_name = var.lambda_stop_name 7 | 8 | input = jsonencode({ 9 | key1 = "value1" 
10 | key2 = "value2" 11 | }) 12 | 13 | depends_on = [time_sleep.before_stop_wait_60_seconds] 14 | } 15 | 16 | resource "time_sleep" "after_stop_wait_60_seconds" { 17 | create_duration = "60s" 18 | 19 | depends_on = [aws_lambda_invocation.this] 20 | } 21 | 22 | data "aws_autoscaling_group" "asg_scheduled" { 23 | name = var.asg_scheduled_name 24 | 25 | depends_on = [time_sleep.after_stop_wait_60_seconds] 26 | } 27 | 28 | data "aws_autoscaling_group" "asg_not_scheduled" { 29 | name = var.asg_not_scheduled_name 30 | 31 | depends_on = [time_sleep.after_stop_wait_60_seconds] 32 | } 33 | 34 | data "aws_instances" "asg_scheduled" { 35 | instance_tags = { 36 | "aws:autoscaling:groupName" = var.asg_scheduled_name 37 | } 38 | instance_state_names = [ 39 | "running", 40 | "shutting-down", 41 | "stopped", 42 | "stopping", 43 | ] 44 | depends_on = [time_sleep.after_stop_wait_60_seconds] 45 | } 46 | 47 | data "aws_instance" "asg_scheduled" { 48 | instance_id = data.aws_instances.asg_scheduled.ids[0] 49 | } 50 | 51 | data "aws_instances" "asg_not_scheduled" { 52 | instance_tags = { 53 | "aws:autoscaling:groupName" = var.asg_not_scheduled_name 54 | } 55 | instance_state_names = [ 56 | "running", 57 | "shutting-down", 58 | "stopped", 59 | "stopping", 60 | ] 61 | 62 | depends_on = [time_sleep.after_stop_wait_60_seconds] 63 | } 64 | 65 | data "aws_instance" "asg_not_scheduled" { 66 | instance_id = data.aws_instances.asg_not_scheduled.ids[0] 67 | } 68 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "asg_scheduled_suspended_processes" { 2 | value = data.aws_autoscaling_group.asg_scheduled.suspended_processes 3 | } 4 | 5 | output "asg_not_scheduled_suspended_processes" { 6 | value = data.aws_autoscaling_group.asg_not_scheduled.suspended_processes 7 | } 8 | 9 | output "asg_instance_scheduled_state" { 10 | 
value = data.aws_instance.asg_scheduled.instance_state 11 | } 12 | 13 | output "asg_instance_not_scheduled_state" { 14 | value = data.aws_instance.asg_not_scheduled.instance_state 15 | } 16 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the Lambda function used for stopping instances" 3 | type = string 4 | } 5 | 6 | variable "asg_scheduled_name" { 7 | description = "Name of the scheduled autoscaling group" 8 | type = string 9 | } 10 | 11 | variable "asg_not_scheduled_name" { 12 | description = "Name of the not scheduled autoscaling group" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | time = { 9 | source = "hashicorp/time" 10 | version = "0.13.0" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = 
"hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "this" { 2 | cidr_block = "10.0.0.0/16" 3 | } 4 | 5 | resource "aws_subnet" "this" { 6 | vpc_id = aws_vpc.this.id 7 | cidr_block = "10.0.1.0/24" 8 | } 9 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | variable "cleanup_mode" { 5 | description = "Whether to run in cleanup mode" 6 | type = bool 7 | default = false 8 | } 9 | 10 | resource "aws_kms_key" "scheduler" { 11 | description = "test kms option on scheduler module" 12 | deletion_window_in_days = 7 13 | } 14 | 15 | resource "aws_docdb_cluster" "scheduled" { 16 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 17 | engine = "docdb" 18 | master_username = "foo" 19 | master_password = "mustbeeightchars" 20 | skip_final_snapshot = true 21 | db_subnet_group_name = aws_docdb_subnet_group.documentdb.name 22 | tags = { 23 | tostop = "true-${random_pet.suffix.id}" 24 | } 25 | } 26 | 27 | resource "aws_docdb_cluster_instance" "scheduled" { 28 | identifier = "test-to-stop-${random_pet.suffix.id}" 29 | cluster_identifier = aws_docdb_cluster.scheduled.id 30 | instance_class = "db.r5.large" 31 | tags = { 32 | tostop = "true-${random_pet.suffix.id}" 33 | } 34 | } 35 | 36 | resource "aws_docdb_cluster" "not_scheduled" { 37 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 38 | engine = "docdb" 39 | master_username = "foo" 40 | master_password = "mustbeeightchars" 41 | skip_final_snapshot = true 42 | db_subnet_group_name = 
aws_docdb_subnet_group.documentdb.name 43 | tags = { 44 | tostop = "false" 45 | } 46 | } 47 | 48 | resource "aws_docdb_cluster_instance" "not_scheduled" { 49 | identifier = "test-not-to-stop-${random_pet.suffix.id}" 50 | cluster_identifier = aws_docdb_cluster.not_scheduled.id 51 | instance_class = "db.r5.large" 52 | tags = { 53 | tostop = "false" 54 | } 55 | } 56 | 57 | 58 | module "documentdb-stop-friday" { 59 | source = "../.." 60 | name = "stop-documentdb-${random_pet.suffix.id}" 61 | kms_key_arn = aws_kms_key.scheduler.arn 62 | schedule_expression = "cron(0 23 ? * FRI *)" 63 | schedule_action = "stop" 64 | documentdb_schedule = "true" 65 | 66 | scheduler_tag = { 67 | key = "tostop" 68 | value = "true-${random_pet.suffix.id}" 69 | } 70 | } 71 | 72 | module "documentdb-start-monday" { 73 | source = "../.." 74 | name = "start-documentdb-${random_pet.suffix.id}" 75 | schedule_expression = "cron(0 07 ? * MON *)" 76 | schedule_action = "start" 77 | documentdb_schedule = "true" 78 | 79 | scheduler_tag = { 80 | key = "tostop" 81 | value = "true-${random_pet.suffix.id}" 82 | } 83 | } 84 | 85 | module "test-execution" { 86 | count = var.test_mode ? 
1 : 0 87 | source = "./test-execution" 88 | 89 | lambda_stop_name = module.documentdb-stop-friday.scheduler_lambda_name 90 | docdb_cluster_to_scheduled_name = aws_docdb_cluster.scheduled.cluster_identifier 91 | docdb_cluster_not_scheduled_name = aws_docdb_cluster.not_scheduled.cluster_identifier 92 | 93 | depends_on = [ 94 | aws_docdb_cluster_instance.scheduled, 95 | aws_docdb_cluster_instance.not_scheduled 96 | ] 97 | } 98 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform documentdb-scheduler outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.documentdb-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.documentdb-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.documentdb-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = module.documentdb-start-monday.scheduler_lambda_arn 17 | } 18 | 19 | output "docdb_cluster_scheduled_identifier" { 20 | value = aws_docdb_cluster.scheduled.cluster_identifier 21 | } 22 | 23 | output "docdb_instance_scheduled_identifier" { 24 | value = aws_docdb_cluster_instance.scheduled.identifier 25 | } 26 | 27 | output "docdb_cluster_not_scheduled_identifier" { 28 | value = aws_docdb_cluster.not_scheduled.cluster_identifier 29 | } 30 | 31 | output "docdb_instance_not_scheduled_identifier" { 32 | value = aws_docdb_cluster_instance.not_scheduled.identifier 33 | } 34 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = 
module.documentdb-stop-friday.scheduler_lambda_name == "stop-documentdb-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.documentdb-start-monday.scheduler_lambda_name == "start-documentdb-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test-execution[0].docdb_cluster_to_scheduled_state == "stopping\n" 20 | error_message = "DocumentDB cluster with tag 'tostop=true' should be stopped" 21 | } 22 | 23 | assert { 24 | condition = module.test-execution[0].docdb_cluster_not_scheduled_state == "available\n" 25 | error_message = "DocumentDB cluster with tag 'tostop=false' should not be stopped" 26 | } 27 | } 28 | 29 | # Add this cleanup step to restore the cluster to 'available' state before destruction 30 | run "cleanup_test_resources" { 31 | command = apply 32 | 33 | variables { 34 | docdb_cluster_name = run.create_test_infrastructure.docdb_cluster_scheduled_identifier 35 | docdb_instance_name = run.create_test_infrastructure.docdb_instance_scheduled_identifier 36 | } 37 | 38 | # This will start the stopped cluster to ensure proper deletion 39 | module { 40 | source = "./test-cleanup" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_docdb_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=600 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for DocumentDB cluster ${var.docdb_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 
8 | 9 | while true; do 10 | # Check the current state of the DocumentDB cluster 11 | CURRENT_STATE=$(aws docdb describe-db-clusters --db-cluster-identifier ${var.docdb_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws docdb start-db-cluster --db-cluster-identifier ${var.docdb_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. DocumentDB cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_docdb_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=600 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for DocumentDB cluster ${var.docdb_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 44 | 45 | while true; do 46 | # Check the current state of the DocumentDB cluster 47 | CURRENT_STATE=$(aws docdb describe-db-clusters --db-cluster-identifier ${var.docdb_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. DocumentDB cluster did not reach 'available' state within $TIMEOUT seconds." 
61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_docdb_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "docdb_cluster_name" { 2 | description = "Name of the DocumentDB cluster to start before deletion" 3 | type = string 4 | } 5 | 6 | variable "docdb_instance_name" { 7 | description = "Name of the DocumentDB instance to start before deletion" 8 | type = string 9 | } 10 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_documentdb_cluster_available_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_documentdb_status.py available \ 5 | ${var.docdb_cluster_to_scheduled_name} \ 6 | ${var.docdb_cluster_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_documentdb" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_documentdb_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_documentdb_cluster_stopped_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_documentdb_status.py stopped \ 26 | ${var.docdb_cluster_to_scheduled_name} \ 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "docdb_cluster_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws docdb describe-db-clusters \ 35 | 
--db-cluster-identifier ${var.docdb_cluster_to_scheduled_name} \ 36 | --query 'DBClusters[0].Status' \ 37 | --output text > ${path.module}/docdb_cluster_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_documentdb_cluster_stopped_state] 42 | } 43 | 44 | data "local_file" "docdb_cluster_to_scheduled" { 45 | filename = "${path.module}/docdb_cluster_to_scheduled.state" 46 | 47 | depends_on = [null_resource.docdb_cluster_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "docdb_cluster_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws docdb describe-db-clusters \ 54 | --db-cluster-identifier ${var.docdb_cluster_not_scheduled_name} \ 55 | --query 'DBClusters[0].Status' \ 56 | --output text > ${path.module}/docdb_cluster_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_documentdb_cluster_stopped_state] 61 | } 62 | 63 | data "local_file" "docdb_cluster_not_scheduled" { 64 | filename = "${path.module}/docdb_cluster_not_scheduled.state" 65 | 66 | depends_on = [null_resource.docdb_cluster_not_scheduled] 67 | } 68 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "docdb_cluster_to_scheduled_state" { 2 | description = "State of the DocumentDB cluster that should be stopped" 3 | value = data.local_file.docdb_cluster_to_scheduled.content 4 | } 5 | 6 | output "docdb_cluster_not_scheduled_state" { 7 | description = "State of the DocumentDB cluster that should not be stopped" 8 | value = data.local_file.docdb_cluster_not_scheduled.content 9 | } -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description 
= "Name of the lambda function to stop DocumentDB clusters" 3 | type = string 4 | } 5 | 6 | variable "docdb_cluster_to_scheduled_name" { 7 | description = "Name of the DocumentDB cluster that should be stopped" 8 | type = string 9 | } 10 | 11 | variable "docdb_cluster_not_scheduled_name" { 12 | description = "Name of the DocumentDB cluster that should not be stopped" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/wait_documentdb_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS documentdb cluster status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_documentdb_cluster_status( 13 | desired_status: str, cluster_identifiers: List[str] 14 | ) -> None: 15 | """Wait for documentdb clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of documentdb cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 
'available', 'paused') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | documentdb = boto3.client("docdb") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds." 33 | "Some documentdb clusters may not have reached the desired status." 34 | ) 35 | sys.exit(1) 36 | 37 | all_clusters_in_desired_state = True 38 | for cluster_id in cluster_identifiers: 39 | response = documentdb.describe_db_clusters( 40 | DBClusterIdentifier=cluster_id 41 | ) 42 | instance_id = response["DBClusters"][0]["DBClusterMembers"][0][ 43 | "DBInstanceIdentifier" 44 | ] 45 | instance_response = documentdb.describe_db_instances( 46 | DBInstanceIdentifier=instance_id 47 | ) 48 | current_status = instance_response["DBInstances"][0]["DBInstanceStatus"] 49 | 50 | if current_status != desired_status: 51 | all_clusters_in_desired_state = False 52 | break 53 | 54 | if all_clusters_in_desired_state: 55 | print(f"All documentdb clusters are now {desired_status}") 56 | return 57 | 58 | print(f"Waiting for documentdb clusters to be {desired_status}...") 59 | time.sleep(10) # Wait 10 seconds before checking again 60 | 61 | except ClientError as e: 62 | print(f"Error checking documentdb status: {e}") 63 | sys.exit(1) 64 | 65 | 66 | if __name__ == "__main__": 67 | if len(sys.argv) < 3: 68 | print( 69 | "Usage: python wait_documentdb_status.py" 70 | " [cluster_id2 ...]" 71 | ) 72 | sys.exit(1) 73 | 74 | target_status = sys.argv[1] 75 | target_clusters = sys.argv[2:] 76 | 77 | wait_for_documentdb_cluster_status(target_status, target_clusters) 78 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | 
default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "documentdb" { 2 | cidr_block = "10.0.0.0/16" 3 | tags = { 4 | Name = "documentdb-vpc-${random_pet.suffix.id}" 5 | } 6 | } 7 | 8 | resource "aws_subnet" "documentdb_1" { 9 | vpc_id = aws_vpc.documentdb.id 10 | cidr_block = "10.0.1.0/24" 11 | availability_zone = "eu-west-1a" 12 | map_public_ip_on_launch = true 13 | } 14 | 15 | resource "aws_subnet" "documentdb_2" { 16 | vpc_id = aws_vpc.documentdb.id 17 | cidr_block = "10.0.2.0/24" 18 | availability_zone = "eu-west-1b" 19 | map_public_ip_on_launch = true 20 | } 21 | 22 | resource "aws_docdb_subnet_group" "documentdb" { 23 | name = "documentdb-subnet-group-${random_pet.suffix.id}" 24 | subnet_ids = [aws_subnet.documentdb_1.id, aws_subnet.documentdb_2.id] 25 | } 26 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "service_count" { 2 | alarm_name = "ecs-cluster-hello-service-count" 3 | comparison_operator = "LessThanThreshold" 4 | evaluation_periods = "2" 5 | metric_name = "CPUUtilization" 6 | namespace = "AWS/ECS" 7 | period = "60" 8 | statistic = "SampleCount" 9 | threshold = "2" 10 | alarm_description = "Less than 2 
Running Service on cluster" 11 | dimensions = { 12 | ClusterName = aws_ecs_cluster.this.id 13 | } 14 | 15 | tags = { 16 | tostop = "true" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ecs_cluster" "this" { 2 | name = "test-ecs-cluster-${random_pet.suffix.id}" 3 | 4 | setting { 5 | name = "containerInsights" 6 | value = "disabled" 7 | } 8 | } 9 | 10 | resource "aws_ecs_service" "to_scheduled" { 11 | name = "test-to-stop-${random_pet.suffix.id}" 12 | cluster = aws_ecs_cluster.this.id 13 | task_definition = aws_ecs_task_definition.this.arn 14 | desired_count = 1 15 | launch_type = "FARGATE" 16 | 17 | network_configuration { 18 | subnets = [aws_subnet.primary.id] 19 | } 20 | 21 | tags = { 22 | tostop = "true", 23 | } 24 | lifecycle { 25 | ignore_changes = [ 26 | desired_count, 27 | tags 28 | ] 29 | } 30 | } 31 | 32 | resource "aws_ecs_service" "not_to_scheduled" { 33 | name = "test-not-to-stop-${random_pet.suffix.id}" 34 | cluster = aws_ecs_cluster.this.id 35 | task_definition = aws_ecs_task_definition.this.arn 36 | desired_count = 1 37 | launch_type = "FARGATE" 38 | 39 | network_configuration { 40 | subnets = [aws_subnet.primary.id] 41 | } 42 | 43 | tags = { 44 | tostop = "false", 45 | } 46 | lifecycle { 47 | ignore_changes = [ 48 | desired_count, 49 | tags 50 | ] 51 | } 52 | } 53 | 54 | resource "aws_ecs_task_definition" "this" { 55 | family = "test-${random_pet.suffix.id}" 56 | 57 | # Refer to https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html 58 | # for cpu and memory values 59 | cpu = 256 60 | memory = 512 61 | 62 | requires_compatibilities = ["FARGATE"] 63 | network_mode = "awsvpc" 64 | 65 | # execution_role_arn = aws_iam_role.ecs_service.arn 66 | task_role_arn = aws_iam_role.hello_ecs_task_execution_role.arn 67 | 68 | container_definitions = 
jsonencode([ 69 | { 70 | name = "hello-world-rest" 71 | image = "docker.io/library/nginx:alpine" 72 | cpu = 10 73 | memory = 128 74 | essential = true 75 | portMappings = [ 76 | { 77 | containerPort = 80 78 | hostPort = 80 79 | protocol = "tcp" 80 | } 81 | ] 82 | } 83 | ]) 84 | } 85 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "hello_ecs_task_execution_role" { 2 | name = "hello-ecsTaskExecutionRole" 3 | 4 | assume_role_policy = < None: 15 | """Wait for instances to reach desired status. 16 | 17 | Args: 18 | instance_ids: List of instance IDs to check 19 | desired_status: Desired status to wait for (e.g. 'running', 'stopped') 20 | region: AWS region name 21 | """ 22 | if not instance_ids: 23 | return 24 | 25 | ec2 = boto3.client("ec2", region_name=region) if region else boto3.client("ec2") 26 | start_time = time.time() 27 | timeout = 600 # 10 minutes timeout 28 | 29 | while True: 30 | try: 31 | # Check if timeout has been reached 32 | if time.time() - start_time > timeout: 33 | print( 34 | f"Timeout reached after {timeout} seconds. Some instances may not have reached the desired status." 
35 | ) 36 | sys.exit(1) 37 | 38 | response = ec2.describe_instances(InstanceIds=instance_ids) 39 | all_instances_in_desired_state = True 40 | 41 | for reservation in response["Reservations"]: 42 | for instance in reservation["Instances"]: 43 | current_state = instance["State"]["Name"] 44 | if current_state != desired_status: 45 | all_instances_in_desired_state = False 46 | break 47 | 48 | if not all_instances_in_desired_state: 49 | break 50 | 51 | if all_instances_in_desired_state: 52 | print(f"All instances are now {desired_status}") 53 | return 54 | 55 | print(f"Waiting for instances to be {desired_status}...") 56 | time.sleep(10) # Wait 10 seconds before checking again 57 | 58 | except ClientError as e: 59 | print(f"Error checking instance status: {e}") 60 | sys.exit(1) 61 | 62 | 63 | if __name__ == "__main__": 64 | if len(sys.argv) < 3: 65 | print( 66 | "Usage: python wait_instances.py [instance_id2 ...]" 67 | ) 68 | sys.exit(1) 69 | 70 | desired_status = sys.argv[1] 71 | instance_ids = sys.argv[2:] 72 | 73 | wait_for_instances_status(instance_ids, desired_status) 74 | -------------------------------------------------------------------------------- /examples/instance-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/instance-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/instance-scheduler/vpc.tf: 
-------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = "10.0.0.0/16" 3 | enable_dns_hostnames = true 4 | enable_dns_support = true 5 | } 6 | 7 | resource "aws_subnet" "public" { 8 | vpc_id = aws_vpc.main.id 9 | cidr_block = "10.0.1.0/24" 10 | map_public_ip_on_launch = true 11 | availability_zone = "${data.aws_region.current.name}a" 12 | } 13 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_neptune_cluster" "to_schedule" { 5 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 6 | engine = "neptune" 7 | skip_final_snapshot = true 8 | iam_database_authentication_enabled = true 9 | apply_immediately = true 10 | neptune_subnet_group_name = aws_neptune_subnet_group.test.name 11 | 12 | tags = { 13 | tostop = "true-${random_pet.suffix.id}" 14 | } 15 | } 16 | 17 | resource "aws_neptune_cluster_instance" "to_schedule" { 18 | identifier = "test-to-stop-${random_pet.suffix.id}" 19 | cluster_identifier = aws_neptune_cluster.to_schedule.id 20 | engine = "neptune" 21 | instance_class = "db.t3.medium" 22 | apply_immediately = true 23 | 24 | tags = { 25 | tostop = "true-${random_pet.suffix.id}" 26 | } 27 | } 28 | 29 | resource "aws_neptune_cluster" "not_to_scheduled" { 30 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 31 | engine = "neptune" 32 | skip_final_snapshot = true 33 | iam_database_authentication_enabled = true 34 | apply_immediately = true 35 | neptune_subnet_group_name = aws_neptune_subnet_group.test.name 36 | 37 | tags = { 38 | tostop = "false" 39 | } 40 | } 41 | 42 | resource "aws_neptune_cluster_instance" "not_to_scheduled" { 43 | identifier = "test-not-to-stop-${random_pet.suffix.id}" 44 | cluster_identifier = 
aws_neptune_cluster.not_to_scheduled.id 45 | engine = "neptune" 46 | instance_class = "db.t3.medium" 47 | apply_immediately = true 48 | 49 | tags = { 50 | tostop = "false" 51 | } 52 | } 53 | 54 | 55 | module "neptune-stop-friday" { 56 | source = "../.." 57 | name = "stop-neptune-${random_pet.suffix.id}" 58 | schedule_expression = "cron(0 23 ? * FRI *)" 59 | schedule_action = "stop" 60 | rds_schedule = "true" 61 | 62 | scheduler_tag = { 63 | key = "tostop" 64 | value = "true-${random_pet.suffix.id}" 65 | } 66 | } 67 | 68 | module "neptune-start-monday" { 69 | source = "../.." 70 | name = "start-neptune-${random_pet.suffix.id}" 71 | schedule_expression = "cron(0 07 ? * MON *)" 72 | schedule_action = "start" 73 | rds_schedule = "true" 74 | 75 | scheduler_tag = { 76 | key = "tostop" 77 | value = "true-${random_pet.suffix.id}" 78 | } 79 | } 80 | 81 | module "test-execution" { 82 | count = var.test_mode ? 1 : 0 83 | source = "./test-execution" 84 | 85 | lambda_stop_name = module.neptune-stop-friday.scheduler_lambda_name 86 | neptune_cluster_to_scheduled_name = aws_neptune_cluster.to_schedule.cluster_identifier 87 | neptune_cluster_not_scheduled_name = aws_neptune_cluster.not_to_scheduled.cluster_identifier 88 | 89 | depends_on = [ 90 | aws_neptune_cluster_instance.to_schedule, 91 | aws_neptune_cluster_instance.not_to_scheduled 92 | ] 93 | } 94 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform documentdb-scheduler outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.neptune-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.neptune-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.neptune-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = 
module.neptune-start-monday.scheduler_lambda_arn 17 | } 18 | 19 | output "neptune_cluster_scheduled_identifier" { 20 | value = aws_neptune_cluster.to_schedule.cluster_identifier 21 | } 22 | 23 | output "neptune_instance_scheduled_identifier" { 24 | value = aws_neptune_cluster_instance.to_schedule.identifier 25 | } 26 | 27 | output "neptune_cluster_not_scheduled_identifier" { 28 | value = aws_neptune_cluster.not_to_scheduled.cluster_identifier 29 | } 30 | 31 | output "neptune_instance_not_scheduled_identifier" { 32 | value = aws_neptune_cluster_instance.not_to_scheduled.identifier 33 | } 34 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.neptune-stop-friday.scheduler_lambda_name == "stop-neptune-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.neptune-start-monday.scheduler_lambda_name == "start-neptune-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test-execution[0].neptune_cluster_to_scheduled_status == "stopped\n" 20 | error_message = "neptune cluster with tag 'tostop=true' should be stopped" 21 | } 22 | 23 | assert { 24 | condition = module.test-execution[0].neptune_cluster_not_scheduled_status == "available\n" 25 | error_message = "neptune cluster with tag 'tostop=false' should not be stopped" 26 | } 27 | } 28 | 29 | # Add this cleanup step to restore the cluster to 'available' state before destruction 30 | run "cleanup_test_resources" { 31 | command = apply 32 | 33 | variables { 34 | neptune_cluster_name = run.create_test_infrastructure.neptune_cluster_scheduled_identifier 35 | } 36 | 37 | # This will 
start the stopped cluster to ensure proper deletion 38 | module { 39 | source = "./test-cleanup" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_neptune_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=1800 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for Neptune cluster ${var.neptune_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 8 | 9 | while true; do 10 | # Check the current state of the Neptune cluster 11 | CURRENT_STATE=$(aws neptune describe-db-clusters --db-cluster-identifier ${var.neptune_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws neptune start-db-cluster --db-cluster-identifier ${var.neptune_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. Neptune cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_neptune_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=1800 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for Neptune cluster ${var.neptune_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 
44 | 45 | while true; do 46 | # Check the current state of the Neptune cluster 47 | CURRENT_STATE=$(aws neptune describe-db-clusters --db-cluster-identifier ${var.neptune_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. Neptune cluster did not reach 'available' state within $TIMEOUT seconds." 61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_neptune_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "neptune_cluster_name" { 2 | description = "Name of the Neptune cluster to start before deletion" 3 | type = string 4 | } 5 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_neptune_cluster_available_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_neptune_status.py available \ 5 | ${var.neptune_cluster_to_scheduled_name} \ 6 | ${var.neptune_cluster_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_neptune" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = 
[null_resource.wait_neptune_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_neptune_cluster_stopped_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_neptune_status.py stopped \ 26 | ${var.neptune_cluster_to_scheduled_name} 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "neptune_cluster_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws neptune describe-db-clusters \ 35 | --db-cluster-identifier ${var.neptune_cluster_to_scheduled_name} \ 36 | --query 'DBClusters[0].Status' \ 37 | --output text > ${path.module}/neptune_cluster_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_neptune_cluster_stopped_state] 42 | } 43 | 44 | data "local_file" "neptune_cluster_to_scheduled" { 45 | filename = "${path.module}/neptune_cluster_to_scheduled.state" 46 | 47 | depends_on = [null_resource.neptune_cluster_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "neptune_cluster_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws neptune describe-db-clusters \ 54 | --db-cluster-identifier ${var.neptune_cluster_not_scheduled_name} \ 55 | --query 'DBClusters[0].Status' \ 56 | --output text > ${path.module}/neptune_cluster_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_neptune_cluster_stopped_state] 61 | } 62 | 63 | data "local_file" "neptune_cluster_not_scheduled" { 64 | filename = "${path.module}/neptune_cluster_not_scheduled.state" 65 | 66 | depends_on = [null_resource.neptune_cluster_not_scheduled] 67 | } -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "neptune_cluster_to_scheduled_status" { 2 | description = "Status of the Neptune cluster that should be scheduled for stop/start" 3 | value = 
#!/usr/bin/env python3
"""Script to wait for AWS Neptune cluster status.

Usage: python wait_neptune_status.py <desired_status> <cluster_id1> [cluster_id2 ...]
"""

import sys
import time
from typing import List

import boto3
from botocore.exceptions import ClientError


def wait_for_neptune_cluster_status(
    desired_status: str, cluster_identifiers: List[str]
) -> None:
    """Wait for Neptune clusters and their first member instance to reach a status.

    Args:
        desired_status: Desired status to wait for (e.g. 'available', 'stopped')
        cluster_identifiers: List of Neptune cluster identifiers

    Exits the process with status 1 on timeout or AWS API error.
    """
    if not cluster_identifiers:
        return

    neptune = boto3.client("neptune")
    start_time = time.time()
    timeout = 1800  # 30 minutes timeout

    while True:
        try:
            if time.time() - start_time > timeout:
                # Note the trailing space: adjacent literals are concatenated.
                print(
                    f"Timeout reached after {timeout} seconds. "
                    "Some neptune clusters may not have reached the desired status."
                )
                sys.exit(1)

            all_clusters_in_desired_state = True
            for cluster_id in cluster_identifiers:
                response = neptune.describe_db_clusters(DBClusterIdentifier=cluster_id)
                cluster_status = response["DBClusters"][0]["Status"]
                # NOTE(review): assumes every cluster has at least one member
                # instance; an empty DBClusterMembers list raises IndexError.
                instance_id = response["DBClusters"][0]["DBClusterMembers"][0][
                    "DBInstanceIdentifier"
                ]
                instance_response = neptune.describe_db_instances(
                    DBInstanceIdentifier=instance_id
                )
                instance_status = instance_response["DBInstances"][0][
                    "DBInstanceStatus"
                ]

                # Both the cluster and its member instance must reach the
                # target status before we consider this cluster done.
                if (
                    cluster_status != desired_status
                    or instance_status != desired_status
                ):
                    all_clusters_in_desired_state = False
                    break

            if all_clusters_in_desired_state:
                print(f"All neptune clusters are now {desired_status}")
                return

            print(f"Waiting for neptune clusters to be {desired_status}...")
            time.sleep(10)  # Wait 10 seconds before checking again

        except ClientError as e:
            print(f"Error checking neptune status: {e}")
            sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(
            "Usage: python wait_neptune_status.py <desired_status> <cluster_id1>"
            " [cluster_id2 ...]"
        )
        sys.exit(1)

    target_status = sys.argv[1]
    target_clusters = sys.argv[2:]

    wait_for_neptune_cluster_status(target_status, target_clusters)
variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "neptune" { 2 | cidr_block = "10.0.0.0/16" 3 | tags = { 4 | Name = "neptune-vpc-${random_pet.suffix.id}" 5 | } 6 | } 7 | 8 | resource "aws_subnet" "neptune_1" { 9 | vpc_id = aws_vpc.neptune.id 10 | cidr_block = "10.0.1.0/24" 11 | availability_zone = "eu-west-1a" 12 | map_public_ip_on_launch = true 13 | } 14 | 15 | resource "aws_subnet" "neptune_2" { 16 | vpc_id = aws_vpc.neptune.id 17 | cidr_block = "10.0.2.0/24" 18 | availability_zone = "eu-west-1b" 19 | map_public_ip_on_launch = true 20 | } 21 | 22 | resource "aws_neptune_subnet_group" "test" { 23 | name = "neptune-subnet-group-${random_pet.suffix.id}" 24 | subnet_ids = [aws_subnet.neptune_1.id, aws_subnet.neptune_2.id] 25 | } 26 | -------------------------------------------------------------------------------- /examples/rds-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "aurora_scheduled_cpu" { 2 | alarm_name = "aurora-cluster-with-tag-highCPUUtilization" 3 | comparison_operator = "GreaterThanThreshold" 4 | evaluation_periods = "2" 5 | metric_name = "CPUUtilization" 6 | namespace = "AWS/RDS" 7 | period = "60" 8 | statistic = "Average" 9 | threshold = "90" 10 | alarm_description = "Average database CPU 
utilization is too high." 11 | dimensions = { 12 | DBClusterIdentifier = aws_rds_cluster.aurora_scheduled.id 13 | } 14 | 15 | tags = { 16 | tostop = "true" 17 | } 18 | } 19 | 20 | resource "aws_cloudwatch_metric_alarm" "mariadb_scheduled_cpu" { 21 | alarm_name = "mariadbwithtag-highCPUUtilization" 22 | comparison_operator = "GreaterThanThreshold" 23 | evaluation_periods = "2" 24 | metric_name = "CPUUtilization" 25 | namespace = "AWS/RDS" 26 | period = "60" 27 | statistic = "Average" 28 | threshold = "90" 29 | alarm_description = "Average database CPU utilization is too high." 30 | dimensions = { 31 | DBInstanceIdentifier = aws_db_instance.mariadb_scheduled.id 32 | } 33 | 34 | tags = { 35 | tostop = "true" 36 | } 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "mysql_not_scheduled_cpu" { 40 | alarm_name = "mysqlwithouttag-highCPUUtilization" 41 | comparison_operator = "GreaterThanThreshold" 42 | evaluation_periods = "2" 43 | metric_name = "CPUUtilization" 44 | namespace = "AWS/RDS" 45 | period = "60" 46 | statistic = "Average" 47 | threshold = "90" 48 | alarm_description = "Average database CPU utilization is too high." 
49 | dimensions = { 50 | DBInstanceIdentifier = aws_db_instance.mysql_not_scheduled.id 51 | } 52 | 53 | tags = { 54 | tostop = "false" 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /examples/rds-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform rds with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_rds_cluster" "aurora_scheduled" { 5 | cluster_identifier = "test-to-stop-aurora-cluster-${random_pet.suffix.id}" 6 | engine = "aurora-mysql" 7 | db_subnet_group_name = aws_db_subnet_group.aurora.id 8 | database_name = "aurorawithtag" 9 | master_username = "foo" 10 | master_password = "barbut8chars" 11 | skip_final_snapshot = "true" 12 | 13 | tags = { 14 | tostop = "true-${random_pet.suffix.id}" 15 | } 16 | } 17 | 18 | resource "aws_rds_cluster_instance" "aurora_scheduled" { 19 | identifier = "test-to-stop-aurora-instance-${random_pet.suffix.id}" 20 | engine = aws_rds_cluster.aurora_scheduled.engine 21 | engine_version = aws_rds_cluster.aurora_scheduled.engine_version 22 | db_subnet_group_name = aws_db_subnet_group.aurora.id 23 | cluster_identifier = aws_rds_cluster.aurora_scheduled.id 24 | instance_class = "db.t3.medium" 25 | 26 | tags = { 27 | tostop = "true-${random_pet.suffix.id}" 28 | } 29 | } 30 | 31 | resource "aws_db_instance" "mariadb_scheduled" { 32 | identifier = "test-to-stop-mariadb-instance-${random_pet.suffix.id}" 33 | db_name = "mariadbwithtag" 34 | db_subnet_group_name = aws_db_subnet_group.aurora.id 35 | allocated_storage = 10 36 | storage_type = "gp2" 37 | engine = "mariadb" 38 | engine_version = "11.4.4" 39 | instance_class = "db.t4g.micro" 40 | username = "foo" 41 | password = "foobarbaz" 42 | skip_final_snapshot = "true" 43 | 44 | tags = { 45 | tostop = "true-${random_pet.suffix.id}" 46 | } 47 | } 48 | 49 | resource "aws_db_instance" "mysql_not_scheduled" { 50 | identifier = 
"test-not-to-stop-mysql-instance-${random_pet.suffix.id}" 51 | db_name = "mysqlwithouttag" 52 | db_subnet_group_name = aws_db_subnet_group.aurora.id 53 | allocated_storage = 10 54 | storage_type = "gp2" 55 | engine = "mysql" 56 | engine_version = "8.0" 57 | instance_class = "db.t4g.micro" 58 | username = "foo" 59 | password = "foobarbaz" 60 | skip_final_snapshot = "true" 61 | 62 | tags = { 63 | tostop = "false" 64 | } 65 | } 66 | 67 | 68 | ### Terraform modules ### 69 | 70 | module "rds-stop-friday" { 71 | source = "../../" 72 | name = "stop-rds-${random_pet.suffix.id}" 73 | schedule_expression = "cron(0 23 ? * FRI *)" 74 | schedule_action = "stop" 75 | ec2_schedule = "false" 76 | rds_schedule = "true" 77 | autoscaling_schedule = "false" 78 | cloudwatch_alarm_schedule = "true" 79 | 80 | scheduler_tag = { 81 | key = "tostop" 82 | value = "true-${random_pet.suffix.id}" 83 | } 84 | } 85 | 86 | module "rds-start-monday" { 87 | source = "../../" 88 | name = "start-rds-${random_pet.suffix.id}" 89 | schedule_expression = "cron(0 07 ? * MON *)" 90 | schedule_action = "start" 91 | ec2_schedule = "false" 92 | rds_schedule = "true" 93 | autoscaling_schedule = "false" 94 | cloudwatch_alarm_schedule = "true" 95 | 96 | scheduler_tag = { 97 | key = "tostop" 98 | value = "true-${random_pet.suffix.id}" 99 | } 100 | } 101 | 102 | module "test-execution" { 103 | count = var.test_mode ? 
1 : 0 104 | source = "./test-execution" 105 | 106 | lambda_stop_name = module.rds-stop-friday.scheduler_lambda_name 107 | rds_aurora_cluster_to_scheduled_name = aws_rds_cluster.aurora_scheduled.cluster_identifier 108 | rds_mariadb_instance_to_scheduled_name = aws_db_instance.mariadb_scheduled.identifier 109 | rds_mysql_instance_to_not_scheduled_name = aws_db_instance.mysql_not_scheduled.identifier 110 | 111 | depends_on = [ 112 | aws_rds_cluster_instance.aurora_scheduled 113 | ] 114 | } 115 | -------------------------------------------------------------------------------- /examples/rds-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform ex2-schedule outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.rds-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.rds-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.rds-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = module.rds-start-monday.scheduler_lambda_arn 17 | } 18 | 19 | output "rds_aurora_cluster_name" { 20 | value = aws_rds_cluster.aurora_scheduled.cluster_identifier 21 | } 22 | 23 | output "rds_aurora_instance_name" { 24 | value = aws_rds_cluster_instance.aurora_scheduled.identifier 25 | } 26 | -------------------------------------------------------------------------------- /examples/rds-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.rds-stop-friday.scheduler_lambda_name == "stop-rds-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.rds-start-monday.scheduler_lambda_name == "start-rds-${random_pet.suffix.id}" 15 | 
error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test-execution[0].rds_aurora_cluster_to_scheduled == "stopped\n" 20 | error_message = "Invalid RDS cluster instance state" 21 | } 22 | 23 | assert { 24 | condition = module.test-execution[0].rds_mariadb_instance_to_scheduled == "stopped\n" 25 | error_message = "Invalid RDS instance state" 26 | } 27 | 28 | assert { 29 | condition = module.test-execution[0].rds_mysql_instance_to_not_scheduled == "available\n" 30 | error_message = "Invalid RDS instance state" 31 | } 32 | } 33 | 34 | # Add this cleanup step to restore the cluster to 'available' state before destruction 35 | run "cleanup_test_resources" { 36 | command = apply 37 | 38 | variables { 39 | rds_aurora_cluster_name = run.create_test_infrastructure.rds_aurora_cluster_name 40 | rds_aurora_instance_name = run.create_test_infrastructure.rds_aurora_instance_name 41 | } 42 | 43 | # This will start the stopped cluster to ensure proper deletion 44 | module { 45 | source = "./test-cleanup" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_rds_aurora_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=600 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for rds aurora cluster ${var.rds_aurora_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 
8 | 9 | while true; do 10 | # Check the current state of the rds aurora cluster 11 | CURRENT_STATE=$(aws rds describe-db-clusters --db-cluster-identifier ${var.rds_aurora_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws rds start-db-cluster --db-cluster-identifier ${var.rds_aurora_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. rds aurora cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_rds_aurora_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=600 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for rds aurora cluster ${var.rds_aurora_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 44 | 45 | while true; do 46 | # Check the current state of the rds aurora cluster 47 | CURRENT_STATE=$(aws rds describe-db-clusters --db-cluster-identifier ${var.rds_aurora_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. rds aurora cluster did not reach 'available' state within $TIMEOUT seconds." 
61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_rds_aurora_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rds_aurora_cluster_name" { 2 | description = "Name of the rds aurora cluster to start before deletion" 3 | type = string 4 | } 5 | 6 | variable "rds_aurora_instance_name" { 7 | description = "Name of the rds aurora instance to start before deletion" 8 | type = string 9 | } 10 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_rds_instance_running_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_rds_instance.py available \ 5 | ${var.rds_mariadb_instance_to_scheduled_name} \ 6 | ${var.rds_mysql_instance_to_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "null_resource" "wait_rds_cluster_running_state" { 12 | provisioner "local-exec" { 13 | command = <<-EOT 14 | python3 ${path.module}/wait_rds_cluster.py available \ 15 | ${var.rds_aurora_cluster_to_scheduled_name} 16 | EOT 17 | } 18 | } 19 | 20 | resource "aws_lambda_invocation" "this" { 21 | function_name = var.lambda_stop_name 22 | 23 | input = jsonencode({ 24 | key1 = "value1" 25 | key2 = "value2" 26 | }) 27 | 28 | depends_on = [ 29 | null_resource.wait_rds_instance_running_state, 30 | null_resource.wait_rds_cluster_running_state, 31 | ] 32 | } 33 | 34 | resource "null_resource" "wait_rds_instance_stopped_state" { 35 | provisioner "local-exec" { 36 | command = <<-EOT 37 | 
python3 ${path.module}/wait_rds_instance.py stopped \ 38 | ${var.rds_mariadb_instance_to_scheduled_name} 39 | EOT 40 | } 41 | 42 | depends_on = [aws_lambda_invocation.this] 43 | } 44 | 45 | resource "null_resource" "wait_rds_cluster_stopped_state" { 46 | provisioner "local-exec" { 47 | command = <<-EOT 48 | python3 ${path.module}/wait_rds_cluster.py stopped \ 49 | ${var.rds_aurora_cluster_to_scheduled_name} 50 | EOT 51 | } 52 | 53 | depends_on = [null_resource.wait_rds_instance_stopped_state] 54 | } 55 | 56 | resource "null_resource" "rds_aurora_cluster_to_scheduled" { 57 | provisioner "local-exec" { 58 | command = <<-EOT 59 | aws rds describe-db-clusters \ 60 | --db-cluster-identifier ${var.rds_aurora_cluster_to_scheduled_name} \ 61 | --query 'DBClusters[0].Status' \ 62 | --output text > ${path.module}/rds_aurora_cluster_to_scheduled.state 63 | EOT 64 | } 65 | 66 | depends_on = [null_resource.wait_rds_cluster_stopped_state] 67 | } 68 | 69 | data "local_file" "rds_aurora_cluster_to_scheduled" { 70 | filename = "${path.module}/rds_aurora_cluster_to_scheduled.state" 71 | 72 | depends_on = [null_resource.rds_aurora_cluster_to_scheduled] 73 | } 74 | 75 | resource "null_resource" "rds_mariadb_instance_to_scheduled" { 76 | provisioner "local-exec" { 77 | command = <<-EOT 78 | aws rds describe-db-instances \ 79 | --db-instance-identifier ${var.rds_mariadb_instance_to_scheduled_name} \ 80 | --query 'DBInstances[0].DBInstanceStatus' \ 81 | --output text > ${path.module}/rds_mariadb_instance_to_scheduled.state 82 | EOT 83 | } 84 | 85 | depends_on = [null_resource.wait_rds_instance_stopped_state] 86 | } 87 | 88 | data "local_file" "rds_mariadb_instance_to_scheduled" { 89 | filename = "${path.module}/rds_mariadb_instance_to_scheduled.state" 90 | 91 | depends_on = [null_resource.rds_mariadb_instance_to_scheduled] 92 | } 93 | 94 | resource "null_resource" "rds_mysql_instance_to_not_scheduled" { 95 | provisioner "local-exec" { 96 | command = <<-EOT 97 | aws rds 
describe-db-instances \ 98 | --db-instance-identifier ${var.rds_mysql_instance_to_not_scheduled_name} \ 99 | --query 'DBInstances[0].DBInstanceStatus' \ 100 | --output text > ${path.module}/rds_mysql_instance_to_not_scheduled.state 101 | EOT 102 | } 103 | 104 | depends_on = [null_resource.wait_rds_instance_stopped_state] 105 | } 106 | 107 | data "local_file" "rds_mysql_instance_to_not_scheduled" { 108 | filename = "${path.module}/rds_mysql_instance_to_not_scheduled.state" 109 | 110 | depends_on = [null_resource.rds_mysql_instance_to_not_scheduled] 111 | } 112 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "rds_aurora_cluster_to_scheduled" { 2 | description = "The status of the RDS cluster" 3 | value = data.local_file.rds_aurora_cluster_to_scheduled.content 4 | } 5 | 6 | output "rds_mariadb_instance_to_scheduled" { 7 | description = "The status of the RDS instance" 8 | value = data.local_file.rds_mariadb_instance_to_scheduled.content 9 | } 10 | 11 | output "rds_mysql_instance_to_not_scheduled" { 12 | description = "The status of the RDS instance" 13 | value = data.local_file.rds_mysql_instance_to_not_scheduled.content 14 | } 15 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the Lambda function used for stopping instances" 3 | type = string 4 | } 5 | 6 | variable "rds_aurora_cluster_to_scheduled_name" { 7 | description = "rds cluster name to be scheduled" 8 | type = string 9 | } 10 | 11 | variable "rds_mariadb_instance_to_scheduled_name" { 12 | description = "rds instance name to be scheduled" 13 | type = string 14 | } 15 | 16 | variable 
"rds_mysql_instance_to_not_scheduled_name" { 17 | description = "rds instance name to not be scheduled" 18 | type = string 19 | } 20 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/wait_rds_cluster.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS RDS cluster status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_rds_cluster_status( 13 | cluster_identifiers: List[str], desired_status: str 14 | ) -> None: 15 | """Wait for RDS clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of RDS cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 'available', 'stopped') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | rds = boto3.client("rds") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds. Some RDS clusters may not have reached the desired status." 
#!/usr/bin/env python3
"""Script to wait for AWS RDS instance status.

Usage: python wait_rds_instance.py <desired_status> <instance_id1> [instance_id2 ...]
"""

import sys
import time
from typing import List

import boto3
from botocore.exceptions import ClientError


def wait_for_rds_instance_status(
    instance_identifiers: List[str], desired_status: str
) -> None:
    """Wait for RDS instances to reach desired status.

    Args:
        instance_identifiers: List of RDS instance identifiers
        desired_status: Desired status to wait for (e.g. 'available', 'stopped')

    Exits the process with status 1 on timeout or AWS API error.
    """
    if not instance_identifiers:
        return

    rds = boto3.client("rds")
    start_time = time.time()
    timeout = 1800  # 30 minutes timeout

    while True:
        try:
            if time.time() - start_time > timeout:
                print(
                    f"Timeout reached after {timeout} seconds. Some RDS instances may not have reached the desired status."
                )
                sys.exit(1)

            all_instances_in_desired_state = True
            for instance_id in instance_identifiers:
                response = rds.describe_db_instances(DBInstanceIdentifier=instance_id)
                current_status = response["DBInstances"][0]["DBInstanceStatus"]

                if current_status != desired_status:
                    all_instances_in_desired_state = False
                    break

            if all_instances_in_desired_state:
                print(f"All RDS instances are now {desired_status}")
                return

            print(f"Waiting for RDS instances to be {desired_status}...")
            time.sleep(10)  # Wait 10 seconds before checking again

        except ClientError as e:
            print(f"Error checking RDS status: {e}")
            sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(
            "Usage: python wait_rds_instance.py <desired_status> <instance_id1>"
            " [instance_id2 ...]"
        )
        sys.exit(1)

    desired_status = sys.argv[1]
    instance_identifiers = sys.argv[2:]

    wait_for_rds_instance_status(instance_identifiers, desired_status)
required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/rds-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | resource "aws_vpc" "this" { 4 | cidr_block = "10.103.0.0/16" 5 | } 6 | 7 | resource "aws_subnet" "primary" { 8 | availability_zone = data.aws_availability_zones.available.names[0] 9 | vpc_id = aws_vpc.this.id 10 | cidr_block = "10.103.98.0/24" 11 | } 12 | 13 | resource "aws_subnet" "secondary" { 14 | availability_zone = data.aws_availability_zones.available.names[1] 15 | vpc_id = aws_vpc.this.id 16 | cidr_block = "10.103.99.0/24" 17 | } 18 | 19 | resource "aws_db_subnet_group" "aurora" { 20 | name = "aurora-subnet-${random_pet.suffix.id}" 21 | subnet_ids = [aws_subnet.primary.id, aws_subnet.secondary.id] 22 | } 23 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_kms_key" "scheduler" { 5 | description = "test kms option on scheduler module" 6 | deletion_window_in_days = 7 7 | } 8 | 9 | resource "aws_redshift_cluster" "scheduled" { 10 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 11 | database_name = "mydb" 12 | master_username = "exampleuser" 13 | master_password = "Mustbe8characters" 14 | node_type = "ra3.large" 15 | cluster_type = "single-node" 16 | publicly_accessible = false 17 | skip_final_snapshot = true 18 | cluster_subnet_group_name = aws_redshift_subnet_group.redshif.name 19 | 20 | tags = { 21 | tostop = 
"true-${random_pet.suffix.id}" 22 | } 23 | } 24 | 25 | resource "aws_redshift_cluster_snapshot" "scheduled" { 26 | cluster_identifier = aws_redshift_cluster.scheduled.id 27 | snapshot_identifier = "test-to-stop-${random_pet.suffix.id}" 28 | } 29 | 30 | resource "aws_redshift_cluster" "not_scheduled" { 31 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 32 | database_name = "mydb" 33 | master_username = "exampleuser" 34 | master_password = "Mustbe8characters" 35 | node_type = "ra3.large" 36 | cluster_type = "single-node" 37 | publicly_accessible = false 38 | skip_final_snapshot = true 39 | cluster_subnet_group_name = aws_redshift_subnet_group.redshif.name 40 | 41 | tags = { 42 | tostop = "false" 43 | } 44 | } 45 | 46 | resource "aws_redshift_cluster_snapshot" "not_scheduled" { 47 | cluster_identifier = aws_redshift_cluster.not_scheduled.id 48 | snapshot_identifier = "test-not-to-stop-${random_pet.suffix.id}" 49 | } 50 | 51 | 52 | module "redshift-stop-friday" { 53 | source = "../.." 54 | name = "stop-redshift-${random_pet.suffix.id}" 55 | kms_key_arn = aws_kms_key.scheduler.arn 56 | schedule_expression = "cron(0 23 ? * FRI *)" 57 | schedule_action = "stop" 58 | redshift_schedule = "true" 59 | 60 | scheduler_tag = { 61 | key = "tostop" 62 | value = "true-${random_pet.suffix.id}" 63 | } 64 | } 65 | 66 | module "redshift-start-monday" { 67 | source = "../.." 68 | name = "start-redshift-${random_pet.suffix.id}" 69 | schedule_expression = "cron(0 07 ? * MON *)" 70 | schedule_action = "start" 71 | redshift_schedule = "true" 72 | 73 | scheduler_tag = { 74 | key = "tostop" 75 | value = "true-${random_pet.suffix.id}" 76 | } 77 | } 78 | 79 | module "test-execution" { 80 | count = var.test_mode ? 
1 : 0 81 | source = "./test-execution" 82 | 83 | lambda_stop_name = module.redshift-stop-friday.scheduler_lambda_name 84 | redshift_cluster_to_scheduled_name = aws_redshift_cluster.scheduled.cluster_identifier 85 | redshift_cluster_not_scheduled_name = aws_redshift_cluster.not_scheduled.cluster_identifier 86 | } 87 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform redshift-scheduler outputs 2 | 3 | output "lambda_stop_name" { 4 | value = module.redshift-stop-friday.scheduler_lambda_name 5 | } 6 | 7 | output "lambda_stop_arn" { 8 | value = module.redshift-stop-friday.scheduler_lambda_arn 9 | } 10 | 11 | output "lambda_start_name" { 12 | value = module.redshift-start-monday.scheduler_lambda_name 13 | } 14 | 15 | output "lambda_start_arn" { 16 | value = module.redshift-start-monday.scheduler_lambda_arn 17 | } 18 | 19 | output "redshift_cluster_scheduled_identifier" { 20 | value = aws_redshift_cluster.scheduled.cluster_identifier 21 | } 22 | 23 | output "redshift_cluster_not_scheduled_identifier" { 24 | value = aws_redshift_cluster.not_scheduled.cluster_identifier 25 | } 26 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.redshift-stop-friday.scheduler_lambda_name == "stop-redshift-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.redshift-start-monday.scheduler_lambda_name == "start-redshift-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = 
module.test-execution[0].redshift_cluster_to_scheduled_state == "paused\n" 20 | error_message = "Invalid Redshift cluster state" 21 | } 22 | 23 | assert { 24 | condition = module.test-execution[0].redshift_cluster_not_scheduled_state == "available\n" 25 | error_message = "Invalid Redshift cluster state" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_redshift_cluster_available_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_redshift_status.py Available \ 5 | ${var.redshift_cluster_to_scheduled_name} \ 6 | ${var.redshift_cluster_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_redshift" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_redshift_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_redshift_cluster_paused_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_redshift_status.py Paused \ 26 | ${var.redshift_cluster_to_scheduled_name} \ 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "redshift_cluster_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws redshift describe-clusters \ 35 | --cluster-identifier ${var.redshift_cluster_to_scheduled_name} \ 36 | --query 'Clusters[0].ClusterStatus' \ 37 | --output text > ${path.module}/redshift_cluster_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_redshift_cluster_paused_state] 42 | } 43 | 44 | data "local_file" "redshift_cluster_to_scheduled" { 45 | filename = "${path.module}/redshift_cluster_to_scheduled.state" 46 | 47 | depends_on = 
[null_resource.redshift_cluster_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "redshift_cluster_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws redshift describe-clusters \ 54 | --cluster-identifier ${var.redshift_cluster_not_scheduled_name} \ 55 | --query 'Clusters[0].ClusterStatus' \ 56 | --output text > ${path.module}/redshift_cluster_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_redshift_cluster_paused_state] 61 | } 62 | 63 | data "local_file" "redshift_cluster_not_scheduled" { 64 | filename = "${path.module}/redshift_cluster_not_scheduled.state" 65 | 66 | depends_on = [null_resource.redshift_cluster_not_scheduled] 67 | } 68 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "redshift_cluster_to_scheduled_state" { 2 | description = "State of the Redshift cluster that should be stopped" 3 | value = data.local_file.redshift_cluster_to_scheduled.content 4 | } 5 | 6 | output "redshift_cluster_not_scheduled_state" { 7 | description = "State of the Redshift cluster that should not be stopped" 8 | value = data.local_file.redshift_cluster_not_scheduled.content 9 | } -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function to stop Redshift clusters" 3 | type = string 4 | } 5 | 6 | variable "redshift_cluster_to_scheduled_name" { 7 | description = "Name of the Redshift cluster that should be stopped" 8 | type = string 9 | } 10 | 11 | variable "redshift_cluster_not_scheduled_name" { 12 | description = "Name of the Redshift cluster that should not be stopped" 13 | type = string 14 | } 
15 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/wait_redshift_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS Redshift cluster status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_redshift_cluster_status( 13 | desired_status: str, cluster_identifiers: List[str] 14 | ) -> None: 15 | """Wait for Redshift clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of Redshift cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 'available', 'paused') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | redshift = boto3.client("redshift") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds." 33 | "Some Redshift clusters may not have reached the desired status." 
34 | ) 35 | sys.exit(1) 36 | 37 | all_clusters_in_desired_state = True 38 | for cluster_id in cluster_identifiers: 39 | response = redshift.describe_clusters(ClusterIdentifier=cluster_id) 40 | current_availability_status = response["Clusters"][0][ 41 | "ClusterAvailabilityStatus" 42 | ] 43 | 44 | if current_availability_status != desired_status: 45 | all_clusters_in_desired_state = False 46 | break 47 | 48 | if all_clusters_in_desired_state: 49 | print(f"All Redshift clusters are now {desired_status}") 50 | return 51 | 52 | print(f"Waiting for Redshift clusters to be {desired_status}...") 53 | time.sleep(10) # Wait 10 seconds before checking again 54 | 55 | except ClientError as e: 56 | print(f"Error checking Redshift status: {e}") 57 | sys.exit(1) 58 | 59 | 60 | if __name__ == "__main__": 61 | if len(sys.argv) < 3: 62 | print( 63 | "Usage: python wait_redshift_status.py" 64 | " [cluster_id2 ...]" 65 | ) 66 | sys.exit(1) 67 | 68 | target_status = sys.argv[1] 69 | target_clusters = sys.argv[2:] 70 | 71 | wait_for_redshift_cluster_status(target_status, target_clusters) 72 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/vpc.tf: 
-------------------------------------------------------------------------------- 1 | resource "aws_vpc" "redshif" { 2 | cidr_block = "10.0.0.0/16" 3 | tags = { 4 | Name = "redshift-vpc-${random_pet.suffix.id}" 5 | } 6 | } 7 | 8 | resource "aws_subnet" "redshift_1" { 9 | vpc_id = aws_vpc.redshif.id 10 | cidr_block = "10.0.1.0/24" 11 | availability_zone = "eu-west-1a" 12 | map_public_ip_on_launch = true 13 | } 14 | 15 | resource "aws_subnet" "redshift_2" { 16 | vpc_id = aws_vpc.redshif.id 17 | cidr_block = "10.0.2.0/24" 18 | availability_zone = "eu-west-1b" 19 | map_public_ip_on_launch = true 20 | } 21 | 22 | resource "aws_redshift_subnet_group" "redshif" { 23 | name = "redshift-subnet-group-${random_pet.suffix.id}" 24 | subnet_ids = [aws_subnet.redshift_1.id, aws_subnet.redshift_2.id] 25 | } 26 | -------------------------------------------------------------------------------- /examples/test_fixture/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_kms_key" "scheduler" { 5 | description = "test kms option on scheduler module" 6 | deletion_window_in_days = 7 7 | } 8 | 9 | module "aws-stop-friday" { 10 | source = "../.." 11 | name = "stop-aws-${random_pet.suffix.id}" 12 | kms_key_arn = aws_kms_key.scheduler.arn 13 | schedule_expression = "cron(0 23 ? * FRI *)" 14 | schedule_action = "stop" 15 | autoscaling_schedule = "true" 16 | ec2_schedule = "true" 17 | rds_schedule = "true" 18 | 19 | scheduler_tag = { 20 | key = "tostop" 21 | value = "true-${random_pet.suffix.id}" 22 | } 23 | } 24 | 25 | module "aws-start-monday" { 26 | source = "../.." 27 | name = "start-aws-${random_pet.suffix.id}" 28 | schedule_expression = "cron(0 07 ? 
* MON *)" 29 | schedule_action = "start" 30 | autoscaling_schedule = "true" 31 | ec2_schedule = "true" 32 | rds_schedule = "true" 33 | 34 | scheduler_tag = { 35 | key = "tostop" 36 | value = "true-${random_pet.suffix.id}" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /examples/test_fixture/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | assert { 5 | condition = module.aws-stop-friday.scheduler_lambda_name == "stop-aws-${random_pet.suffix.id}" 6 | error_message = "Invalid Stop lambda name" 7 | } 8 | 9 | assert { 10 | condition = module.aws-start-monday.scheduler_lambda_name == "start-aws-${random_pet.suffix.id}" 11 | error_message = "Invalid Start lambda name" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/test_fixture/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | module "aws-stop-friday" { 5 | source = "../.." 6 | name = "stop-aws-${random_pet.suffix.id}" 7 | schedule_expression = "cron(0 23 ? * FRI *)" 8 | schedule_expression_timezone = "Europe/Paris" 9 | schedule_action = "stop" 10 | autoscaling_schedule = "true" 11 | ec2_schedule = "true" 12 | rds_schedule = "true" 13 | 14 | scheduler_tag = { 15 | key = "tostop" 16 | value = "true-${random_pet.suffix.id}" 17 | } 18 | } 19 | 20 | module "aws-start-monday" { 21 | source = "../.." 
22 | name = "start-aws-${random_pet.suffix.id}" 23 | schedule_expression = "cron(0 07 ? * MON *)" 24 | schedule_expression_timezone = "Europe/Berlin" 25 | schedule_action = "start" 26 | autoscaling_schedule = "true" 27 | ec2_schedule = "true" 28 | rds_schedule = "true" 29 | 30 | scheduler_tag = { 31 | key = "tostop" 32 | value = "true-${random_pet.suffix.id}" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | assert { 5 | condition = module.aws-stop-friday.scheduler_lambda_name == "stop-aws-${random_pet.suffix.id}" 6 | error_message = "Invalid Stop lambda name" 7 | } 8 | 9 | assert { 10 | condition = module.aws-start-monday.scheduler_lambda_name == "start-aws-${random_pet.suffix.id}" 11 | error_message = "Invalid Start lambda name" 12 | } 13 | 14 | assert { 15 | condition = module.aws-stop-friday.scheduler_expression == "cron(0 23 ? * FRI *)" 16 | error_message = "Invalid scheduler expression" 17 | } 18 | 19 | assert { 20 | condition = module.aws-start-monday.scheduler_expression == "cron(0 07 ? 
* MON *)" 21 | error_message = "Invalid scheduler expression" 22 | } 23 | 24 | assert { 25 | condition = module.aws-stop-friday.scheduler_timezone == "Europe/Paris" 26 | error_message = "Invalid scheduler timezone" 27 | } 28 | 29 | assert { 30 | condition = module.aws-start-monday.scheduler_timezone == "Europe/Berlin" 31 | error_message = "Invalid scheduler timezone" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "suffix" {} 2 | 3 | resource "aws_transfer_server" "to_scheduled" { 4 | endpoint_type = "VPC" 5 | 6 | endpoint_details { 7 | subnet_ids = [aws_subnet.transfer_1.id] 8 | vpc_id = aws_vpc.transfer.id 9 | } 10 | 11 | protocols = ["SFTP"] 12 | 13 | tags = { 14 | tostop = "true-${random_pet.suffix.id}" 15 | } 16 | } 17 | 18 | resource "aws_transfer_server" "not_to_scheduled" { 19 | endpoint_type = "VPC" 20 | 21 | endpoint_details { 22 | subnet_ids = [aws_subnet.transfer_2.id] 23 | vpc_id = aws_vpc.transfer.id 24 | } 25 | 26 | protocols = ["SFTP"] 27 | 28 | tags = { 29 | tostop = "false" 30 | } 31 | } 32 | 33 | 34 | module "transfer-stop-friday" { 35 | source = "../.." 36 | 37 | name = "stop-transfer-${random_pet.suffix.id}" 38 | schedule_expression = "cron(0 23 ? * FRI *)" 39 | schedule_action = "stop" 40 | transfer_schedule = true 41 | 42 | scheduler_tag = { 43 | key = "tostop" 44 | value = "true-${random_pet.suffix.id}" 45 | } 46 | } 47 | 48 | module "transfer-start-monday" { 49 | source = "../.." 
50 | 51 | name = "start-transfer-${random_pet.suffix.id}" 52 | schedule_expression = "cron(0 07 ? * MON *)" 53 | schedule_action = "start" 54 | transfer_schedule = true 55 | 56 | scheduler_tag = { 57 | key = "tostop" 58 | value = "true-${random_pet.suffix.id}" 59 | } 60 | } 61 | 62 | module "test-execution" { 63 | count = var.test_mode ? 1 : 0 64 | source = "./test-execution" 65 | 66 | lambda_stop_name = module.transfer-stop-friday.scheduler_lambda_name 67 | transfer_server_to_scheduled_id = aws_transfer_server.to_scheduled.id 68 | transfer_server_not_scheduled_id = aws_transfer_server.not_to_scheduled.id 69 | } 70 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | output "lambda_stop_name" { 2 | value = module.transfer-stop-friday.scheduler_lambda_name 3 | } 4 | 5 | output "lambda_stop_arn" { 6 | value = module.transfer-stop-friday.scheduler_lambda_arn 7 | } 8 | 9 | output "lambda_start_name" { 10 | value = module.transfer-start-monday.scheduler_lambda_name 11 | } 12 | 13 | output "lambda_start_arn" { 14 | value = module.transfer-start-monday.scheduler_lambda_arn 15 | } 16 | 17 | output "transfer_server_scheduled_id" { 18 | description = "ID of the scheduled Transfer server" 19 | value = aws_transfer_server.to_scheduled.id 20 | } 21 | 22 | output "transfer_server_not_scheduled_id" { 23 | description = "ID of the non-scheduled Transfer server" 24 | value = aws_transfer_server.not_to_scheduled.id 25 | } -------------------------------------------------------------------------------- /examples/transfer-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "test_transfer_scheduler" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.transfer-stop-friday.scheduler_lambda_name == 
"stop-transfer-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.transfer-start-monday.scheduler_lambda_name == "start-transfer-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test-execution[0].transfer_server_to_scheduled_state == "OFFLINE\n" 20 | error_message = "Invalid Transfer server state" 21 | } 22 | 23 | assert { 24 | condition = module.test-execution[0].transfer_server_not_scheduled_state == "ONLINE\n" 25 | error_message = "Invalid Transfer server state" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_transfer_server_available_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_transfer_status.py ONLINE \ 5 | ${var.transfer_server_to_scheduled_id} \ 6 | ${var.transfer_server_not_scheduled_id} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_transfer" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_transfer_server_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_transfer_server_offline_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_transfer_status.py OFFLINE \ 26 | ${var.transfer_server_to_scheduled_id} \ 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "transfer_server_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws transfer describe-server \ 35 | --server-id ${var.transfer_server_to_scheduled_id} \ 36 | --query 'Server.State' \ 37 | --output text > ${path.module}/transfer_server_to_scheduled.state 38 | EOT 39 | 
} 40 | 41 | depends_on = [null_resource.wait_transfer_server_offline_state] 42 | } 43 | 44 | data "local_file" "transfer_server_to_scheduled" { 45 | filename = "${path.module}/transfer_server_to_scheduled.state" 46 | 47 | depends_on = [null_resource.transfer_server_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "transfer_server_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws transfer describe-server \ 54 | --server-id ${var.transfer_server_not_scheduled_id} \ 55 | --query 'Server.State' \ 56 | --output text > ${path.module}/transfer_server_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_transfer_server_offline_state] 61 | } 62 | 63 | data "local_file" "transfer_server_not_scheduled" { 64 | filename = "${path.module}/transfer_server_not_scheduled.state" 65 | 66 | depends_on = [null_resource.transfer_server_not_scheduled] 67 | } 68 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "transfer_server_to_scheduled_state" { 2 | description = "State of the Transfer server that should be stopped" 3 | value = data.local_file.transfer_server_to_scheduled.content 4 | } 5 | 6 | output "transfer_server_not_scheduled_state" { 7 | description = "State of the Transfer server that should not be stopped" 8 | value = data.local_file.transfer_server_not_scheduled.content 9 | } -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function" 3 | type = string 4 | } 5 | 6 | variable "transfer_server_to_scheduled_id" { 7 | description = "ID of the scheduled Transfer server" 8 | type = string 9 | } 10 | 11 | 
variable "transfer_server_not_scheduled_id" { 12 | description = "ID of the non-scheduled Transfer server" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/wait_transfer_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS Transfer server status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_transfer_server_status(desired_status: str, server_ids: List[str]) -> None: 13 | """Wait for AWS Transfer servers to reach desired status. 14 | 15 | Args: 16 | server_ids: List of AWS Transfer server IDs 17 | desired_status: Desired status to wait for (e.g. 'ONLINE', 'OFFLINE', 'STARTING', 'STOPPING') 18 | """ 19 | if not server_ids: 20 | return 21 | 22 | transfer = boto3.client("transfer") 23 | start_time = time.time() 24 | timeout = 900 # 15 minutes timeout 25 | 26 | while True: 27 | try: 28 | if time.time() - start_time > timeout: 29 | print( 30 | f"Timeout reached after {timeout} seconds. " 31 | "Some Transfer servers may not have reached the desired status." 
32 | ) 33 | sys.exit(1) 34 | 35 | all_servers_in_desired_state = True 36 | for server_id in server_ids: 37 | response = transfer.describe_server(ServerId=server_id) 38 | current_status = response["Server"]["State"] 39 | 40 | if current_status != desired_status: 41 | all_servers_in_desired_state = False 42 | break 43 | 44 | if all_servers_in_desired_state: 45 | print(f"All Transfer servers are now {desired_status}") 46 | return 47 | 48 | print(f"Waiting for Transfer servers to be {desired_status}...") 49 | time.sleep(10) # Wait 10 seconds before checking again 50 | 51 | except ClientError as e: 52 | print(f"Error checking Transfer server status: {e}") 53 | sys.exit(1) 54 | 55 | 56 | if __name__ == "__main__": 57 | if len(sys.argv) < 3: 58 | print( 59 | "Usage: python wait_transfer_status.py " 60 | " [server_id2 ...]" 61 | ) 62 | sys.exit(1) 63 | 64 | target_status = sys.argv[1] 65 | target_servers = sys.argv[2:] 66 | 67 | wait_for_transfer_server_status(target_status, target_servers) 68 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Enable test mode" 3 | type = bool 4 | default = false 5 | } -------------------------------------------------------------------------------- /examples/transfer-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /examples/transfer-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "transfer" { 2 | 
cidr_block = "10.0.0.0/16" 3 | tags = { 4 | Name = "transfer-vpc-${random_pet.suffix.id}" 5 | } 6 | } 7 | 8 | resource "aws_subnet" "transfer_1" { 9 | vpc_id = aws_vpc.transfer.id 10 | cidr_block = "10.0.1.0/24" 11 | availability_zone = "eu-west-1a" 12 | map_public_ip_on_launch = true 13 | } 14 | 15 | resource "aws_subnet" "transfer_2" { 16 | vpc_id = aws_vpc.transfer.id 17 | cidr_block = "10.0.2.0/24" 18 | availability_zone = "eu-west-1b" 19 | map_public_ip_on_launch = true 20 | } 21 | -------------------------------------------------------------------------------- /iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "this" { 2 | count = var.custom_iam_role_arn == null ? 1 : 0 3 | name = "${var.name}-scheduler-lambda" 4 | description = "Allows Lambda functions to stop and start ec2 and rds resources" 5 | assume_role_policy = data.aws_iam_policy_document.this.json 6 | tags = var.tags 7 | } 8 | 9 | data "aws_iam_policy_document" "this" { 10 | statement { 11 | actions = ["sts:AssumeRole"] 12 | 13 | principals { 14 | type = "Service" 15 | identifiers = ["lambda.amazonaws.com"] 16 | } 17 | } 18 | } 19 | 20 | resource "aws_iam_role_policy" "autoscaling_group_scheduler" { 21 | count = var.custom_iam_role_arn == null && (var.autoscaling_schedule == true || var.autoscaling_terminate_instances == true) ? 
1 : 0 22 | name = "${var.name}-autoscaling-custom-policy-scheduler" 23 | role = aws_iam_role.this[0].id 24 | policy = data.aws_iam_policy_document.autoscaling_group_scheduler.json 25 | } 26 | 27 | data "aws_iam_policy_document" "autoscaling_group_scheduler" { 28 | statement { 29 | actions = [ 30 | "autoscaling:DescribeScalingProcessTypes", 31 | "autoscaling:DescribeAutoScalingGroups", 32 | "autoscaling:DescribeTags", 33 | "autoscaling:SuspendProcesses", 34 | "autoscaling:ResumeProcesses", 35 | "autoscaling:UpdateAutoScalingGroup", 36 | "autoscaling:DescribeAutoScalingInstances", 37 | "autoscaling:TerminateInstanceInAutoScalingGroup", 38 | "ec2:StopInstances", 39 | "ec2:StartInstances", 40 | "ec2:TerminateInstances", 41 | ] 42 | 43 | resources = [ 44 | "*", 45 | ] 46 | } 47 | } 48 | 49 | resource "aws_iam_role_policy" "spot_instance_scheduler" { 50 | count = var.custom_iam_role_arn == null && var.ec2_schedule == true ? 1 : 0 51 | name = "${var.name}-spot-custom-policy-scheduler" 52 | role = aws_iam_role.this[0].id 53 | policy = data.aws_iam_policy_document.spot_instance_scheduler.json 54 | } 55 | 56 | data "aws_iam_policy_document" "spot_instance_scheduler" { 57 | statement { 58 | actions = [ 59 | "ec2:DescribeInstances", 60 | "ec2:TerminateSpotInstances", 61 | ] 62 | 63 | resources = [ 64 | "*", 65 | ] 66 | } 67 | } 68 | 69 | resource "aws_iam_role_policy" "instance_scheduler" { 70 | count = var.custom_iam_role_arn == null && var.ec2_schedule == true ? 
1 : 0 71 | name = "${var.name}-ec2-custom-policy-scheduler" 72 | role = aws_iam_role.this[0].id 73 | policy = data.aws_iam_policy_document.instance_scheduler.json 74 | } 75 | 76 | data "aws_iam_policy_document" "instance_scheduler" { 77 | statement { 78 | actions = [ 79 | "ec2:StopInstances", 80 | "ec2:StartInstances", 81 | "autoscaling:DescribeAutoScalingInstances", 82 | ] 83 | 84 | resources = [ 85 | "*", 86 | ] 87 | } 88 | } 89 | 90 | resource "aws_iam_role_policy" "rds_scheduler" { 91 | count = var.custom_iam_role_arn == null && (var.rds_schedule == true || var.documentdb_schedule == true) ? 1 : 0 92 | name = "${var.name}-rds-custom-policy-scheduler" 93 | role = aws_iam_role.this[0].id 94 | policy = data.aws_iam_policy_document.rds_scheduler.json 95 | } 96 | 97 | data "aws_iam_policy_document" "rds_scheduler" { 98 | statement { 99 | actions = [ 100 | "rds:StartDBCluster", 101 | "rds:StopDBCluster", 102 | "rds:StartDBInstance", 103 | "rds:StopDBInstance", 104 | "rds:DescribeDBClusters", 105 | ] 106 | 107 | resources = [ 108 | "*", 109 | ] 110 | } 111 | } 112 | 113 | resource "aws_iam_role_policy" "ecs_scheduler" { 114 | count = var.custom_iam_role_arn == null && var.ecs_schedule == true ? 1 : 0 115 | name = "${var.name}-ecs-custom-policy-scheduler" 116 | role = aws_iam_role.this[0].id 117 | policy = data.aws_iam_policy_document.ecs_scheduler.json 118 | } 119 | 120 | data "aws_iam_policy_document" "ecs_scheduler" { 121 | statement { 122 | actions = [ 123 | "ecs:UpdateService", 124 | "ecs:DescribeService", 125 | ] 126 | 127 | resources = [ 128 | "*", 129 | ] 130 | } 131 | } 132 | 133 | resource "aws_iam_role_policy" "redshift_scheduler" { 134 | count = var.custom_iam_role_arn == null && var.redshift_schedule == true ? 
1 : 0 135 | name = "${var.name}-redshift-custom-policy-scheduler" 136 | role = aws_iam_role.this[0].id 137 | policy = data.aws_iam_policy_document.redshift_scheduler.json 138 | } 139 | 140 | data "aws_iam_policy_document" "redshift_scheduler" { 141 | statement { 142 | actions = [ 143 | "redshift:ResumeCluster", 144 | "redshift:PauseCluster", 145 | ] 146 | 147 | resources = [ 148 | "*", 149 | ] 150 | } 151 | } 152 | 153 | resource "aws_iam_role_policy" "cloudwatch_alarm_scheduler" { 154 | count = var.custom_iam_role_arn == null && var.cloudwatch_alarm_schedule == true ? 1 : 0 155 | name = "${var.name}-cloudwatch-custom-policy-scheduler" 156 | role = aws_iam_role.this[0].id 157 | policy = data.aws_iam_policy_document.cloudwatch_alarm_scheduler.json 158 | } 159 | 160 | data "aws_iam_policy_document" "cloudwatch_alarm_scheduler" { 161 | statement { 162 | actions = [ 163 | "cloudwatch:DisableAlarmActions", 164 | "cloudwatch:EnableAlarmActions", 165 | ] 166 | 167 | resources = [ 168 | "*", 169 | ] 170 | } 171 | } 172 | 173 | resource "aws_iam_role_policy" "resource_groups_tagging_api" { 174 | count = var.custom_iam_role_arn == null ? 1 : 0 175 | name = "${var.name}-resource-groups-tagging-api-scheduler" 176 | role = aws_iam_role.this[0].id 177 | policy = data.aws_iam_policy_document.resource_groups_tagging_api.json 178 | } 179 | 180 | data "aws_iam_policy_document" "resource_groups_tagging_api" { 181 | statement { 182 | actions = [ 183 | "tag:GetResources", 184 | ] 185 | 186 | resources = [ 187 | "*", 188 | ] 189 | } 190 | } 191 | 192 | resource "aws_iam_role_policy" "lambda_logging" { 193 | count = var.custom_iam_role_arn == null ? 
1 : 0 194 | name = "${var.name}-lambda-logging" 195 | role = aws_iam_role.this[0].id 196 | policy = data.aws_iam_policy_document.lambda_logging_policy.json 197 | } 198 | 199 | data "aws_iam_policy_document" "lambda_logging_policy" { 200 | statement { 201 | actions = [ 202 | "logs:CreateLogStream", 203 | "logs:PutLogEvents" 204 | ] 205 | resources = [ 206 | "${aws_cloudwatch_log_group.this.arn}:*" 207 | ] 208 | effect = "Allow" 209 | } 210 | 211 | dynamic "statement" { 212 | for_each = var.kms_key_arn == null ? [] : [var.kms_key_arn] 213 | content { 214 | actions = [ 215 | "kms:Encrypt", 216 | "kms:Decrypt", 217 | "kms:CreateGrant" 218 | ] 219 | resources = [statement.value] 220 | effect = "Allow" 221 | } 222 | } 223 | } 224 | 225 | locals { 226 | # Backward compatibility with the former scheduler variable name. 227 | scheduler_tag = var.resources_tag == null ? var.scheduler_tag : var.resources_tag 228 | } 229 | 230 | resource "aws_iam_role" "scheduler_lambda" { 231 | name = "${var.name}-scheduler-lambda-role" 232 | description = "Allows scheduler to invoke ${var.name} lambda function" 233 | assume_role_policy = data.aws_iam_policy_document.scheduler_assume_role_policy.json 234 | tags = var.tags 235 | } 236 | 237 | resource "aws_iam_role_policy_attachment" "scheduler_lambda" { 238 | role = aws_iam_role.scheduler_lambda.name 239 | policy_arn = aws_iam_policy.scheduler_lambda.arn 240 | } 241 | 242 | data "aws_iam_policy_document" "scheduler_assume_role_policy" { 243 | statement { 244 | actions = ["sts:AssumeRole"] 245 | 246 | principals { 247 | type = "Service" 248 | identifiers = ["scheduler.amazonaws.com"] 249 | } 250 | } 251 | } 252 | 253 | resource "aws_iam_policy" "scheduler_lambda" { 254 | name = "${var.name}-Scheduler-Lambda-Policy" 255 | 256 | policy = jsonencode({ 257 | Version = "2012-10-17" 258 | Statement = [ 259 | { 260 | "Action" : [ 261 | "lambda:InvokeFunction" 262 | ], 263 | Effect = "Allow" 264 | Resource = aws_lambda_function.this.arn 265 | }, 266 | 
] 267 | }) 268 | } 269 | 270 | resource "aws_iam_role_policy" "transfer_scheduler" { 271 | count = var.custom_iam_role_arn == null && var.transfer_schedule == true ? 1 : 0 272 | name = "${var.name}-transfer-custom-policy-scheduler" 273 | role = aws_iam_role.this[0].id 274 | policy = data.aws_iam_policy_document.transfer_scheduler.json 275 | } 276 | 277 | data "aws_iam_policy_document" "transfer_scheduler" { 278 | statement { 279 | actions = [ 280 | "transfer:StartServer", 281 | "transfer:StopServer", 282 | "transfer:ListServers", 283 | "transfer:DescribeServer" 284 | ] 285 | 286 | resources = [ 287 | "*", 288 | ] 289 | } 290 | } 291 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | # Convert *.py to .zip because AWS Lambda need .zip 4 | data "archive_file" "this" { 5 | type = "zip" 6 | source_dir = "${path.module}/package/" 7 | output_path = "${path.module}/aws-stop-start-resources.zip" 8 | } 9 | 10 | # Create Lambda function for stop or start aws resources 11 | resource "aws_lambda_function" "this" { 12 | filename = data.archive_file.this.output_path 13 | source_code_hash = data.archive_file.this.output_base64sha256 14 | function_name = var.name 15 | role = var.custom_iam_role_arn == null ? aws_iam_role.this[0].arn : var.custom_iam_role_arn 16 | handler = "scheduler.main.lambda_handler" 17 | runtime = var.runtime 18 | timeout = "600" 19 | kms_key_arn = var.kms_key_arn == null ? "" : var.kms_key_arn 20 | 21 | environment { 22 | variables = { 23 | AWS_REGIONS = var.aws_regions == null ? 
data.aws_region.current.name : join(", ", var.aws_regions) 24 | SCHEDULE_ACTION = var.schedule_action 25 | TAG_KEY = local.scheduler_tag["key"] 26 | TAG_VALUE = local.scheduler_tag["value"] 27 | DOCUMENTDB_SCHEDULE = tostring(var.documentdb_schedule) 28 | EC2_SCHEDULE = tostring(var.ec2_schedule) 29 | ECS_SCHEDULE = tostring(var.ecs_schedule) 30 | RDS_SCHEDULE = tostring(var.rds_schedule) 31 | REDSHIFT_SCHEDULE = tostring(var.redshift_schedule) 32 | AUTOSCALING_SCHEDULE = tostring(var.autoscaling_schedule) 33 | AUTOSCALING_TERMINATE_INSTANCES = tostring(var.autoscaling_terminate_instances) 34 | CLOUDWATCH_ALARM_SCHEDULE = tostring(var.cloudwatch_alarm_schedule) 35 | TRANSFER_SCHEDULE = tostring(var.transfer_schedule) 36 | } 37 | } 38 | 39 | tags = var.tags 40 | } 41 | 42 | resource "aws_scheduler_schedule" "this" { 43 | name = "trigger-lambda-scheduler-${var.name}" 44 | description = "Trigger lambda scheduler" 45 | schedule_expression = var.schedule_expression 46 | schedule_expression_timezone = var.schedule_expression_timezone 47 | 48 | flexible_time_window { 49 | mode = "OFF" 50 | } 51 | 52 | target { 53 | arn = aws_lambda_function.this.arn 54 | role_arn = aws_iam_role.scheduler_lambda.arn 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "lambda_iam_role_arn" { 2 | description = "The ARN of the IAM role used by Lambda function" 3 | value = var.custom_iam_role_arn == null ? aws_iam_role.this[0].arn : var.custom_iam_role_arn 4 | } 5 | 6 | output "lambda_iam_role_name" { 7 | description = "The name of the IAM role used by Lambda function" 8 | value = var.custom_iam_role_arn == null ? 
aws_iam_role.this[0].name : element(split("/", var.custom_iam_role_arn), length(split("/", var.custom_iam_role_arn)) - 1) # take the LAST path segment: role ARNs may contain a path (arn:aws:iam::acct:role/path/name), where [1] would wrongly return "path" 9 | } 10 | 11 | output "scheduler_lambda_arn" { 12 | description = "The ARN of the Lambda function" 13 | value = aws_lambda_function.this.arn 14 | } 15 | 16 | output "scheduler_lambda_name" { 17 | description = "The name of the Lambda function" 18 | value = aws_lambda_function.this.function_name 19 | } 20 | 21 | output "scheduler_lambda_invoke_arn" { 22 | description = "The ARN to be used for invoking Lambda function from API Gateway" 23 | value = aws_lambda_function.this.invoke_arn 24 | } 25 | 26 | output "scheduler_lambda_function_last_modified" { 27 | description = "The date Lambda function was last modified" 28 | value = aws_lambda_function.this.last_modified 29 | } 30 | 31 | output "scheduler_lambda_function_version" { 32 | description = "Latest published version of your Lambda function" 33 | value = aws_lambda_function.this.version 34 | } 35 | 36 | output "scheduler_log_group_name" { 37 | description = "The name of the scheduler log group" 38 | value = aws_cloudwatch_log_group.this.name 39 | } 40 | 41 | output "scheduler_log_group_arn" { 42 | description = "The Amazon Resource Name (ARN) specifying the log group" 43 | value = aws_cloudwatch_log_group.this.arn 44 | } 45 | 46 | output "scheduler_expression" { 47 | description = "The expression of the scheduler" 48 | value = aws_scheduler_schedule.this.schedule_expression 49 | } 50 | 51 | output "scheduler_timezone" { 52 | description = "The timezone of the scheduler" 53 | value = aws_scheduler_schedule.this.schedule_expression_timezone 54 | } 55 | -------------------------------------------------------------------------------- /package/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | """Module containing the logic for the lambda scheduler entry-points.""" 2 | -------------------------------------------------------------------------------- /package/scheduler/autoscaling_handler.py:
-------------------------------------------------------------------------------- 1 | """Autoscaling instances scheduler.""" 2 | 3 | import logging 4 | from typing import Dict, Iterator, List, Optional, Union, Set 5 | 6 | import boto3 7 | from botocore.exceptions import ClientError 8 | 9 | from .exceptions import ec2_exception 10 | from .waiters import AwsWaiters 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class AutoscalingScheduler: 16 | """Manages AWS Auto Scaling groups scheduling operations.""" 17 | 18 | def __init__(self, region_name: Optional[str] = None) -> None: 19 | """Initialize AWS clients for EC2 and Auto Scaling. 20 | 21 | Args: 22 | region_name: AWS region name. If None, default region will be used. 23 | """ 24 | self.region_name = region_name 25 | self.ec2 = ( 26 | boto3.client("ec2", region_name=region_name) 27 | if region_name 28 | else boto3.client("ec2") 29 | ) 30 | self.asg = ( 31 | boto3.client("autoscaling", region_name=region_name) 32 | if region_name 33 | else boto3.client("autoscaling") 34 | ) 35 | self.waiter = AwsWaiters(region_name=region_name) 36 | 37 | def _extract_tag_info(self, aws_tags: List[Dict]) -> tuple: 38 | """Extract tag key and value from aws_tags. 39 | 40 | Args: 41 | aws_tags: List of AWS tag dictionaries 42 | 43 | Returns: 44 | Tuple containing (tag_key, tag_value) 45 | """ 46 | tag_key = aws_tags[0]["Key"] 47 | tag_value = "".join(aws_tags[0]["Values"]) 48 | return tag_key, tag_value 49 | 50 | def stop(self, aws_tags: List[Dict], terminate_instances: bool = False) -> None: 51 | """Suspend Auto Scaling groups and stop/terminate their instances. 52 | 53 | Args: 54 | aws_tags: AWS tags to filter resources by. 
Format: 55 | [{'Key': 'tag_key', 'Values': ['tag_value']}] 56 | terminate_instances: If True, terminate instances instead of stopping them 57 | """ 58 | tag_key, tag_value = self._extract_tag_info(aws_tags) 59 | asg_name_list = self.list_groups(tag_key, tag_value) 60 | instance_id_list = list(self.list_instances(asg_name_list)) 61 | 62 | # Suspend Auto Scaling groups 63 | self._suspend_asg_processes(asg_name_list) 64 | 65 | # Stop or terminate instances 66 | self._manage_instances(instance_id_list, terminate=terminate_instances) 67 | 68 | def start(self, aws_tags: List[Dict]) -> None: 69 | """Resume Auto Scaling groups and start their instances. 70 | 71 | Args: 72 | aws_tags: AWS tags to filter resources by. Format: 73 | [{'Key': 'tag_key', 'Values': ['tag_value']}] 74 | """ 75 | tag_key, tag_value = self._extract_tag_info(aws_tags) 76 | asg_name_list = self.list_groups(tag_key, tag_value) 77 | instance_id_list = list(self.list_instances(asg_name_list)) 78 | 79 | # Start instances 80 | started_instances = self._start_instances(instance_id_list) 81 | 82 | # Wait for instances to be running 83 | if started_instances: 84 | self.waiter.instance_running(instance_ids=started_instances) 85 | 86 | # Resume Auto Scaling groups 87 | self._resume_asg_processes(asg_name_list) 88 | 89 | def _suspend_asg_processes(self, asg_names: List[str]) -> None: 90 | """Suspend processes for the specified Auto Scaling groups. 91 | 92 | Args: 93 | asg_names: List of Auto Scaling group names 94 | """ 95 | for asg_name in asg_names: 96 | try: 97 | self.asg.suspend_processes(AutoScalingGroupName=asg_name) 98 | logger.info(f"Suspended Auto Scaling group: {asg_name}") 99 | except ClientError as exc: 100 | ec2_exception("Auto Scaling group", asg_name, exc) 101 | 102 | def _resume_asg_processes(self, asg_names: List[str]) -> None: 103 | """Resume processes for the specified Auto Scaling groups. 
104 | 105 | Args: 106 | asg_names: List of Auto Scaling group names 107 | """ 108 | for asg_name in asg_names: 109 | try: 110 | self.asg.resume_processes(AutoScalingGroupName=asg_name) 111 | logger.info(f"Resumed Auto Scaling group: {asg_name}") 112 | except ClientError as exc: 113 | ec2_exception("Auto Scaling group", asg_name, exc) 114 | 115 | def _manage_instances( 116 | self, instance_ids: List[str], terminate: bool = False 117 | ) -> None: 118 | """Stop or terminate EC2 instances. 119 | 120 | Args: 121 | instance_ids: List of EC2 instance IDs 122 | terminate: If True, terminate instances; otherwise stop them 123 | """ 124 | if not instance_ids: 125 | logger.info("No instances to manage") 126 | return 127 | 128 | for instance_id in instance_ids: 129 | try: 130 | if terminate: 131 | self.ec2.terminate_instances(InstanceIds=[instance_id]) 132 | logger.info(f"Terminated instance: {instance_id}") 133 | else: 134 | self.ec2.stop_instances(InstanceIds=[instance_id]) 135 | logger.info(f"Stopped instance: {instance_id}") 136 | except ClientError as exc: 137 | ec2_exception("instance", instance_id, exc) 138 | 139 | def _start_instances(self, instance_ids: List[str]) -> List[str]: 140 | """Start EC2 instances and return list of successfully started instance IDs. 
141 | 142 | Args: 143 | instance_ids: List of EC2 instance IDs to start 144 | 145 | Returns: 146 | List of successfully started instance IDs 147 | """ 148 | started_instances = [] 149 | 150 | if not instance_ids: 151 | logger.info("No instances to start") 152 | return started_instances 153 | 154 | for instance_id in instance_ids: 155 | try: 156 | self.ec2.start_instances(InstanceIds=[instance_id]) 157 | logger.info(f"Started instance: {instance_id}") 158 | started_instances.append(instance_id) 159 | except ClientError as exc: 160 | ec2_exception("instance", instance_id, exc) 161 | 162 | return started_instances 163 | 164 | def list_groups(self, tag_key: str, tag_value: str) -> List[str]: 165 | """List Auto Scaling groups with the specified tag. 166 | 167 | Args: 168 | tag_key: AWS tag key to filter by 169 | tag_value: AWS tag value to filter by 170 | 171 | Returns: 172 | List of Auto Scaling group names 173 | """ 174 | asg_name_list = [] 175 | paginator = self.asg.get_paginator("describe_auto_scaling_groups") 176 | 177 | for page in paginator.paginate(): 178 | for group in page["AutoScalingGroups"]: 179 | for tag in group["Tags"]: 180 | if tag["Key"] == tag_key and tag["Value"] == tag_value: 181 | asg_name_list.append(group["AutoScalingGroupName"]) 182 | 183 | logger.info( 184 | f"Found {len(asg_name_list)} Auto Scaling groups with tag {tag_key}={tag_value}" 185 | ) 186 | return asg_name_list 187 | 188 | def list_instances(self, asg_name_list: List[str]) -> Iterator[str]: 189 | """List instances in the specified Auto Scaling groups. 
190 | 191 | Args: 192 | asg_name_list: List of Auto Scaling group names 193 | 194 | Returns: 195 | Iterator yielding instance IDs 196 | """ 197 | if not asg_name_list: 198 | logger.info("No Auto Scaling groups to list instances for") 199 | return iter([]) 200 | 201 | paginator = self.asg.get_paginator("describe_auto_scaling_groups") 202 | 203 | for page in paginator.paginate(AutoScalingGroupNames=asg_name_list): 204 | for scalinggroup in page["AutoScalingGroups"]: 205 | for instance in scalinggroup["Instances"]: 206 | yield instance["InstanceId"] 207 | -------------------------------------------------------------------------------- /package/scheduler/cloudwatch_handler.py: -------------------------------------------------------------------------------- 1 | """Cloudwatch alarm action scheduler.""" 2 | 3 | from typing import Dict, List 4 | 5 | import boto3 6 | 7 | from botocore.exceptions import ClientError 8 | 9 | from .exceptions import cloudwatch_exception 10 | from .filter_resources_by_tags import FilterByTags 11 | 12 | 13 | class CloudWatchAlarmScheduler: 14 | """Abstract Cloudwatch alarm scheduler in a class.""" 15 | 16 | def __init__(self, region_name=None) -> None: 17 | """Initialize Cloudwatch alarm scheduler.""" 18 | if region_name: 19 | self.cloudwatch = boto3.client("cloudwatch", region_name=region_name) 20 | else: 21 | self.cloudwatch = boto3.client("cloudwatch") 22 | self.tag_api = FilterByTags(region_name=region_name) 23 | 24 | def stop(self, aws_tags: List[Dict]) -> None: 25 | """Aws Cloudwatch alarm disable function. 26 | 27 | Disable Cloudwatch alarm with defined tags. 28 | 29 | :param list[map] aws_tags: 30 | Aws tags to use for filter resources. 
31 | For example: 32 | [ 33 | { 34 | 'Key': 'string', 35 | 'Values': [ 36 | 'string', 37 | ] 38 | } 39 | ] 40 | """ 41 | for alarm_arn in self.tag_api.get_resources("cloudwatch:alarm", aws_tags): 42 | alarm_name = alarm_arn.split(":")[-1] 43 | try: 44 | self.cloudwatch.disable_alarm_actions(AlarmNames=[alarm_name]) 45 | print(f"Disable Cloudwatch alarm {alarm_name}") 46 | except ClientError as exc: 47 | cloudwatch_exception("cloudwatch alarm", alarm_name, exc) 48 | 49 | def start(self, aws_tags: List[Dict]) -> None: 50 | """Aws Cloudwatch alarm enable function. 51 | 52 | Enable Cloudwatch alarm with defined tags. 53 | 54 | :param list[map] aws_tags: 55 | Aws tags to use for filter resources. 56 | For example: 57 | [ 58 | { 59 | 'Key': 'string', 60 | 'Values': [ 61 | 'string', 62 | ] 63 | } 64 | ] 65 | """ 66 | for alarm_arn in self.tag_api.get_resources("cloudwatch:alarm", aws_tags): 67 | alarm_name = alarm_arn.split(":")[-1] 68 | try: 69 | self.cloudwatch.enable_alarm_actions(AlarmNames=[alarm_name]) 70 | print(f"Enable Cloudwatch alarm {alarm_name}") 71 | except ClientError as exc: 72 | cloudwatch_exception("cloudwatch alarm", alarm_name, exc) 73 | -------------------------------------------------------------------------------- /package/scheduler/documentdb_handler.py: -------------------------------------------------------------------------------- 1 | """documentdb instances scheduler.""" 2 | 3 | from typing import Dict, List 4 | 5 | import boto3 6 | 7 | from botocore.exceptions import ClientError 8 | 9 | from .exceptions import documentdb_exception 10 | from .filter_resources_by_tags import FilterByTags 11 | 12 | 13 | class DocumentDBScheduler: 14 | """documentdb scheduler.""" 15 | 16 | def __init__(self, region_name=None) -> None: 17 | """Initialize documentdb scheduler.""" 18 | if region_name: 19 | self.documentdb = boto3.client("docdb", region_name=region_name) 20 | else: 21 | self.documentdb = boto3.client("docdb") 22 | self.tag_api = 
FilterByTags(region_name=region_name) 23 | 24 | def stop(self, aws_tags: List[Dict]) -> None: 25 | """Aws documentdb cluster stop function. 26 | 27 | Stop documentdb clusters with defined tags. 28 | 29 | :param list[map] aws_tags: 30 | Aws tags to use for filter resources. 31 | For example: 32 | [ 33 | { 34 | 'Key': 'string', 35 | 'Values': [ 36 | 'string', 37 | ] 38 | } 39 | ] 40 | """ 41 | for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags): 42 | cluster_id = cluster_arn.split(":")[-1] 43 | try: 44 | self.documentdb.stop_db_cluster(DBClusterIdentifier=cluster_id) 45 | print(f"Stop documentdb cluster {cluster_id}") 46 | except ClientError as exc: 47 | documentdb_exception("documentdb cluster", cluster_id, exc) 48 | 49 | def start(self, aws_tags: List[Dict]) -> None: 50 | """Aws documentdb cluster start function. 51 | 52 | Start documentdb clusters with defined tags. 53 | 54 | :param list[map] aws_tags: 55 | Aws tags to use for filter resources. 56 | For example: 57 | [ 58 | { 59 | 'Key': 'string', 60 | 'Values': [ 61 | 'string', 62 | ] 63 | } 64 | ] 65 | """ 66 | for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags): 67 | cluster_id = cluster_arn.split(":")[-1] 68 | try: 69 | self.documentdb.start_db_cluster(DBClusterIdentifier=cluster_id) 70 | print(f"Start documentdb cluster {cluster_id}") 71 | except ClientError as exc: 72 | documentdb_exception("documentdb cluster", cluster_id, exc) 73 | -------------------------------------------------------------------------------- /package/scheduler/ecs_handler.py: -------------------------------------------------------------------------------- 1 | """ecs service scheduler.""" 2 | 3 | from typing import Dict, List 4 | 5 | import boto3 6 | 7 | from botocore.exceptions import ClientError 8 | 9 | from .exceptions import ecs_exception 10 | from .filter_resources_by_tags import FilterByTags 11 | 12 | 13 | class EcsScheduler: 14 | """Abstract ECS Service scheduler in a class.""" 15 | 16 | def 
__init__(self, region_name=None) -> None: 17 | """Initialize ECS service scheduler.""" 18 | if region_name: 19 | self.ecs = boto3.client("ecs", region_name=region_name) 20 | else: 21 | self.ecs = boto3.client("ecs") 22 | self.tag_api = FilterByTags(region_name=region_name) 23 | 24 | def stop(self, aws_tags: List[Dict]) -> None: 25 | """Aws ecs service stop function. 26 | 27 | Stop ecs services with defined tags by setting their 28 | desired count of tasks to 0. 29 | 30 | :param list[map] aws_tags: 31 | Aws tags to use for filter resources. 32 | For example: 33 | [ 34 | { 35 | 'Key': 'string', 36 | 'Values': [ 37 | 'string', 38 | ] 39 | } 40 | ] 41 | """ 42 | for service_arn in self.tag_api.get_resources("ecs:service", aws_tags): 43 | service_name = service_arn.split("/")[-1] 44 | cluster_name = service_arn.split("/")[-2] 45 | try: 46 | self.ecs.update_service( 47 | cluster=cluster_name, service=service_name, desiredCount=0 48 | ) 49 | print(f"Stop ECS Service {service_name} on Cluster {cluster_name}") 50 | except ClientError as exc: 51 | ecs_exception("ECS Service", service_name, exc) 52 | 53 | def start(self, aws_tags: List[Dict]) -> None: 54 | """Aws ecs service start function. 55 | 56 | Start ecs services with defined tags. 57 | 58 | :param list[map] aws_tags: 59 | Aws tags to use for filter resources.
60 | For example: 61 | [ 62 | { 63 | 'Key': 'string', 64 | 'Values': [ 65 | 'string', 66 | ] 67 | } 68 | ] 69 | """ 70 | for service_arn in self.tag_api.get_resources("ecs:service", aws_tags): 71 | service_name = service_arn.split("/")[-1] 72 | cluster_name = service_arn.split("/")[-2] 73 | try: 74 | self.ecs.update_service( 75 | cluster=cluster_name, service=service_name, desiredCount=1 76 | ) 77 | print(f"Start ECS Service {service_name} on Cluster {cluster_name}") 78 | except ClientError as exc: 79 | ecs_exception("ECS Service", service_name, exc) 80 | -------------------------------------------------------------------------------- /package/scheduler/exceptions.py: -------------------------------------------------------------------------------- 1 | """Exception function for all aws scheduler.""" 2 | 3 | import logging 4 | 5 | 6 | def documentdb_exception(resource_name: str, resource_id: str, exception) -> None: 7 | """Exception raised during execution of documentdb scheduler. 8 | 9 | Log instance, spot instance and autoscaling groups exceptions 10 | on the specific aws resources. 
11 | 12 | :param str resource_name: 13 | Aws resource name 14 | :param str resource_id: 15 | Aws resource id 16 | :param str exception: 17 | Human readable string describing the exception 18 | """ 19 | info_codes = ["InvalidDBClusterStateFault"] 20 | warning_codes = [ 21 | "InvalidDBClusterStateFault", 22 | "DBClusterNotFoundFault", 23 | "DBClusterParameterGroupNotFound", 24 | ] 25 | 26 | if exception.response["Error"]["Code"] in info_codes: 27 | logging.info( 28 | "%s %s: %s", 29 | resource_name, 30 | resource_id, 31 | exception, 32 | ) 33 | elif exception.response["Error"]["Code"] in warning_codes: 34 | logging.warning( 35 | "%s %s: %s", 36 | resource_name, 37 | resource_id, 38 | exception, 39 | ) 40 | else: 41 | logging.error( 42 | "Unexpected error on %s %s: %s", 43 | resource_name, 44 | resource_id, 45 | exception, 46 | ) 47 | 48 | 49 | def ec2_exception(resource_name: str, resource_id: str, exception) -> None: 50 | """Exception raised during execution of ec2 scheduler. 51 | 52 | Log instance, spot instance and autoscaling groups exceptions 53 | on the specific aws resources. 
54 | 55 | :param str resource_name: 56 | Aws resource name 57 | :param str resource_id: 58 | Aws resource id 59 | :param str exception: 60 | Human readable string describing the exception 61 | """ 62 | info_codes = ["IncorrectInstanceState"] 63 | warning_codes = [ 64 | "UnsupportedOperation", 65 | "IncorrectInstanceState", 66 | "InvalidParameterCombination", 67 | ] 68 | 69 | if exception.response["Error"]["Code"] in info_codes: 70 | logging.info( 71 | "%s %s: %s", 72 | resource_name, 73 | resource_id, 74 | exception, 75 | ) 76 | elif exception.response["Error"]["Code"] in warning_codes: 77 | logging.warning( 78 | "%s %s: %s", 79 | resource_name, 80 | resource_id, 81 | exception, 82 | ) 83 | else: 84 | logging.error( 85 | "Unexpected error on %s %s: %s", 86 | resource_name, 87 | resource_id, 88 | exception, 89 | ) 90 | 91 | 92 | def ecs_exception(resource_name: str, resource_id: str, exception) -> None: 93 | """Exception raised during execution of ecs scheduler. 94 | 95 | Log instance, spot instance and autoscaling groups exceptions 96 | on the specific aws resources. 
97 | 98 | :param str resource_name: 99 | Aws resource name 100 | :param str resource_id: 101 | Aws resource id 102 | :param str exception: 103 | Human readable string describing the exception 104 | """ 105 | info_codes = ["ClusterNotFoundException"] 106 | warning_codes = [ 107 | "ServiceNotActiveException", 108 | "ServiceNotFoundException", 109 | "InvalidParameterException", 110 | ] 111 | 112 | if exception.response["Error"]["Code"] in info_codes: 113 | logging.info( 114 | "%s %s: %s", 115 | resource_name, 116 | resource_id, 117 | exception, 118 | ) 119 | elif exception.response["Error"]["Code"] in warning_codes: 120 | logging.warning( 121 | "%s %s: %s", 122 | resource_name, 123 | resource_id, 124 | exception, 125 | ) 126 | else: 127 | logging.error( 128 | "Unexpected error on %s %s: %s", 129 | resource_name, 130 | resource_id, 131 | exception, 132 | ) 133 | 134 | 135 | def rds_exception(resource_name: str, resource_id: str, exception) -> None: 136 | """Exception raised during execution of rds scheduler. 137 | 138 | Log rds exceptions on the specific aws resources. 
139 | 140 | :param str resource_name: 141 | Aws resource name 142 | :param str resource_id: 143 | Aws resource id 144 | :param str exception: 145 | Human readable string describing the exception 146 | """ 147 | info_codes = ["InvalidParameterCombination", "DBClusterNotFoundFault"] 148 | warning_codes = ["InvalidDBClusterStateFault", "InvalidDBInstanceState"] 149 | 150 | if exception.response["Error"]["Code"] in info_codes: 151 | logging.info( 152 | "%s %s: %s", 153 | resource_name, 154 | resource_id, 155 | exception, 156 | ) 157 | elif exception.response["Error"]["Code"] in warning_codes: 158 | logging.warning( 159 | "%s %s: %s", 160 | resource_name, 161 | resource_id, 162 | exception, 163 | ) 164 | else: 165 | logging.error( 166 | "Unexpected error on %s %s: %s", 167 | resource_name, 168 | resource_id, 169 | exception, 170 | ) 171 | 172 | 173 | def redshift_exception(resource_name: str, resource_id: str, exception): 174 | """Exception raised during execution of redshift scheduler. 175 | 176 | Log redshift exceptions on the specific aws resources. 
177 | 178 | :param str resource_name: 179 | Aws resource name 180 | :param str resource_id: 181 | Aws resource id 182 | :param str exception: 183 | Human readable string describing the exception 184 | """ 185 | info_codes = ["ClusterNotFound"] 186 | warning_codes = ["ClusterNotFound", "InvalidClusterState"] 187 | 188 | if exception.response["Error"]["Code"] in info_codes: 189 | logging.info( 190 | "%s %s: %s", 191 | resource_name, 192 | resource_id, 193 | exception, 194 | ) 195 | elif exception.response["Error"]["Code"] in warning_codes: 196 | logging.warning( 197 | "%s %s: %s", 198 | resource_name, 199 | resource_id, 200 | exception, 201 | ) 202 | else: 203 | logging.error( 204 | "Unexpected error on %s %s: %s", 205 | resource_name, 206 | resource_id, 207 | exception, 208 | ) 209 | 210 | 211 | def cloudwatch_exception(resource_name: str, resource_id: str, exception): 212 | """Exception raised during execution of Cloudwatch scheduler. 213 | 214 | Log Cloudwatch exceptions on the specific aws resources. 215 | 216 | :param str resource_name: 217 | Aws resource name 218 | :param str resource_id: 219 | Aws resource id 220 | :param str exception: 221 | Human readable string describing the exception 222 | """ 223 | logging.error( 224 | "Unexpected error on %s %s: %s", 225 | resource_name, 226 | resource_id, 227 | exception, 228 | ) 229 | 230 | 231 | def transfer_exception(resource_name: str, resource_id: str, exception): 232 | """Exception raised during execution of Transfer scheduler. 233 | 234 | Log Transfer server exceptions on the specific aws resources. 
235 | 236 | :param str resource_name: 237 | Aws resource name 238 | :param str resource_id: 239 | Aws resource id 240 | :param str exception: 241 | Human readable string describing the exception 242 | """ 243 | info_codes = ["ResourceNotFoundException"] 244 | warning_codes = ["InvalidRequestException", "ThrottlingException"] 245 | 246 | if exception.response["Error"]["Code"] in info_codes: 247 | logging.info( 248 | "%s %s: %s", 249 | resource_name, 250 | resource_id, 251 | exception, 252 | ) 253 | elif exception.response["Error"]["Code"] in warning_codes: 254 | logging.warning( 255 | "%s %s: %s", 256 | resource_name, 257 | resource_id, 258 | exception, 259 | ) 260 | else: 261 | logging.error( 262 | "Unexpected error on %s %s: %s", 263 | resource_name, 264 | resource_id, 265 | exception, 266 | ) 267 | -------------------------------------------------------------------------------- /package/scheduler/filter_resources_by_tags.py: -------------------------------------------------------------------------------- 1 | """Filter aws resouces with tags.""" 2 | 3 | from typing import Iterator 4 | 5 | import boto3 6 | 7 | 8 | class FilterByTags: 9 | """Abstract Filter aws resources by tags in a class.""" 10 | 11 | def __init__(self, region_name=None) -> None: 12 | """Initialize resourcegroupstaggingapi client.""" 13 | if region_name: 14 | self.rgta = boto3.client( 15 | "resourcegroupstaggingapi", region_name=region_name 16 | ) 17 | else: 18 | self.rgta = boto3.client("resourcegroupstaggingapi") 19 | 20 | def get_resources(self, resource_type, aws_tags) -> Iterator[str]: 21 | """Filter aws resources using resource type and defined tags. 22 | 23 | Returns all the tagged defined resources that are located in 24 | the specified Region for the AWS account. 25 | 26 | :param str resource_type: 27 | The constraints on the resources that you want returned. 28 | The format of each resource type is service[:resourceType] . 
29 | For example, specifying a resource type of ec2 returns all 30 | Amazon EC2 resources (which includes EC2 instances). 31 | Specifying a resource type of ec2:instance returns only 32 | EC2 instances. 33 | :param list[map] aws_tags: 34 | A list of TagFilters (keys and values). 35 | Each TagFilter specified must contain a key with values 36 | as optional. For example: 37 | [ 38 | { 39 | 'Key': 'string', 40 | 'Values': [ 41 | 'string', 42 | ] 43 | }, 44 | ] 45 | :yield Iterator[str]: 46 | The ids of the resources 47 | """ 48 | paginator = self.rgta.get_paginator("get_resources") 49 | page_iterator = paginator.paginate( 50 | TagFilters=aws_tags, ResourceTypeFilters=[resource_type] 51 | ) 52 | for page in page_iterator: 53 | for resource_tag_map in page["ResourceTagMappingList"]: 54 | yield resource_tag_map["ResourceARN"] 55 | -------------------------------------------------------------------------------- /package/scheduler/instance_handler.py: -------------------------------------------------------------------------------- 1 | """EC2 instances scheduler. 2 | 3 | This module provides functionality to start and stop EC2 instances based on tags. 4 | """ 5 | 6 | import logging 7 | from typing import Dict, List, Optional 8 | 9 | import boto3 10 | from botocore.exceptions import ClientError 11 | 12 | from .exceptions import ec2_exception 13 | from .filter_resources_by_tags import FilterByTags 14 | 15 | # Set up logger 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | class InstanceScheduler: 20 | """EC2 instance scheduler to start and stop instances based on tags.""" 21 | 22 | def __init__(self, region_name: Optional[str] = None) -> None: 23 | """Initialize EC2 scheduler with AWS clients. 24 | 25 | Args: 26 | region_name: AWS region name. If None, default region is used. 
27 | """ 28 | self.region_name = region_name 29 | self.ec2 = ( 30 | boto3.client("ec2", region_name=region_name) 31 | if region_name 32 | else boto3.client("ec2") 33 | ) 34 | self.asg = ( 35 | boto3.client("autoscaling", region_name=region_name) 36 | if region_name 37 | else boto3.client("autoscaling") 38 | ) 39 | self.tag_api = FilterByTags(region_name=region_name) 40 | 41 | def _process_instances(self, aws_tags: List[Dict], action: str) -> None: 42 | """Process EC2 instances based on the specified action. 43 | 44 | Args: 45 | aws_tags: List of tag dictionaries to filter resources 46 | action: Action to perform ('start' or 'stop') 47 | """ 48 | for instance_arn in self.tag_api.get_resources("ec2:instance", aws_tags): 49 | instance_id = instance_arn.split("/")[-1] 50 | try: 51 | # Skip instances that are part of an Auto Scaling Group 52 | if self.asg.describe_auto_scaling_instances(InstanceIds=[instance_id])[ 53 | "AutoScalingInstances" 54 | ]: 55 | logger.info( 56 | f"Skipping {instance_id} as it belongs to an Auto Scaling Group" 57 | ) 58 | continue 59 | 60 | # Perform the requested action 61 | if action == "start": 62 | self.ec2.start_instances(InstanceIds=[instance_id]) 63 | logger.info(f"Started instance {instance_id}") 64 | elif action == "stop": 65 | self.ec2.stop_instances(InstanceIds=[instance_id]) 66 | logger.info(f"Stopped instance {instance_id}") 67 | 68 | except ClientError as exc: 69 | ec2_exception("instance", instance_id, exc) 70 | logger.error(f"Failed to {action} instance {instance_id}: {str(exc)}") 71 | 72 | def stop(self, aws_tags: List[Dict]) -> None: 73 | """Stop EC2 instances with defined tags. 74 | 75 | Args: 76 | aws_tags: List of tag dictionaries to filter resources. 77 | For example: 78 | [ 79 | { 80 | 'Key': 'Environment', 81 | 'Values': ['Development'] 82 | } 83 | ] 84 | """ 85 | self._process_instances(aws_tags, "stop") 86 | 87 | def start(self, aws_tags: List[Dict]) -> None: 88 | """Start EC2 instances with defined tags. 
89 | 90 | Args: 91 | aws_tags: List of tag dictionaries to filter resources. 92 | For example: 93 | [ 94 | { 95 | 'Key': 'Environment', 96 | 'Values': ['Development'] 97 | } 98 | ] 99 | """ 100 | self._process_instances(aws_tags, "start") 101 | -------------------------------------------------------------------------------- /package/scheduler/main.py: -------------------------------------------------------------------------------- 1 | """This script stop and start aws resources.""" 2 | 3 | import os 4 | 5 | from .autoscaling_handler import AutoscalingScheduler 6 | from .cloudwatch_handler import CloudWatchAlarmScheduler 7 | from .documentdb_handler import DocumentDBScheduler 8 | from .ecs_handler import EcsScheduler 9 | from .instance_handler import InstanceScheduler 10 | from .rds_handler import RdsScheduler 11 | from .redshift_handler import RedshiftScheduler 12 | from .transfer_handler import TransferScheduler 13 | 14 | 15 | def lambda_handler(event, context): 16 | """Main function entrypoint for lambda. 
17 | 18 | Stop and start AWS resources: 19 | - rds instances 20 | - rds aurora clusters 21 | - instance ec2 22 | - ecs services 23 | - redshift clusters 24 | - transfer servers 25 | 26 | Suspend and resume AWS resources: 27 | - ec2 autoscaling groups 28 | 29 | Terminate spot instances (spot instance cannot be stopped by a user) 30 | """ 31 | # Retrieve variables from aws lambda ENVIRONMENT 32 | schedule_action = os.getenv("SCHEDULE_ACTION") 33 | aws_regions = os.getenv("AWS_REGIONS").replace(" ", "").split(",") 34 | format_tags = [{"Key": os.getenv("TAG_KEY"), "Values": [os.getenv("TAG_VALUE")]}] 35 | autoscaling_terminate_instances = strtobool( 36 | os.getenv("AUTOSCALING_TERMINATE_INSTANCES") 37 | ) 38 | 39 | _strategy = { 40 | AutoscalingScheduler: os.getenv("AUTOSCALING_SCHEDULE"), 41 | DocumentDBScheduler: os.getenv("DOCUMENTDB_SCHEDULE"), 42 | InstanceScheduler: os.getenv("EC2_SCHEDULE"), 43 | EcsScheduler: os.getenv("ECS_SCHEDULE"), 44 | RdsScheduler: os.getenv("RDS_SCHEDULE"), 45 | RedshiftScheduler: os.getenv("REDSHIFT_SCHEDULE"), 46 | CloudWatchAlarmScheduler: os.getenv("CLOUDWATCH_ALARM_SCHEDULE"), 47 | TransferScheduler: os.getenv("TRANSFER_SCHEDULE"), 48 | } 49 | 50 | for service, to_schedule in _strategy.items(): 51 | if strtobool(to_schedule): 52 | for aws_region in aws_regions: 53 | strategy = service(aws_region) 54 | if service == AutoscalingScheduler and autoscaling_terminate_instances: 55 | getattr(strategy, schedule_action)( 56 | aws_tags=format_tags, terminate_instances=True 57 | ) 58 | else: 59 | getattr(strategy, schedule_action)(aws_tags=format_tags) 60 | 61 | 62 | def strtobool(value: str) -> bool: 63 | """Convert string to boolean.""" 64 | return value.lower() in ("yes", "true", "t", "1") 65 | -------------------------------------------------------------------------------- /package/scheduler/rds_handler.py: -------------------------------------------------------------------------------- 1 | """RDS instances scheduler.""" 2 | 3 | from 
typing import Dict, List, Literal, Optional

import boto3
from botocore.exceptions import ClientError

from .exceptions import rds_exception
from .filter_resources_by_tags import FilterByTags


class RdsScheduler:
    """RDS resource scheduler for controlling instances and clusters."""

    def __init__(self, region_name: Optional[str] = None) -> None:
        """Initialize RDS scheduler.

        Args:
            region_name: AWS region name. Uses default configuration if not specified.
        """
        # Fall back to the default session region when none is supplied.
        self.rds = (
            boto3.client("rds", region_name=region_name)
            if region_name
            else boto3.client("rds")
        )
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Stop RDS Aurora clusters and RDS DB instances with defined tags.

        Args:
            aws_tags: AWS tags to filter resources.
                Example: [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_resources(aws_tags, action="stop")

    def start(self, aws_tags: List[Dict]) -> None:
        """Start RDS Aurora clusters and RDS DB instances with defined tags.

        Args:
            aws_tags: AWS tags to filter resources.
                Example: [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_resources(aws_tags, action="start")

    def _process_resources(
        self, aws_tags: List[Dict], action: Literal["start", "stop"]
    ) -> None:
        """Process RDS resources with the specified action.

        Args:
            aws_tags: AWS tags to filter resources.
            action: Action to perform ("start" or "stop").
        """
        # Handle clusters. Cluster ARNs end with ":cluster:<id>", so the
        # identifier is the last ":"-separated field.
        for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags):
            cluster_id = cluster_arn.split(":")[-1]
            self._process_cluster(cluster_id, action)

        # Handle instances (ARNs end with ":db:<id>").
        for db_arn in self.tag_api.get_resources("rds:db", aws_tags):
            db_id = db_arn.split(":")[-1]
            self._process_instance(db_id, action)

    def _process_cluster(
        self, cluster_id: str, action: Literal["start", "stop"]
    ) -> None:
        """Process an RDS cluster with the specified action.

        Args:
            cluster_id: RDS cluster identifier.
            action: Action to perform ("start" or "stop").
        """
        try:
            # Identifier must be cluster id, not resource id; the describe
            # call raises ClientError early when the cluster does not exist.
            self.rds.describe_db_clusters(DBClusterIdentifier=cluster_id)

            if action == "start":
                self.rds.start_db_cluster(DBClusterIdentifier=cluster_id)
                print(f"Start RDS cluster {cluster_id}")
            else:
                self.rds.stop_db_cluster(DBClusterIdentifier=cluster_id)
                print(f"Stop RDS cluster {cluster_id}")

        except ClientError as exc:
            # AWS API errors are reported through the shared helper.
            rds_exception("RDS cluster", cluster_id, exc)

    def _process_instance(self, db_id: str, action: Literal["start", "stop"]) -> None:
        """Process an RDS instance with the specified action.

        Args:
            db_id: RDS instance identifier.
            action: Action to perform ("start" or "stop").
        """
        try:
            if action == "start":
                self.rds.start_db_instance(DBInstanceIdentifier=db_id)
                print(f"Start RDS instance {db_id}")
            else:
                self.rds.stop_db_instance(DBInstanceIdentifier=db_id)
                print(f"Stop RDS instance {db_id}")

        except ClientError as exc:
            rds_exception("RDS instance", db_id, exc)
--------------------------------------------------------------------------------
/package/scheduler/redshift_handler.py:
--------------------------------------------------------------------------------
"""redshift cluster scheduler."""

from typing import Dict, List

import boto3

from botocore.exceptions import ClientError

from .exceptions import redshift_exception
from .filter_resources_by_tags import FilterByTags


class RedshiftScheduler:
    """Abstract redshift scheduler in a class."""

    def __init__(self, region_name=None) -> None:
        """Initialize redshift scheduler."""
        # Fall back to the default session region when none is supplied.
        if region_name:
            self.redshift = boto3.client("redshift", region_name=region_name)
        else:
            self.redshift = boto3.client("redshift")
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Aws redshift cluster stop function.

        Stop redshift clusters with defined tags.

        :param list[map] aws_tags:
            Aws tags to use for filter resources.
            For example:
            [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ]
                }
            ]
        """
        for cluster_arn in self.tag_api.get_resources("redshift:cluster", aws_tags):
            # Cluster ARNs end with ":cluster:<id>".
            cluster_id = cluster_arn.split(":")[-1]
            try:
                # Redshift has no stop API: pausing is the equivalent.
                self.redshift.pause_cluster(ClusterIdentifier=cluster_id)
                print(f"Stop redshift cluster {cluster_id}")
            except ClientError as exc:
                redshift_exception("redshift cluster", cluster_id, exc)

    def start(self, aws_tags: List[Dict]) -> None:
        """Aws redshift cluster start function.

        Start redshift clusters with defined tags.

        :param list[map] aws_tags:
            Aws tags to use for filter resources.
            For example:
            [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ]
                }
            ]
        """
        for cluster_arn in self.tag_api.get_resources("redshift:cluster", aws_tags):
            cluster_id = cluster_arn.split(":")[-1]
            try:
                # Counterpart of pause_cluster used in stop().
                self.redshift.resume_cluster(ClusterIdentifier=cluster_id)
                print(f"Start redshift cluster {cluster_id}")
            except ClientError as exc:
                redshift_exception("redshift cluster", cluster_id, exc)
--------------------------------------------------------------------------------
/package/scheduler/transfer_handler.py:
--------------------------------------------------------------------------------
"""AWS Transfer (SFTP) server scheduler."""

from typing import Dict, List, Literal, Optional

import boto3
from botocore.exceptions import ClientError

from .exceptions import transfer_exception
from .filter_resources_by_tags import FilterByTags


class TransferScheduler:
    """AWS Transfer (SFTP) server scheduler for controlling servers."""

    def __init__(self, region_name: Optional[str] = None) -> None:
        """Initialize Transfer scheduler.

        Args:
            region_name: AWS region name. Uses default configuration if not specified.
        """
        # Fall back to the default session region when none is supplied.
        self.transfer = (
            boto3.client("transfer", region_name=region_name)
            if region_name
            else boto3.client("transfer")
        )
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Stop AWS Transfer servers with defined tags.

        Args:
            aws_tags: AWS tags to filter resources.
                Example: [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_servers(aws_tags, action="stop")

    def start(self, aws_tags: List[Dict]) -> None:
        """Start AWS Transfer servers with defined tags.

        Args:
            aws_tags: AWS tags to filter resources.
                Example: [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_servers(aws_tags, action="start")

    def _process_servers(
        self, aws_tags: List[Dict], action: Literal["start", "stop"]
    ) -> None:
        """Process Transfer servers with the specified action.

        Args:
            aws_tags: AWS tags to filter resources.
            action: Action to perform ("start" or "stop").
        """
        for server_arn in self.tag_api.get_resources("transfer:server", aws_tags):
            # Server ARNs end with ".../<server-id>".
            server_id = server_arn.split("/")[-1]
            try:
                if action == "start":
                    self.transfer.start_server(ServerId=server_id)
                    print(f"Start Transfer server {server_id}")
                else:
                    self.transfer.stop_server(ServerId=server_id)
                    print(f"Stop Transfer server {server_id}")
            except ClientError as exc:
                transfer_exception("Transfer server", server_id, exc)
--------------------------------------------------------------------------------
/package/scheduler/waiters.py:
--------------------------------------------------------------------------------
"""Aws resource waiters."""

from typing import List

import boto3

from botocore.exceptions import ClientError

from .exceptions import ec2_exception


class AwsWaiters:
    """Abstract aws waiter in a class."""

    def __init__(self, region_name=None) -> None:
        """Initialize aws waiter."""
        # Fall back to the default session region when none is supplied.
        if region_name:
            self.ec2 = boto3.client("ec2", region_name=region_name)
        else:
            self.ec2 = boto3.client("ec2")

    def instance_running(self, instance_ids: List[str]) -> None:
        """Aws waiter for instance running.

        Wait ec2 instances are in running state.

        :param list instance_ids:
            The instance IDs to wait.
29 | """ 30 | if instance_ids: 31 | try: 32 | instance_waiter = self.ec2.get_waiter("instance_running") 33 | instance_waiter.wait( 34 | InstanceIds=instance_ids, 35 | WaiterConfig={"Delay": 15, "MaxAttempts": 15}, 36 | ) 37 | except ClientError as exc: 38 | ec2_exception("waiter", instance_waiter, exc) 39 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible-base==2.* 2 | Babel==2.* 3 | black==25.* 4 | boto3==1.* 5 | botocore==1.* 6 | cachetools==5.* 7 | certifi==2025.* 8 | cfgv==3.* 9 | chardet==5.* 10 | charset-normalizer==3.* 11 | click==8.* 12 | colorama==0.* 13 | coverage==7.* 14 | cryptography==44.* 15 | distlib==0.* 16 | dnspython==2.* 17 | docker==7.* 18 | exceptiongroup==1.* 19 | filelock==3.* 20 | httplib2==0.* 21 | identify==2.* 22 | idna==3.* 23 | iniconfig==2.* 24 | Jinja2==3.* 25 | jmespath==1.* 26 | MarkupSafe==3.* 27 | moto==5.* 28 | mypy-extensions==1.* 29 | netaddr==1.* 30 | nodeenv==1.* 31 | packaging==24.* 32 | pathspec==0.* 33 | platformdirs==4.* 34 | pluggy==1.* 35 | pre-commit==4.* 36 | pycryptodomex==3.* 37 | pyparsing==3.* 38 | pyproject-api==1.* 39 | pytest==8.* 40 | pytest-cov==6.* 41 | pytest-pythonpath==0.* 42 | python-apt==2.* 43 | python-dateutil==2.* 44 | pytz==2025.* 45 | PyYAML==6.* 46 | requests==2.* 47 | responses==0.* 48 | s3transfer==0.* 49 | six==1.* 50 | tomli==2.* 51 | tox==4.* 52 | types-PyYAML==6.* 53 | urllib3==2.* 54 | virtualenv==20.* 55 | websocket-client==1.* 56 | Werkzeug==3.* 57 | xmltodict==0.* 58 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Main entry point for unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/sanity/terraform_tests.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Install the Latest version of Terraform 5 | sudo pip install ansible 6 | sudo ansible-galaxy install diodonfrost.terraform 7 | sudo ansible-pull -U https://github.com/diodonfrost/ansible-role-terraform tests/test.yml -e "terraform_version=${terraform_version}" 8 | terraform -version 9 | terraform init 10 | 11 | # Test Terraform syntax 12 | export AWS_DEFAULT_REGION=eu-west-1 13 | terraform validate 14 | 15 | # Terraform lint 16 | terraform fmt -check -diff main.tf 17 | 18 | # Test Terraform fixture example 19 | cd examples/test_fixture || exist 20 | terraform init 21 | terraform validate 22 | terraform -v 23 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | """Main entry point for unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/test_filter_resources_by_tags.py: -------------------------------------------------------------------------------- 1 | """Tests for the class FilterByTags class.""" 2 | 3 | from moto import mock_aws 4 | 5 | from package.scheduler.filter_resources_by_tags import FilterByTags 6 | 7 | from .utils import launch_ec2_instances 8 | 9 | import pytest 10 | 11 | @pytest.mark.parametrize( 12 | "aws_region, instance_tag, scheduler_tag, result_count", 13 | [ 14 | ( 15 | "eu-west-1", 16 | [{"Key": "tostop-ec2-test-1", "Values": ["true"]}], 17 | [{"Key": "tostop-ec2-test-1", "Values": ["true"]}], 18 | 2, 19 | ), 20 | ( 21 | "eu-west-1", 22 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 23 | [{"Key": "tostop-ec2-test-1", "Values": ["true"]}], 24 | 0, 25 | ), 26 | ], 27 | ) 28 | @mock_aws 29 | def test_filter_instances(aws_region, instance_tag, scheduler_tag, result_count): 30 | """Filter instances class method.""" 31 | tag_key = 
instance_tag[0]["Key"] 32 | tag_value = "".join(instance_tag[0]["Values"]) 33 | launch_ec2_instances(2, aws_region, tag_key, tag_value) 34 | launch_ec2_instances(3, aws_region, "wrongkey", "wrongvalue") 35 | 36 | tag_api = FilterByTags(region_name=aws_region) 37 | instance_arns = tag_api.get_resources("ec2:instance", scheduler_tag) 38 | 39 | assert len(list(instance_arns)) == result_count 40 | -------------------------------------------------------------------------------- /tests/unit/test_instance_handler.py: -------------------------------------------------------------------------------- 1 | """Tests for the instance scheduler class.""" 2 | 3 | import boto3 4 | 5 | from moto import mock_aws 6 | 7 | from package.scheduler.cloudwatch_handler import CloudWatchAlarmScheduler 8 | from package.scheduler.instance_handler import InstanceScheduler 9 | 10 | from .utils import launch_asg, launch_ec2_instances 11 | 12 | import pytest 13 | 14 | 15 | @pytest.mark.parametrize( 16 | "aws_region, aws_tags, result_count", 17 | [ 18 | ( 19 | "eu-west-1", 20 | [{"Key": "tostop", "Values": ["true"]}], 21 | {"Code": 16, "Name": "running"}, 22 | ), 23 | ( 24 | "eu-west-2", 25 | [{"Key": "tostop", "Values": ["true"]}], 26 | {"Code": 16, "Name": "running"}, 27 | ), 28 | ( 29 | "eu-west-2", 30 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 31 | {"Code": 80, "Name": "stopped"}, 32 | ), 33 | ], 34 | ) 35 | @mock_aws 36 | def test_start_ec2_instance(aws_region, aws_tags, result_count): 37 | """Verify start ec2 instance function.""" 38 | client = boto3.client("ec2", region_name=aws_region) 39 | launch_ec2_instances(3, aws_region, "tostop", "true") 40 | for ec2 in client.describe_instances()["Reservations"][0]["Instances"]: 41 | client.stop_instances(InstanceIds=[ec2["InstanceId"]]) 42 | 43 | ec2_scheduler = InstanceScheduler(aws_region) 44 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 45 | ec2_scheduler.start(aws_tags) 46 | for ec2 in 
client.describe_instances()["Reservations"][0]["Instances"]: 47 | assert ec2["State"] == result_count 48 | 49 | 50 | @pytest.mark.parametrize( 51 | "aws_region, aws_tags, result_count", 52 | [ 53 | ( 54 | "eu-west-1", 55 | [{"Key": "tostop", "Values": ["true"]}], 56 | {"Code": 80, "Name": "stopped"}, 57 | ), 58 | ( 59 | "eu-west-2", 60 | [{"Key": "tostop", "Values": ["true"]}], 61 | {"Code": 80, "Name": "stopped"}, 62 | ), 63 | ( 64 | "eu-west-2", 65 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 66 | {"Code": 16, "Name": "running"}, 67 | ), 68 | ], 69 | ) 70 | @mock_aws 71 | def test_stop_ec2_instance(aws_region, aws_tags, result_count): 72 | """Verify stop ec2 instance function.""" 73 | client = boto3.client("ec2", region_name=aws_region) 74 | launch_ec2_instances(3, aws_region, "tostop", "true") 75 | 76 | ec2_scheduler = InstanceScheduler(aws_region) 77 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 78 | ec2_scheduler.stop(aws_tags) 79 | instances = client.describe_instances()["Reservations"][0]["Instances"] 80 | assert len(instances) == 3 81 | for instance in instances: 82 | assert instance["State"] == result_count 83 | 84 | 85 | @pytest.mark.parametrize( 86 | "aws_region, aws_tags, result_count", 87 | [ 88 | ( 89 | "eu-west-1", 90 | [{"Key": "tostop", "Values": ["true"]}], 91 | {"Code": 16, "Name": "running"}, 92 | ), 93 | ( 94 | "eu-west-2", 95 | [{"Key": "tostop", "Values": ["true"]}], 96 | {"Code": 16, "Name": "running"}, 97 | ), 98 | ], 99 | ) 100 | @mock_aws 101 | def test_do_not_stop_asg_instance(aws_region, aws_tags, result_count): 102 | client = boto3.client("ec2", region_name=aws_region) 103 | launch_asg(aws_region, "tostop", "true") 104 | 105 | ec2_scheduler = InstanceScheduler(aws_region) 106 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 107 | ec2_scheduler.stop(aws_tags) 108 | instances = client.describe_instances()["Reservations"][0]["Instances"] 109 | assert len(instances) == 3 110 | for 
instance in instances: 111 | assert instance["State"] == result_count 112 | 113 | 114 | @pytest.mark.parametrize( 115 | "aws_region, aws_tags, result_count", 116 | [ 117 | ( 118 | "eu-west-1", 119 | [{"Key": "tostop", "Values": ["true"]}], 120 | {"Code": 80, "Name": "stopped"}, 121 | ), 122 | ( 123 | "eu-west-2", 124 | [{"Key": "tostop", "Values": ["true"]}], 125 | {"Code": 80, "Name": "stopped"}, 126 | ), 127 | ], 128 | ) 129 | @mock_aws 130 | def test_do_not_start_asg_instance(aws_region, aws_tags, result_count): 131 | client = boto3.client("ec2", region_name=aws_region) 132 | launch_asg(aws_region, "tostop", "true") 133 | instances = client.describe_instances()["Reservations"][0]["Instances"] 134 | for instance in instances: 135 | client.stop_instances(InstanceIds=[instance["InstanceId"]]) 136 | 137 | ec2_scheduler = InstanceScheduler(aws_region) 138 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 139 | ec2_scheduler.start(aws_tags) 140 | instances = client.describe_instances()["Reservations"][0]["Instances"] 141 | assert len(instances) == 3 142 | for instance in instances: 143 | assert instance["State"] == result_count 144 | -------------------------------------------------------------------------------- /tests/unit/test_rds_handler.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from moto import mock_aws 3 | import pytest 4 | from package.scheduler.rds_handler import RdsScheduler 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "aws_region", 9 | [ 10 | "us-east-1", 11 | "us-west-2", 12 | "eu-west-1", 13 | ], 14 | ) 15 | @mock_aws 16 | def test_rds_scheduler_initialization(aws_region): 17 | """Test that RdsScheduler initializes correctly with and without region.""" 18 | scheduler = RdsScheduler() 19 | assert scheduler.rds is not None 20 | assert scheduler.tag_api is not None 21 | 22 | scheduler = RdsScheduler(region_name=aws_region) 23 | assert scheduler.rds is not None 24 | assert 
scheduler.tag_api is not None 25 | 26 | 27 | @pytest.mark.parametrize( 28 | "aws_region, aws_tags, result_count", 29 | [ 30 | ( 31 | "us-east-1", 32 | [{"Key": "tostop", "Values": ["true"]}], 33 | "stopped", 34 | ), 35 | ( 36 | "us-west-2", 37 | [{"Key": "tostop", "Values": ["true"]}], 38 | "stopped", 39 | ), 40 | ( 41 | "eu-west-1", 42 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 43 | "available", 44 | ), 45 | ], 46 | ) 47 | @mock_aws 48 | def test_stop_rds_instance(aws_region, aws_tags, result_count): 49 | """Test stopping RDS instances.""" 50 | rds = boto3.client("rds", region_name=aws_region) 51 | 52 | instance_id = f"test-db-instance-{aws_region}" 53 | rds.create_db_instance( 54 | DBInstanceIdentifier=instance_id, 55 | DBInstanceClass="db.t2.micro", 56 | Engine="mysql", 57 | MasterUsername="test", 58 | MasterUserPassword="test1234", 59 | AllocatedStorage=20, 60 | ) 61 | 62 | rds.add_tags_to_resource( 63 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:db:{instance_id}", 64 | Tags=[{"Key": "tostop", "Value": "true"}], 65 | ) 66 | 67 | scheduler = RdsScheduler(region_name=aws_region) 68 | scheduler.stop(aws_tags) 69 | 70 | response = rds.describe_db_instances(DBInstanceIdentifier=instance_id) 71 | assert response["DBInstances"][0]["DBInstanceStatus"] == result_count 72 | 73 | 74 | @pytest.mark.parametrize( 75 | "aws_region, aws_tags, result_count", 76 | [ 77 | ( 78 | "us-east-1", 79 | [{"Key": "tostop", "Values": ["true"]}], 80 | "available", 81 | ), 82 | ( 83 | "us-west-2", 84 | [{"Key": "tostop", "Values": ["true"]}], 85 | "available", 86 | ), 87 | ( 88 | "eu-west-1", 89 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 90 | "stopped", 91 | ), 92 | ], 93 | ) 94 | @mock_aws 95 | def test_start_rds_instance(aws_region, aws_tags, result_count): 96 | """Test starting RDS instances.""" 97 | rds = boto3.client("rds", region_name=aws_region) 98 | 99 | instance_id = f"test-db-instance-{aws_region}" 100 | rds.create_db_instance( 101 | 
DBInstanceIdentifier=instance_id, 102 | DBInstanceClass="db.t2.micro", 103 | Engine="mysql", 104 | MasterUsername="test", 105 | MasterUserPassword="test1234", 106 | AllocatedStorage=20, 107 | ) 108 | 109 | rds.add_tags_to_resource( 110 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:db:{instance_id}", 111 | Tags=[{"Key": "tostop", "Value": "true"}], 112 | ) 113 | 114 | rds.stop_db_instance(DBInstanceIdentifier=instance_id) 115 | 116 | scheduler = RdsScheduler(region_name=aws_region) 117 | scheduler.start(aws_tags) 118 | 119 | response = rds.describe_db_instances(DBInstanceIdentifier=instance_id) 120 | assert response["DBInstances"][0]["DBInstanceStatus"] == result_count 121 | 122 | 123 | @pytest.mark.parametrize( 124 | "aws_region, aws_tags, result_count", 125 | [ 126 | ( 127 | "us-east-1", 128 | [{"Key": "tostop", "Values": ["true"]}], 129 | "stopped", 130 | ), 131 | ( 132 | "us-west-2", 133 | [{"Key": "tostop", "Values": ["true"]}], 134 | "stopped", 135 | ), 136 | ( 137 | "eu-west-1", 138 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 139 | "available", 140 | ), 141 | ], 142 | ) 143 | @mock_aws 144 | def test_stop_rds_cluster(aws_region, aws_tags, result_count): 145 | """Test stopping RDS Aurora clusters.""" 146 | rds = boto3.client("rds", region_name=aws_region) 147 | 148 | cluster_id = f"test-cluster-{aws_region}" 149 | rds.create_db_cluster( 150 | DBClusterIdentifier=cluster_id, 151 | Engine="aurora-mysql", 152 | MasterUsername="test", 153 | MasterUserPassword="test1234", 154 | ) 155 | 156 | rds.add_tags_to_resource( 157 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:cluster:{cluster_id}", 158 | Tags=[{"Key": "tostop", "Value": "true"}], 159 | ) 160 | 161 | scheduler = RdsScheduler(region_name=aws_region) 162 | scheduler.stop(aws_tags) 163 | 164 | response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id) 165 | assert response["DBClusters"][0]["Status"] == result_count 166 | 167 | 168 | @pytest.mark.parametrize( 169 | "aws_region, 
aws_tags, result_count", 170 | [ 171 | ( 172 | "us-east-1", 173 | [{"Key": "tostop", "Values": ["true"]}], 174 | "available", 175 | ), 176 | ( 177 | "us-west-2", 178 | [{"Key": "tostop", "Values": ["true"]}], 179 | "available", 180 | ), 181 | ( 182 | "eu-west-1", 183 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 184 | "stopped", 185 | ), 186 | ], 187 | ) 188 | @mock_aws 189 | def test_start_rds_cluster(aws_region, aws_tags, result_count): 190 | """Test starting RDS Aurora clusters.""" 191 | rds = boto3.client("rds", region_name=aws_region) 192 | 193 | cluster_id = f"test-cluster-{aws_region}" 194 | rds.create_db_cluster( 195 | DBClusterIdentifier=cluster_id, 196 | Engine="aurora-mysql", 197 | MasterUsername="test", 198 | MasterUserPassword="test1234", 199 | ) 200 | 201 | rds.add_tags_to_resource( 202 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:cluster:{cluster_id}", 203 | Tags=[{"Key": "tostop", "Value": "true"}], 204 | ) 205 | 206 | rds.stop_db_cluster(DBClusterIdentifier=cluster_id) 207 | 208 | scheduler = RdsScheduler(region_name=aws_region) 209 | scheduler.start(aws_tags) 210 | 211 | response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id) 212 | assert response["DBClusters"][0]["Status"] == result_count 213 | 214 | 215 | @pytest.mark.parametrize( 216 | "aws_region, aws_tags, result_count", 217 | [ 218 | ( 219 | "us-east-1", 220 | [{"Key": "tostop", "Values": ["true"]}], 221 | "available", 222 | ), 223 | ( 224 | "us-west-2", 225 | [{"Key": "tostop", "Values": ["true"]}], 226 | "available", 227 | ), 228 | ( 229 | "eu-west-1", 230 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 231 | "stopped", 232 | ), 233 | ], 234 | ) 235 | @mock_aws 236 | def test_handle_nonexistent_resources(aws_region, aws_tags, result_count): 237 | """Test handling of nonexistent RDS resources.""" 238 | scheduler = RdsScheduler(region_name=aws_region) 239 | scheduler.stop(aws_tags) 240 | scheduler.start(aws_tags) 241 | 
-------------------------------------------------------------------------------- /tests/unit/utils.py: -------------------------------------------------------------------------------- 1 | """Module use by ec2 scheduler unit tests.""" 2 | 3 | import boto3 4 | 5 | 6 | def launch_ec2_instances(count, region_name, tag_key, tag_value): 7 | """Create ec2 instances.""" 8 | client = boto3.client("ec2", region_name=region_name) 9 | instance = client.run_instances( 10 | ImageId="ami-02df9ea15c1778c9c", 11 | MaxCount=count, 12 | MinCount=count, 13 | TagSpecifications=[ 14 | { 15 | "ResourceType": "instance", 16 | "Tags": [ 17 | {"Key": "Name", "Value": "instance_test"}, 18 | {"Key": tag_key, "Value": tag_value}, 19 | ], 20 | } 21 | ], 22 | ) 23 | return instance 24 | 25 | 26 | def launch_ec2_spot(count, region_name, tag_key, tag_value): 27 | """Create ec2 spot instances.""" 28 | client = boto3.client("ec2", region_name=region_name) 29 | spot = client.run_instances( 30 | ImageId="ami-02df9ea15c1778c9c", 31 | MaxCount=count, 32 | MinCount=count, 33 | InstanceMarketOptions={ 34 | "MarketType": "spot", 35 | "SpotOptions": { 36 | "SpotInstanceType": "one-time", 37 | "InstanceInterruptionBehavior": "terminate", 38 | }, 39 | }, 40 | TagSpecifications=[ 41 | { 42 | "ResourceType": "instance", 43 | "Tags": [ 44 | {"Key": "Name", "Value": "instance_test"}, 45 | {"Key": tag_key, "Value": tag_value}, 46 | ], 47 | } 48 | ], 49 | ) 50 | return spot 51 | 52 | 53 | def launch_asg(region_name, tag_key, tag_value): 54 | """Create autoscaling group with aws tags.""" 55 | client = boto3.client("autoscaling", region_name=region_name) 56 | client.create_launch_configuration( 57 | LaunchConfigurationName="lc-test", 58 | ImageId="ami-02df9ea15c1778c9c", 59 | InstanceType="t2.micro", 60 | ) 61 | asg = client.create_auto_scaling_group( 62 | AutoScalingGroupName="asg-test", 63 | MaxSize=5, 64 | DesiredCapacity=3, 65 | MinSize=1, 66 | LaunchConfigurationName="lc-test", 67 | 
AvailabilityZones=[region_name + "a", region_name + "b"], 68 | Tags=[ 69 | { 70 | "ResourceId": "asg-test", 71 | "ResourceType": "auto-scaling-group", 72 | "Key": tag_key, 73 | "Value": tag_value, 74 | "PropagateAtLaunch": True, 75 | } 76 | ], 77 | ) 78 | return client.describe_auto_scaling_groups(AutoScalingGroupNames=["asg-test"]) 79 | 80 | 81 | def launch_rds_instance(region_name, tag_key, tag_value): 82 | """Create rds instances with aws tags.""" 83 | client = boto3.client("rds", region_name=region_name) 84 | rds_instance = client.create_db_instance( 85 | DBInstanceIdentifier="db-instance", 86 | AllocatedStorage=10, 87 | DBName="db-instance", 88 | DBInstanceClass="db.m4.large", 89 | Engine="mariadb", 90 | MasterUsername="root", 91 | MasterUserPassword="IamNotHere", 92 | Tags=[ 93 | {"Key": "Name", "Value": "db-instance"}, 94 | {"Key": tag_key, "Value": tag_value}, 95 | ], 96 | ) 97 | return rds_instance 98 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion=4.6.0 3 | envlist = py313,pytest,flake8,black,pylint,mypy 4 | skipsdist = True 5 | 6 | # Unit tests 7 | [testenv:pytest] 8 | basepython = python3 9 | skip_install = true 10 | deps = 11 | botocore==1.* 12 | boto3==1.* 13 | moto==5.* 14 | pytest==8.* 15 | pytest-cov==6.* 16 | pytest-pythonpath==0.* 17 | docker==7.* 18 | commands = 19 | coverage run -m pytest tests/unit --cov package 20 | 21 | # Autoformatter 22 | [testenv:black] 23 | basepython = python3 24 | skip_install = true 25 | deps = 26 | black==25.* 27 | commands = 28 | black package/ --check 29 | 30 | # Linters 31 | [testenv:flake8] 32 | basepython = python3 33 | skip_install = true 34 | deps = 35 | flake8==7.* 36 | flake8-colors==0.* 37 | flake8-docstrings==1.* 38 | flake8-import-order==0.* 39 | flake8-typing-imports==1.* 40 | pep8-naming==0.* 41 | pycodestyle==2.* 42 | importlib_metadata==8.* 43 | 
commands = 44 | flake8 package/ 45 | 46 | [testenv:pylint] 47 | basepython = python3 48 | skip_install = true 49 | deps = 50 | pyflakes==3.* 51 | pylint==3.* 52 | commands = 53 | pylint package/ --rcfile=tests/sanity/.pylintrc 54 | 55 | [testenv:mypy] 56 | basepython = python3 57 | skip_install = true 58 | deps = 59 | mypy==1.* 60 | commands = 61 | mypy --ignore-missing-imports package/ 62 | 63 | [flake8] 64 | ignore = D401 65 | max-complexity = 10 66 | max-line-length = 88 67 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # Terraform variables file 2 | 3 | variable "schedule_expression" { 4 | description = "Define the aws event rule schedule expression, https://docs.aws.amazon.com/scheduler/latest/UserGuide/schedule-types.html" 5 | type = string 6 | default = "cron(0 22 ? * MON-FRI *)" 7 | } 8 | 9 | variable "schedule_expression_timezone" { 10 | description = "Timezone in which the scheduling expression is evaluated. Example : 'America/New_York', 'Europe/Paris'" 11 | type = string 12 | default = "UTC" 13 | } 14 | 15 | variable "name" { 16 | description = "Define name to use for lambda function, cloudwatch event and iam role" 17 | type = string 18 | } 19 | 20 | variable "custom_iam_role_arn" { 21 | description = "Custom IAM role arn for the scheduling lambda" 22 | type = string 23 | default = null 24 | } 25 | 26 | variable "kms_key_arn" { 27 | description = "The ARN for the KMS encryption key. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key." 
28 | type = string 29 | default = null 30 | } 31 | 32 | variable "aws_regions" { 33 | description = "A list of one or more aws regions where the lambda will be apply, default use the current region" 34 | type = list(string) 35 | default = null 36 | } 37 | 38 | variable "runtime" { 39 | description = "The runtime environment for the Lambda function that you are uploading" 40 | type = string 41 | default = "python3.13" 42 | } 43 | 44 | variable "schedule_action" { 45 | description = "Define schedule action to apply on resources, accepted value are 'stop or 'start" 46 | type = string 47 | default = "stop" 48 | } 49 | 50 | variable "resources_tag" { 51 | # This variable has been renamed to "scheduler_tag" 52 | description = "DEPRECATED, use scheduler_tag variable instead" 53 | type = map(string) 54 | default = null 55 | } 56 | 57 | variable "scheduler_tag" { 58 | description = "Set the tag to use for identify aws resources to stop or start" 59 | type = map(string) 60 | 61 | default = { 62 | "key" = "tostop" 63 | "value" = "true" 64 | } 65 | } 66 | 67 | variable "autoscaling_schedule" { 68 | description = "Enable scheduling on autoscaling resources" 69 | type = bool 70 | default = false 71 | } 72 | 73 | variable "autoscaling_terminate_instances" { 74 | description = "Terminate instances when autoscaling group is scheduled to stop" 75 | type = bool 76 | default = false 77 | } 78 | 79 | variable "ec2_schedule" { 80 | description = "Enable scheduling on ec2 resources" 81 | type = bool 82 | default = false 83 | } 84 | 85 | variable "documentdb_schedule" { 86 | description = "Enable scheduling on documentdb resources" 87 | type = bool 88 | default = false 89 | } 90 | 91 | variable "ecs_schedule" { 92 | description = "Enable scheduling on ecs services" 93 | type = bool 94 | default = false 95 | } 96 | 97 | variable "rds_schedule" { 98 | description = "Enable scheduling on rds resources" 99 | type = bool 100 | default = false 101 | } 102 | 103 | variable "redshift_schedule" { 
104 | description = "Enable scheduling on redshift resources" 105 | type = bool 106 | default = false 107 | } 108 | 109 | variable "cloudwatch_alarm_schedule" { 110 | description = "Enable scheduleding on cloudwatch alarm resources" 111 | type = bool 112 | default = false 113 | } 114 | 115 | variable "transfer_schedule" { 116 | description = "Enable scheduling on AWS Transfer (SFTP) servers" 117 | type = bool 118 | default = false 119 | } 120 | 121 | variable "tags" { 122 | description = "Custom tags on aws resources" 123 | type = map(any) 124 | default = null 125 | } 126 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | archive = { 10 | source = "hashicorp/archive" 11 | version = "2.3.0" 12 | } 13 | } 14 | } 15 | --------------------------------------------------------------------------------