├── examples ├── test_fixture │ ├── outputs.tf │ ├── variables.tf │ ├── versions.tf │ ├── terraform.tftest.hcl │ └── main.tf ├── date-exclusion │ ├── outputs.tf │ ├── variables.tf │ ├── test-execution │ │ ├── versions.tf │ │ ├── variables.tf │ │ ├── outputs.tf │ │ ├── main.tf │ │ └── wait_instances.py │ ├── versions.tf │ ├── vpc.tf │ ├── terraform.tftest.hcl │ └── main.tf ├── ecs-scheduler │ ├── variables.tf │ ├── versions.tf │ ├── terraform.tftest.hcl │ ├── cloudwatch_alarm.tf │ ├── iam.tf │ ├── outputs.tf │ ├── main.tf │ ├── vpc.tf │ └── ecs.tf ├── timezone-scheduler │ ├── outputs.tf │ ├── variables.tf │ ├── versions.tf │ ├── terraform.tftest.hcl │ └── main.tf ├── rds-scheduler │ ├── test-cleanup │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── main.tf │ ├── variables.tf │ ├── versions.tf │ ├── test-execution │ │ ├── versions.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── wait_rds_cluster.py │ │ ├── wait_rds_instance.py │ │ └── main.tf │ ├── vpc.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── cloudwatch_alarm.tf │ └── main.tf ├── documentdb-scheduler │ ├── test-cleanup │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── main.tf │ ├── variables.tf │ ├── versions.tf │ ├── test-execution │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── main.tf │ │ └── wait_documentdb_status.py │ ├── vpc.tf │ ├── terraform.tftest.hcl │ ├── outputs.tf │ └── main.tf ├── neptune-scheduler │ ├── test-cleanup │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── main.tf │ ├── variables.tf │ ├── versions.tf │ ├── test-execution │ │ ├── versions.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── main.tf │ │ └── wait_neptune_status.py │ ├── vpc.tf │ ├── terraform.tftest.hcl │ ├── outputs.tf │ └── main.tf ├── autoscaling-scheduler-terminate-instances │ ├── variables.tf │ ├── vpc.tf │ ├── versions.tf │ ├── terraform.tftest.hcl │ ├── outputs.tf │ ├── cloudwatch_alarm.tf │ └── main.tf ├── transfer-scheduler │ ├── 
variables.tf │ ├── versions.tf │ ├── test-execution │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── main.tf │ │ └── wait_transfer_status.py │ ├── vpc.tf │ ├── terraform.tftest.hcl │ ├── outputs.tf │ └── main.tf ├── autoscaling-scheduler │ ├── variables.tf │ ├── vpc.tf │ ├── test-execution │ │ ├── versions.tf │ │ ├── variables.tf │ │ ├── outputs.tf │ │ └── main.tf │ ├── versions.tf │ ├── outputs.tf │ ├── terraform.tftest.hcl │ ├── cloudwatch_alarm.tf │ └── main.tf ├── instance-scheduler │ ├── variables.tf │ ├── versions.tf │ ├── vpc.tf │ ├── test-execution │ │ ├── versions.tf │ │ ├── variables.tf │ │ ├── outputs.tf │ │ ├── main.tf │ │ └── wait_instances.py │ ├── outputs.tf │ ├── cloudwatch_alarm.tf │ ├── terraform.tftest.hcl │ └── main.tf └── redshift-scheduler │ ├── variables.tf │ ├── versions.tf │ ├── test-execution │ ├── outputs.tf │ ├── variables.tf │ ├── versions.tf │ ├── main.tf │ └── wait_redshift_status.py │ ├── vpc.tf │ ├── terraform.tftest.hcl │ ├── outputs.tf │ └── main.tf ├── tests ├── __init__.py └── unit │ ├── __init__.py │ ├── test_filter_resources_by_tags.py │ ├── utils.py │ ├── test_instance_handler.py │ └── test_rds_handler.py ├── versions.tofu ├── package └── scheduler │ ├── __init__.py │ ├── utils.py │ ├── waiters.py │ ├── filter_resources_by_tags.py │ ├── transfer_handler.py │ ├── redshift_handler.py │ ├── documentdb_handler.py │ ├── main.py │ ├── cloudwatch_handler.py │ ├── ecs_handler.py │ ├── instance_handler.py │ └── rds_handler.py ├── cloudwatch.tf ├── .gitignore ├── versions.tf ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── workflows │ └── CI.yml ├── requirements-dev.txt ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .pre-commit-config.yaml ├── outputs.tf ├── main.tf └── variables.tf /examples/test_fixture/outputs.tf: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /examples/date-exclusion/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/test_fixture/variables.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/variables.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Main entry point for unit tests.""" 2 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | """Main entry point for unit tests.""" 2 | -------------------------------------------------------------------------------- /versions.tofu: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.8" 3 | } 4 | -------------------------------------------------------------------------------- /package/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | """Module containing the logic for the lambda scheduler entry-points.""" 2 | -------------------------------------------------------------------------------- /examples/date-exclusion/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Enable test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Enable test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/rds-scheduler/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/instance-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/variables.tf: -------------------------------------------------------------------------------- 1 | variable "test_mode" { 2 | description = "Whether to run in test mode" 3 | type = bool 4 | default = false 5 | } 6 | -------------------------------------------------------------------------------- /cloudwatch.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "this" { 2 | name = "/aws/lambda/${var.name}" 3 | retention_in_days = 14 4 | tags = var.tags 5 | } 6 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "neptune_cluster_name" { 2 | description = "Name of the Neptune cluster to start before deletion" 3 | type = string 4 | } 5 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "docdb_cluster_name" { 2 | description = "Name of the DocumentDB cluster to start before deletion" 3 | type = string 4 | } 5 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rds_aurora_cluster_name" { 2 | description = "Name of the rds aurora cluster to start before deletion" 3 | type = string 4 | } 5 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "this" { 2 | cidr_block = "10.0.0.0/16" 3 | } 4 | 5 | resource "aws_subnet" "this" { 6 | vpc_id = aws_vpc.this.id 7 | cidr_block = "10.0.1.0/24" 8 | } 9 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "this" { 2 | cidr_block = "10.0.0.0/16" 3 | } 4 | 5 | resource "aws_subnet" "this" { 6 | vpc_id = aws_vpc.this.id 7 | cidr_block = "10.0.1.0/24" 8 | } 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | .terraform.lock.hcl 4 | *.tfstate* 5 | *.zip 6 | 
terraform.tfstate.d 7 | *.state 8 | 9 | # Python files 10 | .tox 11 | __pycache__ 12 | .pytest_cache 13 | .coverage 14 | .mypy_cache 15 | .venv 16 | 17 | # IDE files 18 | .idea 19 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | archive = { 10 | source = "hashicorp/archive" 11 | version = "2.3.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/date-exclusion/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.0" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/date-exclusion/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | 
-------------------------------------------------------------------------------- /examples/ecs-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/rds-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/test_fixture/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | time = { 9 | source = "hashicorp/time" 10 | version = "0.13.0" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/versions.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/instance-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | 
required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 3.0.0, < 4.0" 7 | } 8 | local = { 9 | source = "hashicorp/local" 10 | version = ">= 2.0.0, < 3.0" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 3.0.0, < 4.0" 7 | } 8 | local = { 9 | source = "hashicorp/local" 10 | version = ">= 2.0.0, < 3.0" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.94.1" 8 | } 9 | 
random = { 10 | source = "hashicorp/random" 11 | version = ">= 3.0.0, < 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "✨ Feature request" 3 | about: Suggest an idea for this project 4 | --- 5 | 6 | ##### SUMMARY 7 | 8 | 9 | ##### ISSUE TYPE 10 | - Feature Idea 11 | 12 | ##### Additional context 13 | Add any other context or screenshots about the feature request here. 14 | -------------------------------------------------------------------------------- /examples/instance-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = "10.0.0.0/16" 3 | enable_dns_hostnames = true 4 | enable_dns_support = true 5 | } 6 | 7 | resource "aws_subnet" "public" { 8 | vpc_id = aws_vpc.main.id 9 | cidr_block = "10.0.1.0/24" 10 | map_public_ip_on_launch = true 11 | availability_zone = "${data.aws_region.current.name}a" 12 | } 13 | -------------------------------------------------------------------------------- /examples/instance-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = 
"hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/date-exclusion/vpc.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | resource "aws_vpc" "test" { 4 | cidr_block = "10.0.0.0/16" 5 | enable_dns_hostnames = true 6 | enable_dns_support = true 7 | } 8 | 9 | resource "aws_subnet" "test" { 10 | vpc_id = aws_vpc.test.id 11 | cidr_block = "10.0.1.0/24" 12 | map_public_ip_on_launch = true 13 | availability_zone = "${data.aws_region.current.name}a" 14 | } 15 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 3.0.0, < 4.0" 7 | } 8 | local = { 9 | source = "hashicorp/local" 10 | version = ">= 2.0.0, < 3.0" 11 | } 12 | random = { 13 | source = "hashicorp/random" 14 | version = ">= 3.0.0, < 4.0" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "docdb_cluster_to_scheduled_state" { 2 | description = "State of the DocumentDB cluster that should be stopped" 3 | value = data.local_file.docdb_cluster_to_scheduled.content 4 | } 5 | 6 | output "docdb_cluster_not_scheduled_state" { 7 | description = "State of the DocumentDB cluster that should not be stopped" 8 | value = data.local_file.docdb_cluster_not_scheduled.content 9 | } 10 | 
-------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "transfer_server_to_scheduled_state" { 2 | description = "State of the Transfer server that should be stopped" 3 | value = data.local_file.transfer_server_to_scheduled.content 4 | } 5 | 6 | output "transfer_server_not_scheduled_state" { 7 | description = "State of the Transfer server that should not be stopped" 8 | value = data.local_file.transfer_server_not_scheduled.content 9 | } 10 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function" 3 | type = string 4 | } 5 | 6 | variable "transfer_server_to_scheduled_id" { 7 | description = "ID of the scheduled Transfer server" 8 | type = string 9 | } 10 | 11 | variable "transfer_server_not_scheduled_id" { 12 | description = "ID of the non-scheduled Transfer server" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "redshift_cluster_to_scheduled_state" { 2 | description = "State of the Redshift cluster that should be stopped" 3 | value = data.local_file.redshift_cluster_to_scheduled.content 4 | } 5 | 6 | output "redshift_cluster_not_scheduled_state" { 7 | description = "State of the Redshift cluster that should not be stopped" 8 | value = data.local_file.redshift_cluster_not_scheduled.content 9 | } 10 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the Lambda function used for stopping instances" 3 | type = string 4 | } 5 | 6 | variable "asg_scheduled_name" { 7 | description = "Name of the scheduled autoscaling group" 8 | type = string 9 | } 10 | 11 | variable "asg_not_scheduled_name" { 12 | description = "Name of the not scheduled autoscaling group" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/test_fixture/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | assert { 5 | condition = module.aws_stop_friday.scheduler_lambda_name == "stop-aws-${random_pet.suffix.id}" 6 | error_message = "Invalid Stop lambda name" 7 | } 8 | 9 | assert { 10 | condition = module.aws_start_monday.scheduler_lambda_name == "start-aws-${random_pet.suffix.id}" 11 | error_message = "Invalid Start lambda name" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | assert { 5 | condition = module.ecs_stop_friday.scheduler_lambda_name == "stop-ecs-${random_pet.suffix.id}" 6 | error_message = "Invalid Stop lambda name" 7 | } 8 | 9 | assert { 10 | condition = module.ecs_start_monday.scheduler_lambda_name == "start-ecs-${random_pet.suffix.id}" 11 | error_message = "Invalid Start lambda name" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "neptune_cluster_to_scheduled_status" { 2 | 
description = "Status of the Neptune cluster that should be scheduled for stop/start" 3 | value = data.local_file.neptune_cluster_to_scheduled.content 4 | } 5 | 6 | output "neptune_cluster_not_scheduled_status" { 7 | description = "Status of the Neptune cluster that should not be scheduled for stop/start" 8 | value = data.local_file.neptune_cluster_not_scheduled.content 9 | } 10 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function to stop DocumentDB clusters" 3 | type = string 4 | } 5 | 6 | variable "docdb_cluster_to_scheduled_name" { 7 | description = "Name of the DocumentDB cluster that should be stopped" 8 | type = string 9 | } 10 | 11 | variable "docdb_cluster_not_scheduled_name" { 12 | description = "Name of the DocumentDB cluster that should not be stopped" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function to stop Redshift clusters" 3 | type = string 4 | } 5 | 6 | variable "redshift_cluster_to_scheduled_name" { 7 | description = "Name of the Redshift cluster that should be stopped" 8 | type = string 9 | } 10 | 11 | variable "redshift_cluster_not_scheduled_name" { 12 | description = "Name of the Redshift cluster that should not be stopped" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 
required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 5.94.1" 7 | } 8 | 
null = { 9 | source = "hashicorp/null" 10 | version = ">= 3.0.0, < 4.0" 11 | } 12 | local = { 13 | source = "hashicorp/local" 14 | version = ">= 2.0.0, < 3.0" 15 | } 16 | time = { 17 | source = "hashicorp/time" 18 | version = "0.13.0" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "neptune_cluster_to_scheduled_name" { 2 | description = "Name of the Neptune cluster that should be scheduled for stop/start" 3 | type = string 4 | } 5 | 6 | variable "neptune_cluster_not_scheduled_name" { 7 | description = "Name of the Neptune cluster that should not be scheduled for stop/start" 8 | type = string 9 | } 10 | 11 | variable "lambda_stop_name" { 12 | description = "Name of the Lambda function that stops the Neptune cluster" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.autoscaling_stop_friday.scheduler_lambda_name == "stop-autoscaling-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.autoscaling_start_monday.scheduler_lambda_name == "start-autoscaling-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/date-exclusion/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the lambda function to 
stop EC2" 3 | type = string 4 | } 5 | 6 | variable "instance_1_to_scheduled_id" { 7 | description = "Instance 1 ID to test scheduled action" 8 | type = string 9 | } 10 | 11 | variable "instance_2_to_scheduled_id" { 12 | description = "Instance 2 ID to test scheduled action" 13 | type = string 14 | } 15 | 16 | variable "instance_not_to_scheduled_id" { 17 | description = "Instance ID not to be scheduled" 18 | type = string 19 | } 20 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "rds_aurora_cluster_to_scheduled" { 2 | description = "The status of the RDS cluster" 3 | value = data.local_file.rds_aurora_cluster_to_scheduled.content 4 | } 5 | 6 | output "rds_mariadb_instance_to_scheduled" { 7 | description = "The status of the RDS instance" 8 | value = data.local_file.rds_mariadb_instance_to_scheduled.content 9 | } 10 | 11 | output "rds_mysql_instance_to_not_scheduled" { 12 | description = "The status of the RDS instance" 13 | value = data.local_file.rds_mysql_instance_to_not_scheduled.content 14 | } 15 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "service_count" { 2 | alarm_name = "ecs-cluster-hello-service-count" 3 | comparison_operator = "LessThanThreshold" 4 | evaluation_periods = "2" 5 | metric_name = "CPUUtilization" 6 | namespace = "AWS/ECS" 7 | period = "60" 8 | statistic = "SampleCount" 9 | threshold = "2" 10 | alarm_description = "Less than 2 Running Service on cluster" 11 | dimensions = { 12 | ClusterName = aws_ecs_cluster.this.id 13 | } 14 | 15 | tags = { 16 | tostop = "true" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- 
/examples/rds-scheduler/test-execution/variables.tf: -------------------------------------------------------------------------------- 1 | variable "lambda_stop_name" { 2 | description = "Name of the Lambda function used for stopping instances" 3 | type = string 4 | } 5 | 6 | variable "rds_aurora_cluster_to_scheduled_name" { 7 | description = "rds cluster name to be scheduled" 8 | type = string 9 | } 10 | 11 | variable "rds_mariadb_instance_to_scheduled_name" { 12 | description = "rds instance name to be scheduled" 13 | type = string 14 | } 15 | 16 | variable "rds_mysql_instance_to_not_scheduled_name" { 17 | description = "rds instance name to not be scheduled" 18 | type = string 19 | } 20 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "transfer" { 2 | cidr_block = "10.0.0.0/16" 3 | tags = { 4 | Name = "transfer-vpc-${random_pet.suffix.id}" 5 | } 6 | } 7 | 8 | resource "aws_subnet" "transfer_1" { 9 | vpc_id = aws_vpc.transfer.id 10 | cidr_block = "10.0.1.0/24" 11 | availability_zone = "eu-west-1a" 12 | map_public_ip_on_launch = true 13 | } 14 | 15 | resource "aws_subnet" "transfer_2" { 16 | vpc_id = aws_vpc.transfer.id 17 | cidr_block = "10.0.2.0/24" 18 | availability_zone = "eu-west-1b" 19 | map_public_ip_on_launch = true 20 | } 21 | -------------------------------------------------------------------------------- /examples/date-exclusion/test-execution/outputs.tf: -------------------------------------------------------------------------------- 1 | output "instance_1_scheduled_state" { 2 | description = "State of instance 1 after scheduler execution" 3 | value = data.aws_instance.instance_1_to_scheduled_id.instance_state 4 | } 5 | 6 | output "instance_2_scheduled_state" { 7 | description = "State of instance 2 after scheduler execution" 8 | value = 
data.aws_instance.instance_2_to_scheduled_id.instance_state 9 | } 10 | 11 | output "instance_not_scheduled_state" { 12 | description = "State of instance not scheduled after scheduler execution" 13 | value = data.aws_instance.instance_not_to_scheduled_id.instance_state 14 | } 15 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "hello_ecs_task_execution_role" { 2 | name = "hello-ecsTaskExecutionRole" 3 | 4 | assume_role_policy = < 8 | 9 | ##### ISSUE TYPE 10 | - Bug Report 11 | 12 | ##### TERRAFORM VERSION 13 | 14 | ```paste below 15 | 16 | ``` 17 | 18 | ##### STEPS TO REPRODUCE 19 | 20 | 21 | 22 | ```paste below 23 | 24 | ``` 25 | 26 | 27 | 28 | ##### EXPECTED RESULTS 29 | 30 | 31 | 32 | ##### ACTUAL RESULTS 33 | 34 | 35 | 36 | ```paste below 37 | 38 | ``` 39 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | pull_request: 5 | schedule: 6 | - cron: '0 18 * * SUN' 7 | 8 | jobs: 9 | pre-commit: 10 | name: Pre-commit checks 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout repository 14 | uses: actions/checkout@v4 15 | 16 | - name: Set up Python 17 | uses: actions/setup-python@v5 18 | with: 19 | python-version: '3.13' 20 | 21 | - name: Install Terraform 22 | uses: hashicorp/setup-terraform@v3 23 | with: 24 | terraform_version: 'latest' 25 | 26 | - name: Install TFLint 27 | uses: terraform-linters/setup-tflint@v4 28 | with: 29 | tflint_version: v0.58.0 30 | 31 | - name: Install dependencies 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install -r requirements-dev.txt 35 | 36 | - name: Run pre-commit 37 | uses: pre-commit/action@v3.0.1 38 | 
"""Utility functions for scheduler operations."""

import logging
from datetime import datetime

# Strings (lowercased) that strtobool treats as True.
_TRUTHY_VALUES = frozenset({"yes", "true", "t", "1"})


def is_date_excluded(excluded_dates: list[str]) -> bool:
    """Return True when today's date is one of the excluded dates.

    Args:
        excluded_dates: Dates in MM-DD format on which scheduling is skipped.

    Returns:
        True if the current date (local time of the runtime — NOTE(review):
        presumably UTC inside Lambda, confirm TZ expectations) matches an
        entry in ``excluded_dates``; False otherwise, including when the
        list is empty or None.
    """
    if not excluded_dates:
        return False

    today = datetime.now().strftime("%m-%d")
    if today not in excluded_dates:
        return False

    logging.info(
        "Skipping execution - current date (%s) is in excluded dates: %s",
        today,
        excluded_dates,
    )
    return True


def strtobool(value: str) -> bool:
    """Convert a string to boolean: "yes"/"true"/"t"/"1" (any case) are True."""
    return value.lower() in _TRUTHY_VALUES
# Terraform rds-scheduler outputs
module.rds_start_monday.scheduler_lambda_name 16 | } 17 | 18 | output "lambda_start_arn" { 19 | description = "The ARN of the lambda function to start the RDS cluster" 20 | value = module.rds_start_monday.scheduler_lambda_arn 21 | } 22 | 23 | output "rds_aurora_cluster_name" { 24 | description = "The name of the scheduled RDS cluster" 25 | value = aws_rds_cluster.aurora_scheduled.cluster_identifier 26 | } 27 | 28 | output "rds_aurora_instance_name" { 29 | description = "The name of the scheduled RDS instance" 30 | value = aws_rds_cluster_instance.aurora_scheduled.identifier 31 | } 32 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | assert { 5 | condition = module.aws_stop_friday.scheduler_lambda_name == "stop-aws-${random_pet.suffix.id}" 6 | error_message = "Invalid Stop lambda name" 7 | } 8 | 9 | assert { 10 | condition = module.aws_start_monday.scheduler_lambda_name == "start-aws-${random_pet.suffix.id}" 11 | error_message = "Invalid Start lambda name" 12 | } 13 | 14 | assert { 15 | condition = module.aws_stop_friday.scheduler_expression == "cron(0 23 ? * FRI *)" 16 | error_message = "Invalid scheduler expression" 17 | } 18 | 19 | assert { 20 | condition = module.aws_start_monday.scheduler_expression == "cron(0 07 ? 
* MON *)" 21 | error_message = "Invalid scheduler expression" 22 | } 23 | 24 | assert { 25 | condition = module.aws_stop_friday.scheduler_timezone == "Europe/Paris" 26 | error_message = "Invalid scheduler timezone" 27 | } 28 | 29 | assert { 30 | condition = module.aws_start_monday.scheduler_timezone == "Europe/Berlin" 31 | error_message = "Invalid scheduler timezone" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /examples/test_fixture/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_kms_key" "scheduler" { 5 | description = "test kms option on scheduler module" 6 | deletion_window_in_days = 7 7 | } 8 | 9 | module "aws_stop_friday" { 10 | source = "../.." 11 | name = "stop-aws-${random_pet.suffix.id}" 12 | kms_key_arn = aws_kms_key.scheduler.arn 13 | schedule_expression = "cron(0 23 ? * FRI *)" 14 | schedule_action = "stop" 15 | autoscaling_schedule = "true" 16 | ec2_schedule = "true" 17 | rds_schedule = "true" 18 | 19 | scheduler_tag = { 20 | key = "tostop" 21 | value = "true-${random_pet.suffix.id}" 22 | } 23 | } 24 | 25 | module "aws_start_monday" { 26 | source = "../.." 27 | name = "start-aws-${random_pet.suffix.id}" 28 | schedule_expression = "cron(0 07 ? * MON *)" 29 | schedule_action = "start" 30 | autoscaling_schedule = "true" 31 | ec2_schedule = "true" 32 | rds_schedule = "true" 33 | 34 | scheduler_tag = { 35 | key = "tostop" 36 | value = "true-${random_pet.suffix.id}" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /examples/timezone-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | module "aws_stop_friday" { 5 | source = "../.." 
class AwsWaiters:
    """Thin wrapper around boto3 EC2 waiters."""

    def __init__(self, region_name=None) -> None:
        """Initialize the EC2 client, optionally pinned to *region_name*."""
        if region_name:
            self.ec2 = boto3.client("ec2", region_name=region_name)
        else:
            self.ec2 = boto3.client("ec2")

    def instance_running(self, instance_ids: List[str]) -> None:
        """Aws waiter for instance running.

        Wait until every EC2 instance in *instance_ids* reaches the
        running state. No-op when the list is empty. Polls every 15
        seconds for up to 15 attempts before the waiter itself raises.

        :param list instance_ids:
            The instance IDs to wait for.
        """
        if not instance_ids:
            return
        # get_waiter() is a local model lookup (no API call); keep it
        # outside the try so `instance_waiter` is always bound when the
        # except handler references it.
        instance_waiter = self.ec2.get_waiter("instance_running")
        try:
            instance_waiter.wait(
                InstanceIds=instance_ids,
                WaiterConfig={"Delay": 15, "MaxAttempts": 15},
            )
        except ClientError as exc:
            ec2_exception("waiter", instance_waiter, exc)
# Start scheduler: mirror of ecs_stop_friday. rds_schedule is pinned to
# "false" explicitly so the stop/start pair stays symmetric regardless of
# the module's default for that flag.
module "ecs_start_monday" {
  source                    = "../../"
  name                      = "start-ecs-${random_pet.suffix.id}"
  schedule_expression       = "cron(0 07 ? * MON *)"
  schedule_action           = "start"
  ec2_schedule              = "false"
  ecs_schedule              = "true"
  rds_schedule              = "false"
  autoscaling_schedule      = "false"
  cloudwatch_alarm_schedule = "true"

  scheduler_tag = {
    key   = "tostop"
    value = "true"
  }
}
@pytest.mark.parametrize(
    "aws_region, instance_tag, scheduler_tag, result_count",
    [
        (
            "eu-west-1",
            [{"Key": "tostop-ec2-test-1", "Values": ["true"]}],
            [{"Key": "tostop-ec2-test-1", "Values": ["true"]}],
            2,
        ),
        (
            "eu-west-1",
            [{"Key": "badtagkey", "Values": ["badtagvalue"]}],
            [{"Key": "tostop-ec2-test-1", "Values": ["true"]}],
            0,
        ),
    ],
)
@mock_aws
def test_filter_instances(aws_region, instance_tag, scheduler_tag, result_count):
    """Filter instances class method."""
    # Two instances carrying the candidate tag, three with a decoy tag.
    matching = instance_tag[0]
    launch_ec2_instances(2, aws_region, matching["Key"], "".join(matching["Values"]))
    launch_ec2_instances(3, aws_region, "wrongkey", "wrongvalue")

    tag_api = FilterByTags(region_name=aws_region)
    arns = tag_api.get_resources("ec2:instance", scheduler_tag)

    assert sum(1 for _ in arns) == result_count
"wait_running_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_instances.py running \ 5 | ${var.instance_1_to_scheduled_id} \ 6 | ${var.instance_2_to_scheduled_id} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "this" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_running_state] 20 | } 21 | 22 | resource "null_resource" "wait_exclusion_check" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | echo "Waiting for exclusion date verification..." 26 | sleep 30 27 | EOT 28 | } 29 | 30 | depends_on = [aws_lambda_invocation.this] 31 | } 32 | 33 | data "aws_instance" "instance_1_to_scheduled_id" { 34 | instance_id = var.instance_1_to_scheduled_id 35 | 36 | depends_on = [null_resource.wait_exclusion_check] 37 | } 38 | 39 | data "aws_instance" "instance_2_to_scheduled_id" { 40 | instance_id = var.instance_2_to_scheduled_id 41 | 42 | depends_on = [null_resource.wait_exclusion_check] 43 | } 44 | 45 | data "aws_instance" "instance_not_to_scheduled_id" { 46 | instance_id = var.instance_not_to_scheduled_id 47 | 48 | depends_on = [null_resource.wait_exclusion_check] 49 | } 50 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.neptune_stop_friday.scheduler_lambda_name == "stop-neptune-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.neptune_start_monday.scheduler_lambda_name == "start-neptune-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = 
module.test_execution[0].neptune_cluster_to_scheduled_status == "stopped\n" 20 | error_message = "neptune cluster with tag 'tostop=true' should be stopped" 21 | } 22 | 23 | assert { 24 | condition = module.test_execution[0].neptune_cluster_not_scheduled_status == "available\n" 25 | error_message = "neptune cluster with tag 'tostop=false' should not be stopped" 26 | } 27 | } 28 | 29 | # Add this cleanup step to restore the cluster to 'available' state before destruction 30 | run "cleanup_test_resources" { 31 | command = apply 32 | 33 | variables { 34 | neptune_cluster_name = run.create_test_infrastructure.neptune_cluster_scheduled_identifier 35 | } 36 | 37 | # This will start the stopped cluster to ensure proper deletion 38 | module { 39 | source = "./test-cleanup" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.documentdb_stop_friday.scheduler_lambda_name == "stop-documentdb-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.documentdb_start_monday.scheduler_lambda_name == "start-documentdb-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test_execution[0].docdb_cluster_to_scheduled_state == "stopping\n" 20 | error_message = "DocumentDB cluster with tag 'tostop=true' should be stopped" 21 | } 22 | 23 | assert { 24 | condition = module.test_execution[0].docdb_cluster_not_scheduled_state == "available\n" 25 | error_message = "DocumentDB cluster with tag 'tostop=false' should not be stopped" 26 | } 27 | } 28 | 29 | # Add this cleanup step to restore the cluster to 'available' state before 
destruction 30 | run "cleanup_test_resources" { 31 | command = apply 32 | 33 | variables { 34 | docdb_cluster_name = run.create_test_infrastructure.docdb_cluster_scheduled_identifier 35 | docdb_instance_name = run.create_test_infrastructure.docdb_instance_scheduled_identifier 36 | } 37 | 38 | # This will start the stopped cluster to ensure proper deletion 39 | module { 40 | source = "./test-cleanup" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /examples/instance-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "scheduled" { 2 | count = 3 3 | alarm_name = "instance-scheduled-autorecovery-${count.index}" 4 | namespace = "AWS/EC2" 5 | evaluation_periods = "2" 6 | period = "60" 7 | alarm_description = "This metric auto recovers EC2 instances" 8 | alarm_actions = ["arn:aws:automate:${data.aws_region.current.name}:ec2:reboot"] 9 | statistic = "Minimum" 10 | comparison_operator = "GreaterThanThreshold" 11 | threshold = "0.0" 12 | metric_name = "StatusCheckFailed_Instance" 13 | dimensions = { 14 | InstanceId = aws_instance.scheduled[count.index].id 15 | } 16 | 17 | tags = { 18 | tostop = "true" 19 | } 20 | } 21 | 22 | 23 | resource "aws_cloudwatch_metric_alarm" "not_scheduled" { 24 | count = 2 25 | alarm_name = "instance-not-scheduled-autorecovery-${count.index}" 26 | namespace = "AWS/EC2" 27 | evaluation_periods = "2" 28 | period = "60" 29 | alarm_description = "This metric auto recovers EC2 instances" 30 | alarm_actions = ["arn:aws:automate:${data.aws_region.current.name}:ec2:reboot"] 31 | statistic = "Minimum" 32 | comparison_operator = "GreaterThanThreshold" 33 | threshold = "0.0" 34 | metric_name = "StatusCheckFailed_Instance" 35 | dimensions = { 36 | InstanceId = aws_instance.not_scheduled[count.index].id 37 | } 38 | 39 | tags = { 40 | tostop = "false" 41 | } 42 | } 43 | 
-------------------------------------------------------------------------------- /examples/rds-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.rds_stop_friday.scheduler_lambda_name == "stop-rds-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.rds_start_monday.scheduler_lambda_name == "start-rds-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test_execution[0].rds_aurora_cluster_to_scheduled == "stopped\n" 20 | error_message = "Invalid RDS cluster instance state" 21 | } 22 | 23 | assert { 24 | condition = module.test_execution[0].rds_mariadb_instance_to_scheduled == "stopped\n" 25 | error_message = "Invalid RDS instance state" 26 | } 27 | 28 | assert { 29 | condition = module.test_execution[0].rds_mysql_instance_to_not_scheduled == "available\n" 30 | error_message = "Invalid RDS instance state" 31 | } 32 | } 33 | 34 | # Add this cleanup step to restore the cluster to 'available' state before destruction 35 | run "cleanup_test_resources" { 36 | command = apply 37 | 38 | variables { 39 | rds_aurora_cluster_name = run.create_test_infrastructure.rds_aurora_cluster_name 40 | rds_aurora_instance_name = run.create_test_infrastructure.rds_aurora_instance_name 41 | } 42 | 43 | # This will start the stopped cluster to ensure proper deletion 44 | module { 45 | source = "./test-cleanup" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { "dockerfile": "Dockerfile", "context": "../" }, 3 | "mounts": [ 4 | 
# Deploy the instance-scheduler fixture and verify tagged instances are
# stopped while untagged ones keep running.
run "create_test_infrastructure" {
  command = apply

  variables {
    test_mode = true
  }

  assert {
    condition     = module.ec2_stop_friday.scheduler_lambda_name == "stop-ec2-${random_pet.suffix.id}"
    error_message = "Invalid Stop lambda name"
  }

  assert {
    condition     = module.ec2_start_monday.scheduler_lambda_name == "start-ec2-${random_pet.suffix.id}"
    error_message = "Invalid Start lambda name"
  }

  assert {
    condition     = module.test_execution[0].instance_1_scheduled_state == "stopped" || module.test_execution[0].instance_1_scheduled_state == "stopping"
    error_message = "Virtual machine 1 to stop is not stopped"
  }

  assert {
    condition     = module.test_execution[0].instance_2_scheduled_state == "stopped" || module.test_execution[0].instance_2_scheduled_state == "stopping"
    error_message = "Virtual machine 2 to stop is not stopped"
  }

  assert {
    condition     = module.test_execution[0].instance_3_scheduled_state == "stopped" || module.test_execution[0].instance_3_scheduled_state == "stopping"
    error_message = "Virtual machine 3 to stop is not stopped"
  }

  assert {
    condition     = module.test_execution[0].instance_1_not_scheduled_state == "running"
    error_message = "Virtual machine 1 not scheduled should still be running"
  }

  assert {
    condition     = module.test_execution[0].instance_2_not_scheduled_state == "running"
    error_message = "Virtual machine 2 not scheduled should still be running"
  }
}
function to start the Neptune cluster" 20 | value = module.neptune_start_monday.scheduler_lambda_arn 21 | } 22 | 23 | output "neptune_cluster_scheduled_identifier" { 24 | description = "The identifier of the scheduled Neptune cluster" 25 | value = aws_neptune_cluster.to_schedule.cluster_identifier 26 | } 27 | 28 | output "neptune_instance_scheduled_identifier" { 29 | description = "The identifier of the scheduled Neptune instance" 30 | value = aws_neptune_cluster_instance.to_schedule.identifier 31 | } 32 | 33 | output "neptune_cluster_not_scheduled_identifier" { 34 | description = "The identifier of the not scheduled Neptune cluster" 35 | value = aws_neptune_cluster.not_to_scheduled.cluster_identifier 36 | } 37 | 38 | output "neptune_instance_not_scheduled_identifier" { 39 | description = "The identifier of the not scheduled Neptune instance" 40 | value = aws_neptune_cluster_instance.not_to_scheduled.identifier 41 | } 42 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/outputs.tf: -------------------------------------------------------------------------------- 1 | # Terraform documentdb-scheduler outputs 2 | 3 | output "lambda_stop_name" { 4 | description = "The name of the lambda function to stop the documentdb cluster" 5 | value = module.documentdb_stop_friday.scheduler_lambda_name 6 | } 7 | 8 | output "lambda_stop_arn" { 9 | description = "The ARN of the lambda function to stop the documentdb cluster" 10 | value = module.documentdb_stop_friday.scheduler_lambda_arn 11 | } 12 | 13 | output "lambda_start_name" { 14 | description = "The name of the lambda function to start the documentdb cluster" 15 | value = module.documentdb_start_monday.scheduler_lambda_name 16 | } 17 | 18 | output "lambda_start_arn" { 19 | description = "The ARN of the lambda function to start the documentdb cluster" 20 | value = module.documentdb_start_monday.scheduler_lambda_arn 21 | } 22 | 23 | output 
"docdb_cluster_scheduled_identifier" { 24 | description = "The identifier of the scheduled documentdb cluster" 25 | value = aws_docdb_cluster.scheduled.cluster_identifier 26 | } 27 | 28 | output "docdb_instance_scheduled_identifier" { 29 | description = "The identifier of the scheduled documentdb instance" 30 | value = aws_docdb_cluster_instance.scheduled.identifier 31 | } 32 | 33 | output "docdb_cluster_not_scheduled_identifier" { 34 | description = "The identifier of the not scheduled documentdb cluster" 35 | value = aws_docdb_cluster.not_scheduled.cluster_identifier 36 | } 37 | 38 | output "docdb_instance_not_scheduled_identifier" { 39 | description = "The identifier of the not scheduled documentdb instance" 40 | value = aws_docdb_cluster_instance.not_scheduled.identifier 41 | } 42 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/terraform.tftest.hcl: -------------------------------------------------------------------------------- 1 | run "create_test_infrastructure" { 2 | command = apply 3 | 4 | variables { 5 | test_mode = true 6 | } 7 | 8 | assert { 9 | condition = module.autoscaling_stop_friday.scheduler_lambda_name == "stop-autoscaling-${random_pet.suffix.id}" 10 | error_message = "Invalid Stop lambda name" 11 | } 12 | 13 | assert { 14 | condition = module.autoscaling_start_monday.scheduler_lambda_name == "start-autoscaling-${random_pet.suffix.id}" 15 | error_message = "Invalid Start lambda name" 16 | } 17 | 18 | assert { 19 | condition = module.test_execution[0].asg_scheduled_suspended_processes == toset([ 20 | "AZRebalance", 21 | "AddToLoadBalancer", 22 | "AlarmNotification", 23 | "HealthCheck", 24 | "InstanceRefresh", 25 | "Launch", 26 | "RemoveFromLoadBalancerLowPriority", 27 | "ReplaceUnhealthy", 28 | "ScheduledActions", 29 | "Terminate", 30 | ]) 31 | error_message = "Autoscaling group instances should be suspended" 32 | } 33 | 34 | assert { 35 | condition = 
length(module.test_execution[0].asg_not_scheduled_suspended_processes) == 0 36 | error_message = "Autoscaling group instances should not be suspended" 37 | } 38 | 39 | assert { 40 | condition = module.test_execution[0].asg_instance_scheduled_state == "stopped" || module.test_execution[0].asg_instance_scheduled_state == "stopping" 41 | error_message = "Autoscaling group instance should be stopped" 42 | } 43 | 44 | assert { 45 | condition = module.test_execution[0].asg_instance_not_scheduled_state == "running" 46 | error_message = "Autoscaling group instance should be running" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "time_sleep" "before_stop_wait_60_seconds" { 2 | create_duration = "60s" 3 | } 4 | 5 | resource "aws_lambda_invocation" "this" { 6 | function_name = var.lambda_stop_name 7 | 8 | input = jsonencode({ 9 | key1 = "value1" 10 | key2 = "value2" 11 | }) 12 | 13 | depends_on = [time_sleep.before_stop_wait_60_seconds] 14 | } 15 | 16 | resource "time_sleep" "after_stop_wait_60_seconds" { 17 | create_duration = "60s" 18 | 19 | depends_on = [aws_lambda_invocation.this] 20 | } 21 | 22 | data "aws_autoscaling_group" "asg_scheduled" { 23 | name = var.asg_scheduled_name 24 | 25 | depends_on = [time_sleep.after_stop_wait_60_seconds] 26 | } 27 | 28 | data "aws_autoscaling_group" "asg_not_scheduled" { 29 | name = var.asg_not_scheduled_name 30 | 31 | depends_on = [time_sleep.after_stop_wait_60_seconds] 32 | } 33 | 34 | data "aws_instances" "asg_scheduled" { 35 | instance_tags = { 36 | "aws:autoscaling:groupName" = var.asg_scheduled_name 37 | } 38 | instance_state_names = [ 39 | "running", 40 | "shutting-down", 41 | "stopped", 42 | "stopping", 43 | ] 44 | depends_on = [time_sleep.after_stop_wait_60_seconds] 45 | } 46 | 47 | data "aws_instance" "asg_scheduled" { 48 | 
instance_id = data.aws_instances.asg_scheduled.ids[0] 49 | } 50 | 51 | data "aws_instances" "asg_not_scheduled" { 52 | instance_tags = { 53 | "aws:autoscaling:groupName" = var.asg_not_scheduled_name 54 | } 55 | instance_state_names = [ 56 | "running", 57 | "shutting-down", 58 | "stopped", 59 | "stopping", 60 | ] 61 | 62 | depends_on = [time_sleep.after_stop_wait_60_seconds] 63 | } 64 | 65 | data "aws_instance" "asg_not_scheduled" { 66 | instance_id = data.aws_instances.asg_not_scheduled.ids[0] 67 | } 68 | -------------------------------------------------------------------------------- /examples/transfer-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "suffix" {} 2 | 3 | resource "aws_transfer_server" "to_scheduled" { 4 | endpoint_type = "VPC" 5 | 6 | endpoint_details { 7 | subnet_ids = [aws_subnet.transfer_1.id] 8 | vpc_id = aws_vpc.transfer.id 9 | } 10 | 11 | protocols = ["SFTP"] 12 | 13 | tags = { 14 | tostop = "true-${random_pet.suffix.id}" 15 | } 16 | } 17 | 18 | resource "aws_transfer_server" "not_to_scheduled" { 19 | endpoint_type = "VPC" 20 | 21 | endpoint_details { 22 | subnet_ids = [aws_subnet.transfer_2.id] 23 | vpc_id = aws_vpc.transfer.id 24 | } 25 | 26 | protocols = ["SFTP"] 27 | 28 | tags = { 29 | tostop = "false" 30 | } 31 | } 32 | 33 | 34 | module "transfer_stop_friday" { 35 | source = "../.." 36 | 37 | name = "stop-transfer-${random_pet.suffix.id}" 38 | schedule_expression = "cron(0 23 ? * FRI *)" 39 | schedule_action = "stop" 40 | transfer_schedule = true 41 | 42 | scheduler_tag = { 43 | key = "tostop" 44 | value = "true-${random_pet.suffix.id}" 45 | } 46 | } 47 | 48 | module "transfer_start_monday" { 49 | source = "../.." 50 | 51 | name = "start-transfer-${random_pet.suffix.id}" 52 | schedule_expression = "cron(0 07 ? 
* MON *)" 53 | schedule_action = "start" 54 | transfer_schedule = true 55 | 56 | scheduler_tag = { 57 | key = "tostop" 58 | value = "true-${random_pet.suffix.id}" 59 | } 60 | } 61 | 62 | module "test_execution" { 63 | count = var.test_mode ? 1 : 0 64 | source = "./test-execution" 65 | 66 | lambda_stop_name = module.transfer_stop_friday.scheduler_lambda_name 67 | transfer_server_to_scheduled_id = aws_transfer_server.to_scheduled.id 68 | transfer_server_not_scheduled_id = aws_transfer_server.not_to_scheduled.id 69 | } 70 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "this" { 2 | cidr_block = "10.103.0.0/16" 3 | } 4 | 5 | resource "aws_subnet" "primary" { 6 | availability_zone = data.aws_availability_zones.available.names[0] 7 | vpc_id = aws_vpc.this.id 8 | cidr_block = "10.103.98.0/24" 9 | } 10 | 11 | resource "aws_subnet" "public" { 12 | availability_zone = data.aws_availability_zones.available.names[0] 13 | vpc_id = aws_vpc.this.id 14 | cidr_block = "10.103.99.0/24" 15 | } 16 | 17 | 18 | resource "aws_route_table" "public_rt" { 19 | vpc_id = aws_vpc.this.id 20 | } 21 | 22 | resource "aws_route_table_association" "rt_assocations_public" { 23 | subnet_id = aws_subnet.public.id 24 | route_table_id = aws_route_table.public_rt.id 25 | } 26 | 27 | resource "aws_route_table" "primary_rt" { 28 | vpc_id = aws_vpc.this.id 29 | } 30 | 31 | resource "aws_route_table_association" "rt_assocations_primary" { 32 | subnet_id = aws_subnet.primary.id 33 | route_table_id = aws_route_table.primary_rt.id 34 | } 35 | 36 | 37 | ##### 38 | # Gateways 39 | ##### 40 | resource "aws_internet_gateway" "igw" { 41 | vpc_id = aws_vpc.this.id 42 | } 43 | 44 | # NAT Gateways 45 | resource "aws_eip" "nat_gw_eip" { 46 | domain = "vpc" 47 | } 48 | 49 | resource "aws_nat_gateway" "nat_gw" { 50 | allocation_id = aws_eip.nat_gw_eip.id 
51 | subnet_id = aws_subnet.public.id 52 | } 53 | 54 | resource "aws_route" "to_igw" { 55 | route_table_id = aws_route_table.public_rt.id 56 | destination_cidr_block = "0.0.0.0/0" 57 | gateway_id = aws_internet_gateway.igw.id 58 | } 59 | 60 | resource "aws_route" "primary_to_ngw" { 61 | route_table_id = aws_route_table.primary_rt.id 62 | destination_cidr_block = "0.0.0.0/0" 63 | nat_gateway_id = aws_nat_gateway.nat_gw.id 64 | } 65 | -------------------------------------------------------------------------------- /examples/instance-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_running_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_instances.py running \ 5 | ${var.instance_1_to_scheduled_id} \ 6 | ${var.instance_2_to_scheduled_id} \ 7 | ${var.instance_3_to_scheduled_id} 8 | EOT 9 | } 10 | } 11 | 12 | resource "aws_lambda_invocation" "this" { 13 | function_name = var.lambda_stop_name 14 | 15 | input = jsonencode({ 16 | key1 = "value1" 17 | key2 = "value2" 18 | }) 19 | 20 | depends_on = [null_resource.wait_running_state] 21 | } 22 | 23 | resource "null_resource" "wait_stopped_state" { 24 | provisioner "local-exec" { 25 | command = <<-EOT 26 | python3 ${path.module}/wait_instances.py stopped \ 27 | ${var.instance_1_to_scheduled_id} \ 28 | ${var.instance_2_to_scheduled_id} \ 29 | ${var.instance_3_to_scheduled_id} 30 | EOT 31 | } 32 | 33 | depends_on = [aws_lambda_invocation.this] 34 | } 35 | 36 | data "aws_instance" "instance_1_to_scheduled_id" { 37 | instance_id = var.instance_1_to_scheduled_id 38 | 39 | depends_on = [null_resource.wait_stopped_state] 40 | } 41 | 42 | data "aws_instance" "instance_2_to_scheduled_id" { 43 | instance_id = var.instance_2_to_scheduled_id 44 | 45 | depends_on = [null_resource.wait_stopped_state] 46 | } 47 | 48 | data "aws_instance" "instance_3_to_scheduled_id" { 49 | instance_id = 
var.instance_3_to_scheduled_id 50 | 51 | depends_on = [null_resource.wait_stopped_state] 52 | } 53 | 54 | data "aws_instance" "instance_1_not_to_scheduled_id" { 55 | instance_id = var.instance_1_not_to_scheduled_id 56 | 57 | depends_on = [null_resource.wait_stopped_state] 58 | } 59 | 60 | data "aws_instance" "instance_2_not_to_scheduled_id" { 61 | instance_id = var.instance_2_not_to_scheduled_id 62 | 63 | depends_on = [null_resource.wait_stopped_state] 64 | } 65 | -------------------------------------------------------------------------------- /examples/rds-scheduler/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "aurora_scheduled_cpu" { 2 | alarm_name = "aurora-cluster-with-tag-highCPUUtilization" 3 | comparison_operator = "GreaterThanThreshold" 4 | evaluation_periods = "2" 5 | metric_name = "CPUUtilization" 6 | namespace = "AWS/RDS" 7 | period = "60" 8 | statistic = "Average" 9 | threshold = "90" 10 | alarm_description = "Average database CPU utilization is too high." 11 | dimensions = { 12 | DBClusterIdentifier = aws_rds_cluster.aurora_scheduled.id 13 | } 14 | 15 | tags = { 16 | tostop = "true" 17 | } 18 | } 19 | 20 | resource "aws_cloudwatch_metric_alarm" "mariadb_scheduled_cpu" { 21 | alarm_name = "mariadbwithtag-highCPUUtilization" 22 | comparison_operator = "GreaterThanThreshold" 23 | evaluation_periods = "2" 24 | metric_name = "CPUUtilization" 25 | namespace = "AWS/RDS" 26 | period = "60" 27 | statistic = "Average" 28 | threshold = "90" 29 | alarm_description = "Average database CPU utilization is too high." 
30 | dimensions = { 31 | DBInstanceIdentifier = aws_db_instance.mariadb_scheduled.id 32 | } 33 | 34 | tags = { 35 | tostop = "true" 36 | } 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "mysql_not_scheduled_cpu" { 40 | alarm_name = "mysqlwithouttag-highCPUUtilization" 41 | comparison_operator = "GreaterThanThreshold" 42 | evaluation_periods = "2" 43 | metric_name = "CPUUtilization" 44 | namespace = "AWS/RDS" 45 | period = "60" 46 | statistic = "Average" 47 | threshold = "90" 48 | alarm_description = "Average database CPU utilization is too high." 49 | dimensions = { 50 | DBInstanceIdentifier = aws_db_instance.mysql_not_scheduled.id 51 | } 52 | 53 | tags = { 54 | tostop = "false" 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: 'CHANGELOG.md' 2 | repos: 3 | - repo: https://github.com/antonbabenko/pre-commit-terraform 4 | rev: v1.99.4 5 | hooks: 6 | - id: terraform_fmt 7 | - id: terraform_tflint 8 | args: 9 | - '--args=--only=terraform_deprecated_interpolation' 10 | - '--args=--only=terraform_deprecated_index' 11 | - '--args=--only=terraform_unused_declarations' 12 | - '--args=--only=terraform_comment_syntax' 13 | - '--args=--only=terraform_documented_outputs' 14 | - '--args=--only=terraform_documented_variables' 15 | - '--args=--only=terraform_typed_variables' 16 | - '--args=--only=terraform_module_pinned_source' 17 | - '--args=--only=terraform_naming_convention' 18 | - '--args=--only=terraform_required_version' 19 | - '--args=--only=terraform_required_providers' 20 | - '--args=--only=terraform_standard_module_structure' 21 | - '--args=--only=terraform_workspace_remote' 22 | - id: terraform_validate 23 | - repo: https://github.com/pre-commit/pre-commit-hooks 24 | rev: v5.0.0 25 | hooks: 26 | - id: check-merge-conflict 27 | - id: end-of-file-fixer 28 | - id: trailing-whitespace 29 | - repo: 
output "lambda_iam_role_name" {
  description = "The name of the IAM role used by Lambda function"
  # An IAM role ARN has the form "arn:aws:iam::<account>:role[/<path>]/<name>".
  # split("/", arn)[1] returns the FIRST path segment, which is wrong for roles
  # created with a path; the role name is always the LAST "/"-separated segment.
  value = var.custom_iam_role_arn == null ? aws_iam_role.this[0].name : element(split("/", var.custom_iam_role_arn), length(split("/", var.custom_iam_role_arn)) - 1)
}
300 7 | autoscaling_group_name = aws_autoscaling_group.scheduled[count.index].name 8 | } 9 | 10 | resource "aws_cloudwatch_metric_alarm" "scheduled" { 11 | count = 3 12 | alarm_name = "bar-with-tag-${count.index}" 13 | namespace = "AWS/AutoScaling" 14 | comparison_operator = "GreaterThanThreshold" 15 | evaluation_periods = "2" 16 | metric_name = "CPUUtilization" 17 | period = "60" 18 | statistic = "Average" 19 | threshold = "90" 20 | alarm_actions = [aws_autoscaling_policy.scheduled[count.index].arn] 21 | dimensions = { 22 | AutoScalingGroupName = aws_autoscaling_group.scheduled[count.index].id 23 | } 24 | 25 | tags = { 26 | tostop = "true" 27 | } 28 | } 29 | 30 | resource "aws_autoscaling_policy" "not_scheduled" { 31 | count = 2 32 | name = "foo-without-tag-${count.index}" 33 | scaling_adjustment = 1 34 | adjustment_type = "ChangeInCapacity" 35 | cooldown = 300 36 | autoscaling_group_name = aws_autoscaling_group.not_scheduled[count.index].name 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "not_scheduled" { 40 | count = 2 41 | alarm_name = "foo-without-tag-${count.index}" 42 | namespace = "AWS/AutoScaling" 43 | comparison_operator = "GreaterThanThreshold" 44 | evaluation_periods = "2" 45 | metric_name = "CPUUtilization" 46 | period = "60" 47 | statistic = "Average" 48 | threshold = "90" 49 | alarm_actions = [aws_autoscaling_policy.not_scheduled[count.index].arn] 50 | dimensions = { 51 | AutoScalingGroupName = aws_autoscaling_group.not_scheduled[count.index].id 52 | } 53 | 54 | tags = { 55 | tostop = "false" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/cloudwatch_alarm.tf: -------------------------------------------------------------------------------- 1 | resource "aws_autoscaling_policy" "scheduled" { 2 | count = 3 3 | name = "bar-with-tag-${count.index}" 4 | scaling_adjustment = 1 5 | adjustment_type = "ChangeInCapacity" 6 | cooldown = 300 7 | 
autoscaling_group_name = aws_autoscaling_group.scheduled[count.index].name 8 | } 9 | 10 | resource "aws_cloudwatch_metric_alarm" "scheduled" { 11 | count = 3 12 | alarm_name = "bar-with-tag-${count.index}" 13 | namespace = "AWS/AutoScaling" 14 | comparison_operator = "GreaterThanThreshold" 15 | evaluation_periods = "2" 16 | metric_name = "CPUUtilization" 17 | period = "60" 18 | statistic = "Average" 19 | threshold = "90" 20 | alarm_actions = [aws_autoscaling_policy.scheduled[count.index].arn] 21 | dimensions = { 22 | AutoScalingGroupName = aws_autoscaling_group.scheduled[count.index].id 23 | } 24 | 25 | tags = { 26 | tostop = "true" 27 | } 28 | } 29 | 30 | resource "aws_autoscaling_policy" "not_scheduled" { 31 | count = 2 32 | name = "foo-without-tag-${count.index}" 33 | scaling_adjustment = 1 34 | adjustment_type = "ChangeInCapacity" 35 | cooldown = 300 36 | autoscaling_group_name = aws_autoscaling_group.not_scheduled[count.index].name 37 | } 38 | 39 | resource "aws_cloudwatch_metric_alarm" "not_scheduled" { 40 | count = 2 41 | alarm_name = "foo-without-tag-${count.index}" 42 | namespace = "AWS/AutoScaling" 43 | comparison_operator = "GreaterThanThreshold" 44 | evaluation_periods = "2" 45 | metric_name = "CPUUtilization" 46 | period = "60" 47 | statistic = "Average" 48 | threshold = "90" 49 | alarm_actions = [aws_autoscaling_policy.not_scheduled[count.index].arn] 50 | dimensions = { 51 | AutoScalingGroupName = aws_autoscaling_group.not_scheduled[count.index].id 52 | } 53 | 54 | tags = { 55 | tostop = "false" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /package/scheduler/filter_resources_by_tags.py: -------------------------------------------------------------------------------- 1 | """Filter aws resouces with tags.""" 2 | 3 | from typing import Iterator 4 | 5 | import boto3 6 | 7 | 8 | class FilterByTags: 9 | """Abstract Filter aws resources by tags in a class.""" 10 | 11 | def __init__(self, 
region_name=None) -> None: 12 | """Initialize resourcegroupstaggingapi client.""" 13 | if region_name: 14 | self.rgta = boto3.client( 15 | "resourcegroupstaggingapi", region_name=region_name 16 | ) 17 | else: 18 | self.rgta = boto3.client("resourcegroupstaggingapi") 19 | 20 | def get_resources(self, resource_type, aws_tags) -> Iterator[str]: 21 | """Filter aws resources using resource type and defined tags. 22 | 23 | Returns all the tagged defined resources that are located in 24 | the specified Region for the AWS account. 25 | 26 | :param str resource_type: 27 | The constraints on the resources that you want returned. 28 | The format of each resource type is service[:resourceType] . 29 | For example, specifying a resource type of ec2 returns all 30 | Amazon EC2 resources (which includes EC2 instances). 31 | Specifying a resource type of ec2:instance returns only 32 | EC2 instances. 33 | :param list[map] aws_tags: 34 | A list of TagFilters (keys and values). 35 | Each TagFilter specified must contain a key with values 36 | as optional. 
For example: 37 | [ 38 | { 39 | 'Key': 'string', 40 | 'Values': [ 41 | 'string', 42 | ] 43 | }, 44 | ] 45 | :yield Iterator[str]: 46 | The ids of the resources 47 | """ 48 | paginator = self.rgta.get_paginator("get_resources") 49 | page_iterator = paginator.paginate( 50 | TagFilters=aws_tags, ResourceTypeFilters=[resource_type] 51 | ) 52 | for page in page_iterator: 53 | for resource_tag_map in page["ResourceTagMappingList"]: 54 | yield resource_tag_map["ResourceARN"] 55 | -------------------------------------------------------------------------------- /examples/ecs-scheduler/ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ecs_cluster" "this" { 2 | name = "test-ecs-cluster-${random_pet.suffix.id}" 3 | 4 | setting { 5 | name = "containerInsights" 6 | value = "disabled" 7 | } 8 | } 9 | 10 | resource "aws_ecs_service" "to_scheduled" { 11 | name = "test-to-stop-${random_pet.suffix.id}" 12 | cluster = aws_ecs_cluster.this.id 13 | task_definition = aws_ecs_task_definition.this.arn 14 | desired_count = 1 15 | launch_type = "FARGATE" 16 | 17 | network_configuration { 18 | subnets = [aws_subnet.primary.id] 19 | } 20 | 21 | tags = { 22 | tostop = "true", 23 | } 24 | lifecycle { 25 | ignore_changes = [ 26 | desired_count, 27 | tags 28 | ] 29 | } 30 | } 31 | 32 | resource "aws_ecs_service" "not_to_scheduled" { 33 | name = "test-not-to-stop-${random_pet.suffix.id}" 34 | cluster = aws_ecs_cluster.this.id 35 | task_definition = aws_ecs_task_definition.this.arn 36 | desired_count = 1 37 | launch_type = "FARGATE" 38 | 39 | network_configuration { 40 | subnets = [aws_subnet.primary.id] 41 | } 42 | 43 | tags = { 44 | tostop = "false", 45 | } 46 | lifecycle { 47 | ignore_changes = [ 48 | desired_count, 49 | tags 50 | ] 51 | } 52 | } 53 | 54 | resource "aws_ecs_task_definition" "this" { 55 | family = "test-${random_pet.suffix.id}" 56 | 57 | # Refer to 
resource "null_resource" "wait_transfer_server_offline_state" {
  provisioner "local-exec" {
    # Removed the stray trailing "\" after the server id: it was a shell line
    # continuation with nothing to continue before the heredoc terminator.
    command = <<-EOT
      python3 ${path.module}/wait_transfer_status.py OFFLINE \
        ${var.transfer_server_to_scheduled_id}
    EOT
  }

  # Only start polling for OFFLINE after the stop lambda has been invoked;
  # without this dependency Terraform may run the poller concurrently with
  # (or before) the invocation. Mirrors the instance-scheduler example,
  # whose stopped-state waiter declares depends_on on its invocation.
  depends_on = [aws_lambda_invocation.stop_transfer]
}
${path.module}/transfer_server_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_transfer_server_offline_state] 42 | } 43 | 44 | data "local_file" "transfer_server_to_scheduled" { 45 | filename = "${path.module}/transfer_server_to_scheduled.state" 46 | 47 | depends_on = [null_resource.transfer_server_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "transfer_server_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws transfer describe-server \ 54 | --server-id ${var.transfer_server_not_scheduled_id} \ 55 | --query 'Server.State' \ 56 | --output text > ${path.module}/transfer_server_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_transfer_server_offline_state] 61 | } 62 | 63 | data "local_file" "transfer_server_not_scheduled" { 64 | filename = "${path.module}/transfer_server_not_scheduled.state" 65 | 66 | depends_on = [null_resource.transfer_server_not_scheduled] 67 | } 68 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_documentdb_cluster_available_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_documentdb_status.py available \ 5 | ${var.docdb_cluster_to_scheduled_name} \ 6 | ${var.docdb_cluster_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_documentdb" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_documentdb_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_documentdb_cluster_stopped_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_documentdb_status.py stopped \ 26 | 
resource "null_resource" "wait_documentdb_cluster_stopped_state" {
  provisioner "local-exec" {
    # Removed the stray trailing "\" after the cluster name: it was a shell
    # line continuation with nothing to continue before the heredoc terminator.
    command = <<-EOT
      python3 ${path.module}/wait_documentdb_status.py stopped \
        ${var.docdb_cluster_to_scheduled_name}
    EOT
  }

  # Only start polling for "stopped" after the stop lambda has been invoked;
  # without this dependency Terraform may run the poller concurrently with
  # (or before) the invocation. Mirrors the instance-scheduler example,
  # whose stopped-state waiter declares depends_on on its invocation.
  depends_on = [aws_lambda_invocation.stop_documentdb]
}
"value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_neptune_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_neptune_cluster_stopped_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_neptune_status.py stopped \ 26 | ${var.neptune_cluster_to_scheduled_name} 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "neptune_cluster_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws neptune describe-db-clusters \ 35 | --db-cluster-identifier ${var.neptune_cluster_to_scheduled_name} \ 36 | --query 'DBClusters[0].Status' \ 37 | --output text > ${path.module}/neptune_cluster_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_neptune_cluster_stopped_state] 42 | } 43 | 44 | data "local_file" "neptune_cluster_to_scheduled" { 45 | filename = "${path.module}/neptune_cluster_to_scheduled.state" 46 | 47 | depends_on = [null_resource.neptune_cluster_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "neptune_cluster_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws neptune describe-db-clusters \ 54 | --db-cluster-identifier ${var.neptune_cluster_not_scheduled_name} \ 55 | --query 'DBClusters[0].Status' \ 56 | --output text > ${path.module}/neptune_cluster_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_neptune_cluster_stopped_state] 61 | } 62 | 63 | data "local_file" "neptune_cluster_not_scheduled" { 64 | filename = "${path.module}/neptune_cluster_not_scheduled.state" 65 | 66 | depends_on = [null_resource.neptune_cluster_not_scheduled] 67 | } 68 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_redshift_cluster_available_state" { 2 | provisioner "local-exec" { 3 | 
command = <<-EOT 4 | python3 ${path.module}/wait_redshift_status.py Available \ 5 | ${var.redshift_cluster_to_scheduled_name} \ 6 | ${var.redshift_cluster_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "aws_lambda_invocation" "stop_redshift" { 12 | function_name = var.lambda_stop_name 13 | 14 | input = jsonencode({ 15 | key1 = "value1" 16 | key2 = "value2" 17 | }) 18 | 19 | depends_on = [null_resource.wait_redshift_cluster_available_state] 20 | } 21 | 22 | resource "null_resource" "wait_redshift_cluster_paused_state" { 23 | provisioner "local-exec" { 24 | command = <<-EOT 25 | python3 ${path.module}/wait_redshift_status.py Paused \ 26 | ${var.redshift_cluster_to_scheduled_name} \ 27 | EOT 28 | } 29 | } 30 | 31 | resource "null_resource" "redshift_cluster_to_scheduled" { 32 | provisioner "local-exec" { 33 | command = <<-EOT 34 | aws redshift describe-clusters \ 35 | --cluster-identifier ${var.redshift_cluster_to_scheduled_name} \ 36 | --query 'Clusters[0].ClusterStatus' \ 37 | --output text > ${path.module}/redshift_cluster_to_scheduled.state 38 | EOT 39 | } 40 | 41 | depends_on = [null_resource.wait_redshift_cluster_paused_state] 42 | } 43 | 44 | data "local_file" "redshift_cluster_to_scheduled" { 45 | filename = "${path.module}/redshift_cluster_to_scheduled.state" 46 | 47 | depends_on = [null_resource.redshift_cluster_to_scheduled] 48 | } 49 | 50 | resource "null_resource" "redshift_cluster_not_scheduled" { 51 | provisioner "local-exec" { 52 | command = <<-EOT 53 | aws redshift describe-clusters \ 54 | --cluster-identifier ${var.redshift_cluster_not_scheduled_name} \ 55 | --query 'Clusters[0].ClusterStatus' \ 56 | --output text > ${path.module}/redshift_cluster_not_scheduled.state 57 | EOT 58 | } 59 | 60 | depends_on = [null_resource.wait_redshift_cluster_paused_state] 61 | } 62 | 63 | data "local_file" "redshift_cluster_not_scheduled" { 64 | filename = "${path.module}/redshift_cluster_not_scheduled.state" 65 | 66 | depends_on = 
def wait_for_rds_cluster_status(
    cluster_identifiers: List[str], desired_status: str
) -> None:
    """Wait for RDS clusters to reach the desired status.

    Polls ``describe_db_clusters`` every 10 seconds until every listed
    cluster reports ``desired_status`` or the 30-minute timeout expires.

    Args:
        cluster_identifiers: List of RDS cluster identifiers.
        desired_status: Desired status to wait for (e.g. 'available', 'stopped').

    Exits the process with status 1 on timeout or on an AWS API error.
    """
    if not cluster_identifiers:
        return

    rds = boto3.client("rds")
    start_time = time.time()
    timeout = 1800  # 30 minutes timeout

    while True:
        try:
            if time.time() - start_time > timeout:
                print(
                    f"Timeout reached after {timeout} seconds. "
                    "Some RDS clusters may not have reached the desired status."
                )
                sys.exit(1)

            all_clusters_in_desired_state = True
            for cluster_id in cluster_identifiers:
                response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id)
                current_status = response["DBClusters"][0]["Status"]

                if current_status != desired_status:
                    # One mismatch is enough; no need to query the rest.
                    all_clusters_in_desired_state = False
                    break

            if all_clusters_in_desired_state:
                print(f"All RDS clusters are now {desired_status}")
                return

            print(f"Waiting for RDS clusters to be {desired_status}...")
            time.sleep(10)  # Wait 10 seconds before checking again

        except ClientError as e:
            print(f"Error checking RDS cluster status: {e}")
            sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        # Usage string restored: the status/first-id placeholders were lost
        # (angle brackets stripped during extraction).
        print(
            "Usage: python wait_rds_cluster.py "
            "<desired_status> <cluster_id1> [cluster_id2 ...]"
        )
        sys.exit(1)

    desired_status = sys.argv[1]
    cluster_identifiers = sys.argv[2:]

    wait_for_rds_cluster_status(cluster_identifiers, desired_status)
def wait_for_rds_instance_status(
    instance_identifiers: List[str], desired_status: str
) -> None:
    """Wait for RDS instances to reach the desired status.

    Polls ``describe_db_instances`` every 10 seconds until every listed
    instance reports ``desired_status`` or the 30-minute timeout expires.

    Args:
        instance_identifiers: List of RDS instance identifiers.
        desired_status: Desired status to wait for (e.g. 'available', 'stopped').

    Exits the process with status 1 on timeout or on an AWS API error.
    """
    if not instance_identifiers:
        return

    rds = boto3.client("rds")
    start_time = time.time()
    timeout = 1800  # 30 minutes timeout

    while True:
        try:
            if time.time() - start_time > timeout:
                print(
                    f"Timeout reached after {timeout} seconds. "
                    "Some RDS instances may not have reached the desired status."
                )
                sys.exit(1)

            all_instances_in_desired_state = True
            for instance_id in instance_identifiers:
                response = rds.describe_db_instances(DBInstanceIdentifier=instance_id)
                current_status = response["DBInstances"][0]["DBInstanceStatus"]

                if current_status != desired_status:
                    # One mismatch is enough; no need to query the rest.
                    all_instances_in_desired_state = False
                    break

            if all_instances_in_desired_state:
                print(f"All RDS instances are now {desired_status}")
                return

            print(f"Waiting for RDS instances to be {desired_status}...")
            time.sleep(10)  # Wait 10 seconds before checking again

        except ClientError as e:
            print(f"Error checking RDS status: {e}")
            sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        # Usage string restored: the status/first-id placeholders were lost
        # (angle brackets stripped during extraction).
        print(
            "Usage: python wait_rds_instance.py "
            "<desired_status> <instance_id1> [instance_id2 ...]"
        )
        sys.exit(1)

    desired_status = sys.argv[1]
    instance_identifiers = sys.argv[2:]

    wait_for_rds_instance_status(instance_identifiers, desired_status)
| import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_redshift_cluster_status( 13 | desired_status: str, cluster_identifiers: List[str] 14 | ) -> None: 15 | """Wait for Redshift clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of Redshift cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 'available', 'paused') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | redshift = boto3.client("redshift") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds." 33 | "Some Redshift clusters may not have reached the desired status." 34 | ) 35 | sys.exit(1) 36 | 37 | all_clusters_in_desired_state = True 38 | for cluster_id in cluster_identifiers: 39 | response = redshift.describe_clusters(ClusterIdentifier=cluster_id) 40 | current_availability_status = response["Clusters"][0][ 41 | "ClusterAvailabilityStatus" 42 | ] 43 | 44 | if current_availability_status != desired_status: 45 | all_clusters_in_desired_state = False 46 | break 47 | 48 | if all_clusters_in_desired_state: 49 | print(f"All Redshift clusters are now {desired_status}") 50 | return 51 | 52 | print(f"Waiting for Redshift clusters to be {desired_status}...") 53 | time.sleep(10) # Wait 10 seconds before checking again 54 | 55 | except ClientError as e: 56 | print(f"Error checking Redshift status: {e}") 57 | sys.exit(1) 58 | 59 | 60 | if __name__ == "__main__": 61 | if len(sys.argv) < 3: 62 | print( 63 | "Usage: python wait_redshift_status.py" 64 | " [cluster_id2 ...]" 65 | ) 66 | sys.exit(1) 67 | 68 | target_status = sys.argv[1] 69 | target_clusters = sys.argv[2:] 70 | 71 | wait_for_redshift_cluster_status(target_status, target_clusters) 72 | -------------------------------------------------------------------------------- /main.tf: 
-------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | # Convert *.py to .zip because AWS Lambda need .zip 4 | data "archive_file" "this" { 5 | type = "zip" 6 | source_dir = "${path.module}/package/" 7 | output_path = "${path.module}/aws-stop-start-resources.zip" 8 | } 9 | 10 | # Create Lambda function for stop or start aws resources 11 | resource "aws_lambda_function" "this" { 12 | filename = data.archive_file.this.output_path 13 | source_code_hash = data.archive_file.this.output_base64sha256 14 | function_name = var.name 15 | role = var.custom_iam_role_arn == null ? aws_iam_role.this[0].arn : var.custom_iam_role_arn 16 | handler = "scheduler.main.lambda_handler" 17 | runtime = var.runtime 18 | timeout = "600" 19 | kms_key_arn = var.kms_key_arn == null ? "" : var.kms_key_arn 20 | 21 | environment { 22 | variables = { 23 | AWS_REGIONS = var.aws_regions == null ? data.aws_region.current.name : join(", ", var.aws_regions) 24 | SCHEDULE_ACTION = var.schedule_action 25 | TAG_KEY = local.scheduler_tag["key"] 26 | TAG_VALUE = local.scheduler_tag["value"] 27 | DOCUMENTDB_SCHEDULE = tostring(var.documentdb_schedule) 28 | EC2_SCHEDULE = tostring(var.ec2_schedule) 29 | ECS_SCHEDULE = tostring(var.ecs_schedule) 30 | RDS_SCHEDULE = tostring(var.rds_schedule) 31 | REDSHIFT_SCHEDULE = tostring(var.redshift_schedule) 32 | AUTOSCALING_SCHEDULE = tostring(var.autoscaling_schedule) 33 | AUTOSCALING_TERMINATE_INSTANCES = tostring(var.autoscaling_terminate_instances) 34 | CLOUDWATCH_ALARM_SCHEDULE = tostring(var.cloudwatch_alarm_schedule) 35 | TRANSFER_SCHEDULE = tostring(var.transfer_schedule) 36 | SCHEDULER_EXCLUDED_DATES = jsonencode(var.scheduler_excluded_dates) 37 | } 38 | } 39 | 40 | tags = var.tags 41 | } 42 | 43 | resource "aws_scheduler_schedule" "this" { 44 | name = "trigger-lambda-scheduler-${var.name}" 45 | description = "Trigger lambda scheduler" 46 | schedule_expression = var.schedule_expression 47 | 
class TransferScheduler:
    """Start/stop scheduler for AWS Transfer (SFTP) servers.

    Servers are selected by tag via the Resource Groups Tagging API and
    then started or stopped through the Transfer Family API.
    """

    def __init__(self, region_name: Optional[str] = None) -> None:
        """Set up the Transfer client and the tag-filtering helper.

        Args:
            region_name: AWS region name. Falls back to the default
                configuration chain when omitted.
        """
        client_kwargs = {"region_name": region_name} if region_name else {}
        self.transfer = boto3.client("transfer", **client_kwargs)
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Stop every Transfer server carrying the given tags.

        Args:
            aws_tags: Tag filters, e.g.
                [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_servers(aws_tags, action="stop")

    def start(self, aws_tags: List[Dict]) -> None:
        """Start every Transfer server carrying the given tags.

        Args:
            aws_tags: Tag filters, e.g.
                [{'Key': 'Environment', 'Values': ['Dev']}]
        """
        self._process_servers(aws_tags, action="start")

    def _process_servers(
        self, aws_tags: List[Dict], action: Literal["start", "stop"]
    ) -> None:
        """Apply ``action`` to each tagged Transfer server.

        Args:
            aws_tags: Tag filters used to select servers.
            action: Either "start" or "stop".
        """
        matching_arns = self.tag_api.get_resources("transfer:server", aws_tags)
        for server_arn in matching_arns:
            # The server ID is the final path segment of the ARN.
            server_id = server_arn.rsplit("/", 1)[-1]
            try:
                if action == "start":
                    self.transfer.start_server(ServerId=server_id)
                    print(f"Start Transfer server {server_id}")
                else:
                    self.transfer.stop_server(ServerId=server_id)
                    print(f"Stop Transfer server {server_id}")
            except ClientError as exc:
                transfer_exception("Transfer server", server_id, exc)
'running', 'stopped') 20 | region: AWS region name 21 | """ 22 | if not instance_ids: 23 | return 24 | 25 | ec2 = boto3.client("ec2", region_name=region) if region else boto3.client("ec2") 26 | start_time = time.time() 27 | timeout = 600 # 10 minutes timeout 28 | 29 | while True: 30 | try: 31 | # Check if timeout has been reached 32 | if time.time() - start_time > timeout: 33 | print( 34 | f"Timeout reached after {timeout} seconds. Some instances may not have reached the desired status." 35 | ) 36 | sys.exit(1) 37 | 38 | response = ec2.describe_instances(InstanceIds=instance_ids) 39 | all_instances_in_desired_state = True 40 | 41 | for reservation in response["Reservations"]: 42 | for instance in reservation["Instances"]: 43 | current_state = instance["State"]["Name"] 44 | if current_state != desired_status: 45 | all_instances_in_desired_state = False 46 | break 47 | 48 | if not all_instances_in_desired_state: 49 | break 50 | 51 | if all_instances_in_desired_state: 52 | print(f"All instances are now {desired_status}") 53 | return 54 | 55 | print(f"Waiting for instances to be {desired_status}...") 56 | time.sleep(10) # Wait 10 seconds before checking again 57 | 58 | except ClientError as e: 59 | print(f"Error checking instance status: {e}") 60 | sys.exit(1) 61 | 62 | 63 | if __name__ == "__main__": 64 | if len(sys.argv) < 3: 65 | print( 66 | "Usage: python wait_instances.py [instance_id2 ...]" 67 | ) 68 | sys.exit(1) 69 | 70 | desired_status = sys.argv[1] 71 | instance_ids = sys.argv[2:] 72 | 73 | wait_for_instances_status(instance_ids, desired_status) 74 | -------------------------------------------------------------------------------- /examples/instance-scheduler/test-execution/wait_instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS instances status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions 
import ClientError 10 | 11 | 12 | def wait_for_instances_status( 13 | instance_ids: List[str], desired_status: str, region: str = None 14 | ) -> None: 15 | """Wait for instances to reach desired status. 16 | 17 | Args: 18 | instance_ids: List of instance IDs to check 19 | desired_status: Desired status to wait for (e.g. 'running', 'stopped') 20 | region: AWS region name 21 | """ 22 | if not instance_ids: 23 | return 24 | 25 | ec2 = boto3.client("ec2", region_name=region) if region else boto3.client("ec2") 26 | start_time = time.time() 27 | timeout = 600 # 10 minutes timeout 28 | 29 | while True: 30 | try: 31 | # Check if timeout has been reached 32 | if time.time() - start_time > timeout: 33 | print( 34 | f"Timeout reached after {timeout} seconds. Some instances may not have reached the desired status." 35 | ) 36 | sys.exit(1) 37 | 38 | response = ec2.describe_instances(InstanceIds=instance_ids) 39 | all_instances_in_desired_state = True 40 | 41 | for reservation in response["Reservations"]: 42 | for instance in reservation["Instances"]: 43 | current_state = instance["State"]["Name"] 44 | if current_state != desired_status: 45 | all_instances_in_desired_state = False 46 | break 47 | 48 | if not all_instances_in_desired_state: 49 | break 50 | 51 | if all_instances_in_desired_state: 52 | print(f"All instances are now {desired_status}") 53 | return 54 | 55 | print(f"Waiting for instances to be {desired_status}...") 56 | time.sleep(10) # Wait 10 seconds before checking again 57 | 58 | except ClientError as e: 59 | print(f"Error checking instance status: {e}") 60 | sys.exit(1) 61 | 62 | 63 | if __name__ == "__main__": 64 | if len(sys.argv) < 3: 65 | print( 66 | "Usage: python wait_instances.py [instance_id2 ...]" 67 | ) 68 | sys.exit(1) 69 | 70 | desired_status = sys.argv[1] 71 | instance_ids = sys.argv[2:] 72 | 73 | wait_for_instances_status(instance_ids, desired_status) 74 | -------------------------------------------------------------------------------- 
class RedshiftScheduler:
    """Pause/resume scheduler for Redshift clusters selected by tag."""

    def __init__(self, region_name=None) -> None:
        """Set up the Redshift client and the tag-filtering helper.

        :param str region_name:
            AWS region name; default configuration chain when omitted.
        """
        client_kwargs = {"region_name": region_name} if region_name else {}
        self.redshift = boto3.client("redshift", **client_kwargs)
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Pause every Redshift cluster carrying the given tags.

        :param list[map] aws_tags:
            Tag filters, for example:
            [{'Key': 'string', 'Values': ['string']}]
        """
        self._toggle_clusters(aws_tags, pause=True)

    def start(self, aws_tags: List[Dict]) -> None:
        """Resume every Redshift cluster carrying the given tags.

        :param list[map] aws_tags:
            Tag filters, for example:
            [{'Key': 'string', 'Values': ['string']}]
        """
        self._toggle_clusters(aws_tags, pause=False)

    def _toggle_clusters(self, aws_tags: List[Dict], pause: bool) -> None:
        """Pause or resume each tagged cluster, logging every action.

        :param list[map] aws_tags: Tag filters used to select clusters.
        :param bool pause: True to pause, False to resume.
        """
        verb = "Stop" if pause else "Start"
        for cluster_arn in self.tag_api.get_resources("redshift:cluster", aws_tags):
            # The cluster identifier is the last colon-separated ARN field.
            cluster_id = cluster_arn.rpartition(":")[2]
            try:
                if pause:
                    self.redshift.pause_cluster(ClusterIdentifier=cluster_id)
                else:
                    self.redshift.resume_cluster(ClusterIdentifier=cluster_id)
                print(f"{verb} redshift cluster {cluster_id}")
            except ClientError as exc:
                redshift_exception("redshift cluster", cluster_id, exc)
30 | For example: 31 | [ 32 | { 33 | 'Key': 'string', 34 | 'Values': [ 35 | 'string', 36 | ] 37 | } 38 | ] 39 | """ 40 | for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags): 41 | cluster_id = cluster_arn.split(":")[-1] 42 | try: 43 | self.documentdb.stop_db_cluster(DBClusterIdentifier=cluster_id) 44 | print(f"Stop documentdb cluster {cluster_id}") 45 | except ClientError as exc: 46 | documentdb_exception("documentdb cluster", cluster_id, exc) 47 | 48 | def start(self, aws_tags: List[Dict]) -> None: 49 | """Aws documentdb cluster start function. 50 | 51 | Start documentdb clusters with defined tags. 52 | 53 | :param list[map] aws_tags: 54 | Aws tags to use for filter resources. 55 | For example: 56 | [ 57 | { 58 | 'Key': 'string', 59 | 'Values': [ 60 | 'string', 61 | ] 62 | } 63 | ] 64 | """ 65 | for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags): 66 | cluster_id = cluster_arn.split(":")[-1] 67 | try: 68 | self.documentdb.start_db_cluster(DBClusterIdentifier=cluster_id) 69 | print(f"Start documentdb cluster {cluster_id}") 70 | except ClientError as exc: 71 | documentdb_exception("documentdb cluster", cluster_id, exc) 72 | -------------------------------------------------------------------------------- /package/scheduler/main.py: -------------------------------------------------------------------------------- 1 | """This script stop and start aws resources.""" 2 | 3 | import json 4 | import os 5 | 6 | from .autoscaling_handler import AutoscalingScheduler 7 | from .cloudwatch_handler import CloudWatchAlarmScheduler 8 | from .documentdb_handler import DocumentDBScheduler 9 | from .ecs_handler import EcsScheduler 10 | from .instance_handler import InstanceScheduler 11 | from .rds_handler import RdsScheduler 12 | from .redshift_handler import RedshiftScheduler 13 | from .transfer_handler import TransferScheduler 14 | from .utils import is_date_excluded, strtobool 15 | 16 | 17 | def lambda_handler(event, context): 18 | """Main 
function entrypoint for lambda. 19 | 20 | Stop and start AWS resources: 21 | - rds instances 22 | - rds aurora clusters 23 | - instance ec2 24 | - ecs services 25 | - redshift clusters 26 | - transfer servers 27 | 28 | Suspend and resume AWS resources: 29 | - ec2 autoscaling groups 30 | 31 | Terminate spot instances (spot instance cannot be stopped by a user) 32 | """ 33 | # Retrieve variables from aws lambda ENVIRONMENT 34 | schedule_action = os.getenv("SCHEDULE_ACTION") 35 | aws_regions = os.getenv("AWS_REGIONS").replace(" ", "").split(",") 36 | format_tags = [{"Key": os.getenv("TAG_KEY"), "Values": [os.getenv("TAG_VALUE")]}] 37 | autoscaling_terminate_instances = strtobool( 38 | os.getenv("AUTOSCALING_TERMINATE_INSTANCES") 39 | ) 40 | excluded_dates = json.loads(os.environ.get("SCHEDULER_EXCLUDED_DATES", "[]")) 41 | 42 | if is_date_excluded(excluded_dates): 43 | return 44 | 45 | _strategy = { 46 | AutoscalingScheduler: os.getenv("AUTOSCALING_SCHEDULE"), 47 | DocumentDBScheduler: os.getenv("DOCUMENTDB_SCHEDULE"), 48 | InstanceScheduler: os.getenv("EC2_SCHEDULE"), 49 | EcsScheduler: os.getenv("ECS_SCHEDULE"), 50 | RdsScheduler: os.getenv("RDS_SCHEDULE"), 51 | RedshiftScheduler: os.getenv("REDSHIFT_SCHEDULE"), 52 | CloudWatchAlarmScheduler: os.getenv("CLOUDWATCH_ALARM_SCHEDULE"), 53 | TransferScheduler: os.getenv("TRANSFER_SCHEDULE"), 54 | } 55 | 56 | for service, to_schedule in _strategy.items(): 57 | if strtobool(to_schedule): 58 | for aws_region in aws_regions: 59 | strategy = service(aws_region) 60 | if service == AutoscalingScheduler and autoscaling_terminate_instances: 61 | getattr(strategy, schedule_action)( 62 | aws_tags=format_tags, terminate_instances=True 63 | ) 64 | else: 65 | getattr(strategy, schedule_action)(aws_tags=format_tags) 66 | -------------------------------------------------------------------------------- /package/scheduler/cloudwatch_handler.py: -------------------------------------------------------------------------------- 1 | 
"""Cloudwatch alarm action scheduler.""" 2 | 3 | from typing import Dict, List 4 | 5 | import boto3 6 | from botocore.exceptions import ClientError 7 | 8 | from .exceptions import cloudwatch_exception 9 | from .filter_resources_by_tags import FilterByTags 10 | 11 | 12 | class CloudWatchAlarmScheduler: 13 | """Abstract Cloudwatch alarm scheduler in a class.""" 14 | 15 | def __init__(self, region_name=None) -> None: 16 | """Initialize Cloudwatch alarm scheduler.""" 17 | if region_name: 18 | self.cloudwatch = boto3.client("cloudwatch", region_name=region_name) 19 | else: 20 | self.cloudwatch = boto3.client("cloudwatch") 21 | self.tag_api = FilterByTags(region_name=region_name) 22 | 23 | def stop(self, aws_tags: List[Dict]) -> None: 24 | """Aws Cloudwatch alarm disable function. 25 | 26 | Disable Cloudwatch alarm with defined tags. 27 | 28 | :param list[map] aws_tags: 29 | Aws tags to use for filter resources. 30 | For example: 31 | [ 32 | { 33 | 'Key': 'string', 34 | 'Values': [ 35 | 'string', 36 | ] 37 | } 38 | ] 39 | """ 40 | for alarm_arn in self.tag_api.get_resources("cloudwatch:alarm", aws_tags): 41 | alarm_name = alarm_arn.split(":")[-1] 42 | try: 43 | self.cloudwatch.disable_alarm_actions(AlarmNames=[alarm_name]) 44 | print(f"Disable Cloudwatch alarm {alarm_name}") 45 | except ClientError as exc: 46 | cloudwatch_exception("cloudwatch alarm", alarm_name, exc) 47 | 48 | def start(self, aws_tags: List[Dict]) -> None: 49 | """Aws Cloudwatch alarm enable function. 50 | 51 | Enable Cloudwatch alarm with defined tags. 52 | 53 | :param list[map] aws_tags: 54 | Aws tags to use for filter resources. 
55 | For example: 56 | [ 57 | { 58 | 'Key': 'string', 59 | 'Values': [ 60 | 'string', 61 | ] 62 | } 63 | ] 64 | """ 65 | for alarm_arn in self.tag_api.get_resources("cloudwatch:alarm", aws_tags): 66 | alarm_name = alarm_arn.split(":")[-1] 67 | try: 68 | self.cloudwatch.enable_alarm_actions(AlarmNames=[alarm_name]) 69 | print(f"Enable Cloudwatch alarm {alarm_name}") 70 | except ClientError as exc: 71 | cloudwatch_exception("cloudwatch alarm", alarm_name, exc) 72 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_docdb_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=600 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for DocumentDB cluster ${var.docdb_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 8 | 9 | while true; do 10 | # Check the current state of the DocumentDB cluster 11 | CURRENT_STATE=$(aws docdb describe-db-clusters --db-cluster-identifier ${var.docdb_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws docdb start-db-cluster --db-cluster-identifier ${var.docdb_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. DocumentDB cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 
31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_docdb_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=600 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for DocumentDB cluster ${var.docdb_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 44 | 45 | while true; do 46 | # Check the current state of the DocumentDB cluster 47 | CURRENT_STATE=$(aws docdb describe-db-clusters --db-cluster-identifier ${var.docdb_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. DocumentDB cluster did not reach 'available' state within $TIMEOUT seconds." 61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_docdb_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_neptune_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=1800 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for Neptune cluster ${var.neptune_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 
8 | 9 | while true; do 10 | # Check the current state of the Neptune cluster 11 | CURRENT_STATE=$(aws neptune describe-db-clusters --db-cluster-identifier ${var.neptune_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws neptune start-db-cluster --db-cluster-identifier ${var.neptune_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. Neptune cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_neptune_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=1800 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for Neptune cluster ${var.neptune_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 44 | 45 | while true; do 46 | # Check the current state of the Neptune cluster 47 | CURRENT_STATE=$(aws neptune describe-db-clusters --db-cluster-identifier ${var.neptune_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. Neptune cluster did not reach 'available' state within $TIMEOUT seconds." 
61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_neptune_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/instance-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform ec2 instance with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | data "aws_region" "current" {} 5 | 6 | data "aws_ami" "ubuntu" { 7 | most_recent = true 8 | filter { 9 | name = "name" 10 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 11 | } 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | owners = ["099720109477"] # Canonical 17 | } 18 | 19 | resource "aws_instance" "scheduled" { 20 | count = "3" 21 | ami = data.aws_ami.ubuntu.id 22 | instance_type = "t2.micro" 23 | subnet_id = aws_subnet.public.id 24 | tags = { 25 | tostop = "true-${random_pet.suffix.id}" 26 | Name = "ec2-to-scheduled-${random_pet.suffix.id}-${count.index}" 27 | } 28 | } 29 | 30 | resource "aws_instance" "not_scheduled" { 31 | count = "2" 32 | ami = data.aws_ami.ubuntu.id 33 | instance_type = "t2.micro" 34 | subnet_id = aws_subnet.public.id 35 | tags = { 36 | tostop = "false" 37 | Name = "ec2-not-to-scheduled-${random_pet.suffix.id}-${count.index}" 38 | } 39 | } 40 | 41 | 42 | ### Terraform modules ### 43 | 44 | module "ec2_stop_friday" { 45 | source = "../../" 46 | name = "stop-ec2-${random_pet.suffix.id}" 47 | schedule_expression = "cron(0 23 ? 
* FRI *)" 48 | schedule_action = "stop" 49 | ec2_schedule = "true" 50 | rds_schedule = "false" 51 | autoscaling_schedule = "false" 52 | cloudwatch_alarm_schedule = "true" 53 | 54 | scheduler_tag = { 55 | key = "tostop" 56 | value = "true-${random_pet.suffix.id}" 57 | } 58 | } 59 | 60 | module "ec2_start_monday" { 61 | source = "../../" 62 | name = "start-ec2-${random_pet.suffix.id}" 63 | schedule_expression = "cron(0 07 ? * MON *)" 64 | schedule_action = "start" 65 | ec2_schedule = "true" 66 | rds_schedule = "false" 67 | autoscaling_schedule = "false" 68 | cloudwatch_alarm_schedule = "true" 69 | 70 | scheduler_tag = { 71 | key = "tostop" 72 | value = "true-${random_pet.suffix.id}" 73 | } 74 | } 75 | 76 | module "test_execution" { 77 | count = var.test_mode ? 1 : 0 78 | source = "./test-execution" 79 | 80 | lambda_stop_name = module.ec2_stop_friday.scheduler_lambda_name 81 | instance_1_to_scheduled_id = aws_instance.scheduled[0].id 82 | instance_2_to_scheduled_id = aws_instance.scheduled[1].id 83 | instance_3_to_scheduled_id = aws_instance.scheduled[2].id 84 | instance_1_not_to_scheduled_id = aws_instance.not_scheduled[0].id 85 | instance_2_not_to_scheduled_id = aws_instance.not_scheduled[1].id 86 | } 87 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-cleanup/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "start_rds_aurora_cluster" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | TIMEOUT=600 5 | START_TIME=$(date +%s) 6 | 7 | echo "Waiting for rds aurora cluster ${var.rds_aurora_cluster_name} to reach 'stopped' state (timeout: $TIMEOUT seconds)..." 
8 | 9 | while true; do 10 | # Check the current state of the rds aurora cluster 11 | CURRENT_STATE=$(aws rds describe-db-clusters --db-cluster-identifier ${var.rds_aurora_cluster_name} --query 'DBClusters[0].Status' --output text) 12 | 13 | # Get current elapsed time 14 | CURRENT_TIME=$(date +%s) 15 | ELAPSED=$((CURRENT_TIME - START_TIME)) 16 | 17 | # Check if cluster is stopped 18 | if [ "$CURRENT_STATE" = "stopped" ]; then 19 | aws rds start-db-cluster --db-cluster-identifier ${var.rds_aurora_cluster_name} 20 | exit 0 21 | fi 22 | 23 | # Check if we've exceeded the timeout 24 | if [ $ELAPSED -ge $TIMEOUT ]; then 25 | echo "Timeout reached. rds aurora cluster did not reach 'stopped' state within $TIMEOUT seconds." 26 | exit 1 27 | fi 28 | 29 | # Wait 10 seconds before checking again 30 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 31 | sleep 10 32 | done 33 | EOT 34 | } 35 | } 36 | 37 | resource "null_resource" "waiting_for_rds_aurora_cluster_to_start" { 38 | provisioner "local-exec" { 39 | command = <<-EOT 40 | TIMEOUT=600 41 | START_TIME=$(date +%s) 42 | 43 | echo "Waiting for rds aurora cluster ${var.rds_aurora_cluster_name} to reach 'available' state (timeout: $TIMEOUT seconds)..." 44 | 45 | while true; do 46 | # Check the current state of the rds aurora cluster 47 | CURRENT_STATE=$(aws rds describe-db-clusters --db-cluster-identifier ${var.rds_aurora_cluster_name} --query 'DBClusters[0].Status' --output text) 48 | 49 | # Get current elapsed time 50 | CURRENT_TIME=$(date +%s) 51 | ELAPSED=$((CURRENT_TIME - START_TIME)) 52 | 53 | # Check if cluster is available 54 | if [ "$CURRENT_STATE" = "available" ]; then 55 | exit 0 56 | fi 57 | 58 | # Check if we've exceeded the timeout 59 | if [ $ELAPSED -ge $TIMEOUT ]; then 60 | echo "Timeout reached. rds aurora cluster did not reach 'available' state within $TIMEOUT seconds." 
61 | exit 1 62 | fi 63 | 64 | # Wait 10 seconds before checking again 65 | echo "Current state: $CURRENT_STATE (elapsed: $ELAPSED seconds/ $TIMEOUT seconds)..." 66 | sleep 10 67 | done 68 | EOT 69 | } 70 | 71 | depends_on = [null_resource.start_rds_aurora_cluster] 72 | } 73 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/test-execution/wait_documentdb_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS documentdb cluster status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_documentdb_cluster_status( 13 | desired_status: str, cluster_identifiers: List[str] 14 | ) -> None: 15 | """Wait for documentdb clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of documentdb cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 'available', 'paused') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | documentdb = boto3.client("docdb") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds." 33 | "Some documentdb clusters may not have reached the desired status." 
34 | ) 35 | sys.exit(1) 36 | 37 | all_clusters_in_desired_state = True 38 | for cluster_id in cluster_identifiers: 39 | response = documentdb.describe_db_clusters( 40 | DBClusterIdentifier=cluster_id 41 | ) 42 | instance_id = response["DBClusters"][0]["DBClusterMembers"][0][ 43 | "DBInstanceIdentifier" 44 | ] 45 | instance_response = documentdb.describe_db_instances( 46 | DBInstanceIdentifier=instance_id 47 | ) 48 | current_status = instance_response["DBInstances"][0]["DBInstanceStatus"] 49 | 50 | if current_status != desired_status: 51 | all_clusters_in_desired_state = False 52 | break 53 | 54 | if all_clusters_in_desired_state: 55 | print(f"All documentdb clusters are now {desired_status}") 56 | return 57 | 58 | print(f"Waiting for documentdb clusters to be {desired_status}...") 59 | time.sleep(10) # Wait 10 seconds before checking again 60 | 61 | except ClientError as e: 62 | print(f"Error checking documentdb status: {e}") 63 | sys.exit(1) 64 | 65 | 66 | if __name__ == "__main__": 67 | if len(sys.argv) < 3: 68 | print( 69 | "Usage: python wait_documentdb_status.py" 70 | " [cluster_id2 ...]" 71 | ) 72 | sys.exit(1) 73 | 74 | target_status = sys.argv[1] 75 | target_clusters = sys.argv[2:] 76 | 77 | wait_for_documentdb_cluster_status(target_status, target_clusters) 78 | -------------------------------------------------------------------------------- /package/scheduler/ecs_handler.py: -------------------------------------------------------------------------------- 1 | """ecs service scheduler.""" 2 | 3 | from typing import Dict, List 4 | 5 | import boto3 6 | from botocore.exceptions import ClientError 7 | 8 | from .exceptions import ecs_exception 9 | from .filter_resources_by_tags import FilterByTags 10 | 11 | 12 | class EcsScheduler: 13 | """Abstract ECS Service scheduler in a class.""" 14 | 15 | def __init__(self, region_name=None) -> None: 16 | """Initialize ECS service scheduler.""" 17 | if region_name: 18 | self.ecs = boto3.client("ecs", 
region_name=region_name) 19 | else: 20 | self.ecs = boto3.client("ecs") 21 | self.tag_api = FilterByTags(region_name=region_name) 22 | 23 | def stop(self, aws_tags: List[Dict]) -> None: 24 | """Aws ecs instance stop function. 25 | 26 | Stop ecs service with defined tags and disable its Cloudwatch 27 | alarms. 28 | 29 | :param list[map] aws_tags: 30 | Aws tags to use for filter resources. 31 | For example: 32 | [ 33 | { 34 | 'Key': 'string', 35 | 'Values': [ 36 | 'string', 37 | ] 38 | } 39 | ] 40 | """ 41 | for service_arn in self.tag_api.get_resources("ecs:service", aws_tags): 42 | service_name = service_arn.split("/")[-1] 43 | cluster_name = service_arn.split("/")[-2] 44 | try: 45 | self.ecs.update_service( 46 | cluster=cluster_name, service=service_name, desiredCount=0 47 | ) 48 | print(f"Stop ECS Service {service_name} on Cluster {cluster_name}") 49 | except ClientError as exc: 50 | ecs_exception("ECS Service", service_name, exc) 51 | 52 | def start(self, aws_tags: List[Dict]) -> None: 53 | """Aws ec2 instance start function. 54 | 55 | Start ec2 instances with defined tags. 56 | 57 | Aws tags to use for filter resources 58 | Aws tags to use for filter resources. 
59 | For example: 60 | [ 61 | { 62 | 'Key': 'string', 63 | 'Values': [ 64 | 'string', 65 | ] 66 | } 67 | ] 68 | """ 69 | for service_arn in self.tag_api.get_resources("ecs:service", aws_tags): 70 | service_name = service_arn.split("/")[-1] 71 | cluster_name = service_arn.split("/")[-2] 72 | try: 73 | self.ecs.update_service( 74 | cluster=cluster_name, service=service_name, desiredCount=1 75 | ) 76 | print(f"Start ECS Service {service_name} on Cluster {cluster_name}") 77 | except ClientError as exc: 78 | ecs_exception("ECS Service", service_name, exc) 79 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/test-execution/wait_neptune_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to wait for AWS neptune cluster status.""" 3 | 4 | import sys 5 | import time 6 | from typing import List 7 | 8 | import boto3 9 | from botocore.exceptions import ClientError 10 | 11 | 12 | def wait_for_neptune_cluster_status( 13 | desired_status: str, cluster_identifiers: List[str] 14 | ) -> None: 15 | """Wait for neptune clusters to reach desired status. 16 | 17 | Args: 18 | cluster_identifiers: List of neptune cluster identifiers 19 | desired_status: Desired status to wait for (e.g. 'available', 'paused') 20 | """ 21 | if not cluster_identifiers: 22 | return 23 | 24 | neptune = boto3.client("neptune") 25 | start_time = time.time() 26 | timeout = 1800 # 30 minutes timeout 27 | 28 | while True: 29 | try: 30 | if time.time() - start_time > timeout: 31 | print( 32 | f"Timeout reached after {timeout} seconds." 33 | "Some neptune clusters may not have reached the desired status." 
34 | ) 35 | sys.exit(1) 36 | 37 | all_clusters_in_desired_state = True 38 | for cluster_id in cluster_identifiers: 39 | response = neptune.describe_db_clusters(DBClusterIdentifier=cluster_id) 40 | cluster_status = response["DBClusters"][0]["Status"] 41 | instance_id = response["DBClusters"][0]["DBClusterMembers"][0][ 42 | "DBInstanceIdentifier" 43 | ] 44 | instance_response = neptune.describe_db_instances( 45 | DBInstanceIdentifier=instance_id 46 | ) 47 | instance_status = instance_response["DBInstances"][0][ 48 | "DBInstanceStatus" 49 | ] 50 | 51 | if ( 52 | cluster_status != desired_status 53 | or instance_status != desired_status 54 | ): 55 | all_clusters_in_desired_state = False 56 | break 57 | 58 | if all_clusters_in_desired_state: 59 | print(f"All neptune clusters are now {desired_status}") 60 | return 61 | 62 | print(f"Waiting for neptune clusters to be {desired_status}...") 63 | time.sleep(10) # Wait 10 seconds before checking again 64 | 65 | except ClientError as e: 66 | print(f"Error checking neptune status: {e}") 67 | sys.exit(1) 68 | 69 | 70 | if __name__ == "__main__": 71 | if len(sys.argv) < 3: 72 | print( 73 | "Usage: python wait_neptune_status.py" 74 | " [cluster_id2 ...]" 75 | ) 76 | sys.exit(1) 77 | 78 | target_status = sys.argv[1] 79 | target_clusters = sys.argv[2:] 80 | 81 | wait_for_neptune_cluster_status(target_status, target_clusters) 82 | -------------------------------------------------------------------------------- /examples/documentdb-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_kms_key" "scheduler" { 5 | description = "test kms option on scheduler module" 6 | deletion_window_in_days = 7 7 | } 8 | 9 | resource "aws_docdb_cluster" "scheduled" { 10 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 11 | engine = "docdb" 12 | master_username = "foo" 13 | master_password 
= "mustbeeightchars" 14 | skip_final_snapshot = true 15 | db_subnet_group_name = aws_docdb_subnet_group.documentdb.name 16 | tags = { 17 | tostop = "true-${random_pet.suffix.id}" 18 | } 19 | } 20 | 21 | resource "aws_docdb_cluster_instance" "scheduled" { 22 | identifier = "test-to-stop-${random_pet.suffix.id}" 23 | cluster_identifier = aws_docdb_cluster.scheduled.id 24 | instance_class = "db.r5.large" 25 | tags = { 26 | tostop = "true-${random_pet.suffix.id}" 27 | } 28 | } 29 | 30 | resource "aws_docdb_cluster" "not_scheduled" { 31 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 32 | engine = "docdb" 33 | master_username = "foo" 34 | master_password = "mustbeeightchars" 35 | skip_final_snapshot = true 36 | db_subnet_group_name = aws_docdb_subnet_group.documentdb.name 37 | tags = { 38 | tostop = "false" 39 | } 40 | } 41 | 42 | resource "aws_docdb_cluster_instance" "not_scheduled" { 43 | identifier = "test-not-to-stop-${random_pet.suffix.id}" 44 | cluster_identifier = aws_docdb_cluster.not_scheduled.id 45 | instance_class = "db.r5.large" 46 | tags = { 47 | tostop = "false" 48 | } 49 | } 50 | 51 | 52 | module "documentdb_stop_friday" { 53 | source = "../.." 54 | name = "stop-documentdb-${random_pet.suffix.id}" 55 | kms_key_arn = aws_kms_key.scheduler.arn 56 | schedule_expression = "cron(0 23 ? * FRI *)" 57 | schedule_action = "stop" 58 | documentdb_schedule = "true" 59 | 60 | scheduler_tag = { 61 | key = "tostop" 62 | value = "true-${random_pet.suffix.id}" 63 | } 64 | } 65 | 66 | module "documentdb_start_monday" { 67 | source = "../.." 68 | name = "start-documentdb-${random_pet.suffix.id}" 69 | schedule_expression = "cron(0 07 ? * MON *)" 70 | schedule_action = "start" 71 | documentdb_schedule = "true" 72 | 73 | scheduler_tag = { 74 | key = "tostop" 75 | value = "true-${random_pet.suffix.id}" 76 | } 77 | } 78 | 79 | module "test_execution" { 80 | count = var.test_mode ? 
1 : 0 81 | source = "./test-execution" 82 | 83 | lambda_stop_name = module.documentdb_stop_friday.scheduler_lambda_name 84 | docdb_cluster_to_scheduled_name = aws_docdb_cluster.scheduled.cluster_identifier 85 | docdb_cluster_not_scheduled_name = aws_docdb_cluster.not_scheduled.cluster_identifier 86 | 87 | depends_on = [ 88 | aws_docdb_cluster_instance.scheduled, 89 | aws_docdb_cluster_instance.not_scheduled 90 | ] 91 | } 92 | -------------------------------------------------------------------------------- /examples/redshift-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_kms_key" "scheduler" { 5 | description = "test kms option on scheduler module" 6 | deletion_window_in_days = 7 7 | } 8 | 9 | resource "aws_redshift_cluster" "scheduled" { 10 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 11 | database_name = "mydb" 12 | master_username = "exampleuser" 13 | master_password = "Mustbe8characters" 14 | node_type = "ra3.large" 15 | cluster_type = "single-node" 16 | publicly_accessible = false 17 | skip_final_snapshot = true 18 | cluster_subnet_group_name = aws_redshift_subnet_group.redshif.name 19 | 20 | tags = { 21 | tostop = "true-${random_pet.suffix.id}" 22 | } 23 | } 24 | 25 | resource "aws_redshift_cluster_snapshot" "scheduled" { 26 | cluster_identifier = aws_redshift_cluster.scheduled.id 27 | snapshot_identifier = "test-to-stop-${random_pet.suffix.id}" 28 | } 29 | 30 | resource "aws_redshift_cluster" "not_scheduled" { 31 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 32 | database_name = "mydb" 33 | master_username = "exampleuser" 34 | master_password = "Mustbe8characters" 35 | node_type = "ra3.large" 36 | cluster_type = "single-node" 37 | publicly_accessible = false 38 | skip_final_snapshot = true 39 | cluster_subnet_group_name = aws_redshift_subnet_group.redshif.name 40 | 
41 | tags = { 42 | tostop = "false" 43 | } 44 | } 45 | 46 | resource "aws_redshift_cluster_snapshot" "not_scheduled" { 47 | cluster_identifier = aws_redshift_cluster.not_scheduled.id 48 | snapshot_identifier = "test-not-to-stop-${random_pet.suffix.id}" 49 | } 50 | 51 | 52 | module "redshift_stop_friday" { 53 | source = "../.." 54 | name = "stop-redshift-${random_pet.suffix.id}" 55 | kms_key_arn = aws_kms_key.scheduler.arn 56 | schedule_expression = "cron(0 23 ? * FRI *)" 57 | schedule_action = "stop" 58 | redshift_schedule = "true" 59 | 60 | scheduler_tag = { 61 | key = "tostop" 62 | value = "true-${random_pet.suffix.id}" 63 | } 64 | } 65 | 66 | module "redshift_start_monday" { 67 | source = "../.." 68 | name = "start-redshift-${random_pet.suffix.id}" 69 | schedule_expression = "cron(0 07 ? * MON *)" 70 | schedule_action = "start" 71 | redshift_schedule = "true" 72 | 73 | scheduler_tag = { 74 | key = "tostop" 75 | value = "true-${random_pet.suffix.id}" 76 | } 77 | } 78 | 79 | module "test_execution" { 80 | count = var.test_mode ? 
1 : 0 81 | source = "./test-execution" 82 | 83 | lambda_stop_name = module.redshift_stop_friday.scheduler_lambda_name 84 | redshift_cluster_to_scheduled_name = aws_redshift_cluster.scheduled.cluster_identifier 85 | redshift_cluster_not_scheduled_name = aws_redshift_cluster.not_scheduled.cluster_identifier 86 | } 87 | -------------------------------------------------------------------------------- /examples/date-exclusion/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_pet" "suffix" {} 2 | 3 | data "aws_ami" "ubuntu" { 4 | most_recent = true 5 | filter { 6 | name = "name" 7 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 8 | } 9 | filter { 10 | name = "virtualization-type" 11 | values = ["hvm"] 12 | } 13 | owners = ["099720109477"] # Canonical 14 | } 15 | 16 | 17 | resource "aws_instance" "scheduled" { 18 | count = 2 19 | ami = data.aws_ami.ubuntu.id 20 | instance_type = "t2.micro" 21 | subnet_id = aws_subnet.test.id 22 | 23 | tags = { 24 | tostop = "true-${random_pet.suffix.id}" 25 | Name = "ec2-scheduled-exclusion-example-${random_pet.suffix.id}-${count.index}" 26 | } 27 | } 28 | 29 | resource "aws_instance" "not_scheduled" { 30 | ami = data.aws_ami.ubuntu.id 31 | instance_type = "t2.micro" 32 | subnet_id = aws_subnet.test.id 33 | 34 | tags = { 35 | tostop = "false" 36 | Name = "ec2-not-scheduled-exclusion-example-${random_pet.suffix.id}" 37 | } 38 | } 39 | 40 | 41 | module "ec2_stop_with_exclusions" { 42 | source = "../../" 43 | name = "stop-ec2-exclusions-${random_pet.suffix.id}" 44 | schedule_expression = "cron(0 22 ? 
* MON-FRI *)" 45 | schedule_action = "stop" 46 | ec2_schedule = true 47 | rds_schedule = false 48 | autoscaling_schedule = false 49 | cloudwatch_alarm_schedule = false 50 | scheduler_excluded_dates = [ 51 | "01-01", # New Year's Day 52 | "12-25", # Christmas Day 53 | "12-24", # Christmas Eve 54 | "07-04", # Independence Day (US) 55 | "11-24", # Thanksgiving (example date) 56 | "05-01", # Labor Day 57 | "12-31", # New Year's Eve 58 | formatdate("MM-DD", timestamp()) # Current date (for tests purposes) 59 | ] 60 | 61 | scheduler_tag = { 62 | key = "tostop" 63 | value = "true-${random_pet.suffix.id}" 64 | } 65 | } 66 | 67 | 68 | module "ec2_start_with_exclusions" { 69 | source = "../../" 70 | name = "start-ec2-exclusions-${random_pet.suffix.id}" 71 | schedule_expression = "cron(0 7 ? * MON-FRI *)" 72 | schedule_action = "start" 73 | ec2_schedule = true 74 | rds_schedule = false 75 | autoscaling_schedule = false 76 | cloudwatch_alarm_schedule = false 77 | 78 | scheduler_tag = { 79 | key = "tostop" 80 | value = "true-${random_pet.suffix.id}" 81 | } 82 | } 83 | 84 | module "test_execution" { 85 | count = var.test_mode ? 
1 : 0 86 | source = "./test-execution" 87 | 88 | lambda_stop_name = module.ec2_stop_with_exclusions.scheduler_lambda_name 89 | instance_1_to_scheduled_id = aws_instance.scheduled[0].id 90 | instance_2_to_scheduled_id = aws_instance.scheduled[1].id 91 | instance_not_to_scheduled_id = aws_instance.not_scheduled.id 92 | } 93 | -------------------------------------------------------------------------------- /examples/neptune-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Deploy two lambda for testing with awspec 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_neptune_cluster" "to_schedule" { 5 | cluster_identifier = "test-to-stop-${random_pet.suffix.id}" 6 | engine = "neptune" 7 | skip_final_snapshot = true 8 | iam_database_authentication_enabled = true 9 | apply_immediately = true 10 | neptune_subnet_group_name = aws_neptune_subnet_group.test.name 11 | 12 | tags = { 13 | tostop = "true-${random_pet.suffix.id}" 14 | } 15 | } 16 | 17 | resource "aws_neptune_cluster_instance" "to_schedule" { 18 | identifier = "test-to-stop-${random_pet.suffix.id}" 19 | cluster_identifier = aws_neptune_cluster.to_schedule.id 20 | engine = "neptune" 21 | instance_class = "db.t3.medium" 22 | apply_immediately = true 23 | 24 | tags = { 25 | tostop = "true-${random_pet.suffix.id}" 26 | } 27 | } 28 | 29 | resource "aws_neptune_cluster" "not_to_scheduled" { 30 | cluster_identifier = "test-not-to-stop-${random_pet.suffix.id}" 31 | engine = "neptune" 32 | skip_final_snapshot = true 33 | iam_database_authentication_enabled = true 34 | apply_immediately = true 35 | neptune_subnet_group_name = aws_neptune_subnet_group.test.name 36 | 37 | tags = { 38 | tostop = "false" 39 | } 40 | } 41 | 42 | resource "aws_neptune_cluster_instance" "not_to_scheduled" { 43 | identifier = "test-not-to-stop-${random_pet.suffix.id}" 44 | cluster_identifier = aws_neptune_cluster.not_to_scheduled.id 45 | engine = "neptune" 46 | instance_class = 
"db.t3.medium" 47 | apply_immediately = true 48 | 49 | tags = { 50 | tostop = "false" 51 | } 52 | } 53 | 54 | 55 | module "neptune_stop_friday" { 56 | source = "../.." 57 | name = "stop-neptune-${random_pet.suffix.id}" 58 | schedule_expression = "cron(0 23 ? * FRI *)" 59 | schedule_action = "stop" 60 | rds_schedule = "true" 61 | 62 | scheduler_tag = { 63 | key = "tostop" 64 | value = "true-${random_pet.suffix.id}" 65 | } 66 | } 67 | 68 | module "neptune_start_monday" { 69 | source = "../.." 70 | name = "start-neptune-${random_pet.suffix.id}" 71 | schedule_expression = "cron(0 07 ? * MON *)" 72 | schedule_action = "start" 73 | rds_schedule = "true" 74 | 75 | scheduler_tag = { 76 | key = "tostop" 77 | value = "true-${random_pet.suffix.id}" 78 | } 79 | } 80 | 81 | module "test_execution" { 82 | count = var.test_mode ? 1 : 0 83 | source = "./test-execution" 84 | 85 | lambda_stop_name = module.neptune_stop_friday.scheduler_lambda_name 86 | neptune_cluster_to_scheduled_name = aws_neptune_cluster.to_schedule.cluster_identifier 87 | neptune_cluster_not_scheduled_name = aws_neptune_cluster.not_to_scheduled.cluster_identifier 88 | 89 | depends_on = [ 90 | aws_neptune_cluster_instance.to_schedule, 91 | aws_neptune_cluster_instance.not_to_scheduled 92 | ] 93 | } 94 | -------------------------------------------------------------------------------- /tests/unit/utils.py: -------------------------------------------------------------------------------- 1 | """Module use by ec2 scheduler unit tests.""" 2 | 3 | import boto3 4 | 5 | 6 | def launch_ec2_instances(count, region_name, tag_key, tag_value): 7 | """Create ec2 instances.""" 8 | client = boto3.client("ec2", region_name=region_name) 9 | instance = client.run_instances( 10 | ImageId="ami-02df9ea15c1778c9c", 11 | MaxCount=count, 12 | MinCount=count, 13 | TagSpecifications=[ 14 | { 15 | "ResourceType": "instance", 16 | "Tags": [ 17 | {"Key": "Name", "Value": "instance_test"}, 18 | {"Key": tag_key, "Value": tag_value}, 19 | ], 20 | 
} 21 | ], 22 | ) 23 | return instance 24 | 25 | 26 | def launch_ec2_spot(count, region_name, tag_key, tag_value): 27 | """Create ec2 spot instances.""" 28 | client = boto3.client("ec2", region_name=region_name) 29 | spot = client.run_instances( 30 | ImageId="ami-02df9ea15c1778c9c", 31 | MaxCount=count, 32 | MinCount=count, 33 | InstanceMarketOptions={ 34 | "MarketType": "spot", 35 | "SpotOptions": { 36 | "SpotInstanceType": "one-time", 37 | "InstanceInterruptionBehavior": "terminate", 38 | }, 39 | }, 40 | TagSpecifications=[ 41 | { 42 | "ResourceType": "instance", 43 | "Tags": [ 44 | {"Key": "Name", "Value": "instance_test"}, 45 | {"Key": tag_key, "Value": tag_value}, 46 | ], 47 | } 48 | ], 49 | ) 50 | return spot 51 | 52 | 53 | def launch_asg(region_name, tag_key, tag_value): 54 | """Create autoscaling group with aws tags.""" 55 | client = boto3.client("autoscaling", region_name=region_name) 56 | client.create_launch_configuration( 57 | LaunchConfigurationName="lc-test", 58 | ImageId="ami-02df9ea15c1778c9c", 59 | InstanceType="t2.micro", 60 | ) 61 | client.create_auto_scaling_group( 62 | AutoScalingGroupName="asg-test", 63 | MaxSize=5, 64 | DesiredCapacity=3, 65 | MinSize=1, 66 | LaunchConfigurationName="lc-test", 67 | AvailabilityZones=[region_name + "a", region_name + "b"], 68 | Tags=[ 69 | { 70 | "ResourceId": "asg-test", 71 | "ResourceType": "auto-scaling-group", 72 | "Key": tag_key, 73 | "Value": tag_value, 74 | "PropagateAtLaunch": True, 75 | } 76 | ], 77 | ) 78 | return client.describe_auto_scaling_groups(AutoScalingGroupNames=["asg-test"]) 79 | 80 | 81 | def launch_rds_instance(region_name, tag_key, tag_value): 82 | """Create rds instances with aws tags.""" 83 | client = boto3.client("rds", region_name=region_name) 84 | rds_instance = client.create_db_instance( 85 | DBInstanceIdentifier="db-instance", 86 | AllocatedStorage=10, 87 | DBName="db-instance", 88 | DBInstanceClass="db.m4.large", 89 | Engine="mariadb", 90 | MasterUsername="root", 91 | 
MasterUserPassword="IamNotHere", 92 | Tags=[ 93 | {"Key": "Name", "Value": "db-instance"}, 94 | {"Key": tag_key, "Value": tag_value}, 95 | ], 96 | ) 97 | return rds_instance 98 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform autoscaling group with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | data "aws_ami" "ubuntu" { 5 | most_recent = true 6 | 7 | filter { 8 | name = "name" 9 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 10 | } 11 | 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | 17 | owners = ["099720109477"] # Canonical 18 | } 19 | 20 | resource "aws_launch_template" "this" { 21 | name_prefix = "web_config" 22 | image_id = data.aws_ami.ubuntu.id 23 | instance_type = "t2.micro" 24 | } 25 | 26 | # Create autoscaling group with tag 27 | resource "aws_autoscaling_group" "scheduled" { 28 | count = 3 29 | name = "test-to-stop-${count.index}-${random_pet.suffix.id}" 30 | max_size = 5 31 | min_size = 1 32 | health_check_grace_period = 300 33 | health_check_type = "EC2" 34 | desired_capacity = 1 35 | force_delete = true 36 | vpc_zone_identifier = [aws_subnet.this.id] 37 | 38 | launch_template { 39 | id = aws_launch_template.this.id 40 | version = "$Latest" 41 | } 42 | 43 | tag { 44 | key = "tostop" 45 | value = "true-${random_pet.suffix.id}" 46 | propagate_at_launch = true 47 | } 48 | } 49 | 50 | # Create autoscaling group without tag 51 | resource "aws_autoscaling_group" "not_scheduled" { 52 | count = 2 53 | name = "test-not-to-stop-${count.index}-${random_pet.suffix.id}" 54 | max_size = 5 55 | min_size = 1 56 | health_check_grace_period = 300 57 | health_check_type = "EC2" 58 | desired_capacity = 1 59 | force_delete = true 60 | vpc_zone_identifier = [aws_subnet.this.id] 61 | 62 | launch_template { 63 | id = 
aws_launch_template.this.id 64 | version = "$Latest" 65 | } 66 | 67 | tag { 68 | key = "tostop" 69 | value = "false" 70 | propagate_at_launch = true 71 | } 72 | } 73 | 74 | 75 | ### Terraform modules ### 76 | 77 | module "autoscaling_stop_friday" { 78 | source = "../../" 79 | name = "stop-autoscaling-${random_pet.suffix.id}" 80 | schedule_expression = "cron(0 23 ? * FRI *)" 81 | schedule_action = "stop" 82 | ec2_schedule = "false" 83 | rds_schedule = "false" 84 | autoscaling_schedule = "true" 85 | cloudwatch_alarm_schedule = "true" 86 | 87 | scheduler_tag = { 88 | key = "tostop" 89 | value = "true-${random_pet.suffix.id}" 90 | } 91 | } 92 | 93 | module "autoscaling_start_monday" { 94 | source = "../../" 95 | name = "start-autoscaling-${random_pet.suffix.id}" 96 | schedule_expression = "cron(0 07 ? * MON *)" 97 | schedule_action = "start" 98 | ec2_schedule = "false" 99 | rds_schedule = "false" 100 | autoscaling_schedule = "true" 101 | cloudwatch_alarm_schedule = "true" 102 | 103 | scheduler_tag = { 104 | key = "tostop" 105 | value = "true-${random_pet.suffix.id}" 106 | } 107 | } 108 | 109 | module "test_execution" { 110 | count = var.test_mode ? 
1 : 0 111 | source = "./test-execution" 112 | 113 | lambda_stop_name = module.autoscaling_stop_friday.scheduler_lambda_name 114 | asg_scheduled_name = aws_autoscaling_group.scheduled[0].name 115 | asg_not_scheduled_name = aws_autoscaling_group.not_scheduled[0].name 116 | } 117 | -------------------------------------------------------------------------------- /examples/rds-scheduler/test-execution/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_rds_instance_running_state" { 2 | provisioner "local-exec" { 3 | command = <<-EOT 4 | python3 ${path.module}/wait_rds_instance.py available \ 5 | ${var.rds_mariadb_instance_to_scheduled_name} \ 6 | ${var.rds_mysql_instance_to_not_scheduled_name} 7 | EOT 8 | } 9 | } 10 | 11 | resource "null_resource" "wait_rds_cluster_running_state" { 12 | provisioner "local-exec" { 13 | command = <<-EOT 14 | python3 ${path.module}/wait_rds_cluster.py available \ 15 | ${var.rds_aurora_cluster_to_scheduled_name} 16 | EOT 17 | } 18 | } 19 | 20 | resource "aws_lambda_invocation" "this" { 21 | function_name = var.lambda_stop_name 22 | 23 | input = jsonencode({ 24 | key1 = "value1" 25 | key2 = "value2" 26 | }) 27 | 28 | depends_on = [ 29 | null_resource.wait_rds_instance_running_state, 30 | null_resource.wait_rds_cluster_running_state, 31 | ] 32 | } 33 | 34 | resource "null_resource" "wait_rds_instance_stopped_state" { 35 | provisioner "local-exec" { 36 | command = <<-EOT 37 | python3 ${path.module}/wait_rds_instance.py stopped \ 38 | ${var.rds_mariadb_instance_to_scheduled_name} 39 | EOT 40 | } 41 | 42 | depends_on = [aws_lambda_invocation.this] 43 | } 44 | 45 | resource "null_resource" "wait_rds_cluster_stopped_state" { 46 | provisioner "local-exec" { 47 | command = <<-EOT 48 | python3 ${path.module}/wait_rds_cluster.py stopped \ 49 | ${var.rds_aurora_cluster_to_scheduled_name} 50 | EOT 51 | } 52 | 53 | depends_on = [null_resource.wait_rds_instance_stopped_state] 54 | } 
55 | 56 | resource "null_resource" "rds_aurora_cluster_to_scheduled" { 57 | provisioner "local-exec" { 58 | command = <<-EOT 59 | aws rds describe-db-clusters \ 60 | --db-cluster-identifier ${var.rds_aurora_cluster_to_scheduled_name} \ 61 | --query 'DBClusters[0].Status' \ 62 | --output text > ${path.module}/rds_aurora_cluster_to_scheduled.state 63 | EOT 64 | } 65 | 66 | depends_on = [null_resource.wait_rds_cluster_stopped_state] 67 | } 68 | 69 | data "local_file" "rds_aurora_cluster_to_scheduled" { 70 | filename = "${path.module}/rds_aurora_cluster_to_scheduled.state" 71 | 72 | depends_on = [null_resource.rds_aurora_cluster_to_scheduled] 73 | } 74 | 75 | resource "null_resource" "rds_mariadb_instance_to_scheduled" { 76 | provisioner "local-exec" { 77 | command = <<-EOT 78 | aws rds describe-db-instances \ 79 | --db-instance-identifier ${var.rds_mariadb_instance_to_scheduled_name} \ 80 | --query 'DBInstances[0].DBInstanceStatus' \ 81 | --output text > ${path.module}/rds_mariadb_instance_to_scheduled.state 82 | EOT 83 | } 84 | 85 | depends_on = [null_resource.wait_rds_instance_stopped_state] 86 | } 87 | 88 | data "local_file" "rds_mariadb_instance_to_scheduled" { 89 | filename = "${path.module}/rds_mariadb_instance_to_scheduled.state" 90 | 91 | depends_on = [null_resource.rds_mariadb_instance_to_scheduled] 92 | } 93 | 94 | resource "null_resource" "rds_mysql_instance_to_not_scheduled" { 95 | provisioner "local-exec" { 96 | command = <<-EOT 97 | aws rds describe-db-instances \ 98 | --db-instance-identifier ${var.rds_mysql_instance_to_not_scheduled_name} \ 99 | --query 'DBInstances[0].DBInstanceStatus' \ 100 | --output text > ${path.module}/rds_mysql_instance_to_not_scheduled.state 101 | EOT 102 | } 103 | 104 | depends_on = [null_resource.wait_rds_instance_stopped_state] 105 | } 106 | 107 | data "local_file" "rds_mysql_instance_to_not_scheduled" { 108 | filename = "${path.module}/rds_mysql_instance_to_not_scheduled.state" 109 | 110 | depends_on = 
[null_resource.rds_mysql_instance_to_not_scheduled] 111 | } 112 | -------------------------------------------------------------------------------- /package/scheduler/instance_handler.py: -------------------------------------------------------------------------------- 1 | """EC2 instances scheduler. 2 | 3 | This module provides functionality to start and stop EC2 instances based on tags. 4 | """ 5 | 6 | import logging 7 | from typing import Dict, List, Optional 8 | 9 | import boto3 10 | from botocore.exceptions import ClientError 11 | 12 | from .exceptions import ec2_exception 13 | from .filter_resources_by_tags import FilterByTags 14 | 15 | # Set up logger 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | class InstanceScheduler: 20 | """EC2 instance scheduler to start and stop instances based on tags.""" 21 | 22 | def __init__(self, region_name: Optional[str] = None) -> None: 23 | """Initialize EC2 scheduler with AWS clients. 24 | 25 | Args: 26 | region_name: AWS region name. If None, default region is used. 27 | """ 28 | self.region_name = region_name 29 | self.ec2 = ( 30 | boto3.client("ec2", region_name=region_name) 31 | if region_name 32 | else boto3.client("ec2") 33 | ) 34 | self.asg = ( 35 | boto3.client("autoscaling", region_name=region_name) 36 | if region_name 37 | else boto3.client("autoscaling") 38 | ) 39 | self.tag_api = FilterByTags(region_name=region_name) 40 | 41 | def _process_instances(self, aws_tags: List[Dict], action: str) -> None: 42 | """Process EC2 instances based on the specified action. 
43 | 44 | Args: 45 | aws_tags: List of tag dictionaries to filter resources 46 | action: Action to perform ('start' or 'stop') 47 | """ 48 | for instance_arn in self.tag_api.get_resources("ec2:instance", aws_tags): 49 | instance_id = instance_arn.split("/")[-1] 50 | try: 51 | # Skip instances that are part of an Auto Scaling Group 52 | if self.asg.describe_auto_scaling_instances(InstanceIds=[instance_id])[ 53 | "AutoScalingInstances" 54 | ]: 55 | logger.info( 56 | f"Skipping {instance_id} as it belongs to an Auto Scaling Group" 57 | ) 58 | continue 59 | 60 | # Perform the requested action 61 | if action == "start": 62 | self.ec2.start_instances(InstanceIds=[instance_id]) 63 | logger.info(f"Started instance {instance_id}") 64 | elif action == "stop": 65 | self.ec2.stop_instances(InstanceIds=[instance_id]) 66 | logger.info(f"Stopped instance {instance_id}") 67 | 68 | except ClientError as exc: 69 | ec2_exception("instance", instance_id, exc) 70 | logger.error(f"Failed to {action} instance {instance_id}: {str(exc)}") 71 | 72 | def stop(self, aws_tags: List[Dict]) -> None: 73 | """Stop EC2 instances with defined tags. 74 | 75 | Args: 76 | aws_tags: List of tag dictionaries to filter resources. 77 | For example: 78 | [ 79 | { 80 | 'Key': 'Environment', 81 | 'Values': ['Development'] 82 | } 83 | ] 84 | """ 85 | self._process_instances(aws_tags, "stop") 86 | 87 | def start(self, aws_tags: List[Dict]) -> None: 88 | """Start EC2 instances with defined tags. 89 | 90 | Args: 91 | aws_tags: List of tag dictionaries to filter resources. 
92 | For example: 93 | [ 94 | { 95 | 'Key': 'Environment', 96 | 'Values': ['Development'] 97 | } 98 | ] 99 | """ 100 | self._process_instances(aws_tags, "start") 101 | -------------------------------------------------------------------------------- /examples/autoscaling-scheduler-terminate-instances/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform autoscaling group with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | data "aws_ami" "ubuntu" { 5 | most_recent = true 6 | 7 | filter { 8 | name = "name" 9 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 10 | } 11 | 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | 17 | owners = ["099720109477"] # Canonical 18 | } 19 | 20 | resource "aws_launch_template" "this" { 21 | name_prefix = "web_config" 22 | image_id = data.aws_ami.ubuntu.id 23 | instance_type = "t2.micro" 24 | } 25 | 26 | # Create autoscaling group with tag 27 | resource "aws_autoscaling_group" "scheduled" { 28 | count = 3 29 | name = "test-to-stop-${random_pet.suffix.id}-${count.index}" 30 | max_size = 5 31 | min_size = 1 32 | health_check_grace_period = 300 33 | health_check_type = "EC2" 34 | desired_capacity = 1 35 | force_delete = true 36 | vpc_zone_identifier = [aws_subnet.this.id] 37 | mixed_instances_policy { 38 | instances_distribution { 39 | on_demand_base_capacity = 0 40 | on_demand_percentage_above_base_capacity = 25 41 | spot_allocation_strategy = "capacity-optimized" 42 | } 43 | launch_template { 44 | launch_template_specification { 45 | launch_template_id = aws_launch_template.this.id 46 | } 47 | } 48 | } 49 | 50 | tag { 51 | key = "tostop" 52 | value = "true-${random_pet.suffix.id}" 53 | propagate_at_launch = true 54 | } 55 | } 56 | 57 | # Create autoscaling group without tag 58 | resource "aws_autoscaling_group" "not_scheduled" { 59 | count = 2 60 | name = "test-not-to-stop-${random_pet.suffix.id}-${count.index}" 61 | 
max_size = 5 62 | min_size = 1 63 | health_check_grace_period = 300 64 | health_check_type = "EC2" 65 | desired_capacity = 1 66 | force_delete = true 67 | vpc_zone_identifier = [aws_subnet.this.id] 68 | mixed_instances_policy { 69 | instances_distribution { 70 | on_demand_base_capacity = 0 71 | on_demand_percentage_above_base_capacity = 25 72 | spot_allocation_strategy = "capacity-optimized" 73 | } 74 | launch_template { 75 | launch_template_specification { 76 | launch_template_id = aws_launch_template.this.id 77 | } 78 | } 79 | } 80 | 81 | tag { 82 | key = "tostop" 83 | value = "false" 84 | propagate_at_launch = true 85 | } 86 | } 87 | 88 | 89 | ### Terraform modules ### 90 | 91 | module "autoscaling_stop_friday" { 92 | source = "../../" 93 | name = "stop-autoscaling-${random_pet.suffix.id}" 94 | schedule_expression = "cron(0 23 ? * FRI *)" 95 | schedule_action = "stop" 96 | ec2_schedule = "false" 97 | rds_schedule = "false" 98 | autoscaling_schedule = "true" 99 | autoscaling_terminate_instances = "true" 100 | cloudwatch_alarm_schedule = "true" 101 | 102 | scheduler_tag = { 103 | key = "tostop" 104 | value = "true-${random_pet.suffix.id}" 105 | } 106 | } 107 | 108 | module "autoscaling_start_monday" { 109 | source = "../../" 110 | name = "start-autoscaling-${random_pet.suffix.id}" 111 | schedule_expression = "cron(0 07 ? 
* MON *)" 112 | schedule_action = "start" 113 | ec2_schedule = "false" 114 | rds_schedule = "false" 115 | autoscaling_schedule = "true" 116 | cloudwatch_alarm_schedule = "true" 117 | 118 | scheduler_tag = { 119 | key = "tostop" 120 | value = "true-${random_pet.suffix.id}" 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /examples/rds-scheduler/main.tf: -------------------------------------------------------------------------------- 1 | # Terraform rds with lambda scheduler 2 | resource "random_pet" "suffix" {} 3 | 4 | resource "aws_rds_cluster" "aurora_scheduled" { 5 | cluster_identifier = "test-to-stop-aurora-cluster-${random_pet.suffix.id}" 6 | engine = "aurora-mysql" 7 | db_subnet_group_name = aws_db_subnet_group.aurora.id 8 | database_name = "aurorawithtag" 9 | master_username = "foo" 10 | master_password = "barbut8chars" 11 | skip_final_snapshot = "true" 12 | 13 | tags = { 14 | tostop = "true-${random_pet.suffix.id}" 15 | } 16 | } 17 | 18 | resource "aws_rds_cluster_instance" "aurora_scheduled" { 19 | identifier = "test-to-stop-aurora-instance-${random_pet.suffix.id}" 20 | engine = aws_rds_cluster.aurora_scheduled.engine 21 | engine_version = aws_rds_cluster.aurora_scheduled.engine_version 22 | db_subnet_group_name = aws_db_subnet_group.aurora.id 23 | cluster_identifier = aws_rds_cluster.aurora_scheduled.id 24 | instance_class = "db.t3.medium" 25 | 26 | tags = { 27 | tostop = "true-${random_pet.suffix.id}" 28 | } 29 | } 30 | 31 | resource "aws_db_instance" "mariadb_scheduled" { 32 | identifier = "test-to-stop-mariadb-instance-${random_pet.suffix.id}" 33 | db_name = "mariadbwithtag" 34 | db_subnet_group_name = aws_db_subnet_group.aurora.id 35 | allocated_storage = 10 36 | storage_type = "gp2" 37 | engine = "mariadb" 38 | engine_version = "11.4.4" 39 | instance_class = "db.t4g.micro" 40 | username = "foo" 41 | password = "foobarbaz" 42 | skip_final_snapshot = "true" 43 | 44 | tags = { 45 | tostop = 
"true-${random_pet.suffix.id}" 46 | } 47 | } 48 | 49 | resource "aws_db_instance" "mysql_not_scheduled" { 50 | identifier = "test-not-to-stop-mysql-instance-${random_pet.suffix.id}" 51 | db_name = "mysqlwithouttag" 52 | db_subnet_group_name = aws_db_subnet_group.aurora.id 53 | allocated_storage = 10 54 | storage_type = "gp2" 55 | engine = "mysql" 56 | engine_version = "8.0" 57 | instance_class = "db.t4g.micro" 58 | username = "foo" 59 | password = "foobarbaz" 60 | skip_final_snapshot = "true" 61 | 62 | tags = { 63 | tostop = "false" 64 | } 65 | } 66 | 67 | 68 | ### Terraform modules ### 69 | 70 | module "rds_stop_friday" { 71 | source = "../../" 72 | name = "stop-rds-${random_pet.suffix.id}" 73 | schedule_expression = "cron(0 23 ? * FRI *)" 74 | schedule_action = "stop" 75 | ec2_schedule = "false" 76 | rds_schedule = "true" 77 | autoscaling_schedule = "false" 78 | cloudwatch_alarm_schedule = "true" 79 | 80 | scheduler_tag = { 81 | key = "tostop" 82 | value = "true-${random_pet.suffix.id}" 83 | } 84 | } 85 | 86 | module "rds_start_monday" { 87 | source = "../../" 88 | name = "start-rds-${random_pet.suffix.id}" 89 | schedule_expression = "cron(0 07 ? * MON *)" 90 | schedule_action = "start" 91 | ec2_schedule = "false" 92 | rds_schedule = "true" 93 | autoscaling_schedule = "false" 94 | cloudwatch_alarm_schedule = "true" 95 | 96 | scheduler_tag = { 97 | key = "tostop" 98 | value = "true-${random_pet.suffix.id}" 99 | } 100 | } 101 | 102 | module "test_execution" { 103 | count = var.test_mode ? 
1 : 0 104 | source = "./test-execution" 105 | 106 | lambda_stop_name = module.rds_stop_friday.scheduler_lambda_name 107 | rds_aurora_cluster_to_scheduled_name = aws_rds_cluster.aurora_scheduled.cluster_identifier 108 | rds_mariadb_instance_to_scheduled_name = aws_db_instance.mariadb_scheduled.identifier 109 | rds_mysql_instance_to_not_scheduled_name = aws_db_instance.mysql_not_scheduled.identifier 110 | 111 | depends_on = [ 112 | aws_rds_cluster_instance.aurora_scheduled 113 | ] 114 | } 115 | -------------------------------------------------------------------------------- /package/scheduler/rds_handler.py: -------------------------------------------------------------------------------- 1 | """RDS instances scheduler.""" 2 | 3 | from typing import Dict, List, Literal, Optional 4 | 5 | import boto3 6 | from botocore.exceptions import ClientError 7 | 8 | from .exceptions import rds_exception 9 | from .filter_resources_by_tags import FilterByTags 10 | 11 | 12 | class RdsScheduler: 13 | """RDS resource scheduler for controlling instances and clusters.""" 14 | 15 | def __init__(self, region_name: Optional[str] = None) -> None: 16 | """Initialize RDS scheduler. 17 | 18 | Args: 19 | region_name: AWS region name. Uses default configuration if not specified. 20 | """ 21 | self.rds = ( 22 | boto3.client("rds", region_name=region_name) 23 | if region_name 24 | else boto3.client("rds") 25 | ) 26 | self.tag_api = FilterByTags(region_name=region_name) 27 | 28 | def stop(self, aws_tags: List[Dict]) -> None: 29 | """Stop RDS Aurora clusters and RDS DB instances with defined tags. 30 | 31 | Args: 32 | aws_tags: AWS tags to filter resources. 33 | Example: [{'Key': 'Environment', 'Values': ['Dev']}] 34 | """ 35 | self._process_resources(aws_tags, action="stop") 36 | 37 | def start(self, aws_tags: List[Dict]) -> None: 38 | """Start RDS Aurora clusters and RDS DB instances with defined tags. 39 | 40 | Args: 41 | aws_tags: AWS tags to filter resources. 
42 | Example: [{'Key': 'Environment', 'Values': ['Dev']}] 43 | """ 44 | self._process_resources(aws_tags, action="start") 45 | 46 | def _process_resources( 47 | self, aws_tags: List[Dict], action: Literal["start", "stop"] 48 | ) -> None: 49 | """Process RDS resources with the specified action. 50 | 51 | Args: 52 | aws_tags: AWS tags to filter resources. 53 | action: Action to perform ("start" or "stop"). 54 | """ 55 | # Handle clusters 56 | for cluster_arn in self.tag_api.get_resources("rds:cluster", aws_tags): 57 | cluster_id = cluster_arn.split(":")[-1] 58 | self._process_cluster(cluster_id, action) 59 | 60 | # Handle instances 61 | for db_arn in self.tag_api.get_resources("rds:db", aws_tags): 62 | db_id = db_arn.split(":")[-1] 63 | self._process_instance(db_id, action) 64 | 65 | def _process_cluster( 66 | self, cluster_id: str, action: Literal["start", "stop"] 67 | ) -> None: 68 | """Process an RDS cluster with the specified action. 69 | 70 | Args: 71 | cluster_id: RDS cluster identifier. 72 | action: Action to perform ("start" or "stop"). 73 | """ 74 | try: 75 | # Identifier must be cluster id, not resource id 76 | self.rds.describe_db_clusters(DBClusterIdentifier=cluster_id) 77 | 78 | if action == "start": 79 | self.rds.start_db_cluster(DBClusterIdentifier=cluster_id) 80 | print(f"Start RDS cluster {cluster_id}") 81 | else: 82 | self.rds.stop_db_cluster(DBClusterIdentifier=cluster_id) 83 | print(f"Stop RDS cluster {cluster_id}") 84 | 85 | except ClientError as exc: 86 | rds_exception("RDS cluster", cluster_id, exc) 87 | 88 | def _process_instance(self, db_id: str, action: Literal["start", "stop"]) -> None: 89 | """Process an RDS instance with the specified action. 90 | 91 | Args: 92 | db_id: RDS instance identifier. 93 | action: Action to perform ("start" or "stop"). 
94 | """ 95 | try: 96 | if action == "start": 97 | self.rds.start_db_instance(DBInstanceIdentifier=db_id) 98 | print(f"Start RDS instance {db_id}") 99 | else: 100 | self.rds.stop_db_instance(DBInstanceIdentifier=db_id) 101 | print(f"Stop RDS instance {db_id}") 102 | 103 | except ClientError as exc: 104 | rds_exception("RDS instance", db_id, exc) 105 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # Terraform variables file 2 | 3 | variable "schedule_expression" { 4 | description = "Define the aws event rule schedule expression, https://docs.aws.amazon.com/scheduler/latest/UserGuide/schedule-types.html" 5 | type = string 6 | default = "cron(0 22 ? * MON-FRI *)" 7 | } 8 | 9 | variable "schedule_expression_timezone" { 10 | description = "Timezone in which the scheduling expression is evaluated. Example : 'America/New_York', 'Europe/Paris'" 11 | type = string 12 | default = "UTC" 13 | } 14 | 15 | variable "scheduler_excluded_dates" { 16 | description = "List of specific dates to exclude from scheduling in MM-DD format (e.g., ['12-25', '01-01'])" 17 | type = list(string) 18 | default = [] 19 | 20 | validation { 21 | condition = alltrue([ 22 | for date in var.scheduler_excluded_dates : can(regex("^(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])$", date)) 23 | ]) 24 | error_message = "Excluded dates must be in MM-DD format (e.g., '12-25', '01-01')." 25 | } 26 | } 27 | 28 | variable "name" { 29 | description = "Define name to use for lambda function, cloudwatch event and iam role" 30 | type = string 31 | } 32 | 33 | variable "custom_iam_role_arn" { 34 | description = "Custom IAM role arn for the scheduling lambda" 35 | type = string 36 | default = null 37 | } 38 | 39 | variable "kms_key_arn" { 40 | description = "The ARN for the KMS encryption key. 
If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key." 41 | type = string 42 | default = null 43 | } 44 | 45 | variable "aws_regions" { 46 | description = "A list of one or more aws regions where the lambda will be apply, default use the current region" 47 | type = list(string) 48 | default = null 49 | } 50 | 51 | variable "runtime" { 52 | description = "The runtime environment for the Lambda function that you are uploading" 53 | type = string 54 | default = "python3.13" 55 | } 56 | 57 | variable "schedule_action" { 58 | description = "Define schedule action to apply on resources, accepted value are 'stop or 'start" 59 | type = string 60 | default = "stop" 61 | } 62 | 63 | variable "resources_tag" { 64 | # This variable has been renamed to "scheduler_tag" 65 | description = "DEPRECATED, use scheduler_tag variable instead" 66 | type = map(string) 67 | default = null 68 | } 69 | 70 | variable "scheduler_tag" { 71 | description = "Set the tag to use for identify aws resources to stop or start" 72 | type = map(string) 73 | 74 | default = { 75 | "key" = "tostop" 76 | "value" = "true" 77 | } 78 | } 79 | 80 | variable "autoscaling_schedule" { 81 | description = "Enable scheduling on autoscaling resources" 82 | type = bool 83 | default = false 84 | } 85 | 86 | variable "autoscaling_terminate_instances" { 87 | description = "Terminate instances when autoscaling group is scheduled to stop" 88 | type = bool 89 | default = false 90 | } 91 | 92 | variable "ec2_schedule" { 93 | description = "Enable scheduling on ec2 resources" 94 | type = bool 95 | default = false 96 | } 97 | 98 | variable "documentdb_schedule" { 99 | description = "Enable scheduling on documentdb resources" 100 | type = bool 101 | default = false 102 | } 103 | 104 | variable "ecs_schedule" { 105 | description = "Enable scheduling on ecs services" 106 | type = bool 107 | default = false 108 | } 109 | 110 | variable "rds_schedule" { 111 | description 
= "Enable scheduling on rds resources" 112 | type = bool 113 | default = false 114 | } 115 | 116 | variable "redshift_schedule" { 117 | description = "Enable scheduling on redshift resources" 118 | type = bool 119 | default = false 120 | } 121 | 122 | variable "cloudwatch_alarm_schedule" { 123 | description = "Enable scheduleding on cloudwatch alarm resources" 124 | type = bool 125 | default = false 126 | } 127 | 128 | variable "transfer_schedule" { 129 | description = "Enable scheduling on AWS Transfer (SFTP) servers" 130 | type = bool 131 | default = false 132 | } 133 | 134 | variable "tags" { 135 | description = "Custom tags on aws resources" 136 | type = map(any) 137 | default = null 138 | } 139 | -------------------------------------------------------------------------------- /tests/unit/test_instance_handler.py: -------------------------------------------------------------------------------- 1 | """Tests for the instance scheduler class.""" 2 | 3 | import boto3 4 | import pytest 5 | from moto import mock_aws 6 | 7 | from package.scheduler.cloudwatch_handler import CloudWatchAlarmScheduler 8 | from package.scheduler.instance_handler import InstanceScheduler 9 | 10 | from .utils import launch_asg, launch_ec2_instances 11 | 12 | 13 | @pytest.mark.parametrize( 14 | "aws_region, aws_tags, result_count", 15 | [ 16 | ( 17 | "eu-west-1", 18 | [{"Key": "tostop", "Values": ["true"]}], 19 | {"Code": 16, "Name": "running"}, 20 | ), 21 | ( 22 | "eu-west-2", 23 | [{"Key": "tostop", "Values": ["true"]}], 24 | {"Code": 16, "Name": "running"}, 25 | ), 26 | ( 27 | "eu-west-2", 28 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 29 | {"Code": 80, "Name": "stopped"}, 30 | ), 31 | ], 32 | ) 33 | @mock_aws 34 | def test_start_ec2_instance(aws_region, aws_tags, result_count): 35 | """Verify start ec2 instance function.""" 36 | client = boto3.client("ec2", region_name=aws_region) 37 | launch_ec2_instances(3, aws_region, "tostop", "true") 38 | for ec2 in 
client.describe_instances()["Reservations"][0]["Instances"]: 39 | client.stop_instances(InstanceIds=[ec2["InstanceId"]]) 40 | 41 | ec2_scheduler = InstanceScheduler(aws_region) 42 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 43 | ec2_scheduler.start(aws_tags) 44 | for ec2 in client.describe_instances()["Reservations"][0]["Instances"]: 45 | assert ec2["State"] == result_count 46 | 47 | 48 | @pytest.mark.parametrize( 49 | "aws_region, aws_tags, result_count", 50 | [ 51 | ( 52 | "eu-west-1", 53 | [{"Key": "tostop", "Values": ["true"]}], 54 | {"Code": 80, "Name": "stopped"}, 55 | ), 56 | ( 57 | "eu-west-2", 58 | [{"Key": "tostop", "Values": ["true"]}], 59 | {"Code": 80, "Name": "stopped"}, 60 | ), 61 | ( 62 | "eu-west-2", 63 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 64 | {"Code": 16, "Name": "running"}, 65 | ), 66 | ], 67 | ) 68 | @mock_aws 69 | def test_stop_ec2_instance(aws_region, aws_tags, result_count): 70 | """Verify stop ec2 instance function.""" 71 | client = boto3.client("ec2", region_name=aws_region) 72 | launch_ec2_instances(3, aws_region, "tostop", "true") 73 | 74 | ec2_scheduler = InstanceScheduler(aws_region) 75 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 76 | ec2_scheduler.stop(aws_tags) 77 | instances = client.describe_instances()["Reservations"][0]["Instances"] 78 | assert len(instances) == 3 79 | for instance in instances: 80 | assert instance["State"] == result_count 81 | 82 | 83 | @pytest.mark.parametrize( 84 | "aws_region, aws_tags, result_count", 85 | [ 86 | ( 87 | "eu-west-1", 88 | [{"Key": "tostop", "Values": ["true"]}], 89 | {"Code": 16, "Name": "running"}, 90 | ), 91 | ( 92 | "eu-west-2", 93 | [{"Key": "tostop", "Values": ["true"]}], 94 | {"Code": 16, "Name": "running"}, 95 | ), 96 | ], 97 | ) 98 | @mock_aws 99 | def test_do_not_stop_asg_instance(aws_region, aws_tags, result_count): 100 | client = boto3.client("ec2", region_name=aws_region) 101 | launch_asg(aws_region, "tostop", 
"true") 102 | 103 | ec2_scheduler = InstanceScheduler(aws_region) 104 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 105 | ec2_scheduler.stop(aws_tags) 106 | instances = client.describe_instances()["Reservations"][0]["Instances"] 107 | assert len(instances) == 3 108 | for instance in instances: 109 | assert instance["State"] == result_count 110 | 111 | 112 | @pytest.mark.parametrize( 113 | "aws_region, aws_tags, result_count", 114 | [ 115 | ( 116 | "eu-west-1", 117 | [{"Key": "tostop", "Values": ["true"]}], 118 | {"Code": 80, "Name": "stopped"}, 119 | ), 120 | ( 121 | "eu-west-2", 122 | [{"Key": "tostop", "Values": ["true"]}], 123 | {"Code": 80, "Name": "stopped"}, 124 | ), 125 | ], 126 | ) 127 | @mock_aws 128 | def test_do_not_start_asg_instance(aws_region, aws_tags, result_count): 129 | client = boto3.client("ec2", region_name=aws_region) 130 | launch_asg(aws_region, "tostop", "true") 131 | instances = client.describe_instances()["Reservations"][0]["Instances"] 132 | for instance in instances: 133 | client.stop_instances(InstanceIds=[instance["InstanceId"]]) 134 | 135 | ec2_scheduler = InstanceScheduler(aws_region) 136 | ec2_scheduler.cloudwatch_alarm = CloudWatchAlarmScheduler(aws_region) 137 | ec2_scheduler.start(aws_tags) 138 | instances = client.describe_instances()["Reservations"][0]["Instances"] 139 | assert len(instances) == 3 140 | for instance in instances: 141 | assert instance["State"] == result_count 142 | -------------------------------------------------------------------------------- /tests/unit/test_rds_handler.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import pytest 3 | from moto import mock_aws 4 | 5 | from package.scheduler.rds_handler import RdsScheduler 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "aws_region", 10 | [ 11 | "us-east-1", 12 | "us-west-2", 13 | "eu-west-1", 14 | ], 15 | ) 16 | @mock_aws 17 | def test_rds_scheduler_initialization(aws_region): 18 | 
"""Test that RdsScheduler initializes correctly with and without region.""" 19 | scheduler = RdsScheduler(region_name=aws_region) 20 | assert scheduler.rds is not None 21 | assert scheduler.tag_api is not None 22 | 23 | scheduler = RdsScheduler(region_name=aws_region) 24 | assert scheduler.rds is not None 25 | assert scheduler.tag_api is not None 26 | 27 | 28 | @pytest.mark.parametrize( 29 | "aws_region, aws_tags, result_count", 30 | [ 31 | ( 32 | "us-east-1", 33 | [{"Key": "tostop", "Values": ["true"]}], 34 | "stopped", 35 | ), 36 | ( 37 | "us-west-2", 38 | [{"Key": "tostop", "Values": ["true"]}], 39 | "stopped", 40 | ), 41 | ( 42 | "eu-west-1", 43 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 44 | "available", 45 | ), 46 | ], 47 | ) 48 | @mock_aws 49 | def test_stop_rds_instance(aws_region, aws_tags, result_count): 50 | """Test stopping RDS instances.""" 51 | rds = boto3.client("rds", region_name=aws_region) 52 | 53 | instance_id = f"test-db-instance-{aws_region}" 54 | rds.create_db_instance( 55 | DBInstanceIdentifier=instance_id, 56 | DBInstanceClass="db.t2.micro", 57 | Engine="mysql", 58 | MasterUsername="test", 59 | MasterUserPassword="test1234", 60 | AllocatedStorage=20, 61 | ) 62 | 63 | rds.add_tags_to_resource( 64 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:db:{instance_id}", 65 | Tags=[{"Key": "tostop", "Value": "true"}], 66 | ) 67 | 68 | scheduler = RdsScheduler(region_name=aws_region) 69 | scheduler.stop(aws_tags) 70 | 71 | response = rds.describe_db_instances(DBInstanceIdentifier=instance_id) 72 | assert response["DBInstances"][0]["DBInstanceStatus"] == result_count 73 | 74 | 75 | @pytest.mark.parametrize( 76 | "aws_region, aws_tags, result_count", 77 | [ 78 | ( 79 | "us-east-1", 80 | [{"Key": "tostop", "Values": ["true"]}], 81 | "available", 82 | ), 83 | ( 84 | "us-west-2", 85 | [{"Key": "tostop", "Values": ["true"]}], 86 | "available", 87 | ), 88 | ( 89 | "eu-west-1", 90 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 91 | 
"stopped", 92 | ), 93 | ], 94 | ) 95 | @mock_aws 96 | def test_start_rds_instance(aws_region, aws_tags, result_count): 97 | """Test starting RDS instances.""" 98 | rds = boto3.client("rds", region_name=aws_region) 99 | 100 | instance_id = f"test-db-instance-{aws_region}" 101 | rds.create_db_instance( 102 | DBInstanceIdentifier=instance_id, 103 | DBInstanceClass="db.t2.micro", 104 | Engine="mysql", 105 | MasterUsername="test", 106 | MasterUserPassword="test1234", 107 | AllocatedStorage=20, 108 | ) 109 | 110 | rds.add_tags_to_resource( 111 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:db:{instance_id}", 112 | Tags=[{"Key": "tostop", "Value": "true"}], 113 | ) 114 | 115 | rds.stop_db_instance(DBInstanceIdentifier=instance_id) 116 | 117 | scheduler = RdsScheduler(region_name=aws_region) 118 | scheduler.start(aws_tags) 119 | 120 | response = rds.describe_db_instances(DBInstanceIdentifier=instance_id) 121 | assert response["DBInstances"][0]["DBInstanceStatus"] == result_count 122 | 123 | 124 | @pytest.mark.parametrize( 125 | "aws_region, aws_tags, result_count", 126 | [ 127 | ( 128 | "us-east-1", 129 | [{"Key": "tostop", "Values": ["true"]}], 130 | "stopped", 131 | ), 132 | ( 133 | "us-west-2", 134 | [{"Key": "tostop", "Values": ["true"]}], 135 | "stopped", 136 | ), 137 | ( 138 | "eu-west-1", 139 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 140 | "available", 141 | ), 142 | ], 143 | ) 144 | @mock_aws 145 | def test_stop_rds_cluster(aws_region, aws_tags, result_count): 146 | """Test stopping RDS Aurora clusters.""" 147 | rds = boto3.client("rds", region_name=aws_region) 148 | 149 | cluster_id = f"test-cluster-{aws_region}" 150 | rds.create_db_cluster( 151 | DBClusterIdentifier=cluster_id, 152 | Engine="aurora-mysql", 153 | MasterUsername="test", 154 | MasterUserPassword="test1234", 155 | ) 156 | 157 | rds.add_tags_to_resource( 158 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:cluster:{cluster_id}", 159 | Tags=[{"Key": "tostop", "Value": "true"}], 
160 | ) 161 | 162 | scheduler = RdsScheduler(region_name=aws_region) 163 | scheduler.stop(aws_tags) 164 | 165 | response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id) 166 | assert response["DBClusters"][0]["Status"] == result_count 167 | 168 | 169 | @pytest.mark.parametrize( 170 | "aws_region, aws_tags, result_count", 171 | [ 172 | ( 173 | "us-east-1", 174 | [{"Key": "tostop", "Values": ["true"]}], 175 | "available", 176 | ), 177 | ( 178 | "us-west-2", 179 | [{"Key": "tostop", "Values": ["true"]}], 180 | "available", 181 | ), 182 | ( 183 | "eu-west-1", 184 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 185 | "stopped", 186 | ), 187 | ], 188 | ) 189 | @mock_aws 190 | def test_start_rds_cluster(aws_region, aws_tags, result_count): 191 | """Test starting RDS Aurora clusters.""" 192 | rds = boto3.client("rds", region_name=aws_region) 193 | 194 | cluster_id = f"test-cluster-{aws_region}" 195 | rds.create_db_cluster( 196 | DBClusterIdentifier=cluster_id, 197 | Engine="aurora-mysql", 198 | MasterUsername="test", 199 | MasterUserPassword="test1234", 200 | ) 201 | 202 | rds.add_tags_to_resource( 203 | ResourceName=f"arn:aws:rds:{aws_region}:123456789012:cluster:{cluster_id}", 204 | Tags=[{"Key": "tostop", "Value": "true"}], 205 | ) 206 | 207 | rds.stop_db_cluster(DBClusterIdentifier=cluster_id) 208 | 209 | scheduler = RdsScheduler(region_name=aws_region) 210 | scheduler.start(aws_tags) 211 | 212 | response = rds.describe_db_clusters(DBClusterIdentifier=cluster_id) 213 | assert response["DBClusters"][0]["Status"] == result_count 214 | 215 | 216 | @pytest.mark.parametrize( 217 | "aws_region, aws_tags, result_count", 218 | [ 219 | ( 220 | "us-east-1", 221 | [{"Key": "tostop", "Values": ["true"]}], 222 | "available", 223 | ), 224 | ( 225 | "us-west-2", 226 | [{"Key": "tostop", "Values": ["true"]}], 227 | "available", 228 | ), 229 | ( 230 | "eu-west-1", 231 | [{"Key": "badtagkey", "Values": ["badtagvalue"]}], 232 | "stopped", 233 | ), 234 | ], 235 | ) 236 | 
@mock_aws 237 | def test_handle_nonexistent_resources(aws_region, aws_tags, result_count): 238 | """Test handling of nonexistent RDS resources.""" 239 | scheduler = RdsScheduler(region_name=aws_region) 240 | scheduler.stop(aws_tags) 241 | scheduler.start(aws_tags) 242 | --------------------------------------------------------------------------------