├── wrapper-scripts
│   ├── terraform_runner
│   │   ├── __init__.py
│   │   ├── CustomLogger.py
│   │   ├── CommandManager.py
│   │   ├── WorkspaceManager.py
│   │   ├── test_WorkspaceManager.py
│   │   ├── override_manager.py
│   │   ├── artifact_manager.py
│   │   ├── test_CommandManager.py
│   │   ├── __main__.py
│   │   ├── test_artifact_manager.py
│   │   └── test_override_manager.py
│   ├── setup.py
│   └── Readme.md
├── lambda-functions
│   ├── state_machine_lambdas
│   │   ├── core
│   │   │   ├── __init__.py
│   │   │   ├── exception.py
│   │   │   ├── configuration.py
│   │   │   ├── cli.py
│   │   │   ├── ssm_facade.py
│   │   │   ├── test_cli.py
│   │   │   ├── test_ssm_facade.py
│   │   │   └── service_catalog_facade.py
│   │   ├── notify
│   │   │   ├── __init__.py
│   │   │   ├── errors.py
│   │   │   ├── outputs.py
│   │   │   ├── test_errors.py
│   │   │   └── test_outputs.py
│   │   ├── requirements.txt
│   │   ├── notify_terminate_result.py
│   │   ├── notify_update_result.py
│   │   ├── poll_command_invocation.py
│   │   ├── notify_provision_result.py
│   │   ├── select_worker_host.py
│   │   ├── send_destroy_command.py
│   │   ├── test_poll_command_invocation.py
│   │   ├── get_state_file_outputs.py
│   │   ├── send_apply_command.py
│   │   ├── test_notify_terminate_result.py
│   │   ├── test_select_worker_host.py
│   │   ├── test_send_destroy_command.py
│   │   └── test_notify_update_result.py
│   ├── provisioning-operations-handler
│   │   ├── requirements.txt
│   │   └── provisioning_operations_handler.py
│   └── terraform_open_source_parameter_parser
│       ├── artifact.go
│       ├── test-artifacts
│       │   ├── mock-artifact-with-subdirectories.tar.gz
│       │   └── mock-artifact-with-dot-slash-root-module-prefix-files.tar.gz
│       ├── Makefile
│       ├── parameter.go
│       ├── exceptions.go
│       ├── main.go
│       ├── archive_unzipper_test.go
│       ├── s3_downloader.go
│       ├── s3_downloader_test.go
│       ├── archive_unzipper.go
│       ├── config_fetcher.go
│       ├── config_fetcher_test.go
│       ├── validator.go
│       ├── parser.go
│       └── validator_test.go
├── NOTICE
├── sample-provisioning-artifacts
│   ├── s3bucket.tar.gz
│   ├── s3website-module.tar.gz
│   └── Readme.md
├── CODE_OF_CONDUCT.md
├── cfn-templates
│   ├── Bootstrap.yaml
│   └── TerraformProvisioningAccount.yaml
├── CONTRIBUTING.md
├── bin
│   └── bash
│       ├── deploy-bootstrap-bucket-stack.sh
│       ├── deploy-tre.sh
│       └── replace-ec2-instances.py
├── IMPORTANT_UPDATES.md
├── .gitignore
└── LICENSE

/wrapper-scripts/terraform_runner/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/lambda-functions/state_machine_lambdas/core/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/lambda-functions/state_machine_lambdas/notify/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | 
--------------------------------------------------------------------------------
/lambda-functions/state_machine_lambdas/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3>=1.26.105
2 | botocore>=1.29.105
3 | 
--------------------------------------------------------------------------------
/lambda-functions/provisioning-operations-handler/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3>=1.26.105
2 | botocore>=1.29.105
3 | 
--------------------------------------------------------------------------------
/wrapper-scripts/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | 
3 | setup(
4 |     name='terraform_runner',
5 |     version='0.1',
6 |     packages=['terraform_runner'],
7 | )
--------------------------------------------------------------------------------
/sample-provisioning-artifacts/s3bucket.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/service-catalog-engine-for-terraform-os/HEAD/sample-provisioning-artifacts/s3bucket.tar.gz
--------------------------------------------------------------------------------
/sample-provisioning-artifacts/s3website-module.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/service-catalog-engine-for-terraform-os/HEAD/sample-provisioning-artifacts/s3website-module.tar.gz
--------------------------------------------------------------------------------
/lambda-functions/terraform_open_source_parameter_parser/artifact.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | // Artifact represents the location of a Provisioning Artifact
4 | type Artifact struct {
5 | 	Path string `json:"path"`
6 | 	Type string `json:"type"`
7 | }
8 | 
--------------------------------------------------------------------------------
/lambda-functions/terraform_open_source_parameter_parser/test-artifacts/mock-artifact-with-subdirectories.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/service-catalog-engine-for-terraform-os/HEAD/lambda-functions/terraform_open_source_parameter_parser/test-artifacts/mock-artifact-with-subdirectories.tar.gz
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 | 
--------------------------------------------------------------------------------
/lambda-functions/terraform_open_source_parameter_parser/Makefile:
--------------------------------------------------------------------------------
1 | build-ParameterParser:
2 | 	GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o bootstrap
3 | 	cp ./bootstrap "$(ARTIFACTS_DIR)/."
4 | build-ExternalParameterParser:
5 | 	GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o bootstrap
6 | 	cp ./bootstrap "$(ARTIFACTS_DIR)/."
7 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/test-artifacts/mock-artifact-with-dot-slash-root-module-prefix-files.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/service-catalog-engine-for-terraform-os/HEAD/lambda-functions/terraform_open_source_parameter_parser/test-artifacts/mock-artifact-with-dot-slash-root-module-prefix-files.tar.gz -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/parameter.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Parameter represents a single parsed variable from a Provisioning Artifact 4 | type Parameter struct { 5 | Key string `json:"key"` 6 | DefaultValue string `json:"defaultValue"` 7 | Type string `json:"type"` 8 | Description string `json:"description"` 9 | IsNoEcho bool `json:"isNoEcho"` 10 | } 11 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/exceptions.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | type ParserInvalidParameterException struct { 4 | Message string 5 | } 6 | 7 | type ParserAccessDeniedException struct { 8 | Message string 9 | } 10 | 11 | func (e ParserInvalidParameterException) Error() string { 12 | return e.Message 13 | } 14 | 15 | func (e ParserAccessDeniedException) Error() string { 16 | return e.Message 17 | } 18 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/exception.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import traceback 3 | 4 | from botocore.exceptions import ClientError 5 | 6 | log = logging.getLogger() 7 | log.setLevel(logging.ERROR) 8 | 9 | 10 | # Boto exception keys 11 | RESPONSE_METADATA_KEY = "ResponseMetadata" 12 | REQUEST_ID_KEY = "RequestId" 13 | 14 | def log_exception(exception: Exception): 15 | """Performs standard logging for exceptions across all lambdas""" 16 | if isinstance(exception, ClientError): 17 | log.error(f'Failed to execute API: {exception.operation_name} & {exception.response}') 18 | log.error(f'Failed with error: {exception} & stack trace: {traceback.format_exc()}') 19 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from botocore.config import Config 4 | 5 | # Constants 6 | USER_AGENT: str = 'TerraformReferenceEngine-1.0' 7 | 8 | class Configuration: 9 | 10 | def __init__(self): 11 | self.__aws_region: str = os.environ['AWS_REGION'] 12 | self.__boto_config = Config( 13 | region_name=self.__aws_region, 14 | user_agent=USER_AGENT, 15 | retries={ 16 | 'max_attempts': 3, 17 | 'mode': 'standard' 18 | } 19 | ) 20 | 21 | def get_region(self) -> str: 22 | return self.__aws_region 23 | 24 | def get_boto_config(self): 25 | return self.__boto_config 26 | -------------------------------------------------------------------------------- /wrapper-scripts/Readme.md: -------------------------------------------------------------------------------- 1 | ## Building the Instance Script 2 | 3 | 
This directory contains the Python module that is downloaded, installed, and executed on the EC2 instances. It is a wrapper around the Terraform CLI.
4 | 
5 | To build the module from this directory:
6 | 
7 | * pip3 install wheel (one-time setup)
8 | * python3 setup.py bdist_wheel
9 | 
10 | Then sync it to this S3 bucket:
11 | 
12 | * aws s3 sync dist s3://terraform-engine-bootstrap-<account-id>-<region>/dist
13 | 
14 | Every time a new instance starts, it will download the terraform_runner module from this bucket and install it.
15 | 
16 | 
17 | ## Unit Tests
18 | 
19 | To run the unit tests, execute this command from this directory:
20 | 
21 | * python3 -m unittest
22 | 
--------------------------------------------------------------------------------
/wrapper-scripts/terraform_runner/CustomLogger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | 
4 | 
5 | class CustomLogger:
6 |     logging.basicConfig(stream=sys.stdout, level=logging.INFO)
7 |     __log = logging.getLogger()
8 | 
9 |     def __init__(self, prefix: str):
10 |         """
11 |         Parameters:
12 | 
13 |         prefix: str
14 |             A prefix to include in every message logged
15 |         """
16 |         self.__prefix = prefix
17 | 
18 |     def __format(self, message: str):
19 |         return f'{self.__prefix} {message}'
20 | 
21 |     def info(self, message: str):
22 |         """Logs an info entry including the prefix and message"""
23 |         self.__log.info(self.__format(message))
24 | 
25 |     def error(self, message: str):
26 |         """Logs an error entry including the prefix and message"""
27 |         self.__log.error(self.__format(message))
28 | 
--------------------------------------------------------------------------------
/lambda-functions/state_machine_lambdas/notify/errors.py:
--------------------------------------------------------------------------------
1 | # Constants
2 | FAILURE_REASON_LENGTH_LIMIT: int = 2048
3 | LAMBDA_TIMEOUT_ERROR: str = 'States.Timeout'
4 | LAMBDA_TIMEOUT_FAILURE_REASON: str = 'A lambda function invoked by the state machine has timed out'
5 | 
6 | # Input event keys
7 | ERROR_KEY: str = 'error'
8 | ERROR_MESSAGE_KEY: str = 'errorMessage'
9 | 
10 | def workflow_has_error(event: dict) -> bool:
11 |     """Determines if an error has occurred in the workflow based on a lambda input event"""
12 |     return ERROR_MESSAGE_KEY in event and ERROR_KEY in event
13 | 
14 | def get_failure_reason(event: dict) -> str:
15 |     """Gets the SC failure reason based on error and error message in a lambda input event"""
16 |     error_message = event[ERROR_MESSAGE_KEY]
17 |     if event[ERROR_KEY] == LAMBDA_TIMEOUT_ERROR:
18 |         return LAMBDA_TIMEOUT_FAILURE_REASON
19 |     elif len(error_message) <= FAILURE_REASON_LENGTH_LIMIT:
20 |         return error_message
21 |     else:
22 |         return error_message[:FAILURE_REASON_LENGTH_LIMIT - 3] + '...'
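
The truncation rule above is easy to check interactively. A minimal sketch, assuming the notify package is importable and using a made-up event; it mirrors the event shapes exercised in test_errors.py further below:

# Made-up event; 'error' and 'errorMessage' are the keys the helpers expect.
from notify.errors import workflow_has_error, get_failure_reason

event = {'error': 'RuntimeError', 'errorMessage': 'x' * 5000}

assert workflow_has_error(event)
reason = get_failure_reason(event)
# Messages over 2048 characters are cut to 2045 characters plus '...'.
assert len(reason) == 2048 and reason.endswith('...')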
23 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/aws/aws-lambda-go/lambda" 5 | ) 6 | 7 | type TerraformOpenSourceParameterParserInput struct { 8 | Artifact Artifact `json:"artifact"` 9 | LaunchRoleArn string `json:"launchRoleArn"` 10 | } 11 | 12 | type TerraformOpenSourceParameterParserResponse struct { 13 | Parameters []*Parameter `json:"parameters"` 14 | } 15 | 16 | func main() { 17 | lambda.Start(HandleRequest) 18 | } 19 | 20 | func HandleRequest(event TerraformOpenSourceParameterParserInput) (TerraformOpenSourceParameterParserResponse, error) { 21 | if err := ValidateInput(event); err != nil { 22 | return TerraformOpenSourceParameterParserResponse{}, err 23 | } 24 | 25 | configFetcher, configFetcherErr := NewConfigFetcher(event.LaunchRoleArn) 26 | if configFetcherErr != nil { 27 | return TerraformOpenSourceParameterParserResponse{}, configFetcherErr 28 | } 29 | 30 | fileMap, fileMapErr := configFetcher.fetch(event) 31 | if fileMapErr != nil { 32 | return TerraformOpenSourceParameterParserResponse{}, fileMapErr 33 | } 34 | 35 | parameters, parseParametersErr := ParseParametersFromConfiguration(fileMap) 36 | return TerraformOpenSourceParameterParserResponse{Parameters: parameters}, parseParametersErr 37 | } 38 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/CommandManager.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | from terraform_runner.CustomLogger import CustomLogger 4 | 5 | SUCCESS_RETURN_CODE = 0 6 | 7 | 8 | class CommandManager: 9 | 10 | def __init__(self, log: CustomLogger): 11 | """ 12 | Parameters: 13 | 14 | log: CustomLogger 15 | The object used to write logs 16 | """ 17 | self.__log = log 18 | 19 | def run_command(self, command: list, log_stdout: bool = False): 20 | """ 21 | Parameters: 22 | 23 | command: list of str 24 | The command and arguments to run 25 | log_stdout: bool 26 | When True, logs the stdout of the command given a successful run. Default is False. 
27 | """ 28 | self.__log.info(f'Runnning command: {command}') 29 | 30 | result = None 31 | try: 32 | result = subprocess.run(command, check=False, text=True, capture_output=True) 33 | except Exception as e: 34 | raise RuntimeError(f'subprocess.run raise and exception while running command {command}: {e}') 35 | 36 | if result.returncode != SUCCESS_RETURN_CODE: 37 | raise RuntimeError(result.stderr) 38 | 39 | if log_stdout: 40 | self.__log.info(result.stdout) 41 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify/outputs.py: -------------------------------------------------------------------------------- 1 | # Constants 2 | OUTPUTS_KEY = 'outputs' 3 | OUTPUT_KEY_KEY = 'key' 4 | OUTPUT_VALUE_KEY = 'value' 5 | OUTPUT_DESCRIPTION_KEY = 'description' 6 | 7 | SC_OUTPUT_KEY_KEY = 'OutputKey' 8 | SC_OUTPUT_VALUE_KEY = 'OutputValue' 9 | SC_OUTPUT_DESCRIPTION_KEY = 'Description' 10 | 11 | def convert_state_file_outputs_to_service_catalog_outputs(event: dict) -> list: 12 | """Converts the outputs in a lambda input event to the format for Service Catalog outputs""" 13 | if OUTPUTS_KEY not in event: 14 | return [] 15 | 16 | service_catalog_outputs: list = [] 17 | 18 | for state_file_output in event[OUTPUTS_KEY]: 19 | if OUTPUT_DESCRIPTION_KEY in state_file_output and state_file_output[OUTPUT_DESCRIPTION_KEY] is not None: 20 | service_catalog_outputs.append({ 21 | SC_OUTPUT_KEY_KEY: state_file_output[OUTPUT_KEY_KEY], 22 | SC_OUTPUT_VALUE_KEY: state_file_output[OUTPUT_VALUE_KEY], 23 | SC_OUTPUT_DESCRIPTION_KEY: state_file_output[OUTPUT_DESCRIPTION_KEY] 24 | }) 25 | else: 26 | service_catalog_outputs.append({ 27 | SC_OUTPUT_KEY_KEY: state_file_output[OUTPUT_KEY_KEY], 28 | SC_OUTPUT_VALUE_KEY: state_file_output[OUTPUT_VALUE_KEY] 29 | }) 30 | 31 | return service_catalog_outputs 32 | -------------------------------------------------------------------------------- /cfn-templates/Bootstrap.yaml: -------------------------------------------------------------------------------- 1 | Resources: 2 | 3 | # Bucket used for code assets and other objects required to bootstrap the main Sam template 4 | TerraformBootstrapBucket: 5 | Type: AWS::S3::Bucket 6 | Properties: 7 | BucketName: !Sub 'terraform-engine-bootstrap-${AWS::AccountId}-${AWS::Region}' 8 | VersioningConfiguration: 9 | Status: Enabled 10 | BucketEncryption: 11 | ServerSideEncryptionConfiguration: 12 | - ServerSideEncryptionByDefault: 13 | SSEAlgorithm: AES256 14 | 15 | TerraformBootstrapBucketPolicy: 16 | Type: AWS::S3::BucketPolicy 17 | Properties: 18 | Bucket: !Ref TerraformBootstrapBucket 19 | PolicyDocument: 20 | Statement: 21 | - Action: s3:* 22 | Effect: Deny 23 | Principal: "*" 24 | Resource: 25 | - !Sub ${TerraformBootstrapBucket.Arn}/* 26 | - !Sub ${TerraformBootstrapBucket.Arn} 27 | Condition: 28 | Bool: {"aws:SecureTransport": false} 29 | 30 | Outputs: 31 | BootstrapBucketArn: 32 | Description: Arn of the bootstrap bucket 33 | Value: !GetAtt TerraformBootstrapBucket.Arn 34 | Export: 35 | Name: TerraformEngineBootstrapBucketArn 36 | BootstrapBucketName: 37 | Description: Name of the bootstrap bucket 38 | Value: !Ref TerraformBootstrapBucket 39 | Export: 40 | Name: TerraformEngineBootstrapBucketName 41 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/archive_unzipper_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 
import ( 4 | "os" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestUnzipArchiveHappy(t *testing.T) { 10 | // setup 11 | const MockArtifactPath = "./test-artifacts/mock-artifact-with-subdirectories.tar.gz" 12 | expectedFileMap := make(map[string]string) 13 | expectedFileMap["main.tf"] = "main-contents" 14 | 15 | zipFile, err := os.ReadFile(MockArtifactPath) 16 | if err != nil { 17 | t.Errorf("Error opening test artifact %s", MockArtifactPath) 18 | } 19 | 20 | // act 21 | fileMap, err := UnzipArchive(zipFile) 22 | 23 | // assert 24 | if !reflect.DeepEqual(fileMap, expectedFileMap) { 25 | t.Errorf("fileMap %s is not as expected: %s", fileMap, expectedFileMap) 26 | } 27 | } 28 | 29 | func TestUnzipArchiveHappyWithDotSlashRootModuleFiles(t *testing.T) { 30 | // setup 31 | const MockArtifactPath = "./test-artifacts/mock-artifact-with-dot-slash-root-module-prefix-files.tar.gz" 32 | expectedFileMap := make(map[string]string) 33 | expectedFileMap["./main.tf"] = "main-contents" 34 | 35 | zipFile, err := os.ReadFile(MockArtifactPath) 36 | if err != nil { 37 | t.Errorf("Error opening test artifact %s", MockArtifactPath) 38 | } 39 | 40 | // act 41 | fileMap, err := UnzipArchive(zipFile) 42 | 43 | // assert 44 | if !reflect.DeepEqual(fileMap, expectedFileMap) { 45 | t.Errorf("fileMap %s is not as expected: %s", fileMap, expectedFileMap) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/WorkspaceManager.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from terraform_runner.CommandManager import CommandManager 4 | from terraform_runner.CustomLogger import CustomLogger 5 | 6 | 7 | class WorkspaceManager: 8 | 9 | def __init__(self, log: CustomLogger, provisioned_product_descriptor: str): 10 | """ 11 | Parameters: 12 | 13 | log: CustomLogger 14 | The object used to write logs 15 | provisioned_product_descriptor: str 16 | The descriptor that uniquely identifies a provisioned product, used for naming the workspace directory 17 | """ 18 | self.__log = log 19 | self.__command_manager = CommandManager(log) 20 | home_directory = os.path.expanduser('~') 21 | self.__workspace_directory = f'{home_directory}/workspaces/{provisioned_product_descriptor}' 22 | 23 | def get_workspace_directory(self): 24 | return self.__workspace_directory 25 | 26 | def remove_workspace_directory(self): 27 | self.__command_manager.run_command(['rm', '-f', '-r', self.__workspace_directory]) 28 | 29 | def setup_workspace_directory(self): 30 | # Remove any previous workspace directory for this provisioned product in case there are old files left over from the last run 31 | self.remove_workspace_directory() 32 | os.makedirs(self.__workspace_directory) 33 | self.__log.info(f'Workspace directory set up: {self.__workspace_directory}') 34 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify_terminate_result.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from core.configuration import Configuration 4 | from core.exception import log_exception 5 | from core.service_catalog_facade import ServiceCatalogFacade 6 | from notify.errors import workflow_has_error, get_failure_reason 7 | 8 | # Globals 9 | log = logging.getLogger() 10 | log.setLevel(logging.INFO) 11 | app_config = None 12 | service_catalog_facade = None 13 | 14 | # Input keys 15 | WORKFLOW_TOKEN_KEY = 'token' 16 
| RECORD_ID_KEY = 'recordId' 17 | 18 | 19 | def __notify_succeeded(event): 20 | service_catalog_facade.notify_terminate_succeeded( 21 | workflow_token = event[WORKFLOW_TOKEN_KEY], 22 | record_id = event[RECORD_ID_KEY], 23 | ) 24 | 25 | def __notify_failed(event): 26 | service_catalog_facade.notify_terminate_failed( 27 | workflow_token = event[WORKFLOW_TOKEN_KEY], 28 | record_id = event[RECORD_ID_KEY], 29 | failure_reason = get_failure_reason(event) 30 | ) 31 | 32 | def notify(event, context): 33 | log.info(f'Handling event {event}') 34 | 35 | global app_config 36 | global service_catalog_facade 37 | 38 | try: 39 | if not app_config: 40 | app_config = Configuration() 41 | if not service_catalog_facade: 42 | service_catalog_facade = ServiceCatalogFacade(app_config) 43 | 44 | if workflow_has_error(event): 45 | __notify_failed(event) 46 | else: 47 | __notify_succeeded(event) 48 | 49 | except Exception as exception: 50 | log_exception(exception) 51 | raise exception 52 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/s3_downloader.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "log" 6 | 7 | "github.com/aws/aws-sdk-go/aws" 8 | "github.com/aws/aws-sdk-go/aws/credentials" 9 | "github.com/aws/aws-sdk-go/aws/session" 10 | "github.com/aws/aws-sdk-go/service/s3" 11 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 12 | ) 13 | 14 | type Downloader interface { 15 | Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (n int64, err error) 16 | } 17 | 18 | type S3Downloader struct { 19 | downloader Downloader 20 | } 21 | 22 | func NewS3Downloader(creds *credentials.Credentials) (*S3Downloader, error) { 23 | // create a new AWS session with provided credentials 24 | sess, err := session.NewSession(&aws.Config{Credentials: creds}) 25 | if err != nil { 26 | return &S3Downloader{}, err 27 | } 28 | 29 | // create a new downloader using above session 30 | return &S3Downloader{downloader: s3manager.NewDownloader(sess)}, nil 31 | } 32 | 33 | // Downloads artifact from specified S3 bucket and objectPath in a byte array 34 | func (client *S3Downloader) download(bucket string, objectPath string) ([]byte, error) { 35 | // open byte array as download target 36 | buff := &aws.WriteAtBuffer{} 37 | 38 | // download to buffer 39 | log.Printf("Downloading %s from bucket %s", objectPath, bucket) 40 | numBytes, err := client.downloader.Download(buff, 41 | &s3.GetObjectInput{ 42 | Bucket: aws.String(bucket), 43 | Key: aws.String(objectPath), 44 | }, 45 | ) 46 | if err != nil { 47 | return []byte{}, err 48 | } 49 | 50 | log.Printf("Downloaded %d bytes", numBytes) 51 | return buff.Bytes(), nil 52 | } 53 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify/test_errors.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | 3 | from notify.errors import workflow_has_error, get_failure_reason 4 | 5 | class TestErrors(TestCase): 6 | 7 | def test_workflow_has_error_with_no_error_present(self): 8 | self.assertFalse(workflow_has_error({'key': 'value'})) 9 | 10 | def test_workflow_has_error_with_error_present(self): 11 | event = {'error': 'SomeError', 'errorMessage': 'What happened?', 'key': 'value'} 12 | self.assertTrue(workflow_has_error(event)) 13 | 14 | def 
test_get_failure_reason_happy_path(self): 15 | # Arrange 16 | event = {'error': 'SomeError', 'errorMessage': 'What happened?', 'key': 'value'} 17 | 18 | # Act 19 | actual = get_failure_reason(event) 20 | 21 | # Assert 22 | self.assertEqual(event['errorMessage'], actual) 23 | 24 | def test_get_failure_reason_lambda_timeout(self): 25 | # Arrange 26 | event = {'error': 'States.Timeout', 'errorMessage': 'What happened?', 'key': 'value'} 27 | expected_failure_reason = 'A lambda function invoked by the state machine has timed out' 28 | 29 | # Act 30 | actual = get_failure_reason(event) 31 | 32 | # Assert 33 | self.assertEqual(expected_failure_reason, actual) 34 | 35 | def test_get_failure_reason_with_very_long_error_message(self): 36 | # Arrange 37 | very_long_error_message = 'x' * 5000 38 | event = {'error': 'RuntimeError', 'errorMessage': very_long_error_message, 'key': 'value'} 39 | expected_failure_reason = 'x' * 2045 + '...' 40 | 41 | # Act 42 | actual = get_failure_reason(event) 43 | 44 | # Assert 45 | self.assertEqual(expected_failure_reason, actual) 46 | 47 | 48 | if __name__ == '__main__': 49 | main() 50 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify_update_result.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from core.configuration import Configuration 4 | from core.exception import log_exception 5 | from core.service_catalog_facade import ServiceCatalogFacade 6 | from notify.errors import workflow_has_error, get_failure_reason 7 | from notify.outputs import convert_state_file_outputs_to_service_catalog_outputs 8 | 9 | # Globals 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | app_config = None 13 | service_catalog_facade = None 14 | 15 | # Input keys 16 | WORKFLOW_TOKEN_KEY = 'token' 17 | RECORD_ID_KEY = 'recordId' 18 | 19 | 20 | def __notify_succeeded(event): 21 | service_catalog_facade.notify_update_succeeded( 22 | workflow_token = event[WORKFLOW_TOKEN_KEY], 23 | record_id = event[RECORD_ID_KEY], 24 | outputs = convert_state_file_outputs_to_service_catalog_outputs(event) 25 | ) 26 | 27 | 28 | def __notify_failed(event): 29 | service_catalog_facade.notify_update_failed( 30 | workflow_token = event[WORKFLOW_TOKEN_KEY], 31 | record_id = event[RECORD_ID_KEY], 32 | failure_reason = get_failure_reason(event), 33 | outputs = convert_state_file_outputs_to_service_catalog_outputs(event) 34 | ) 35 | 36 | 37 | def notify(event, context): 38 | log.info(f'Handling event {event}') 39 | 40 | global app_config 41 | global service_catalog_facade 42 | 43 | try: 44 | if not app_config: 45 | app_config = Configuration() 46 | if not service_catalog_facade: 47 | service_catalog_facade = ServiceCatalogFacade(app_config) 48 | 49 | if workflow_has_error(event): 50 | __notify_failed(event) 51 | else: 52 | __notify_succeeded(event) 53 | 54 | except Exception as exception: 55 | log_exception(exception) 56 | raise exception 57 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/s3_downloader_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "reflect" 7 | "testing" 8 | 9 | "github.com/aws/aws-sdk-go/service/s3" 10 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 11 | "github.com/stretchr/testify/mock" 12 | ) 13 | 14 | const TestString = "hello world" 15 | const 
TestBucketHappy = "testBucketHappy" 16 | const TestBucketError = "testBucketError" 17 | const TestObjectPath = "testObjectPath" 18 | const S3ClientErrorMessage = "s3 client error" 19 | 20 | type MockDownloader struct { 21 | mock.Mock 22 | } 23 | 24 | func (m *MockDownloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (n int64, err error) { 25 | num, _ := w.WriteAt([]byte(TestString), 0) 26 | 27 | if *input.Bucket == TestBucketError { 28 | return 0, errors.New(S3ClientErrorMessage) 29 | } 30 | 31 | return int64(num), nil 32 | } 33 | 34 | func TestS3DownloaderDownloadHappy(t *testing.T) { 35 | // setup 36 | downloader := new(MockDownloader) 37 | s3Downloader := &S3Downloader{ 38 | downloader: downloader, 39 | } 40 | expectedResult := []byte(TestString) 41 | 42 | // act 43 | actualResult, err := s3Downloader.download(TestBucketHappy, TestObjectPath) 44 | 45 | // assert 46 | if err != nil { 47 | t.Errorf("Unexpected error occurred") 48 | } 49 | 50 | if !reflect.DeepEqual(actualResult, expectedResult) { 51 | t.Errorf("Returned byte array is not the same as expected") 52 | } 53 | } 54 | 55 | func TestS3DownloaderDownloadClientError(t *testing.T) { 56 | // setup 57 | downloader := new(MockDownloader) 58 | s3Downloader := &S3Downloader{ 59 | downloader: downloader, 60 | } 61 | 62 | // act 63 | _, err := s3Downloader.download(TestBucketError, TestObjectPath) 64 | 65 | // assert 66 | if err.Error() != S3ClientErrorMessage { 67 | t.Errorf("Error is not the same as expected") 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/test_WorkspaceManager.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch 3 | 4 | from terraform_runner.CommandManager import CommandManager 5 | from terraform_runner.WorkspaceManager import WorkspaceManager 6 | 7 | 8 | class TestWorkspaceManager(unittest.TestCase): 9 | 10 | @patch('terraform_runner.WorkspaceManager.CustomLogger') 11 | @patch('terraform_runner.WorkspaceManager.os') 12 | @patch.object(CommandManager, 'run_command') 13 | def test_setup_workspace_directory_happy_path(self, mock_run_command, mock_os, mock_logger): 14 | # arrange 15 | mock_os.path.expanduser.return_value = 'home-dir' 16 | provisioned_product_descriptor = 'pp-descriptor' 17 | 18 | # act 19 | workspace_manager = WorkspaceManager(mock_logger, provisioned_product_descriptor) 20 | workspace_manager.setup_workspace_directory() 21 | 22 | # assert 23 | mock_run_command.assert_called_once_with(['rm', '-f', '-r', workspace_manager.get_workspace_directory()]) 24 | mock_os.makedirs.assert_called_once_with(workspace_manager.get_workspace_directory()) 25 | 26 | @patch('terraform_runner.WorkspaceManager.CustomLogger') 27 | @patch('terraform_runner.WorkspaceManager.os') 28 | @patch.object(CommandManager, 'run_command') 29 | def test_remove_workspace_directory_happy_path(self, mock_run_command, mock_os, mock_logger): 30 | # arrange 31 | mock_os.path.expanduser.return_value = 'home-dir' 32 | provisioned_product_descriptor = 'pp-descriptor' 33 | 34 | # act 35 | workspace_manager = WorkspaceManager(mock_logger, provisioned_product_descriptor) 36 | workspace_manager.remove_workspace_directory() 37 | 38 | # assert 39 | mock_run_command.assert_called_once_with(['rm', '-f', '-r', workspace_manager.get_workspace_directory()]) 40 | 41 | 42 | if __name__ == '__main__': 43 | unittest.main() 44 | 
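
Taken together, the runner classes above compose in a straightforward way. A minimal sketch of that composition, using a made-up descriptor; in the real engine the descriptor is derived from the provisioned product, and the module must be installed as described in the wrapper-scripts Readme:

# Illustrative composition of the classes shown above; all values are made up.
from terraform_runner.CustomLogger import CustomLogger
from terraform_runner.WorkspaceManager import WorkspaceManager

log = CustomLogger('[pp-example]')               # prefix prepended to every log line
workspace = WorkspaceManager(log, 'pp-example')

workspace.setup_workspace_directory()            # removes any stale directory, then recreates it
print(workspace.get_workspace_directory())       # ~/workspaces/pp-example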
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to the AWS Service Catalog engine for Terraform open source. 4 | At this time, we are **not** accepting contributions. If contributions are accepted in the future, the AWS Service Catalog engine for Terraform open source is released under the Apache license and any code submitted will be released under that license. 5 | 6 | Please read through this document before submitting any issues to ensure we have all the necessary 7 | information to effectively respond to your bug report. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Code of Conduct 24 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 25 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 26 | opensource-codeofconduct@amazon.com with any additional questions or comments. 27 | 28 | 29 | ## Security issue notifications 30 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 31 | 32 | 33 | ## Licensing 34 | 35 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 36 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/cli.py: -------------------------------------------------------------------------------- 1 | DEFAULT_USER = 'ec2-user' 2 | 3 | def create_runuser_command_with_default_user(base_command: str) -> str: 4 | """Creates a CLI runuser command using a base command and a default user""" 5 | if not base_command: 6 | raise ValueError('base_command must be a non-empty string') 7 | return f"runuser -l {DEFAULT_USER} -c '{base_command}'" 8 | 9 | def double_escape_double_quotes(input: str) -> str: 10 | """Adds double-escape backslashes to any double quotes in a string. 11 | Used to prepare a string to be embedded in quoted commands. 12 | If input is empty or None, input is returned. 13 | """ 14 | if not input: 15 | return input 16 | return input.replace('"', '\\"') 17 | 18 | def double_escape_double_quotes_and_backslashes(input: str) -> str: 19 | """Adds double-escape backslashes to any double quote and existing backslash in a string. 20 | Used to prepare a parameter string to be embedded in quoted commands. 21 | If input is empty or None, input is returned. 
22 | """ 23 | if not input: 24 | return input 25 | if '\\' in input: 26 | input = input.replace('\\', '\\\\') 27 | return input.replace('"', '\\"') 28 | 29 | def triple_escape_double_single_quotes(input: str) -> str: 30 | """Add triple escape backslashes and a double single quotes in a string. 31 | Used to allow single quotes in user paramenter in command parameter string. 32 | If input is empty or None, input is returned 33 | """ 34 | if not input: 35 | return input 36 | return input.replace("'", "'\\\''") 37 | 38 | def escape_quotes_backslashes(input: str) -> str: 39 | """ 40 | For double backslashes adds double backslash 41 | For single double adds double backslashes 42 | For single quotes adds double backslashes and double single quotes 43 | """ 44 | if not input: 45 | return input 46 | input = double_escape_double_quotes_and_backslashes(input) 47 | return triple_escape_double_single_quotes(input) -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify/test_outputs.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | 3 | from notify.outputs import convert_state_file_outputs_to_service_catalog_outputs 4 | 5 | class TestOutputs(TestCase): 6 | 7 | def test_convert_state_file_outputs_to_service_catalog_outputs_happy_path(self): 8 | # Arrange 9 | input = {"outputs": 10 | [ 11 | { 12 | "key": "key1", 13 | "value": "value1", 14 | "description": "desc1" 15 | }, 16 | { 17 | "key": "key2", 18 | "value": "value2" 19 | }, 20 | { 21 | "key": "key3", 22 | "value": "value3", 23 | "description": None 24 | } 25 | ] 26 | } 27 | 28 | expected =[ 29 | { 30 | "OutputKey": "key1", 31 | "OutputValue": "value1", 32 | "Description": "desc1" 33 | }, 34 | { 35 | "OutputKey": "key2", 36 | "OutputValue": "value2" 37 | }, 38 | { 39 | "OutputKey": "key3", 40 | "OutputValue": "value3" 41 | } 42 | ] 43 | 44 | # Act 45 | actual = convert_state_file_outputs_to_service_catalog_outputs(input) 46 | 47 | # Assert 48 | self.assertEqual(expected, actual) 49 | 50 | 51 | def test_convert_state_file_outputs_to_service_catalog_outputs_empty_list(self): 52 | # Act 53 | actual = convert_state_file_outputs_to_service_catalog_outputs({"outputs": []}) 54 | 55 | # Assert 56 | self.assertEqual([], actual) 57 | 58 | def test_convert_state_file_outputs_to_service_catalog_outputs_outputs_missing(self): 59 | # Act 60 | actual = convert_state_file_outputs_to_service_catalog_outputs({}) 61 | 62 | # Assert 63 | self.assertEqual([], actual) 64 | 65 | if __name__ == '__main__': 66 | main() 67 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/poll_command_invocation.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from core.configuration import Configuration 4 | from core.exception import log_exception 5 | from core.ssm_facade import SsmFacade 6 | 7 | 8 | # PollCommandInvocationFunction input keys 9 | COMMAND_ID_KEY = 'commandId' 10 | INSTANCE_ID_KEY = 'instanceId' 11 | 12 | 13 | log = logging.getLogger() 14 | log.setLevel(logging.INFO) 15 | 16 | # Globals 17 | app_config = None 18 | ssm_facade = None 19 | 20 | 21 | def __validate_event(event): 22 | """Raises an exception if any required entries are missing from the Lambda event""" 23 | if COMMAND_ID_KEY not in event: 24 | raise RuntimeError(f'{COMMAND_ID_KEY} must be provided') 25 | if INSTANCE_ID_KEY not in event: 
26 | raise RuntimeError(f'{INSTANCE_ID_KEY} must be provided') 27 | 28 | 29 | def poll(event, context) -> dict: 30 | """Lambda function to poll the status of a command invocation from Systems Manager 31 | 32 | Parameters 33 | ---------- 34 | event: dict, required 35 | The input event to the Lambda function 36 | - CommandId(Required): The parent command ID of the invocation plugin. 37 | - InstanceId(Required): The ID of the managed node targeted by the command. 38 | 39 | context: object, required 40 | Lambda Context runtime methods and attributes 41 | 42 | Returns 43 | ------ 44 | dict 45 | - InvocationStatus: Status of invocation plugin in the selected EC2 instance 46 | """ 47 | 48 | global app_config 49 | global ssm_facade 50 | 51 | try: 52 | __validate_event(event) 53 | 54 | command_id = event[COMMAND_ID_KEY] 55 | instance_id = event[INSTANCE_ID_KEY] 56 | 57 | if not app_config: 58 | app_config = Configuration() 59 | if not ssm_facade: 60 | ssm_facade = SsmFacade(app_config) 61 | 62 | response = ssm_facade.get_command_invocation(command_id, instance_id) 63 | log.info(f'Returning {response}') 64 | return response 65 | 66 | except Exception as e: 67 | log_exception(e) 68 | raise e 69 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/notify_provision_result.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from core.configuration import Configuration 4 | from core.exception import log_exception 5 | from core.service_catalog_facade import ServiceCatalogFacade 6 | from notify.errors import workflow_has_error, get_failure_reason 7 | from notify.outputs import convert_state_file_outputs_to_service_catalog_outputs 8 | 9 | # Globals 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | app_config = None 13 | service_catalog_facade = None 14 | 15 | # Input keys 16 | WORKFLOW_TOKEN_KEY = 'token' 17 | RECORD_ID_KEY = 'recordId' 18 | TRACER_TAG_KEY = 'tracerTag' 19 | TRACER_TAG_KEY_KEY = 'key' 20 | TRACER_TAG_VALUE_KEY = 'value' 21 | 22 | 23 | def __notify_succeeded(event): 24 | service_catalog_facade.notify_provision_succeeded( 25 | workflow_token = event[WORKFLOW_TOKEN_KEY], 26 | record_id = event[RECORD_ID_KEY], 27 | tracer_tag_key = event[TRACER_TAG_KEY][TRACER_TAG_KEY_KEY], 28 | tracer_tag_value = event[TRACER_TAG_KEY][TRACER_TAG_VALUE_KEY], 29 | outputs = convert_state_file_outputs_to_service_catalog_outputs(event) 30 | ) 31 | 32 | def __notify_failed(event): 33 | service_catalog_facade.notify_provision_failed( 34 | workflow_token = event[WORKFLOW_TOKEN_KEY], 35 | record_id = event[RECORD_ID_KEY], 36 | failure_reason = get_failure_reason(event), 37 | tracer_tag_key = event[TRACER_TAG_KEY][TRACER_TAG_KEY_KEY], 38 | tracer_tag_value = event[TRACER_TAG_KEY][TRACER_TAG_VALUE_KEY], 39 | outputs = convert_state_file_outputs_to_service_catalog_outputs(event) 40 | ) 41 | 42 | def notify(event, context): 43 | log.info(f'Handling event {event}') 44 | 45 | global app_config 46 | global service_catalog_facade 47 | 48 | try: 49 | if not app_config: 50 | app_config = Configuration() 51 | if not service_catalog_facade: 52 | service_catalog_facade = ServiceCatalogFacade(app_config) 53 | 54 | if workflow_has_error(event): 55 | __notify_failed(event) 56 | else: 57 | __notify_succeeded(event) 58 | 59 | except Exception as exception: 60 | log_exception(exception) 61 | raise exception 62 | -------------------------------------------------------------------------------- 
/bin/bash/deploy-bootstrap-bucket-stack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | BOOTSTRAP_STACK_NAME=Bootstrap-TRE 5 | TEMPLATE_BODY='file://cfn-templates/Bootstrap.yaml' 6 | 7 | validate_aws_command_result() { 8 | # Pass the output of an aws command as $1. 9 | # If $1 is not valid json, assume it is an error returned from the aws command. 10 | if [[ ! `echo $1 | jq .` ]] 11 | then 12 | echo $1 13 | exit 14 | fi 15 | } 16 | 17 | get_stack_status() { 18 | STACK_STATUS=$(aws cloudformation describe-stacks --stack-name $BOOTSTRAP_STACK_NAME --region $AWS_REGION --query Stacks[0].StackStatus) 19 | } 20 | 21 | check_stack_operation_result() { 22 | echo "Checking stack status: $STACK_STATUS" 23 | if [[ "$STACK_STATUS" =~ CREATE_COMPLETE|UPDATE_COMPLETE ]] 24 | then 25 | echo "The stack operation succeeded." 26 | else 27 | echo "The stack operation failed." 28 | exit 1 29 | fi 30 | } 31 | 32 | await_finished_stack_status() { 33 | echo "Waiting for the stack operation to finish." 34 | while [[ -z "$STACK_STATUS" || "$STACK_STATUS" =~ IN_PROGRESS ]] 35 | do 36 | sleep 5 37 | get_stack_status 38 | echo "Stack status: $STACK_STATUS" 39 | done 40 | check_stack_operation_result 41 | } 42 | 43 | 44 | echo "Looking for the bootstrap bucket stack, using name $BOOTSTRAP_STACK_NAME" 45 | STACK_EXISTS_CHECK=`aws cloudformation describe-stacks --stack-name $BOOTSTRAP_STACK_NAME --region $AWS_REGION 2>&1 || true` 46 | 47 | if [[ "$STACK_EXISTS_CHECK" =~ "does not exist" ]] 48 | then 49 | 50 | echo "Did not find the bootstrap bucket stack. Creating it." 51 | STACK_CREATE_RESULT=`aws cloudformation create-stack --stack-name $BOOTSTRAP_STACK_NAME --template-body $TEMPLATE_BODY --capabilities CAPABILITY_NAMED_IAM --region $AWS_REGION 2>&1 || true` 52 | validate_aws_command_result "$STACK_CREATE_RESULT" 53 | await_finished_stack_status 54 | 55 | else 56 | 57 | # Make sure the last describe-stacks command succeeded. 58 | validate_aws_command_result "$STACK_EXISTS_CHECK" 59 | 60 | echo "Found the bootstrap bucket stack. Checking for updates." 61 | 62 | STACK_UPDATE_RESULT=`aws cloudformation update-stack --stack-name $BOOTSTRAP_STACK_NAME --template-body $TEMPLATE_BODY --capabilities CAPABILITY_NAMED_IAM --region $AWS_REGION 2>&1 || true` 63 | 64 | if [[ "$STACK_UPDATE_RESULT" =~ "No updates are to be performed" ]] 65 | then 66 | echo "No updates are to be performed." 67 | else 68 | validate_aws_command_result "$STACK_UPDATE_RESULT" 69 | await_finished_stack_status 70 | fi 71 | 72 | fi 73 | -------------------------------------------------------------------------------- /IMPORTANT_UPDATES.md: -------------------------------------------------------------------------------- 1 | ## [10/20/2023] Migration from `TERRAFORM_OPEN_SOURCE` to `EXTERNAL` product type 2 | AWS Service Catalog plans on introducing some important changes to their support of Terraform Open Source starting October 12, 2023. If you are not already aware, HashiCorp announced a switch from the Mozilla Public License (MPL) 2.0 to the Business Source License for Terraform. This change impacts customers who are using Terraform Open Source with Service Catalog, because references to 'open source' will be changed. Currently, Service Catalog references ‘open-source’ language in various artifacts such as our APIs, console, documentation, and a publicly available reference engine (TRE) that can be accessed through GitHub. 
3 | 
4 | Because of these changes, the following dates and action items should be noted:
5 | * **October 12, 2023** – Service Catalog will introduce a new parameter input for `ProductType` and `ProvisionedProductType` which will impact public APIs such as CreateProduct, UpdateProduct, DescribeRecord and more. The new input parameter will be `EXTERNAL` which will introduce a non-breaking change to both API, CLI, and console experiences. The `EXTERNAL` product type can be used by customers for 3rd party reference engines including the existing "Terraform Open Source" (now referred to as Terraform Community), Pulumi, Puppet, Chef, and more. The `EXTERNAL` product type is intended to replace the `TERRAFORM_OPEN_SOURCE` product type.
6 | * **December 14, 2023** – Service Catalog will prevent the creation of new product types and provisioned products with the type `TERRAFORM_OPEN_SOURCE`. Customers are also encouraged to upgrade their reference engines, which will prevent them from using a distribution of Terraform that is no longer supported. Note that existing resources (i.e., versions, existing provisioned products) of type `TERRAFORM_OPEN_SOURCE` can still be updated or terminated.
7 | 
8 | Between October 12, 2023 and December 14, 2023, customers are encouraged to take the following actions:
9 | 1. Upgrade your existing Terraform Reference Engine for AWS Service Catalog to include support for both the new `EXTERNAL` and previous `TERRAFORM_OPEN_SOURCE` product types.
10 | 1. Recreate the existing products using the new `EXTERNAL` product type (see the sketch after this list).
11 | 1. Delete any existing products that use the `TERRAFORM_OPEN_SOURCE` product type.
12 | 1. Reprovision (or relaunch) those resources using the new `EXTERNAL` product type.
13 | 1. Terminate any existing provisioned products that use the `TERRAFORM_OPEN_SOURCE` product type.
14 | 1. Any new products and launched provisioned resources should reference the `EXTERNAL` product type.
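
For the product-recreation step above, a hedged boto3 sketch follows. Every name and URL here is a placeholder, and the exact parameters your products need may differ; consult the Service Catalog CreateProduct API reference before using it:

import boto3

servicecatalog = boto3.client('servicecatalog')

# Placeholder values throughout; the key point is using 'EXTERNAL'
# instead of the deprecated 'TERRAFORM_OPEN_SOURCE' product type.
servicecatalog.create_product(
    Name='example-terraform-product',
    Owner='example-team',
    ProductType='EXTERNAL',
    ProvisioningArtifactParameters={
        'Name': 'v1',
        'Type': 'EXTERNAL',
        'Info': {
            'LoadTemplateFromURL': 'https://example-bucket.s3.amazonaws.com/product.tar.gz'
        }
    }
)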
15 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/select_worker_host.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | 4 | import boto3 5 | 6 | from core.configuration import Configuration 7 | from core.exception import log_exception 8 | 9 | log = logging.getLogger() 10 | log.setLevel(logging.INFO) 11 | 12 | app_config = None 13 | ec2_client = None 14 | 15 | # EC2 client keys 16 | FILTERS: list = [ 17 | { 18 | 'Name': 'tag:Name', 19 | 'Values': ['TerraformEngineExecutionInstance'] 20 | }, 21 | { 22 | 'Name': 'instance-state-name', 23 | 'Values': ['running'] 24 | } 25 | ] 26 | RESERVATIONS = 'Reservations' 27 | INSTANCES = 'Instances' 28 | EC2_RESPONSE_INSTANCE_ID = 'InstanceId' 29 | ERROR_RESPONSE_METADATA = 'ResponseMetadata' 30 | REQUEST_ID = 'RequestId' 31 | ERROR = 'Error' 32 | ERROR_RESPONSE_MESSAGE = 'Message' 33 | 34 | # Lambda response keys 35 | RETURNED_INSTANCE_ID = 'instanceId' 36 | 37 | 38 | def __select_random_instance_id(): 39 | """Randomly selects a running EC2 instance that matches the designated tag""" 40 | instances = [] 41 | 42 | describe_instances_paginator = ec2_client.get_paginator('describe_instances') 43 | describe_instances_iterator = describe_instances_paginator.paginate( 44 | Filters=FILTERS 45 | ) 46 | 47 | for page in describe_instances_iterator: 48 | for reservation in page[RESERVATIONS]: 49 | instances += reservation[INSTANCES] 50 | 51 | if not instances: 52 | raise RuntimeError('No usable EC2 instances found') 53 | 54 | index = random.randint(0, len(instances) - 1) 55 | return instances[index][EC2_RESPONSE_INSTANCE_ID] 56 | 57 | 58 | def select(event, context) -> object: 59 | """Lambda function to select an EC2 instance from the auto-scaling group 60 | 61 | Parameters 62 | ---------- 63 | event: dict, required 64 | The input event to the Lambda function 65 | 66 | context: object, required 67 | Lambda Context runtime methods and attributes 68 | 69 | Returns 70 | ------ 71 | dict: A randomly selected instance ID 72 | """ 73 | global app_config 74 | global ec2_client 75 | 76 | try: 77 | if not app_config: 78 | app_config = Configuration() 79 | if not ec2_client: 80 | ec2_client = boto3.client('ec2', config=app_config.get_boto_config()) 81 | 82 | response = { 83 | RETURNED_INSTANCE_ID: __select_random_instance_id() 84 | } 85 | log.info(f'Returning {response}') 86 | return response 87 | 88 | except Exception as e: 89 | log_exception(e) 90 | raise e 91 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/override_manager.py: -------------------------------------------------------------------------------- 1 | import json 2 | from json.decoder import JSONDecodeError 3 | 4 | BACKEND_FILE_NAME = "backend_override.tf.json" 5 | VARIABLE_FILE_NAME = "variable_override.tf.json" 6 | PROVIDER_FILE_NAME = "provider_override.tf.json" 7 | MAX_SESSION_NAME_LENGTH = 64 8 | 9 | 10 | def write_backend_override(workspace_dir, provisioned_product_descriptor, state_bucket, state_region): 11 | backend_override = { 12 | "terraform": { 13 | "backend": { 14 | "s3": { 15 | "bucket": f"{state_bucket}", 16 | "key": f"{provisioned_product_descriptor}", 17 | "region": f"{state_region}" 18 | } 19 | } 20 | } 21 | } 22 | with open(f"{workspace_dir}/{BACKEND_FILE_NAME}", "w") as json_file: 23 | json.dump(backend_override, json_file) 24 | 25 | 26 | def 
write_variable_override(workspace_dir, variables): 27 | if not variables: 28 | return 29 | 30 | variable_override = {'variable': {}} 31 | for variable in variables: 32 | try: 33 | variable_value_json = json.loads(variable['value']) 34 | variable_override['variable'][variable['key']] = {"default": variable_value_json} 35 | except JSONDecodeError: 36 | variable_override['variable'][variable['key']] = {"default": variable['value']} 37 | with open(f"{workspace_dir}/{VARIABLE_FILE_NAME}", "w") as json_file: 38 | json.dump(variable_override, json_file) 39 | 40 | 41 | def write_provider_override(workspace_dir, provisioned_product_descriptor, launch_role_arn, region, tags): 42 | provider_override = { 43 | "provider": { 44 | "aws": { 45 | "region": f"{region}", 46 | "assume_role": { 47 | "role_arn": f"{launch_role_arn}", 48 | "session_name": __format_session_name(provisioned_product_descriptor) 49 | }, 50 | 'default_tags': { 51 | 'tags': { 52 | } 53 | } 54 | } 55 | } 56 | } 57 | 58 | if tags != None: 59 | for tag in tags: 60 | key = tag['key'] 61 | provider_override['provider']['aws']['default_tags']['tags'][f'{key}'] = tag['value'] 62 | 63 | with open(f"{workspace_dir}/{PROVIDER_FILE_NAME}", "w") as json_file: 64 | json.dump(provider_override, json_file) 65 | 66 | 67 | def __format_session_name(unformatted_session_name): 68 | return f"{unformatted_session_name[:MAX_SESSION_NAME_LENGTH]}".replace('/', '-') 69 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/artifact_manager.py: -------------------------------------------------------------------------------- 1 | from glob import glob 2 | import tarfile 3 | 4 | import boto3 5 | from botocore.exceptions import ClientError 6 | 7 | # Constants 8 | ROLE_SESSION_NAME = 'TerraformLaunchRole' 9 | LOCAL_ARTIFACT_FILE = 'artifact.local' 10 | REQUIRED_FILES_PATTERN = '*.tf' 11 | NO_REQUIRED_FILES_FOUND_MESSAGE = 'No .tf files found. Nothing to parse. Make sure the root directory of the Terraform open source configuration file contains the .tf files for the root module.' 12 | 13 | # Boto exception keys 14 | RESPONSE_METADATA_KEY = "ResponseMetadata" 15 | REQUEST_ID_KEY = "RequestId" 16 | 17 | 18 | def __get_s3_client(launch_role_arn): 19 | sts = boto3.client('sts') 20 | assume_role_result = sts.assume_role(RoleArn=launch_role_arn, 21 | RoleSessionName=ROLE_SESSION_NAME) 22 | credentials = assume_role_result['Credentials'] 23 | return boto3.client('s3', 24 | aws_access_key_id=credentials['AccessKeyId'], 25 | aws_secret_access_key=credentials['SecretAccessKey'], 26 | aws_session_token=credentials['SessionToken']) 27 | 28 | def __validate_required_files_exist(workspace_dir): 29 | files = glob(f'{workspace_dir}/{REQUIRED_FILES_PATTERN}') 30 | if not files: 31 | raise RuntimeError(NO_REQUIRED_FILES_FOUND_MESSAGE) 32 | 33 | def download_artifact(launch_role_arn, artifact_path, workspace_dir): 34 | # Extract bucket, key, and file name from the path. This will be the S3 URI. 
35 | # Example: s3://my-bucket/test-data/main.tar.gz 36 | try: 37 | bucket = artifact_path.split('/')[2] 38 | key = artifact_path.split('/', 3)[3] 39 | except IndexError as e: 40 | raise RuntimeError(f'Invalid artifact path {artifact_path}: {e}') 41 | 42 | try: 43 | s3 = __get_s3_client(launch_role_arn) 44 | s3.download_file(bucket, key, f'{workspace_dir}/{LOCAL_ARTIFACT_FILE}') 45 | except ClientError as e: 46 | message = f'Failed to execute API: {e.operation_name} with request Id: {e.response[RESPONSE_METADATA_KEY][REQUEST_ID_KEY]}: {e}' 47 | raise RuntimeError( 48 | f'Could not download artifact {artifact_path} using launch role {launch_role_arn}: {message}') 49 | except Exception as e: 50 | raise RuntimeError(f'Could not download artifact {artifact_path} using launch role {launch_role_arn}: {e}') 51 | 52 | try: 53 | with tarfile.open(LOCAL_ARTIFACT_FILE) as file_handle: 54 | file_handle.extractall(workspace_dir) 55 | except Exception as e: 56 | raise RuntimeError(f'Could not extract files from {artifact_path}: {e}') 57 | 58 | __validate_required_files_exist(workspace_dir) 59 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/archive_unzipper.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "compress/gzip" 7 | "io" 8 | "log" 9 | "strings" 10 | ) 11 | 12 | const MetaDataFilePrefix1 = "./._" 13 | const MetaDataFilePrefix2 = "._" 14 | const RootDirectoryPrefix = "./" 15 | const SubDirectoryDelimiter = "/" 16 | const TfFileSuffix = ".tf" 17 | 18 | // UnzipArchive - Unzips a .tar.gz archive to a map where key is the file name and value is the file content 19 | func UnzipArchive(zipFile []byte) (map[string]string, error) { 20 | bytesReader := bytes.NewReader(zipFile) 21 | 22 | gzipReader, err := getGzipReader(bytesReader) 23 | if err != nil { 24 | return map[string]string{}, err 25 | } 26 | 27 | return getFileMapFromGzip(gzipReader) 28 | } 29 | 30 | func getGzipReader(bytesReader io.Reader) (io.Reader, error) { 31 | gzipReader, err := gzip.NewReader(bytesReader) 32 | if err != nil { 33 | return bytes.NewReader([]byte{}), err 34 | } 35 | defer gzipReader.Close() 36 | 37 | return gzipReader, nil 38 | } 39 | 40 | func getFileMapFromGzip(gzipReader io.Reader) (map[string]string, error) { 41 | fileMap := make(map[string]string) 42 | tarReader := tar.NewReader(gzipReader) 43 | 44 | for { 45 | hdr, err := tarReader.Next() 46 | if err == io.EOF { 47 | break 48 | } else if err != nil { 49 | return fileMap, err 50 | } 51 | 52 | if hdr.Typeflag != tar.TypeReg { 53 | log.Printf("Skipping item %s of type %s", hdr.Name, string(hdr.Typeflag)) 54 | continue 55 | } 56 | 57 | // File extension names within the zipped file will have to end with .tf 58 | // Hence header name will need to be at least 4 chars 59 | if len(hdr.Name) < 4 { 60 | log.Printf("Skipping non tf file %s", hdr.Name) 61 | continue 62 | } 63 | 64 | if !strings.HasSuffix(hdr.Name, TfFileSuffix) { 65 | log.Printf("Skipping non tf file %s", hdr.Name) 66 | continue 67 | } 68 | 69 | if strings.HasPrefix(hdr.Name, MetaDataFilePrefix1) || strings.HasPrefix(hdr.Name, MetaDataFilePrefix2) { 70 | log.Printf("Skipping potential metadata file %s", hdr.Name) 71 | continue 72 | } 73 | 74 | if !strings.HasPrefix(hdr.Name, RootDirectoryPrefix) && strings.Contains(hdr.Name, SubDirectoryDelimiter) { 75 | log.Printf("Skipping file in subdirectory %s", hdr.Name) 76 | 
continue 77 | } 78 | 79 | if strings.HasPrefix(hdr.Name, RootDirectoryPrefix) && strings.Count(hdr.Name, SubDirectoryDelimiter) > 1 { 80 | log.Printf("Skipping file in subdirectory %s", hdr.Name) 81 | continue 82 | } 83 | 84 | log.Printf("Found HCL file %s", hdr.Name) 85 | 86 | data, err := io.ReadAll(tarReader) 87 | if err != nil { 88 | return fileMap, err 89 | } 90 | 91 | fileMap[hdr.Name] = string(data) 92 | } 93 | 94 | return fileMap, nil 95 | } 96 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/ssm_facade.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import boto3 4 | 5 | from core.configuration import Configuration 6 | 7 | log = logging.getLogger() 8 | log.setLevel(logging.INFO) 9 | 10 | 11 | # Constants 12 | DOCUMENT_NAME_RUN_SHELL_COMMAND = "AWS-RunShellScript" 13 | 14 | # SSM response keys 15 | COMMAND_KEY = 'Command' 16 | COMMAND_ID_KEY = 'CommandId' 17 | STATUS_KEY = 'Status' 18 | STANDARD_ERROR_CONTENT_KEY = 'StandardErrorContent' 19 | 20 | # Function output keys 21 | INVOCATION_STATUS_KEY = 'invocationStatus' 22 | ERROR_MESSAGE_KEY = 'errorMessage' 23 | 24 | 25 | class SsmFacade: 26 | 27 | def __init__(self, app_config: Configuration): 28 | self.__ssm_client = boto3.client('ssm', config = app_config.get_boto_config()) 29 | 30 | def send_shell_command(self, command_text: str, instance_id: str) -> str: 31 | """Uses SSM to run a shell command on an instance 32 | 33 | Parameters 34 | ---------- 35 | command_text: str, required 36 | The shell command to run 37 | 38 | instance_id: str, required 39 | The instance ID of the host where the command will be run 40 | 41 | Returns 42 | ------- 43 | str: The command ID of the command that was started 44 | """ 45 | log.info(f'Sending shell command to instance {instance_id}: {command_text}') 46 | 47 | response = self.__ssm_client.send_command( 48 | InstanceIds=[instance_id], 49 | DocumentName=DOCUMENT_NAME_RUN_SHELL_COMMAND, 50 | Parameters={'commands': [command_text]}, 51 | CloudWatchOutputConfig={'CloudWatchOutputEnabled': True}) 52 | log.info(f'SendCommand response: {response}') 53 | return response[COMMAND_KEY][COMMAND_ID_KEY] 54 | 55 | def get_command_invocation(self, command_id: str, instance_id: str) -> dict: 56 | """Uses SSM to get a command invocation 57 | 58 | Parameters 59 | ---------- 60 | command_id: str, required 61 | The command ID of the command invocation 62 | 63 | instance_id: str, required 64 | The instance ID where the command was run 65 | 66 | Returns 67 | ------- 68 | dict: Contains elements from the SSM response.
69 | Keys included are invocationStatus and errorMessage 70 | """ 71 | log.info(f'Get command invocation for command ID {command_id} & instance ID {instance_id}') 72 | 73 | get_command_invocation_response = self.__ssm_client.get_command_invocation( 74 | CommandId=command_id, 75 | InstanceId=instance_id 76 | ) 77 | 78 | response = { 79 | INVOCATION_STATUS_KEY: get_command_invocation_response[STATUS_KEY], 80 | ERROR_MESSAGE_KEY: get_command_invocation_response[STANDARD_ERROR_CONTENT_KEY] 81 | } 82 | return response 83 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/config_fetcher.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | 8 | "github.com/aws/aws-sdk-go/aws/credentials" 9 | "github.com/aws/aws-sdk-go/aws/credentials/stscreds" 10 | "github.com/aws/aws-sdk-go/aws/session" 11 | ) 12 | 13 | const LaunchRoleAccessDeniedErrorMessage = "Access denied while assuming launch role %s: %s" 14 | const ArtifactFetchAccessDeniedErrorMessage = "Access denied while downloading artifact from %s: %s" 15 | const UnzipFailureErrorMessage = "Artifact from %s is not a valid tar.gz file: %s" 16 | 17 | type ConfigFetcher struct { 18 | s3Downloader *S3Downloader 19 | } 20 | 21 | func NewConfigFetcher(launchRoleArn string) (*ConfigFetcher, error) { 22 | s3Downloader, err := NewS3Downloader(retrieveConfigFetcherCreds(launchRoleArn)) 23 | if err != nil { 24 | return &ConfigFetcher{}, 25 | ParserAccessDeniedException{Message: fmt.Sprintf(LaunchRoleAccessDeniedErrorMessage, launchRoleArn, err.Error())} 26 | } 27 | 28 | return &ConfigFetcher{s3Downloader: s3Downloader}, nil 29 | } 30 | 31 | // Fetches the input file from artifact location and outputs as a map of the file's name to its contents in string format 32 | func (c *ConfigFetcher) fetch(input TerraformOpenSourceParameterParserInput) (map[string]string, error) { 33 | bucket, key := resolveArtifactPath(input.Artifact.Path) 34 | 35 | configBytes, err := c.s3Downloader.download(bucket, key) 36 | if err != nil { 37 | return map[string]string{}, 38 | ParserAccessDeniedException{Message: fmt.Sprintf(ArtifactFetchAccessDeniedErrorMessage, input.Artifact.Path, err.Error())} 39 | } 40 | 41 | fileMap, err := UnzipArchive(configBytes) 42 | if err != nil { 43 | return fileMap, 44 | ParserInvalidParameterException{Message: fmt.Sprintf(UnzipFailureErrorMessage, input.Artifact.Path, err.Error())} 45 | } 46 | 47 | return fileMap, nil 48 | } 49 | 50 | func retrieveConfigFetcherCreds(launchRoleArn string) *credentials.Credentials { 51 | // use default lambda execution role creds to retrieve configuration templates if launch role is not provided 52 | if launchRoleArn == "" { 53 | log.Print("Launch role is not provided. 
Using default ServiceCatalogTerraformOSParameterParserRole credentials to fetch artifact.") 54 | return credentials.NewEnvCredentials() 55 | } else { 56 | log.Printf("Using launch role %s credentials to fetch artifact.", launchRoleArn) 57 | return retrieveLaunchRoleCreds(launchRoleArn) 58 | } 59 | } 60 | 61 | // Assumes the provided launchRoleArn and returns its credentials 62 | func retrieveLaunchRoleCreds(launchRoleArn string) *credentials.Credentials { 63 | sess := session.Must(session.NewSession()) 64 | return stscreds.NewCredentials(sess, launchRoleArn) 65 | } 66 | 67 | // Resolves artifactPath to bucket and key 68 | func resolveArtifactPath(artifactPath string) (string, string) { 69 | bucket := strings.Split(artifactPath, "/")[2] 70 | key := strings.SplitN(artifactPath, "/", 4)[3] 71 | return bucket, key 72 | } 73 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/test_CommandManager.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import Mock, patch 3 | 4 | from terraform_runner.CommandManager import CommandManager 5 | 6 | SUCCESS_RETURN_CODE = 0 7 | ERROR_RETURN_CODE = 1 8 | 9 | 10 | class TestCommandManager(unittest.TestCase): 11 | 12 | @patch('terraform_runner.CommandManager.CustomLogger') 13 | @patch('terraform_runner.CommandManager.subprocess') 14 | def test_run_command_happy_path(self, mock_subprocess, mock_logger): 15 | # arrange 16 | command_manager = CommandManager(mock_logger) 17 | command = ['foo', 'bar'] 18 | success_result = Mock() 19 | success_result.returncode = SUCCESS_RETURN_CODE 20 | mock_subprocess.run.return_value = success_result 21 | 22 | # act 23 | command_manager.run_command(command) 24 | 25 | # assert 26 | mock_subprocess.run.assert_called_once() 27 | 28 | @patch('terraform_runner.CommandManager.CustomLogger') 29 | @patch('terraform_runner.CommandManager.subprocess') 30 | def test_run_command_with_error_return_code(self, mock_subprocess, mock_logger): 31 | # arrange 32 | command_manager = CommandManager(mock_logger) 33 | command = ['foo', 'bar'] 34 | error_result = Mock() 35 | error_result.returncode = ERROR_RETURN_CODE 36 | error_result.stderr = 'standard error' 37 | mock_subprocess.run.return_value = error_result 38 | 39 | # act and assert 40 | with self.assertRaises(RuntimeError) as context: 41 | command_manager.run_command(command) 42 | self.assertEqual(context.expected, RuntimeError) 43 | self.assertTrue(str(context.exception).startswith('standard error')) 44 | 45 | @patch('terraform_runner.CommandManager.CustomLogger') 46 | @patch('terraform_runner.CommandManager.subprocess') 47 | def test_run_command_exception_raised(self, mock_subprocess, mock_logger): 48 | # arrange 49 | command_manager = CommandManager(mock_logger) 50 | command = ['foo', 'bar'] 51 | mock_subprocess.run.side_effect = Exception('Something went wrong') 52 | 53 | # act and assert 54 | with self.assertRaises(RuntimeError): 55 | command_manager.run_command(command) 56 | 57 | 58 | if __name__ == '__main__': 59 | unittest.main() 60 | -------------------------------------------------------------------------------- /cfn-templates/TerraformProvisioningAccount.yaml: -------------------------------------------------------------------------------- 1 | Description: Resources set up in the accounts where Terraform provisioning will occur. 2 | Parameters: 3 | TerraformEngineAccount: 4 | Type: String 5 | AllowedPattern: "^[0-9]{12}$" 6 | Description: The account where the Terraform engine has been deployed. 7 | Resources: 8 | 9 | # Example launch role with permissions to manage resources defined in a Terraform product. 10 | # See https://docs.aws.amazon.com/servicecatalog/latest/adminguide/constraints-launch.html 11 | # But for a Terraform product, we don't need to add CloudFormation permissions. 12 | SCLaunchRoleTerraformExample: 13 | Type: AWS::IAM::Role 14 | Properties: 15 | RoleName: SCLaunchRoleTerraformExample 16 | AssumeRolePolicyDocument: 17 | Version: 2012-10-17 18 | Statement: 19 | - Sid: GivePermissionsToServiceCatalog 20 | Effect: Allow 21 | Principal: 22 | Service: servicecatalog.amazonaws.com 23 | Action: sts:AssumeRole 24 | - Effect: Allow 25 | Action: sts:AssumeRole 26 | Principal: 27 | AWS: 28 | - !Sub arn:${AWS::Partition}:iam::${TerraformEngineAccount}:root 29 | Condition: 30 | StringLike: 31 | "aws:PrincipalArn": 32 | - !Sub arn:${AWS::Partition}:iam::${TerraformEngineAccount}:role/TerraformEngine/TerraformExecutionRole* 33 | - !Sub arn:${AWS::Partition}:iam::${TerraformEngineAccount}:role/TerraformEngine/ServiceCatalogTerraformOSParameterParserRole* 34 | Policies: 35 | - PolicyName: ProvisioningArtifactAccessPolicy 36 | PolicyDocument: 37 | Statement: 38 | - Effect: Allow 39 | Action: s3:GetObject 40 | Resource: '*' 41 | Condition: 42 | StringEquals: 43 | "s3:ExistingObjectTag/servicecatalog:provisioning": "true" 44 | # This policy grants permissions required to manage resources in the SC product. 45 | # This example is for provisioning an S3 bucket.
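# The actions below are scoped for this S3 sample product; adjust the actions and resources to match what your own Terraform products manage.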
46 | - PolicyName: ResourceCreationPolicy 47 | PolicyDocument: 48 | Statement: 49 | - Effect: Allow 50 | Action: 51 | - s3:CreateBucket* 52 | - s3:DeleteBucket* 53 | - s3:Get* 54 | - s3:List* 55 | - s3:PutBucketTagging 56 | Resource: !Sub "arn:${AWS::Partition}:s3:::*" 57 | # Resource group and tagging permissions required for Service Catalog terraform open source products 58 | - Effect: Allow 59 | Action: 60 | - resource-groups:CreateGroup 61 | - resource-groups:DeleteGroup 62 | - resource-groups:Tag 63 | - resource-groups:ListGroupResources 64 | Resource: "*" 65 | - Effect: Allow 66 | Action: 67 | - tag:GetResources 68 | - tag:GetTagKeys 69 | - tag:GetTagValues 70 | - tag:TagResources 71 | - tag:UntagResources 72 | Resource: "*" 73 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/config_fetcher_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "os" 7 | "reflect" 8 | "testing" 9 | 10 | "github.com/aws/aws-sdk-go/service/s3" 11 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | const TestArtifactPath = "s3://terraform-configurations-cross-account-demo/product_with_override_var.tar.gz" 16 | const TestArtifactType = "AWS_S3" 17 | const TestLaunchRoleArn = "arn:aws:iam::829064435212:role/SCLaunchRole" 18 | const TestS3BucketArtifactPath = "../../sample-provisioning-artifacts/s3bucket.tar.gz" 19 | const TestS3BucketArtifactFileName = "main.tf" 20 | const TestS3BucketArtifactFileContent = "\"bucket_name\" {\n type = string\n}\nprovider \"aws\" {\n}\nresource \"aws_s3_bucket\" \"bucket\" {\n bucket = var.bucket_name\n}\noutput regional_domain_name {\n value = aws_s3_bucket.bucket.bucket_regional_domain_name\n}" 21 | 22 | type MockS3Downloader struct { 23 | mock.Mock 24 | } 25 | 26 | func (m *MockS3Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (n int64, err error) { 27 | if *input.Bucket != "terraform-configurations-cross-account-demo" || *input.Key != "product_with_override_var.tar.gz" { 28 | return 0, errors.New(S3ClientErrorMessage) 29 | } 30 | 31 | b, _ := os.ReadFile(TestS3BucketArtifactPath) 32 | numBytes, _ := w.WriteAt(b, 0) 33 | return int64(numBytes), nil 34 | } 35 | 36 | func TestConfigFetcherFetchHappy(t *testing.T) { 37 | // setup 38 | downloader := new(MockS3Downloader) 39 | s3Downloader := &S3Downloader{ 40 | downloader: downloader, 41 | } 42 | configFetcher := &ConfigFetcher{ 43 | s3Downloader: s3Downloader, 44 | } 45 | input := TerraformOpenSourceParameterParserInput{ 46 | Artifact: Artifact{ 47 | Path: TestArtifactPath, 48 | Type: TestArtifactType, 49 | }, 50 | LaunchRoleArn: TestLaunchRoleArn, 51 | } 52 | 53 | // act 54 | fileMap, err := configFetcher.fetch(input) 55 | 56 | // assert 57 | if err != nil { 58 | t.Errorf("Unexpected error occurred") 59 | } 60 | 61 | fileContent, ok := fileMap[TestS3BucketArtifactFileName] 62 | if !ok { 63 | t.Errorf("Expected file %s was not parsed", TestS3BucketArtifactFileName) 64 | } 65 | 66 | if !reflect.DeepEqual(fileContent, TestS3BucketArtifactFileContent) { 67 | t.Errorf("File content for %s is not as expected", TestS3BucketArtifactFileName) 68 | } 69 | } 70 | 71 | func TestConfigFetcherFetchWithEmptyLaunchRoleHappy(t *testing.T) { 72 | // setup 73 | downloader := new(MockS3Downloader) 74 | s3Downloader := &S3Downloader{ 75 | downloader:
downloader, 76 | } 77 | configFetcher := &ConfigFetcher{ 78 | s3Downloader: s3Downloader, 79 | } 80 | input := TerraformOpenSourceParameterParserInput{ 81 | Artifact: Artifact{ 82 | Path: TestArtifactPath, 83 | Type: TestArtifactType, 84 | }, 85 | LaunchRoleArn: "", 86 | } 87 | 88 | // act 89 | fileMap, err := configFetcher.fetch(input) 90 | 91 | // assert 92 | if err != nil { 93 | t.Errorf("Unexpected error occurred") 94 | } 95 | 96 | fileContent, ok := fileMap[TestS3BucketArtifactFileName] 97 | if !ok { 98 | t.Errorf("Expected file %s was not parsed", TestS3BucketArtifactFileName) 99 | } 100 | 101 | if !reflect.DeepEqual(fileContent, TestS3BucketArtifactFileContent) { 102 | t.Errorf("File content for %s is not as expected", TestS3BucketArtifactFileName) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/validator.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "reflect" 7 | 8 | "github.com/aws/aws-sdk-go/aws/arn" 9 | ) 10 | 11 | const ArtifactKey = "Artifact" 12 | const LaunchRoleArnKey = "LaunchRoleArn" 13 | const ArtifactPathKey = "Artifact.Path" 14 | const ArtifactTypeKey = "Artifact.Type" 15 | 16 | const DefaultArtifactType = "AWS_S3" 17 | const IamArnServiceKey = "iam" 18 | const S3Scheme = "s3" 19 | 20 | const RequiredKeyMissingOrEmptyErrorMessage = "%s is required and must be non empty" 21 | const InvalidLaunchRoleArnSyntaxErrorMessage = "LaunchRoleArn %s is not a syntactically valid ARN" 22 | const InvalidIamLaunchRoleArnErrorMessage = "LaunchRoleArn %s is not a valid iam ARN" 23 | const InvalidArtifactTypeErrorMessage = "Artifact type %s is not supported, must be AWS_S3" 24 | const InvalidArtifactPathErrorMessage = "Artifact path %s is not a valid S3 URI" 25 | 26 | // ValidateInput - Validates TerraformOpenSourceParameterParserInput 27 | // Returns a non nil error if an invalid input is provided 28 | func ValidateInput(input TerraformOpenSourceParameterParserInput) error { 29 | // validate required keys exist in the input 30 | if err := validateRequiredKeysExist(input); err != nil { 31 | return err 32 | } 33 | 34 | // validate the format of LaunchRoleArn 35 | if err := validateLaunchRoleArnIsSyntacticallyCorrect(input.LaunchRoleArn); err != nil { 36 | return err 37 | } 38 | 39 | // validate the Artifact 40 | if err := validateArtifact(input.Artifact); err != nil { 41 | return err 42 | } 43 | 44 | return nil 45 | } 46 | 47 | func validateRequiredKeysExist(input TerraformOpenSourceParameterParserInput) error { 48 | if reflect.DeepEqual(input.Artifact, Artifact{}) { 49 | return ParserInvalidParameterException{ 50 | Message: fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactKey), 51 | } 52 | } 53 | 54 | if input.Artifact.Path == "" { 55 | return ParserInvalidParameterException{ 56 | Message: fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactPathKey), 57 | } 58 | } 59 | 60 | if input.Artifact.Type == "" { 61 | return ParserInvalidParameterException{ 62 | Message: fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactTypeKey), 63 | } 64 | } 65 | 66 | return nil 67 | } 68 | 69 | func validateLaunchRoleArnIsSyntacticallyCorrect(launchRoleArnString string) error { 70 | 71 | // skip validation if launch role is not provided 72 | if launchRoleArnString == "" { 73 | return nil 74 | } 75 | 76 | launchRoleArn, err := arn.Parse(launchRoleArnString) 77 | if err != nil {
78 | return ParserInvalidParameterException{ 79 | Message: fmt.Sprintf(InvalidLaunchRoleArnSyntaxErrorMessage, launchRoleArnString), 80 | } 81 | } 82 | 83 | if launchRoleArn.Service != IamArnServiceKey { 84 | return ParserInvalidParameterException{ 85 | Message: fmt.Sprintf(InvalidIamLaunchRoleArnErrorMessage, launchRoleArnString), 86 | } 87 | } 88 | 89 | return nil 90 | } 91 | 92 | func validateArtifact(artifact Artifact) error { 93 | if artifact.Type != DefaultArtifactType { 94 | return ParserInvalidParameterException{ 95 | Message: fmt.Sprintf(InvalidArtifactTypeErrorMessage, artifact.Type), 96 | } 97 | } 98 | 99 | artifactUri, err := url.Parse(artifact.Path) 100 | if err != nil { 101 | return ParserInvalidParameterException{ 102 | Message: fmt.Sprintf(InvalidArtifactPathErrorMessage, artifact.Path), 103 | } 104 | } 105 | 106 | if artifactUri.Scheme != S3Scheme || artifactUri.Host == "" || artifactUri.Path == "" { 107 | return ParserInvalidParameterException{ 108 | Message: fmt.Sprintf(InvalidArtifactPathErrorMessage, artifact.Path), 109 | } 110 | } 111 | 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/send_destroy_command.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from core.cli import create_runuser_command_with_default_user 6 | from core.configuration import Configuration 7 | from core.exception import log_exception 8 | from core.ssm_facade import SsmFacade 9 | 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | 13 | # Globals 14 | app_config = None 15 | state_bucket_name = None 16 | ssm_facade = None 17 | 18 | # Constants 19 | TERMINATE_PROVISIONED_PRODUCT = 'TERMINATE_PROVISIONED_PRODUCT' 20 | 21 | # Input keys 22 | INSTANCE_ID_KEY = 'instanceId' 23 | OPERATION_KEY = "operation" 24 | PROVISIONED_PRODUCT_ID_KEY = 'provisionedProductId' 25 | AWS_ACCOUNT_ID_KEY = "awsAccountId" 26 | LAUNCH_ROLE_ARN_KEY = 'launchRoleArn' 27 | 28 | # Output keys 29 | COMMAND_ID_KEY = 'commandId' 30 | 31 | # Environment variable keys 32 | STATE_BUCKET_NAME_KEY = 'STATE_BUCKET_NAME' 33 | 34 | 35 | def __validate_event(event: dict): 36 | """Validates that all required fields are in the Lambda event and have expected values 37 | 38 | Parameters 39 | ---------- 40 | event: dict, required 41 | The Lambda event to be validated 42 | """ 43 | 44 | if INSTANCE_ID_KEY not in event: 45 | raise RuntimeError(f'{INSTANCE_ID_KEY} must be provided') 46 | if OPERATION_KEY not in event: 47 | raise RuntimeError(f'{OPERATION_KEY} must be provided') 48 | if event[OPERATION_KEY] != TERMINATE_PROVISIONED_PRODUCT: 49 | raise RuntimeError(f"{OPERATION_KEY} must be {TERMINATE_PROVISIONED_PRODUCT} but was {event[OPERATION_KEY]}") 50 | if PROVISIONED_PRODUCT_ID_KEY not in event: 51 | raise RuntimeError(f'{PROVISIONED_PRODUCT_ID_KEY} must be provided') 52 | if AWS_ACCOUNT_ID_KEY not in event: 53 | raise RuntimeError(f'{AWS_ACCOUNT_ID_KEY} must be provided') 54 | if LAUNCH_ROLE_ARN_KEY not in event: 55 | raise RuntimeError(f'{LAUNCH_ROLE_ARN_KEY} must be provided') 56 | 57 | 58 | def __get_command_text(event: dict) -> str: 59 | """Creates the command to run on the instance based on the Lambda input event. 
60 | 61 | Parameters 62 | ---------- 63 | event: dict, required 64 | The input event to the Lambda function 65 | 66 | Returns 67 | ------- 68 | str: The command text 69 | """ 70 | 71 | base_command = f"""python3 -m terraform_runner --action=destroy \ 72 | --provisioned-product-descriptor={f'{event[AWS_ACCOUNT_ID_KEY]}/{event[PROVISIONED_PRODUCT_ID_KEY]}'} \ 73 | --launch-role={event[LAUNCH_ROLE_ARN_KEY]} \ 74 | --region={app_config.get_region()} \ 75 | --terraform-state-bucket={state_bucket_name}""" 76 | return create_runuser_command_with_default_user(base_command) 77 | 78 | def send(event, context) -> dict: 79 | """Lambda handler to send a command to a host to run Terraform destroy 80 | 81 | Parameters 82 | ---------- 83 | event: dict, required 84 | The input event to the Lambda function 85 | 86 | context: object, required 87 | Lambda Context runtime methods and attributes 88 | 89 | Returns 90 | ------- 91 | dict: The command ID returned by SSM 92 | """ 93 | log.info(f'Handling event: {event}') 94 | global app_config 95 | global state_bucket_name 96 | global ssm_facade 97 | 98 | try: 99 | __validate_event(event) 100 | 101 | if not app_config: 102 | app_config = Configuration() 103 | if not state_bucket_name: 104 | state_bucket_name = os.environ[STATE_BUCKET_NAME_KEY] 105 | if not ssm_facade: 106 | ssm_facade = SsmFacade(app_config) 107 | 108 | command_text = __get_command_text(event) 109 | 110 | response = { 111 | COMMAND_ID_KEY: ssm_facade.send_shell_command(command_text, event[INSTANCE_ID_KEY]) 112 | } 113 | log.info(f'Returning {response}') 114 | return response 115 | 116 | except Exception as e: 117 | log_exception(e) 118 | raise e 119 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | import sys 5 | import traceback 6 | 7 | from terraform_runner.artifact_manager import download_artifact 8 | from terraform_runner.CommandManager import CommandManager 9 | from terraform_runner.CustomLogger import CustomLogger 10 | from terraform_runner.override_manager import write_backend_override, write_variable_override, write_provider_override 11 | from terraform_runner.WorkspaceManager import WorkspaceManager 12 | 13 | 14 | # Constants 15 | APPLY_ACTION = 'apply' 16 | DESTROY_ACTION = 'destroy' 17 | AWS_DEFAULT_REGION = 'AWS_DEFAULT_REGION' 18 | 19 | 20 | def __parse_arguments(): 21 | parser = argparse.ArgumentParser() 22 | parser.add_argument('--action', help = 'The action to perform', choices = [APPLY_ACTION, DESTROY_ACTION]) 23 | parser.add_argument('--provisioned-product-descriptor', 24 | help = 'A descriptor that uniquely identifies a provisioned product') 25 | parser.add_argument('--launch-role', help = 'The launch role Arn') 26 | parser.add_argument('--region', 27 | help = 'The region where resources will be provisioned and where the Terraform state will be stored') 28 | parser.add_argument('--terraform-state-bucket', 29 | help = 'The bucket where the Terraform state will be stored') 30 | parser.add_argument('--artifact-path', help = 'The artifact S3 path in URI format') 31 | parser.add_argument('--artifact-parameters', type = json.loads, 32 | help = 'Artifact parameters in json format') 33 | parser.add_argument('--tags', type = json.loads, 34 | help = 'Tags to apply to the provisioned resources, in json format') 35 | return parser.parse_args() 36 | 37 | def 
__set_environment_variables(args): 38 | os.environ[AWS_DEFAULT_REGION] = args.region 39 | 40 | def __setup_workspace(workspace_manager): 41 | workspace_manager.setup_workspace_directory() 42 | workspace_dir = workspace_manager.get_workspace_directory() 43 | os.chdir(workspace_dir) 44 | return workspace_dir 45 | 46 | def __write_common_overrides(workspace_dir, args): 47 | write_backend_override(workspace_dir, args.provisioned_product_descriptor, 48 | args.terraform_state_bucket, args.region) 49 | write_provider_override(workspace_dir, args.provisioned_product_descriptor, args.launch_role, 50 | args.region, args.tags) 51 | 52 | def __perform_apply(command_manager, workspace_dir, args): 53 | download_artifact(args.launch_role, args.artifact_path, workspace_dir) 54 | write_variable_override(workspace_dir, args.artifact_parameters) 55 | command_manager.run_command(['terraform', 'init', '-no-color']) 56 | command_manager.run_command(['terraform', 'validate', '-no-color']) 57 | command_manager.run_command(['terraform', 'apply', '-auto-approve', '-input=false', '-compact-warnings', '-no-color']) 58 | 59 | def __perform_destroy(command_manager): 60 | command_manager.run_command(['terraform', 'init', '-no-color']) 61 | command_manager.run_command(['terraform', 'validate', '-no-color']) 62 | command_manager.run_command(['terraform', 'destroy', '-auto-approve', '-no-color']) 63 | 64 | def main(): 65 | args = __parse_arguments() 66 | log = CustomLogger(args.provisioned_product_descriptor) 67 | log.info(f'Command args: {args}') 68 | 69 | command_manager = CommandManager(log) 70 | workspace_manager = WorkspaceManager(log, args.provisioned_product_descriptor) 71 | 72 | # Initialize before the try block so the finally clause can safely test it 73 | workspace_dir = None 74 | exit_code = 0 75 | try: 76 | __set_environment_variables(args) 77 | 78 | workspace_dir = __setup_workspace(workspace_manager) 79 | __write_common_overrides(workspace_dir, args) 80 | 81 | # Perform the action 82 | if args.action == APPLY_ACTION: 83 | __perform_apply(command_manager, workspace_dir, args) 84 | elif args.action == DESTROY_ACTION: 85 | __perform_destroy(command_manager) 86 | 87 | except Exception as exception: 88 | message = str(exception) 89 | # Log every exception with traceback in a single place. 90 | log.error(f'{message} {traceback.format_exc()}') 91 | # Then exit with error status, only writing the exception message to stderr 92 | exit_code = message 93 | 94 | finally: 95 | if workspace_dir: 96 | log.info(f'Removing workspace directory {workspace_dir}') 97 | workspace_manager.remove_workspace_directory() 98 | 99 | sys.exit(exit_code) 100 | 101 | 102 | if __name__ == '__main__': 103 | main() 104 | -------------------------------------------------------------------------------- /sample-provisioning-artifacts/Readme.md: -------------------------------------------------------------------------------- 1 | # Terraform Reference Engine Sample Provisioning Artifacts 2 | 3 | This directory contains sample provisioning artifacts that can be used to test a deployment of the Terraform Reference Engine. 4 | 5 | ## Simple S3 Bucket 6 | 7 | File: s3bucket.tar.gz 8 | 9 | ### Provisioning Parameters 10 | 11 | * bucket_name 12 | 13 | ### Resources 14 | 15 | * A simple S3 bucket with default settings 16 | 17 | ### Launch Role Permissions 18 | 19 | The following permissions are required in the product's launch role in order to provision, update, terminate, and tag. 20 | 21 | * S3 permissions to manage a bucket's lifecycle 22 | * S3 permissions to tag a bucket. This allows the engine to set its tracer tag on the resource.
The tracer tag is used by Service Catalog to identify the resources belonging to the provisioned product. 23 | * Resource group and tagging permissions: This is required for every launch role on a terraform open source product. Service Catalog requires these permissions to apply system and user tags to the resources. 24 | 25 | Example: 26 | 27 | ``` 28 | Statement: 29 | - Effect: Allow 30 | Action: 31 | - s3:CreateBucket* 32 | - s3:DeleteBucket* 33 | - s3:Get* 34 | - s3:List* 35 | - s3:PutBucketTagging 36 | Resource: !Sub "arn:${AWS::Partition}:s3:::*" 37 | - Effect: Allow 38 | Action: 39 | - resource-groups:CreateGroup 40 | - resource-groups:ListGroupResources 41 | Resource: "*" 42 | - Effect: Allow 43 | Action: 44 | - tag:GetResources 45 | - tag:GetTagKeys 46 | - tag:GetTagValues 47 | - tag:TagResources 48 | - tag:UntagResources 49 | Resource: "*" 50 | ``` 51 | 52 | ## S3 Bucket and Notification Topic Using Modules 53 | 54 | File: s3website-module.tar.gz 55 | 56 | This provisioning artifact is an example of using Terraform modules. It contains a local module for S3 resources and a remote module for SNS resources. 57 | 58 | ### Provisioning Parameters 59 | 60 | * bucket_name 61 | * topic_name 62 | 63 | 64 | ### Resources 65 | 66 | * A simple S3 bucket with default settings 67 | * A website configuration on the bucket 68 | * A simple SNS topic with default settings 69 | * A bucket notification configuration for the bucket and topic 70 | 71 | ### Launch Role Permissions 72 | 73 | The following permissions are required in the product's launch role in order to provision, update, terminate, and tag. 74 | 75 | * S3 permissions to manage a bucket's lifecycle, including website and notification configuration 76 | * SNS permissions to manage a topic's lifecycle 77 | * S3 and SNS permissions to tag resources. This allows the engine to set its tracer tag on the resources. The tracer tag is used by Service Catalog to identify the resources belonging to the provisioned product. 78 | * Resource group and tagging permissions: This is required for every launch role on a terraform open source product. Service Catalog requires these permissions to apply system and user tags to the resources. 
79 | 80 | Example: 81 | 82 | ``` 83 | Statement: 84 | - Effect: Allow 85 | Action: 86 | - s3:CreateBucket* 87 | - s3:DeleteBucket* 88 | - s3:Get* 89 | - s3:List* 90 | - s3:PutBucketNotification 91 | - s3:PutBucketWebsite 92 | - s3:PutBucketTagging 93 | Resource: !Sub "arn:${AWS::Partition}:s3:::*" 94 | - Effect: Allow 95 | Action: 96 | - sns:CreateTopic 97 | - sns:DeleteTopic 98 | - sns:GetTopicAttributes 99 | - sns:SetTopicAttributes 100 | - sns:ListTagsForResource 101 | - sns:TagResource 102 | Resource: "*" 103 | - Effect: Allow 104 | Action: 105 | - resource-groups:CreateGroup 106 | - resource-groups:ListGroupResources 107 | Resource: "*" 108 | - Effect: Allow 109 | Action: 110 | - tag:GetResources 111 | - tag:GetTagKeys 112 | - tag:GetTagValues 113 | - tag:TagResources 114 | - tag:UntagResources 115 | Resource: "*" 116 | ``` 117 | -------------------------------------------------------------------------------- /lambda-functions/provisioning-operations-handler/provisioning_operations_handler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import traceback 3 | import json 4 | import logging 5 | import boto3 6 | from botocore.config import Config 7 | from botocore.exceptions import ClientError 8 | 9 | 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | 13 | step_functions_client = None 14 | 15 | # Environment variables 16 | AWS_REGION_KEY: str = 'AWS_REGION' 17 | STATE_MACHINE_ARN_KEY: str = 'STATE_MACHINE_ARN' 18 | # Payload constants 19 | RECORDS_KEY: str = 'Records' 20 | BODY_KEY: str = 'body' 21 | MESSAGE_ID_KEY: str = 'messageId' 22 | TOKEN_KEY: str = 'token' 23 | PROVISIONED_PRODUCT_ID_KEY: str = 'provisionedProductId' 24 | RECORD_ID_KEY: str = 'recordId' 25 | # Step Functions constants 26 | EXECUTION_ARN_KEY: str = 'executionArn' 27 | REQUEST_ID_KEY: str = 'RequestId' 28 | RESPONSE_METADATA_KEY: str = 'ResponseMetadata' 29 | ERROR_KEY: str = 'Error' 30 | CODE_KEY: str = 'Code' 31 | # Return object constants 32 | BATCH_ITEM_FAILURES_KEY: str = 'batchItemFailures' 33 | ITEM_IDENTIFIER_KEY: str = 'itemIdentifier' 34 | # Step functions Error Codes 35 | EXECUTION_ALREADY_EXISTS: str = 'ExecutionAlreadyExists' 36 | 37 | 38 | def __start_state_machine(record: dict, state_machine_arn: str): 39 | state_machine_payload: dict = json.loads(record[BODY_KEY]) 40 | 41 | # These fields are required to start the state machine execution. 42 | # Raise a KeyError if any are missing. 43 | token: str = state_machine_payload[TOKEN_KEY] 44 | provisioned_product_id: str = state_machine_payload[PROVISIONED_PRODUCT_ID_KEY] 45 | record_id: str = state_machine_payload[RECORD_ID_KEY] 46 | execution_name = f'{provisioned_product_id}-{record_id}' 47 | 48 | log.info(f'Starting state machine {state_machine_arn} with token {token} & name: {execution_name} & payload: {state_machine_payload}') 49 | 50 | start_execution_response = step_functions_client.start_execution( 51 | stateMachineArn=state_machine_arn, 52 | name=execution_name, 53 | input=json.dumps(state_machine_payload) 54 | ) 55 | 56 | execution_arn: str = start_execution_response[EXECUTION_ARN_KEY] 57 | start_execution_request_id: str = start_execution_response[RESPONSE_METADATA_KEY][REQUEST_ID_KEY] 58 | 59 | log.info(f'Started state machine execution with arn: {execution_arn} for request Id: {start_execution_request_id}') 60 | 61 | 62 | def handle_sqs_records(event, context): 63 | """ 64 | Starts a state machine execution for each record in an SQS queue payload. 
65 | The environment variable STATE_MACHINE_ARN must be set to indicate the state machine to execute. 66 | :param event: The SQS queue payload 67 | :param context: Lambda context 68 | :return: List of batch item failures 69 | """ 70 | 71 | global step_functions_client 72 | if not step_functions_client: 73 | step_functions_client = boto3.client('stepfunctions', config=Config( 74 | region_name=os.environ.get(AWS_REGION_KEY), 75 | retries={ 76 | 'max_attempts': 3, 77 | 'mode': 'standard' 78 | })) 79 | 80 | state_machine_arn: str = os.environ.get(STATE_MACHINE_ARN_KEY) 81 | records = event[RECORDS_KEY] 82 | log.info(f'Processing a total of: {len(records)} records') 83 | 84 | batchItemFailures = {BATCH_ITEM_FAILURES_KEY: []} 85 | for record in records: 86 | log.info(f'Processing record: {record}') 87 | try: 88 | __start_state_machine(record, state_machine_arn) 89 | except ClientError as clientError: 90 | error_code: str = clientError.response[ERROR_KEY][CODE_KEY] 91 | failing_request_id: str = clientError.response[RESPONSE_METADATA_KEY][REQUEST_ID_KEY] 92 | 93 | if error_code == EXECUTION_ALREADY_EXISTS: 94 | log.warning(f'A state machine execution with the same execution ARN ' 95 | f'already exists for requestId: {failing_request_id} & record {record}') 96 | else: 97 | log.error(f'Processing for record: {record} failed with error: {clientError} ' 98 | f'and requestId: {failing_request_id}') 99 | batchItemFailures[BATCH_ITEM_FAILURES_KEY].append({ITEM_IDENTIFIER_KEY: record[MESSAGE_ID_KEY]}) 100 | 101 | except Exception as exception: 102 | log.error(f'Processing for {record} failed with error: {exception} & stack trace: {traceback.format_exc()}') 103 | batchItemFailures[BATCH_ITEM_FAILURES_KEY].append({ITEM_IDENTIFIER_KEY: record[MESSAGE_ID_KEY]}) 104 | 105 | return batchItemFailures 106 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/osx,linux,python,windows,pycharm,visualstudiocode 3 | 4 | ### Linux ### 5 | *~ 6 | 7 | # temporary files which can be created if a process still has a handle open of a deleted file 8 | .fuse_hidden* 9 | 10 | # KDE directory preferences 11 | .directory 12 | 13 | # Linux trash folder which might appear on any partition or disk 14 | .Trash-* 15 | 16 | # .nfs files are created when an open file is removed but is still being accessed 17 | .nfs* 18 | 19 | ### OSX ### 20 | *.DS_Store 21 | .AppleDouble 22 | .LSOverride 23 | 24 | # Icon must end with two \r 25 | Icon 26 | 27 | # Thumbnails 28 | ._* 29 | 30 | # Files that might appear in the root of a volume 31 | .DocumentRevisions-V100 32 | .fseventsd 33 | .Spotlight-V100 34 | .TemporaryItems 35 | .Trashes 36 | .VolumeIcon.icns 37 | .com.apple.timemachine.donotpresent 38 | 39 | # Directories potentially created on remote AFP share 40 | .AppleDB 41 | .AppleDesktop 42 | Network Trash Folder 43 | Temporary Items 44 | .apdisk 45 | 46 | ### PyCharm ### 47 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 48 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 49 | 50 | # User-specific stuff: 51 | .idea/**/workspace.xml 52 | .idea/**/tasks.xml 53 | .idea/dictionaries 54 | 55 | # Sensitive or high-churn files: 56 | .idea/**/dataSources/ 57 | .idea/**/dataSources.ids 58 | .idea/**/dataSources.xml 59 | .idea/**/dataSources.local.xml 60 | .idea/**/sqlDataSources.xml 61 
| .idea/**/dynamic.xml 62 | .idea/**/uiDesigner.xml 63 | 64 | # Gradle: 65 | .idea/**/gradle.xml 66 | .idea/**/libraries 67 | 68 | # CMake 69 | cmake-build-debug/ 70 | 71 | # Mongo Explorer plugin: 72 | .idea/**/mongoSettings.xml 73 | 74 | ## File-based project format: 75 | *.iws 76 | 77 | ## Plugin-specific files: 78 | 79 | # IntelliJ 80 | /out/ 81 | 82 | # mpeltonen/sbt-idea plugin 83 | .idea_modules/ 84 | 85 | # JIRA plugin 86 | atlassian-ide-plugin.xml 87 | 88 | # Cursive Clojure plugin 89 | .idea/replstate.xml 90 | 91 | # Ruby plugin and RubyMine 92 | /.rakeTasks 93 | 94 | # Crashlytics plugin (for Android Studio and IntelliJ) 95 | com_crashlytics_export_strings.xml 96 | crashlytics.properties 97 | crashlytics-build.properties 98 | fabric.properties 99 | 100 | ### PyCharm Patch ### 101 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 102 | 103 | # *.iml 104 | # modules.xml 105 | # .idea/misc.xml 106 | # *.ipr 107 | 108 | # Sonarlint plugin 109 | .idea/sonarlint 110 | 111 | ### Python ### 112 | # Byte-compiled / optimized / DLL files 113 | __pycache__/ 114 | *.py[cod] 115 | *$py.class 116 | 117 | # C extensions 118 | *.so 119 | 120 | # Distribution / packaging 121 | .Python 122 | build/ 123 | develop-eggs/ 124 | dist/ 125 | downloads/ 126 | eggs/ 127 | .eggs/ 128 | lib/ 129 | lib64/ 130 | parts/ 131 | sdist/ 132 | var/ 133 | wheels/ 134 | *.egg-info/ 135 | .installed.cfg 136 | *.egg 137 | 138 | # PyInstaller 139 | # Usually these files are written by a python script from a template 140 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 141 | *.manifest 142 | *.spec 143 | 144 | # Installer logs 145 | pip-log.txt 146 | pip-delete-this-directory.txt 147 | 148 | # Unit test / coverage reports 149 | htmlcov/ 150 | .tox/ 151 | .coverage 152 | .coverage.* 153 | .cache 154 | .pytest_cache/ 155 | nosetests.xml 156 | coverage.xml 157 | *.cover 158 | .hypothesis/ 159 | 160 | # Translations 161 | *.mo 162 | *.pot 163 | 164 | # Flask stuff: 165 | instance/ 166 | .webassets-cache 167 | 168 | # Scrapy stuff: 169 | .scrapy 170 | 171 | # Sphinx documentation 172 | docs/_build/ 173 | 174 | # PyBuilder 175 | target/ 176 | 177 | # Jupyter Notebook 178 | .ipynb_checkpoints 179 | 180 | # pyenv 181 | .python-version 182 | 183 | # celery beat schedule file 184 | celerybeat-schedule.* 185 | 186 | # SageMath parsed files 187 | *.sage.py 188 | 189 | # Environments 190 | .env 191 | .venv 192 | env/ 193 | venv/ 194 | ENV/ 195 | env.bak/ 196 | venv.bak/ 197 | 198 | # Spyder project settings 199 | .spyderproject 200 | .spyproject 201 | 202 | # Rope project settings 203 | .ropeproject 204 | 205 | # mkdocs documentation 206 | /site 207 | 208 | # mypy 209 | .mypy_cache/ 210 | 211 | ### VisualStudioCode ### 212 | .vscode/* 213 | !.vscode/settings.json 214 | !.vscode/tasks.json 215 | !.vscode/launch.json 216 | !.vscode/extensions.json 217 | .history 218 | 219 | ### Windows ### 220 | # Windows thumbnail cache files 221 | Thumbs.db 222 | ehthumbs.db 223 | ehthumbs_vista.db 224 | 225 | # Folder config file 226 | Desktop.ini 227 | 228 | # Recycle Bin used on file shares 229 | $RECYCLE.BIN/ 230 | 231 | # Windows Installer files 232 | *.cab 233 | *.msi 234 | *.msm 235 | *.msp 236 | 237 | # Windows shortcuts 238 | *.lnk 239 | 240 | # Build folder 241 | 242 | */build/* 243 | 244 | # End of https://www.gitignore.io/api/osx,linux,python,windows,pycharm,visualstudiocode 245 | 246 | # Go 247 | lambda-functions/terraform_open_source_parameter_parser/go.mod 248 
| lambda-functions/terraform_open_source_parameter_parser/go.sum 249 | 250 | #AWS 251 | .aws-sam 252 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/test_poll_command_invocation.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | from unittest.mock import patch, MagicMock 3 | 4 | from botocore.exceptions import ClientError 5 | 6 | import poll_command_invocation 7 | 8 | 9 | class TestPollCommandInvocation(TestCase): 10 | 11 | def setUp(self): 12 | # This is required to reset the mocks 13 | poll_command_invocation.app_config = None 14 | poll_command_invocation.ssm_facade = None 15 | 16 | @patch('poll_command_invocation.Configuration') 17 | @patch('poll_command_invocation.ssm_facade') 18 | def test_poll_command_invocation_happy_path(self: TestCase, 19 | mocked_ssm_facade: MagicMock, 20 | mocked_configuration: MagicMock): 21 | #Arrange 22 | event = { 23 | "commandId": "fc7b5795-aab1-43a8-9fa0-8645409091fe", 24 | "instanceId": "i-0c9a068586ae5c597" 25 | } 26 | mocked_response = { 27 | "errorMessage": "", 28 | "invocationStatus": "Success" 29 | } 30 | mocked_app_config = mocked_configuration.return_value 31 | mocked_ssm_facade.get_command_invocation.return_value = mocked_response 32 | 33 | # Act 34 | response = poll_command_invocation.poll(event, None) 35 | 36 | # Assert 37 | mocked_configuration.assert_called_once() 38 | mocked_ssm_facade.get_command_invocation.assert_called_once_with(event['commandId'], event['instanceId']) 39 | self.assertEqual(response, {'invocationStatus': 'Success', 'errorMessage': ''}) 40 | 41 | @patch('poll_command_invocation.Configuration') 42 | @patch('poll_command_invocation.ssm_facade') 43 | def test_poll_command_invocation_given_ssm_error(self: TestCase, 44 | mocked_ssm_facade: MagicMock, 45 | mocked_configuration: MagicMock): 46 | # Arrange 47 | event = { 48 | "commandId": "fc7b5795-aab1-43a8-9fa0-8645409091fe", 49 | "instanceId": "i-0c9a068586ae5c597" 50 | } 51 | 52 | error_response = { 53 | 'Error': { 54 | 'Message': 'Some SSM 4XX or 5XX error' 55 | }, 56 | 'ResponseMetadata': { 57 | 'RequestId': 'some-random-uuid' 58 | } 59 | } 60 | 61 | mocked_app_config = mocked_configuration.return_value 62 | mocked_ssm_facade.get_command_invocation.side_effect = ClientError( 63 | operation_name='get_command_invocation', 64 | error_response=error_response 65 | ) 66 | 67 | # Act 68 | with self.assertRaises(ClientError) as context: 69 | poll_command_invocation.poll(event, None) 70 | 71 | # Assert 72 | mocked_configuration.assert_called_once() 73 | mocked_ssm_facade.get_command_invocation.assert_called_once_with(event['commandId'], event['instanceId']) 74 | 75 | self.assertEqual(context.expected, ClientError) 76 | self.assertEqual(context.exception.response, error_response) 77 | 78 | @patch('poll_command_invocation.Configuration') 79 | @patch('poll_command_invocation.ssm_facade') 80 | def test_poll_command_invocation_command_id_not_given_throw_RuntimeError(self: TestCase, 81 | mocked_ssm_facade: MagicMock, 82 | mocked_configuration: MagicMock): 83 | event = { 84 | "instanceId": "i-0c9a068586ae5c597" 85 | } 86 | mocked_app_config = mocked_configuration.return_value 87 | 88 | with self.assertRaises(RuntimeError) as context: 89 | poll_command_invocation.poll(event, None) 90 | 91 | mocked_configuration.assert_not_called() 92 | mocked_ssm_facade.assert_not_called() 93 | 94 | self.assertEqual(context.expected, RuntimeError) 95 | 
self.assertEqual(str(context.exception), "commandId must be provided") 96 | 97 | @patch('poll_command_invocation.Configuration') 98 | @patch('poll_command_invocation.ssm_facade') 99 | def test_poll_command_invocation_instance_id_not_given_throw_RuntimeError(self: TestCase, 100 | mocked_ssm_facade: MagicMock, 101 | mocked_configuration: MagicMock): 102 | event = { 103 | "commandId": "fc7b5795-aab1-43a8-9fa0-8645409091fe" 104 | } 105 | mocked_app_config = mocked_configuration.return_value 106 | 107 | with self.assertRaises(RuntimeError) as context: 108 | poll_command_invocation.poll(event, None) 109 | 110 | mocked_configuration.assert_not_called() 111 | mocked_ssm_facade.assert_not_called() 112 | 113 | self.assertEqual(context.expected, RuntimeError) 114 | self.assertEqual(str(context.exception), "instanceId must be provided") 115 | 116 | 117 | if __name__ == '__main__': 118 | main() 119 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/test_cli.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | 3 | from core.cli import create_runuser_command_with_default_user, double_escape_double_quotes, double_escape_double_quotes_and_backslashes, triple_escape_double_single_quotes, escape_quotes_backslashes 4 | 5 | class TestCli(TestCase): 6 | 7 | def test_create_runuser_command_with_default_user_happy_path(self): 8 | # Arrange 9 | base_command = 'base' 10 | expected = "runuser -l ec2-user -c 'base'" 11 | 12 | # Act 13 | actual = create_runuser_command_with_default_user(base_command) 14 | 15 | # Assert 16 | self.assertEqual(expected, actual) 17 | 18 | def test_create_runuser_command_with_default_user_base_command_is_empty(self): 19 | # Act 20 | with self.assertRaises(ValueError) as context: 21 | create_runuser_command_with_default_user('') 22 | 23 | # Assert 24 | self.assertEqual(context.expected, ValueError) 25 | self.assertEqual(str(context.exception), 'base_command must be a non-empty string') 26 | 27 | def test_create_runuser_command_with_default_user_base_command_is_None(self): 28 | # Act 29 | with self.assertRaises(ValueError) as context: 30 | create_runuser_command_with_default_user(None) 31 | 32 | # Assert 33 | self.assertEqual(context.expected, ValueError) 34 | self.assertEqual(str(context.exception), 'base_command must be a non-empty string') 35 | 36 | def test_double_escape_double_quotes_happy_path(self): 37 | # Arrange 38 | input = '"' 39 | expected = '\\"' 40 | 41 | # Act 42 | actual = double_escape_double_quotes(input) 43 | 44 | # Assert 45 | self.assertEqual(expected, actual) 46 | 47 | def test_double_escape_double_quotes_input_is_empty(self): 48 | # Arrange 49 | input = '' 50 | expected = '' 51 | 52 | # Act 53 | actual = double_escape_double_quotes(input) 54 | 55 | # Assert 56 | self.assertEqual(expected, actual) 57 | 58 | def test_double_escape_double_quotes_input_is_None(self): 59 | # Arrange 60 | input = None 61 | expected = None 62 | 63 | # Act 64 | actual = double_escape_double_quotes(input) 65 | 66 | # Assert 67 | self.assertEqual(expected, actual) 68 | 69 | def test_double_escape_double_quotes_and_backslashes_happy_path(self): 70 | # Arrange 71 | input = '"key": "aws_amis", "value": "{\"us-east-1\":\"ami-5f709f34\",\"us-west-2\":\"ami-7f675e4f\"}"' 72 | expected = '\\"key\\": \\"aws_amis\\", \\"value\\": \\"{\\\"us-east-1\\\":\\\"ami-5f709f34\\\",\\\"us-west-2\\\":\\\"ami-7f675e4f\\\"}\\"' 73 | 74 | # Act 75 | actual = 
double_escape_double_quotes_and_backslashes(input) 76 | 77 | # Assert 78 | self.assertEqual(expected, actual) 79 | 80 | def test_double_escape_double_quotes_and_backslashes_input_is_empty(self): 81 | # Arrange 82 | input = '' 83 | expected = '' 84 | 85 | # Act 86 | actual = double_escape_double_quotes_and_backslashes(input) 87 | 88 | # Assert 89 | self.assertEqual(expected, actual) 90 | 91 | def test_double_escape_double_quotes_and_backslashes_input_is_None(self): 92 | # Arrange 93 | input = None 94 | expected = None 95 | 96 | # Act 97 | actual = double_escape_double_quotes_and_backslashes(input) 98 | 99 | # Assert 100 | self.assertEqual(expected, actual) 101 | 102 | def test_triple_escape_double_single_quotes_happy_path(self): 103 | # Arrange 104 | input = "Testing user's parameter" 105 | expected = "Testing user'\\''s parameter" 106 | 107 | # Act 108 | actual = triple_escape_double_single_quotes(input) 109 | 110 | # Assert 111 | self.assertEqual(expected, actual) 112 | 113 | 114 | def test_triple_escape_double_single_quotes_input_is_None(self): 115 | # Arrange 116 | input = None 117 | expected = None 118 | 119 | # Act 120 | actual = triple_escape_double_single_quotes(input) 121 | 122 | # Assert 123 | self.assertEqual(expected, actual) 124 | 125 | 126 | def test_triple_escape_double_single_quotes_input_is_empty(self): 127 | # Arrange 128 | input = '' 129 | expected = '' 130 | 131 | # Act 132 | actual = triple_escape_double_single_quotes(input) 133 | 134 | # Assert 135 | self.assertEqual(expected, actual) 136 | 137 | def test_escape_quotes_backslashes_happy_path(self): 138 | # Arrange 139 | input = [None, '', '"key": "aws_amis", "value": "{\"us-east-1\":\"ami-5f709f34\",\"us-west-2\":\"ami-7f675e4f\"}"', "Testing it's all good ?"] 140 | expected = [None, '', '\\"key\\": \\"aws_amis\\", \\"value\\": \\"{\\\"us-east-1\\\":\\\"ami-5f709f34\\\",\\\"us-west-2\\\":\\\"ami-7f675e4f\\\"}\\"', "Testing it'\\''s all good ?"] 141 | 142 | for index, data in enumerate(input): 143 | # Act 144 | actual = escape_quotes_backslashes(data) 145 | 146 | # Assert 147 | self.assertEqual(expected[index], actual) 148 | 149 | if __name__ == '__main__': 150 | main() 151 | -------------------------------------------------------------------------------- /bin/bash/deploy-tre.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Install dependencies & configure AWS credentials before executing script; see Readme.md for instructions 5 | # Execute script from project root dir 6 | 7 | usage() { 8 | echo "Usage: $0 -r <aws-region> [-e <service-catalog-endpoint>] [-s <true|false>]" 9 | echo "-r AWS region is required." 10 | echo "-e sets the endpoint for calls to Service Catalog. If not present, uses the default endpoint for the region." 11 | echo "-s sets SSL verification for calls to Service Catalog. Allowed values are true|false. Default is true."
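# Example invocation (the region value below is illustrative): ./bin/bash/deploy-tre.sh -r us-east-1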
12 | exit 1; 13 | } 14 | 15 | while getopts ":r:e:s:" opt 16 | do 17 | case "$opt" in 18 | r) 19 | AWS_REGION=$OPTARG 20 | ;; 21 | e) 22 | OVERRIDE_SERVICE_CATALOG_ENDPOINT="ParameterKey=ServiceCatalogEndpoint,ParameterValue=$OPTARG" 23 | ;; 24 | s) 25 | [[ $OPTARG == "true" || $OPTARG == "false" ]] || usage 26 | OVERRIDE_SERVICE_CATALOG_VERIFY_SSL="ParameterKey=ServiceCatalogVerifySsl,ParameterValue=$OPTARG" 27 | ;; 28 | *) 29 | usage 30 | ;; 31 | esac 32 | done 33 | 34 | [[ -z $AWS_REGION ]] && usage 35 | 36 | RAW_AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query "Account" --region $AWS_REGION) 37 | AWS_ACCOUNT_ID=`echo $RAW_AWS_ACCOUNT_ID | sed s/\"//g` 38 | 39 | BOOTSTRAP_BUCKET_NAME=terraform-engine-bootstrap-$AWS_ACCOUNT_ID-$AWS_REGION 40 | SAM_STACK_NAME=SAM-TRE 41 | SAM_DEPLOY_OUTPUT=/tmp/tre-sam-deploy-command.out 42 | SCRIPT_DIR=./bin/bash 43 | 44 | 45 | echo "AWS account: $AWS_ACCOUNT_ID" 46 | echo "AWS region: $AWS_REGION" 47 | 48 | if [ -d "venv" ] 49 | then 50 | echo "Virtual environment directory already exists, skipping creation of virtual environment" 51 | else 52 | echo "Creating a Python virtual environment" 53 | # This assumes that you have python3.9 on your machine 54 | python3 -m venv venv 55 | fi 56 | . venv/bin/activate 57 | 58 | echo "Building the ServiceCatalogTerraformOSParameterParser function" 59 | cd lambda-functions/terraform_open_source_parameter_parser 60 | rm -f go.mod 61 | go mod init terraform_open_source_parameter_parser 62 | go env -w GOPROXY=direct 63 | go mod tidy 64 | 65 | echo "Building the Lambda code" 66 | cd ../.. # project root dir 67 | 68 | # Install wheel because sam build seems to need it. 69 | pip3 install wheel --upgrade 70 | 71 | # Locally install required versions of boto3/botocore, so sam bundles it with the rest of the state machine lambda builds. 72 | # We need to do this because the Lambda runtime environment lags on what version of boto it provides. 73 | # https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html 74 | pip3 install -r lambda-functions/state_machine_lambdas/requirements.txt \ 75 | -t lambda-functions/state_machine_lambdas \ 76 | --upgrade 77 | 78 | GOOS=linux GOARCH=amd64 CGO_ENABLED=0 sam build 79 | 80 | echo "Deploying the bootstrap bucket stack." 81 | source $SCRIPT_DIR/deploy-bootstrap-bucket-stack.sh 82 | 83 | echo "Deploying the Terraform CLI wrapper scripts" 84 | cd wrapper-scripts 85 | python3 setup.py bdist_wheel 86 | aws s3 sync dist s3://$BOOTSTRAP_BUCKET_NAME/dist --region $AWS_REGION 87 | 88 | cd .. # project root dir 89 | 90 | echo "Checking to see if this is a new installation or an update to an existing installation." 91 | STACK_EXISTS_CHECK=`aws cloudformation describe-stacks --stack-name $SAM_STACK_NAME --region $AWS_REGION 2>&1 || true` 92 | if [[ "$STACK_EXISTS_CHECK" =~ "does not exist" ]] 93 | then 94 | echo "First-time installation. Deploying the Terraform reference engine stack with name: $SAM_STACK_NAME" 95 | SAM_STACK_EXISTS=1 96 | else 97 | echo "Verifying describe-stacks command output:" 98 | echo "$STACK_EXISTS_CHECK" 99 | echo $STACK_EXISTS_CHECK | jq . > /dev/null 100 | echo "Updating existing installation.
Deploying the Terraform reference engine stack with name: $SAM_STACK_NAME" 101 | SAM_STACK_EXISTS=0 102 | fi 103 | 104 | # Set up parameter overrides for sam deploy, if any are needed 105 | SERVICE_CATALOG_PARAMETER_OVERRIDES="$OVERRIDE_SERVICE_CATALOG_ENDPOINT $OVERRIDE_SERVICE_CATALOG_VERIFY_SSL" 106 | if [[ $SERVICE_CATALOG_PARAMETER_OVERRIDES =~ [A-Za-z] ]] 107 | then 108 | SAM_DEPLOY_PARAMETER_OVERRIDES="--parameter-overrides $SERVICE_CATALOG_PARAMETER_OVERRIDES" 109 | else 110 | unset SAM_DEPLOY_PARAMETER_OVERRIDES 111 | fi 112 | 113 | echo "Sending output of the sam deploy command to $SAM_DEPLOY_OUTPUT. This is done to check the results after the command has completed." 114 | echo "This may take a while. Please be patient." 115 | sam deploy --s3-bucket $BOOTSTRAP_BUCKET_NAME \ 116 | --stack-name $SAM_STACK_NAME --capabilities CAPABILITY_NAMED_IAM --region $AWS_REGION $SAM_DEPLOY_PARAMETER_OVERRIDES > $SAM_DEPLOY_OUTPUT 2>&1 || true 117 | 118 | if [[ `grep "Successfully created/updated stack" $SAM_DEPLOY_OUTPUT` || `grep "Error: No changes to deploy" $SAM_DEPLOY_OUTPUT` ]] 119 | then 120 | echo "Deployment succeeded" 121 | else 122 | echo "Deployment failed. Check $SAM_DEPLOY_OUTPUT for details." 123 | echo "Deactivating python virtual environment" 124 | exit 1 125 | fi 126 | 127 | # Safely replace the EC2 instances if this is an update to an existing environment 128 | if (( $SAM_STACK_EXISTS == 0 )) 129 | then 130 | echo "Now safely replacing EC2 instances." 131 | pip3 install boto3 --upgrade 132 | export AWS_REGION 133 | python3 $SCRIPT_DIR/replace-ec2-instances.py 134 | fi 135 | 136 | echo "Deactivating python virtual environment" 137 | deactivate 138 | 139 | echo "Deployment finished successfully for account $AWS_ACCOUNT_ID in $AWS_REGION. The script took $SECONDS seconds." 140 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/parser.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/hashicorp/hcl/v2/hclparse" 10 | "github.com/hashicorp/terraform-config-inspect/tfconfig" 11 | ) 12 | 13 | const PrimaryModuleName = "PrimaryModule" 14 | const OverrideModuleName = "OverrideModule" 15 | const OverrideFileSuffix = "override.tf" 16 | const NoFilesToParseExceptionMessage = "No .tf files found. Nothing to parse. Make sure the root directory of the Terraform open source configuration file contains the .tf files for the root module." 
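// Example (illustrative): a file map with a single entry, "main.tf", whose body is
// variable "bucket_name" { type = string }
// yields one Parameter with Key "bucket_name" and Type "string".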
17 | 18 | // ParseParametersFromConfiguration - Takes Terraform configuration represented as a map from file name to string contents 19 | // parses out the variable blocks and returns slice of Parameter pointers 20 | func ParseParametersFromConfiguration(fileMap map[string]string) ([]*Parameter, error) { 21 | if len(fileMap) == 0 { 22 | return nil, ParserInvalidParameterException{ 23 | Message: NoFilesToParseExceptionMessage, 24 | } 25 | } 26 | 27 | primaryFileMap, overrideFileMap := bisectFileMap(fileMap) 28 | 29 | primaryParameterMap := parseParameterMapFromFileMap(primaryFileMap, PrimaryModuleName) 30 | overrideParameterMap := parseParameterMapFromFileMap(overrideFileMap, OverrideModuleName) 31 | parameters := mergeParameterMaps(primaryParameterMap, overrideParameterMap) 32 | 33 | return parameters, nil 34 | } 35 | 36 | // bisects the original file map into primaryFileMap and overrideFileMap 37 | func bisectFileMap(fileMap map[string]string) (map[string]string, map[string]string) { 38 | primaryFileMap := make(map[string]string) 39 | overrideFileMap := make(map[string]string) 40 | 41 | if fileMap == nil || len(fileMap) == 0 { 42 | return primaryFileMap, overrideFileMap 43 | } 44 | 45 | for fileName, fileContents := range fileMap { 46 | if strings.HasSuffix(fileName, OverrideFileSuffix) { 47 | log.Printf("Identified override file: %s", fileName) 48 | overrideFileMap[fileName] = fileContents 49 | } else { 50 | log.Printf("Identified primary file: %s", fileName) 51 | primaryFileMap[fileName] = fileContents 52 | } 53 | } 54 | 55 | return primaryFileMap, overrideFileMap 56 | } 57 | 58 | // parses parameter map from provided file map and TF module 59 | func parseParameterMapFromFileMap(fileMap map[string]string, moduleName string) map[string]*Parameter { 60 | parameterMap := make(map[string]*Parameter) 61 | 62 | parser := hclparse.NewParser() 63 | mod := tfconfig.NewModule(moduleName) 64 | 65 | if fileMap == nil || len(fileMap) == 0 { 66 | return parameterMap 67 | } 68 | 69 | for fileName, fileContents := range fileMap { 70 | log.Printf("Parsing file %s as HCL", fileName) 71 | file, _ := parser.ParseHCL([]byte(fileContents), fileName) 72 | if file == nil { 73 | log.Panicf("Failed to parse file %s as HCL", fileName) 74 | continue 75 | } 76 | tfconfig.LoadModuleFromFile(file, mod) 77 | } 78 | 79 | for _, variable := range mod.Variables { 80 | var defaultValue string 81 | 82 | if variable.Default == nil { 83 | defaultValue = "" 84 | } else { 85 | defaultValueJson, _ := json.Marshal(variable.Default) 86 | defaultValueJsonUnquoted, err := strconv.Unquote(string(defaultValueJson)) 87 | // err indicates that there was no quotation mark in defaultValueJson 88 | // return defaultValueJson string in that case 89 | if err != nil { 90 | defaultValue = string(defaultValueJson) 91 | } else { 92 | defaultValue = defaultValueJsonUnquoted 93 | } 94 | } 95 | 96 | parameterMap[variable.Name] = &Parameter{ 97 | Key: variable.Name, 98 | DefaultValue: defaultValue, 99 | Type: variable.Type, 100 | Description: variable.Description, 101 | IsNoEcho: variable.Sensitive, 102 | } 103 | } 104 | return parameterMap 105 | } 106 | 107 | // merges primary parameter map and override parameter map into a single list of parameters 108 | func mergeParameterMaps(primaryParameterMap map[string]*Parameter, overrideParameterMap map[string]*Parameter) []*Parameter { 109 | var parameters []*Parameter 110 | 111 | if overrideParameterMap != nil && len(overrideParameterMap) != 0 { 112 | for key, overrideParameter := range 
overrideParameterMap { 113 | primaryParameter, ok := primaryParameterMap[key] 114 | if ok { 115 | mergedParameter := mergeParameters(primaryParameter, overrideParameter) 116 | parameters = append(parameters, mergedParameter) 117 | } else { 118 | parameters = append(parameters, overrideParameter) 119 | } 120 | } 121 | } 122 | 123 | if primaryParameterMap != nil && len(primaryParameterMap) != 0 { 124 | for key, primaryParameter := range primaryParameterMap { 125 | _, ok := overrideParameterMap[key] 126 | if !ok { 127 | parameters = append(parameters, primaryParameter) 128 | } 129 | } 130 | } 131 | 132 | return parameters 133 | } 134 | 135 | // merges the primary parameter with the override parameter into a single parameter 136 | func mergeParameters(primaryParameter *Parameter, overrideParameter *Parameter) *Parameter { 137 | mergedParameter := &Parameter{} 138 | 139 | mergedParameter.Key = primaryParameter.Key 140 | 141 | if overrideParameter.DefaultValue == "" { 142 | mergedParameter.DefaultValue = primaryParameter.DefaultValue 143 | } else { 144 | mergedParameter.DefaultValue = overrideParameter.DefaultValue 145 | } 146 | 147 | if overrideParameter.Type == "" { 148 | mergedParameter.Type = primaryParameter.Type 149 | } else { 150 | mergedParameter.Type = overrideParameter.Type 151 | } 152 | 153 | if overrideParameter.Description == "" { 154 | mergedParameter.Description = primaryParameter.Description 155 | } else { 156 | mergedParameter.Description = overrideParameter.Description 157 | } 158 | 159 | mergedParameter.IsNoEcho = overrideParameter.IsNoEcho 160 | 161 | return mergedParameter 162 | } 163 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/get_state_file_outputs.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | import boto3 6 | 7 | from core.configuration import Configuration 8 | from core.exception import log_exception 9 | 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | 13 | # Globals 14 | app_config = None 15 | s3_resource_client = None 16 | state_bucket_name = None 17 | 18 | # Input and output keys 19 | PROVISIONED_PRODUCT_ID_KEY = 'provisionedProductId' 20 | AWS_ACCOUNT_ID_KEY = "awsAccountId" 21 | RECORD_OUTPUTS_KEY = 'recordOutputs' 22 | RECORD_OUTPUT_KEY_KEY = 'key' 23 | RECORD_OUTPUT_VALUE_KEY = 'value' 24 | RECORD_OUTPUT_DESCRIPTION_KEY = 'description' 25 | 26 | # State file keys 27 | STATE_FILE_OUTPUTS_KEY = 'outputs' 28 | STATE_FILE_OUTPUTS_VALUE_KEY = 'value' 29 | STATE_FILE_OUTPUTS_DESCRIPTION_KEY = 'description' 30 | STATE_FILE_OUTPUTS_SENSITIVE_KEY = 'sensitive' 31 | 32 | #Constants 33 | SENSITIVE_VALUE_MARKER = '(sensitive value)' 34 | 35 | # Environment variable keys 36 | STATE_BUCKET_NAME_KEY = 'STATE_BUCKET_NAME' 37 | 38 | def __validate_event(event: dict): 39 | """Validates that all required fields are in the Lambda event and have expected values 40 | 41 | Parameters 42 | ---------- 43 | event: dict, required 44 | The Lambda event to be validated 45 | """ 46 | 47 | if PROVISIONED_PRODUCT_ID_KEY not in event: 48 | raise RuntimeError(f'{PROVISIONED_PRODUCT_ID_KEY} must be provided') 49 | if AWS_ACCOUNT_ID_KEY not in event: 50 | raise RuntimeError(f'{AWS_ACCOUNT_ID_KEY} must be provided') 51 | 52 | def __fetch_state_file_from_s3(bucket_name: str, key: str) -> dict: 53 | """Fetch state file dict from the provided S3 state bucket 54 | 55 | Parameters 56 | ---------- 57 | bucket_name: 
str, required 58 | The name of the state file bucket in S3 59 | 60 | key: str, required 61 | The name of the state file key in S3 62 | 63 | Returns 64 | ------- 65 | dict: The content returned by S3 66 | """ 67 | 68 | content_object = s3_resource_client.Object(bucket_name, key) 69 | state_file_content = content_object.get()['Body'].read().decode() 70 | 71 | try: 72 | return json.loads(state_file_content) 73 | except ValueError: 74 | raise RuntimeError(f'File {key} in bucket {bucket_name} is not in JSON format') 75 | 76 | def __sanitize_output_value(output_block: dict) -> str: 77 | """Returns a sanitized value for a record output 78 | 79 | Parameters 80 | ---------- 81 | output_block: dict, required 82 | The dict containing the block for one output in the state file contents 83 | 84 | Returns 85 | ------- 86 | str: The sanitized version of the output block's value 87 | """ 88 | if STATE_FILE_OUTPUTS_SENSITIVE_KEY in output_block and output_block[STATE_FILE_OUTPUTS_SENSITIVE_KEY]: 89 | return SENSITIVE_VALUE_MARKER 90 | else: 91 | return str(output_block[STATE_FILE_OUTPUTS_VALUE_KEY]) 92 | 93 | def __parse_outputs_from_state_file(state_file_content: dict) -> list: 94 | """Parse outputs from state file to fetch record outputs 95 | 96 | Parameters 97 | ---------- 98 | state_file_content: dict, required 99 | The dict of state file 100 | 101 | Returns 102 | ------- 103 | list: The list of record outputs 104 | """ 105 | record_outputs = [] 106 | 107 | if STATE_FILE_OUTPUTS_KEY not in state_file_content: 108 | return record_outputs 109 | 110 | outputs_block = state_file_content[STATE_FILE_OUTPUTS_KEY] 111 | 112 | for output_key in outputs_block: 113 | record_output = {RECORD_OUTPUT_KEY_KEY: output_key} 114 | 115 | if STATE_FILE_OUTPUTS_VALUE_KEY not in outputs_block[output_key]: 116 | raise RuntimeError(f'Output value is missing for output {output_key}') 117 | 118 | record_output[RECORD_OUTPUT_VALUE_KEY] = __sanitize_output_value(outputs_block[output_key]) 119 | 120 | if STATE_FILE_OUTPUTS_DESCRIPTION_KEY not in outputs_block[output_key]: 121 | record_output[RECORD_OUTPUT_DESCRIPTION_KEY] = None 122 | else: 123 | record_output[RECORD_OUTPUT_DESCRIPTION_KEY] = outputs_block[output_key][STATE_FILE_OUTPUTS_DESCRIPTION_KEY] 124 | 125 | record_outputs.append(record_output) 126 | 127 | return record_outputs 128 | 129 | def parse(event, context) -> dict: 130 | """Lambda handler to parse state file JSON from S3 state bucket to fetch record outputs 131 | 132 | Parameters 133 | ---------- 134 | event: dict, required 135 | The input event to the Lambda function 136 | 137 | context: object, required 138 | Lambda Context runtime methods and attributes 139 | 140 | Returns 141 | ------- 142 | dict: The list of record outputs which contain key, value, and optional description 143 | """ 144 | global app_config 145 | global state_bucket_name 146 | global s3_resource_client 147 | 148 | try: 149 | __validate_event(event) 150 | 151 | if not app_config: 152 | app_config = Configuration() 153 | if not state_bucket_name: 154 | state_bucket_name = os.environ[STATE_BUCKET_NAME_KEY] 155 | if not s3_resource_client: 156 | s3_resource_client = boto3.resource('s3', config=app_config.get_boto_config()) 157 | 158 | state_file_json_key = f'{event[AWS_ACCOUNT_ID_KEY]}/{event[PROVISIONED_PRODUCT_ID_KEY]}' 159 | state_file_content = __fetch_state_file_from_s3(state_bucket_name, state_file_json_key) 160 | record_outputs = __parse_outputs_from_state_file(state_file_content) 161 | 162 | response = {RECORD_OUTPUTS_KEY: record_outputs} 
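        # Editorial note (illustrative values): a successful invocation returns a
        # shape like
        #   {'recordOutputs': [{'key': 'bucket_arn',
        #                       'value': 'arn:aws:s3:::example-bucket',
        #                       'description': 'ARN of the created bucket'}]}
        # where any output marked sensitive in the state file carries the literal
        # value '(sensitive value)'.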
163 | log.info(f'Returning {response}') 164 | return response 165 | 166 | except Exception as e: 167 | log_exception(e) 168 | raise e 169 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/send_apply_command.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from core.cli import create_runuser_command_with_default_user, escape_quotes_backslashes 6 | from core.configuration import Configuration 7 | from core.exception import log_exception 8 | from core.ssm_facade import SsmFacade 9 | 10 | log = logging.getLogger() 11 | log.setLevel(logging.INFO) 12 | 13 | # Globals 14 | app_config = None 15 | state_bucket_name = None 16 | ssm_facade = None 17 | 18 | 19 | #Constants 20 | PROVISION_PRODUCT = 'PROVISION_PRODUCT' 21 | UPDATE_PROVISIONED_PRODUCT = 'UPDATE_PROVISIONED_PRODUCT' 22 | 23 | # Input keys 24 | INSTANCE_ID_KEY = 'instanceId' 25 | OPERATION_KEY = "operation" 26 | PROVISIONED_PRODUCT_ID_KEY = 'provisionedProductId' 27 | AWS_ACCOUNT_ID_KEY = "awsAccountId" 28 | ARTIFACT_PATH_KEY = 'artifactPath' 29 | ARTIFACT_TYPE_KEY = 'artifactType' 30 | LAUNCH_ROLE_ARN_KEY = 'launchRoleArn' 31 | PARAMETERS_KEY = 'parameters' 32 | TRACER_TAG_KEY = 'tracerTag' 33 | TAGS_KEY = 'tags' 34 | TAG_KEY_KEY = 'key' 35 | TAG_VALUE_KEY = 'value' 36 | 37 | # Output keys 38 | COMMAND_ID_KEY = 'commandId' 39 | 40 | # Environment variable keys 41 | STATE_BUCKET_NAME_KEY = 'STATE_BUCKET_NAME' 42 | 43 | 44 | def __validate_event(event: dict): 45 | """Validates that all required fields are in the Lambda event and have expected values 46 | 47 | Parameters 48 | ---------- 49 | event: dict, required 50 | The Lambda event to be validated 51 | """ 52 | 53 | if INSTANCE_ID_KEY not in event: 54 | raise RuntimeError(f'{INSTANCE_ID_KEY} must be provided') 55 | if OPERATION_KEY not in event: 56 | raise RuntimeError(f'{OPERATION_KEY} must be provided') 57 | if event[OPERATION_KEY] not in [PROVISION_PRODUCT, UPDATE_PROVISIONED_PRODUCT]: 58 | raise RuntimeError(f'{OPERATION_KEY} is invalid: {event[OPERATION_KEY]}') 59 | if PROVISIONED_PRODUCT_ID_KEY not in event: 60 | raise RuntimeError(f'{PROVISIONED_PRODUCT_ID_KEY} must be provided') 61 | if AWS_ACCOUNT_ID_KEY not in event: 62 | raise RuntimeError(f'{AWS_ACCOUNT_ID_KEY} must be provided') 63 | if ARTIFACT_PATH_KEY not in event: 64 | raise RuntimeError(f'{ARTIFACT_PATH_KEY} must be provided') 65 | if ARTIFACT_TYPE_KEY not in event: 66 | raise RuntimeError(f'{ARTIFACT_TYPE_KEY} must be provided') 67 | if LAUNCH_ROLE_ARN_KEY not in event: 68 | raise RuntimeError(f'{LAUNCH_ROLE_ARN_KEY} must be provided') 69 | if TRACER_TAG_KEY not in event: 70 | raise RuntimeError(f'{TRACER_TAG_KEY} must be provided') 71 | if TAG_KEY_KEY not in event[TRACER_TAG_KEY]: 72 | raise RuntimeError(f'{TRACER_TAG_KEY} must include {TAG_KEY_KEY}') 73 | if TAG_VALUE_KEY not in event[TRACER_TAG_KEY]: 74 | raise RuntimeError(f'{TRACER_TAG_KEY} must include {TAG_VALUE_KEY}') 75 | 76 | 77 | def __get_optional_json(body, key) -> str: 78 | """Gets an optional entry from a dict and returns a valid json string representing the result 79 | 80 | Parameters 81 | ---------- 82 | body: dict, required 83 | The dict where the key may or may not be found 84 | 85 | key: str, required 86 | The key to look up in the dict 87 | 88 | Returns 89 | ------- 90 | str: Json form of the key-value pair if it exists; otherwise an empty json string. 
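    Example (editorial, hypothetical values):
        __get_optional_json({'parameters': [{'key': 'k', 'value': 'v'}]}, 'parameters')
        returns '[{"key": "k", "value": "v"}]', while a missing key returns '{}'.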
91 | """ 92 | 93 | try: 94 | return json.dumps(body[key]) 95 | except KeyError: 96 | # If the optional key is not found, return an empty json string 97 | return '{}' 98 | 99 | def __get_tags_text(event: dict) -> str: 100 | """Creates the text for the tags parameter for the command. 101 | 102 | Parameters 103 | ---------- 104 | event: dict, required 105 | The input event to the Lambda function 106 | 107 | Returns 108 | ------- 109 | str: The text for the command's tags argument 110 | """ 111 | total_tags = [event[TRACER_TAG_KEY]] 112 | if TAGS_KEY in event: 113 | total_tags += event[TAGS_KEY] 114 | return escape_quotes_backslashes(json.dumps(total_tags)) 115 | 116 | def __get_command_text(event: dict) -> str: 117 | """Creates the command to run on the instance based on the Lambda input event. 118 | 119 | Parameters 120 | ---------- 121 | event: dict, required 122 | The input event to the Lambda function 123 | 124 | Returns 125 | ------- 126 | str: The command text 127 | """ 128 | artifact_parameters_text = escape_quotes_backslashes(__get_optional_json(event, PARAMETERS_KEY)) 129 | tags_text = __get_tags_text(event) 130 | 131 | base_command = f"""python3 -m terraform_runner --action=apply \ 132 | --provisioned-product-descriptor={f'{event[AWS_ACCOUNT_ID_KEY]}/{event[PROVISIONED_PRODUCT_ID_KEY]}'} \ 133 | --launch-role={event[LAUNCH_ROLE_ARN_KEY]} \ 134 | --artifact-path={event[ARTIFACT_PATH_KEY]} \ 135 | --region={app_config.get_region()} \ 136 | --terraform-state-bucket={state_bucket_name} \ 137 | --artifact-parameters="{artifact_parameters_text}" \ 138 | --tags="{tags_text}" """ 139 | return create_runuser_command_with_default_user(base_command) 140 | 141 | def send(event, context) -> dict: 142 | """Lambda handler to send a command to a host to run Terraform apply 143 | 144 | Parameters 145 | ---------- 146 | event: dict, required 147 | The input event to the Lambda function 148 | 149 | context: object, required 150 | Lambda Context runtime methods and attributes 151 | 152 | Returns 153 | ------- 154 | dict: The command ID returned by SSM 155 | """ 156 | log.info(f'Handling event: {event}') 157 | global app_config 158 | global state_bucket_name 159 | global ssm_facade 160 | 161 | try: 162 | __validate_event(event) 163 | 164 | if not app_config: 165 | app_config = Configuration() 166 | if not state_bucket_name: 167 | state_bucket_name = os.environ[STATE_BUCKET_NAME_KEY] 168 | if not ssm_facade: 169 | ssm_facade = SsmFacade(app_config) 170 | 171 | command_text = __get_command_text(event) 172 | 173 | log.info(f'Sending command text {command_text}') 174 | 175 | response = { 176 | COMMAND_ID_KEY: ssm_facade.send_shell_command(command_text, event[INSTANCE_ID_KEY]) 177 | } 178 | log.info(f'Returning {response}') 179 | return response 180 | 181 | except Exception as e: 182 | log_exception(e) 183 | raise e 184 | -------------------------------------------------------------------------------- /lambda-functions/terraform_open_source_parameter_parser/validator_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func TestValidateInputHappy(t *testing.T) { 10 | // setup 11 | input := TerraformOpenSourceParameterParserInput{ 12 | Artifact: Artifact{ 13 | Path: TestArtifactPath, 14 | Type: TestArtifactType, 15 | }, 16 | LaunchRoleArn: TestLaunchRoleArn, 17 | } 18 | 19 | // act 20 | err := ValidateInput(input) 21 | 22 | // assert 23 | if err != nil { 24 | t.Errorf("Validation failed 
for happy path input") 25 | } 26 | } 27 | 28 | func TestValidateInputWithEmptyLaunchRoleHappy(t *testing.T) { 29 | // setup 30 | input := TerraformOpenSourceParameterParserInput{ 31 | Artifact: Artifact{ 32 | Path: TestArtifactPath, 33 | Type: TestArtifactType, 34 | }, 35 | LaunchRoleArn: "", 36 | } 37 | 38 | // act 39 | err := ValidateInput(input) 40 | 41 | // assert 42 | if err != nil { 43 | t.Errorf("Validation failed for happy path input with empty launch role") 44 | } 45 | } 46 | 47 | func TestValidateInputWithEmptyArtifactThrowsParserInvalidParameterException(t *testing.T) { 48 | // setup 49 | input := TerraformOpenSourceParameterParserInput{ 50 | Artifact: Artifact{}, 51 | LaunchRoleArn: TestLaunchRoleArn, 52 | } 53 | expectedErrorMessage := fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactKey) 54 | 55 | // act 56 | err := ValidateInput(input) 57 | 58 | // assert 59 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 60 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 61 | } 62 | } 63 | 64 | func TestValidateInputWithEmptyArtifactPathThrowsParserInvalidParameterException(t *testing.T) { 65 | // setup 66 | input := TerraformOpenSourceParameterParserInput{ 67 | Artifact: Artifact{ 68 | Path: "", 69 | Type: TestArtifactType, 70 | }, 71 | LaunchRoleArn: TestLaunchRoleArn, 72 | } 73 | expectedErrorMessage := fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactPathKey) 74 | 75 | // act 76 | err := ValidateInput(input) 77 | 78 | // assert 79 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 80 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 81 | } 82 | } 83 | 84 | func TestValidateInputWithEmptyArtifactTypeThrowsParserInvalidParameterException(t *testing.T) { 85 | // setup 86 | input := TerraformOpenSourceParameterParserInput{ 87 | Artifact: Artifact{ 88 | Path: TestArtifactPath, 89 | Type: "", 90 | }, 91 | LaunchRoleArn: TestLaunchRoleArn, 92 | } 93 | expectedErrorMessage := fmt.Sprintf(RequiredKeyMissingOrEmptyErrorMessage, ArtifactTypeKey) 94 | 95 | // act 96 | err := ValidateInput(input) 97 | 98 | // assert 99 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 100 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 101 | } 102 | } 103 | 104 | func TestValidateInputWithSyntacticallyIncorrectArnThrowsParserInvalidParameterException(t *testing.T) { 105 | // setup 106 | input := TerraformOpenSourceParameterParserInput{ 107 | Artifact: Artifact{ 108 | Path: TestArtifactPath, 109 | Type: TestArtifactType, 110 | }, 111 | LaunchRoleArn: "fakeArn", 112 | } 113 | expectedErrorMessage := fmt.Sprintf(InvalidLaunchRoleArnSyntaxErrorMessage, "fakeArn") 114 | 115 | // act 116 | err := ValidateInput(input) 117 | 118 | // assert 119 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 120 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 121 | } 122 | } 123 | 124 | func TestValidateInputWithNonIamArnThrowsParserInvalidParameterException(t *testing.T) { 125 | // setup 126 | input := TerraformOpenSourceParameterParserInput{ 127 | Artifact: Artifact{ 128 | Path: TestArtifactPath, 129 | Type: TestArtifactType, 130 | }, 131 | LaunchRoleArn: "arn:aws:sts::829064435212:role/SCLaunchRole", 132 | } 133 | expectedErrorMessage := 
fmt.Sprintf(InvalidIamLaunchRoleArnErrorMessage, "arn:aws:sts::829064435212:role/SCLaunchRole") 134 | 135 | // act 136 | err := ValidateInput(input) 137 | 138 | // assert 139 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 140 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 141 | } 142 | } 143 | 144 | func TestValidateInputWithNonDefaultArtifactTypeThrowsParserInvalidParameterException(t *testing.T) { 145 | // setup 146 | input := TerraformOpenSourceParameterParserInput{ 147 | Artifact: Artifact{ 148 | Path: TestArtifactPath, 149 | Type: "fakeType", 150 | }, 151 | LaunchRoleArn: TestLaunchRoleArn, 152 | } 153 | expectedErrorMessage := fmt.Sprintf(InvalidArtifactTypeErrorMessage, "fakeType") 154 | 155 | // act 156 | err := ValidateInput(input) 157 | 158 | // assert 159 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 160 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 161 | } 162 | } 163 | 164 | func TestValidateInputWithInvalidArtifactPathThrowsParserInvalidParameterException(t *testing.T) { 165 | // setup 166 | input := TerraformOpenSourceParameterParserInput{ 167 | Artifact: Artifact{ 168 | Path: "invalidPath", 169 | Type: TestArtifactType, 170 | }, 171 | LaunchRoleArn: TestLaunchRoleArn, 172 | } 173 | expectedErrorMessage := fmt.Sprintf(InvalidArtifactPathErrorMessage, "invalidPath") 174 | 175 | // act 176 | err := ValidateInput(input) 177 | 178 | // assert 179 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 180 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 181 | } 182 | } 183 | 184 | func TestValidateInputWithNoneS3ArtifactPathThrowsParserInvalidParameterException(t *testing.T) { 185 | // setup 186 | input := TerraformOpenSourceParameterParserInput{ 187 | Artifact: Artifact{ 188 | Path: "https://terraform-configurations-cross-account-demo/product_with_override_var.tar.gz", 189 | Type: TestArtifactType, 190 | }, 191 | LaunchRoleArn: TestLaunchRoleArn, 192 | } 193 | expectedErrorMessage := fmt.Sprintf(InvalidArtifactPathErrorMessage, "https://terraform-configurations-cross-account-demo/product_with_override_var.tar.gz") 194 | 195 | // act 196 | err := ValidateInput(input) 197 | 198 | // assert 199 | if !reflect.DeepEqual(err, ParserInvalidParameterException{Message: expectedErrorMessage}) { 200 | t.Errorf("Validator did not throw ParserInvalidParameterException with expected error message") 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /bin/bash/replace-ec2-instances.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os 3 | import time 4 | 5 | import boto3 6 | 7 | AWS_REGION_KEY = 'AWS_REGION' 8 | AWAIT_SECONDS = 10 9 | INSTANCE_NAME = 'TerraformEngineExecutionInstance' 10 | INSTANCE_FILTERS = [ 11 | {'Name': 'tag:Name', 'Values': [INSTANCE_NAME]} 12 | ] 13 | 14 | if AWS_REGION_KEY in os.environ and os.environ[AWS_REGION_KEY]: 15 | aws_region = os.environ[AWS_REGION_KEY] 16 | else: 17 | raise RuntimeError(f'Environment variable {AWS_REGION_KEY} has not been set.') 18 | 19 | lambda_client = boto3.client('lambda', region_name = aws_region) 20 | step_functions_client = boto3.client('stepfunctions', region_name = aws_region) 21 | ec2_client = boto3.client('ec2', region_name = aws_region) 
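# Editorial summary (not part of the original source) of the flow implemented
# by the main section below:
#   1. Disable the SQS event source mappings so no new work is picked up.
#   2. Wait for running Step Functions executions to drain.
#   3. Terminate the tagged Terraform EC2 instances; the Auto Scaling group
#      provisions replacements.
#   4. Wait until the group's desired capacity of instances is running again.
#   5. Re-enable the event source mappings.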
22 | autoscaling_client = boto3.client('autoscaling', region_name = aws_region) 23 | 24 | def get_event_source_mapping_uuid_tuples(function_names): 25 | function_name_uuid_tuples = [] 26 | for function_name in function_names: 27 | print(f'Getting event source mappings for function {function_name}') 28 | # Not paginating here because we know we have no more than two mappings per function 29 | response = lambda_client.list_event_source_mappings(FunctionName = function_name) 30 | for event_source_mapping in response['EventSourceMappings']: 31 | function_name_uuid_tuples.append((function_name, event_source_mapping['UUID'])) 32 | return function_name_uuid_tuples 33 | 34 | def await_event_source_mapping_state(uuid, required_state): 35 | print(f'Waiting for event source mapping state: {required_state}. {uuid}') 36 | current_state = '' 37 | while current_state != required_state: 38 | time.sleep(AWAIT_SECONDS) 39 | response = lambda_client.get_event_source_mapping(UUID = uuid) 40 | current_state = response['State'] 41 | print(f'Event source mapping state is {current_state}. {uuid}') 42 | 43 | def update_event_source_mappings(function_name_uuid_tuples, enabled): 44 | for function_name_uuid_tuple in function_name_uuid_tuples: 45 | print(f'Updating event source mapping with enabled={enabled}: {function_name_uuid_tuple}') 46 | lambda_client.update_event_source_mapping(FunctionName = function_name_uuid_tuple[0], UUID = function_name_uuid_tuple[1], Enabled = enabled) 47 | # The update is an asynchronous operation, so await its completion. 48 | required_state = 'Enabled' if enabled else 'Disabled' 49 | await_event_source_mapping_state(function_name_uuid_tuple[1], required_state) 50 | 51 | def get_state_machine_arn(state_machine_name): 52 | print(f'Getting state machine arn for {state_machine_name}') 53 | page_iterator = step_functions_client.get_paginator('list_state_machines').paginate() 54 | for page in page_iterator: 55 | for state_machine in page['stateMachines']: 56 | if state_machine['name'] == state_machine_name: 57 | return state_machine['stateMachineArn'] 58 | raise RuntimeError(f'No state machine arn could be found for {state_machine_name}') 59 | 60 | def drain_state_machine_executions(state_machine_name): 61 | state_machine_arn = get_state_machine_arn(state_machine_name) 62 | print(f'Draining state machine executions for {state_machine_arn}') 63 | while True: 64 | # Not paginating here. We only care if there are more than zero executions. 
65 | response = step_functions_client.list_executions(stateMachineArn = state_machine_arn, statusFilter = 'RUNNING') 66 | execution_count = len(response['executions']) 67 | print(f'Found {execution_count} running state machine executions for {state_machine_arn}') 68 | if execution_count == 0: 69 | break 70 | time.sleep(AWAIT_SECONDS) 71 | 72 | def get_instance_ids(filters): 73 | print(f'Getting EC2 instance IDs for filters: {filters}') 74 | paginator = ec2_client.get_paginator('describe_instances') 75 | page_iterator = paginator.paginate(Filters = filters) 76 | 77 | instances = [] 78 | for page in page_iterator: 79 | for reservation in page['Reservations']: 80 | instances += reservation['Instances'] 81 | 82 | return [instance['InstanceId'] for instance in instances] 83 | 84 | def terminate_terraform_ec2_instances(): 85 | instance_ids = get_instance_ids(INSTANCE_FILTERS) 86 | print(f'Terminating Terraform EC2 instances: {instance_ids}') 87 | response = ec2_client.terminate_instances(InstanceIds = instance_ids) 88 | instance_id_count = len(instance_ids) 89 | terminating_instance_count = len(response['TerminatingInstances']) 90 | if instance_id_count != terminating_instance_count: 91 | raise RuntimeError(f'Expected {instance_id_count} instances to be terminated, but only {terminating_instance_count} are actually being terminated.') 92 | 93 | def await_running_terraform_ec2_instances(): 94 | autoscaling_response = autoscaling_client.describe_auto_scaling_groups(AutoScalingGroupNames = ['TerraformAutoscalingGroup']) 95 | autoscaling_desired_capacity = autoscaling_response['AutoScalingGroups'][0]['DesiredCapacity'] 96 | print(f'Waiting for running Terraform EC2 instances. Autoscaling desired capacity is {autoscaling_desired_capacity}.') 97 | 98 | filters = list(INSTANCE_FILTERS)  # copy so the module-level list is not mutated by the append below 99 | filters.append({'Name': 'instance-state-name', 'Values': ['running']}) 100 | while True: 101 | time.sleep(AWAIT_SECONDS) 102 | instance_ids = get_instance_ids(filters) 103 | instance_id_count = len(instance_ids) 104 | print(f'Found {instance_id_count} instances. Waiting for {autoscaling_desired_capacity} running instances.') 105 | if instance_id_count >= autoscaling_desired_capacity: 106 | break 107 | 108 | # Main 109 | start_time = time.time() 110 | print('Pausing SQS message processing in order to safely replace the Terraform EC2 instances.') 111 | print('This may take a few minutes.') 112 | 113 | event_source_mapping_uuid_tuples = get_event_source_mapping_uuid_tuples(['TerraformEngineProvisioningHandlerLambda', 'TerraformEngineTerminateHandlerLambda']) 114 | update_event_source_mappings(event_source_mapping_uuid_tuples, False) 115 | print('Event source mappings have been disabled. SQS messages will remain in their queues until they are enabled.') 116 | 117 | drain_state_machine_executions('ManageProvisionedProductStateMachine') 118 | drain_state_machine_executions('TerminateProvisionedProductStateMachine') 119 | print('All state machine executions have finished. Ready to replace EC2 instances.') 120 | 121 | terminate_terraform_ec2_instances() 122 | await_running_terraform_ec2_instances() 123 | 124 | update_event_source_mappings(event_source_mapping_uuid_tuples, True) 125 | print('Event source mappings have been enabled. 
SQS message processing will now resume.') 126 | print(f'The instance replacement process took {math.floor(time.time() - start_time)} seconds.') 127 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/test_ssm_facade.py: -------------------------------------------------------------------------------- 1 | import json 2 | from unittest import main, TestCase 3 | from unittest.mock import MagicMock, Mock, patch 4 | 5 | from botocore.exceptions import ClientError 6 | 7 | from core.ssm_facade import SsmFacade, DOCUMENT_NAME_RUN_SHELL_COMMAND 8 | 9 | 10 | class TestSsmFacade(TestCase): 11 | 12 | @patch('boto3.client') 13 | def test_send_shell_command_happy_path(self: TestCase, 14 | mocked_client: MagicMock): 15 | # arrange 16 | mocked_app_config = Mock() 17 | mocked_boto_config = Mock() 18 | mocked_app_config.get_boto_config.return_value = mocked_boto_config 19 | 20 | send_command_response = {'Command': {'CommandId': 'commandId'}} 21 | mocked_client.return_value.send_command.return_value = send_command_response 22 | 23 | command_text = 'command-text' 24 | instance_id = 'instance-id' 25 | facade = SsmFacade(mocked_app_config) 26 | 27 | # act 28 | function_response = facade.send_shell_command(command_text, instance_id) 29 | 30 | # assert 31 | mocked_client.assert_called_once_with('ssm', config=mocked_boto_config) 32 | mocked_client.return_value.send_command.assert_called_once_with( 33 | InstanceIds=[instance_id], 34 | DocumentName=DOCUMENT_NAME_RUN_SHELL_COMMAND, 35 | Parameters={'commands': [command_text]}, 36 | CloudWatchOutputConfig={'CloudWatchOutputEnabled': True}) 37 | self.assertEqual(function_response, 'commandId') 38 | 39 | @patch('boto3.client') 40 | def test_send_shell_command_raises_client_error(self: TestCase, 41 | mocked_client: MagicMock): 42 | # arrange 43 | mocked_app_config = Mock() 44 | mocked_boto_config = Mock() 45 | mocked_app_config.get_boto_config.return_value = mocked_boto_config 46 | 47 | mocked_error_response = { 48 | 'Error': { 49 | 'Message': 'Some SSM 4XX or 5XX error' 50 | }, 51 | 'ResponseMetadata': { 52 | 'RequestId': 'some-random-uuid' 53 | } 54 | } 55 | mocked_client.return_value.send_command.side_effect = ClientError( 56 | operation_name='SendCommand', 57 | error_response=mocked_error_response 58 | ) 59 | 60 | 61 | command_text = 'command-text' 62 | instance_id = 'instance-id' 63 | facade = SsmFacade(mocked_app_config) 64 | 65 | # act 66 | with self.assertRaises(ClientError) as context: 67 | facade.send_shell_command(command_text, instance_id) 68 | 69 | # assert 70 | mocked_client.assert_called_once_with('ssm', config=mocked_boto_config) 71 | mocked_client.return_value.send_command.assert_called_once_with( 72 | InstanceIds=[instance_id], 73 | DocumentName=DOCUMENT_NAME_RUN_SHELL_COMMAND, 74 | Parameters={'commands': [command_text]}, 75 | CloudWatchOutputConfig={'CloudWatchOutputEnabled': True}) 76 | self.assertEqual(context.expected, ClientError) 77 | self.assertEqual(context.exception.response, mocked_error_response) 78 | 79 | @patch('boto3.client') 80 | def test_get_command_invocation_happy_path(self: TestCase, 81 | mocked_client: MagicMock): 82 | # Arrange 83 | mocked_app_config = Mock() 84 | mocked_boto_config = Mock() 85 | mocked_app_config.get_boto_config.return_value = mocked_boto_config 86 | 87 | command_id = "fc7b5795-aab1-43a8-9fa0-8645409091fe" 88 | instance_id = "i-0c9a068586ae5c597" 89 | 90 | ssm_client_response = { 91 | "Comment": "", 92 | "ExecutionElapsedTime": "PT0.102S", 93 | 
"ExecutionEndDateTime": "2022-11-17T21:41:16.561Z", 94 | "StandardErrorContent": "", 95 | "CloudWatchOutputConfig": { 96 | "CloudWatchLogGroupName": "Dummy", 97 | "CloudWatchOutputEnabled": True 98 | }, 99 | "InstanceId": instance_id, 100 | "DocumentName": "AWS-RunShellScript", 101 | "DocumentVersion": "1", 102 | "Status": "Success", 103 | "StatusDetails": "Success", 104 | "PluginName": "aws:runShellScript", 105 | "StandardOutputContent": "/usr/bin\n", 106 | "ResponseCode": 0, 107 | "ExecutionStartDateTime": "2022-11-17T21:41:16.561Z", 108 | "CommandId": command_id 109 | } 110 | mocked_ssm_client: MagicMock = mocked_client.return_value 111 | mocked_ssm_client.get_command_invocation.return_value = ssm_client_response 112 | facade = SsmFacade(mocked_app_config) 113 | 114 | # Act 115 | response = facade.get_command_invocation(command_id, instance_id) 116 | 117 | # Assert 118 | mocked_client.assert_called_once_with('ssm', config=mocked_app_config.get_boto_config()) 119 | mocked_ssm_client.get_command_invocation.assert_called_once_with( 120 | CommandId=command_id, 121 | InstanceId=instance_id 122 | ) 123 | 124 | self.assertEqual(response, {'invocationStatus': 'Success', 'errorMessage': ''}) 125 | 126 | @patch('boto3.client') 127 | def test_get_command_invocation_given_ssm_error(self: TestCase, 128 | mocked_client: MagicMock): 129 | # Arrange 130 | mocked_app_config = Mock() 131 | mocked_boto_config = Mock() 132 | mocked_app_config.get_boto_config.return_value = mocked_boto_config 133 | 134 | command_id = "fc7b5795-aab1-43a8-9fa0-8645409091fe" 135 | instance_id = "i-0c9a068586ae5c597" 136 | 137 | error_response = { 138 | 'Error': { 139 | 'Message': 'Some SSM 4XX or 5XX error' 140 | }, 141 | 'ResponseMetadata': { 142 | 'RequestId': 'some-random-uuid' 143 | } 144 | } 145 | 146 | mocked_ssm_client: MagicMock = mocked_client.return_value 147 | mocked_ssm_client.get_command_invocation.side_effect = ClientError( 148 | operation_name='get_command_invocation', 149 | error_response=error_response 150 | ) 151 | facade = SsmFacade(mocked_app_config) 152 | 153 | # Act 154 | with self.assertRaises(ClientError) as context: 155 | facade.get_command_invocation(command_id, instance_id) 156 | 157 | # Assert 158 | mocked_client.assert_called_once_with('ssm', config=mocked_app_config.get_boto_config()) 159 | mocked_ssm_client.get_command_invocation.assert_called_once_with( 160 | CommandId=command_id, 161 | InstanceId=instance_id 162 | ) 163 | 164 | self.assertEqual(context.expected, ClientError) 165 | self.assertEqual(context.exception.response, error_response) 166 | 167 | 168 | if __name__ == '__main__': 169 | main() 170 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/core/service_catalog_facade.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import uuid 4 | from enum import Enum 5 | 6 | from core.configuration import Configuration 7 | 8 | 9 | log = logging.getLogger() 10 | log.setLevel(logging.INFO) 11 | 12 | 13 | ### We must force Lambdas that use this class to use the local version of boto3/botocore 14 | ### because we need the latest release of the Service Catalog SDK. 15 | ### 16 | ### This is required because Lambda runtime environments lag behind on the SDK releases. 17 | ### 18 | ### This code assumes that boto3 and botocore have been installed in the designated local directory. 19 | ### See the Readme and the deploy-tre.sh script for details. 
20 | ### 21 | ### We will be able to remove this code once we are certain that Lambda runtime environments are using the minimum required versions across all regions. 22 | import sys 23 | 24 | LAMBDA_TASK_ROOT = 'LAMBDA_TASK_ROOT' 25 | LOCAL_INSTALL_DIR = 'state_machine_lambdas' 26 | 27 | if LAMBDA_TASK_ROOT in os.environ: 28 | lambda_task_root = os.environ[LAMBDA_TASK_ROOT] 29 | log.info(f'{LAMBDA_TASK_ROOT} was found in os.environ: {lambda_task_root}. Now modifying sys.path to use the local versions of boto3/botocore instead of the versions provided by the Lambda runtime environment.') 30 | 31 | log.info(f'sys.path before modification: {str(sys.path)}') 32 | sys.path.insert(0, f'{lambda_task_root}/{LOCAL_INSTALL_DIR}') 33 | log.info(f'sys.path after modification: {str(sys.path)}') 34 | 35 | else: 36 | log.info(f'{LAMBDA_TASK_ROOT} was not found in os.environ. No modifications were made to sys.path to load the local versions of boto3/botocore. This indicates that this code is not running in a Lambda environment.') 37 | 38 | import boto3 39 | log.info(f'boto3 version: {boto3.__version__}') 40 | ### End of code to use the local build of boto3/botocore 41 | 42 | 43 | # Constants 44 | TRUE = 'true' 45 | SERVICE_CATALOG_VERIFY_SSL = 'SERVICE_CATALOG_VERIFY_SSL' 46 | SERVICE_CATALOG_ENDPOINT = 'SERVICE_CATALOG_ENDPOINT' 47 | 48 | 49 | class Status(Enum): 50 | SUCCEEDED = 0 51 | FAILED = 1 52 | 53 | class ServiceCatalogFacade: 54 | 55 | def __init__(self, app_config: Configuration): 56 | self.__service_catalog_client = self.__get_service_catalog_client(app_config) 57 | 58 | def __get_service_catalog_client(self, app_config: Configuration): 59 | verify = SERVICE_CATALOG_VERIFY_SSL not in os.environ or os.environ[SERVICE_CATALOG_VERIFY_SSL] == TRUE 60 | log.info(f'Constructing Service Catalog client: verify={verify}') 61 | 62 | if SERVICE_CATALOG_ENDPOINT in os.environ and os.environ[SERVICE_CATALOG_ENDPOINT]: 63 | endpoint_url = os.environ[SERVICE_CATALOG_ENDPOINT] 64 | log.info(f'Constructing Service Catalog client with overridden endpoint={endpoint_url}') 65 | return boto3.client('servicecatalog', 66 | verify = verify, 67 | endpoint_url = endpoint_url, 68 | config = app_config.get_boto_config()) 69 | else: 70 | log.info('Constructing Service Catalog client with default endpoint') 71 | return boto3.client('servicecatalog', 72 | verify = verify, 73 | config = app_config.get_boto_config()) 74 | 75 | def __log_response(self, response): 76 | log.info(f'Notified service catalog of workflow results via request Id: ' 77 | f'{response["ResponseMetadata"]["RequestId"]}') 78 | 79 | def notify_provision_succeeded(self, *, 80 | workflow_token: str, 81 | record_id: str, 82 | tracer_tag_key: str, 83 | tracer_tag_value: str, 84 | outputs: list 85 | ): 86 | 87 | response = self.__service_catalog_client.notify_provision_product_engine_workflow_result( 88 | WorkflowToken = workflow_token, 89 | RecordId = record_id, 90 | Status = Status.SUCCEEDED.name, 91 | ResourceIdentifier = { 92 | 'UniqueTag': { 93 | 'Key': tracer_tag_key, 94 | 'Value': tracer_tag_value 95 | } 96 | }, 97 | Outputs = outputs, 98 | IdempotencyToken=str(uuid.uuid4()) 99 | ) 100 | self.__log_response(response) 101 | 102 | def notify_provision_failed(self, *, 103 | workflow_token: str, 104 | record_id: str, 105 | failure_reason: str, 106 | tracer_tag_key: str, 107 | tracer_tag_value: str, 108 | outputs: str 109 | ): 110 | 111 | response = self.__service_catalog_client.notify_provision_product_engine_workflow_result( 112 | WorkflowToken = 
workflow_token, 113 | RecordId = record_id, 114 | Status = Status.FAILED.name, 115 | FailureReason = failure_reason, 116 | ResourceIdentifier = { 117 | 'UniqueTag': { 118 | 'Key': tracer_tag_key, 119 | 'Value': tracer_tag_value 120 | } 121 | }, 122 | Outputs = outputs, 123 | IdempotencyToken=str(uuid.uuid4()) 124 | ) 125 | self.__log_response(response) 126 | 127 | def notify_update_succeeded(self, *, 128 | workflow_token: str, 129 | record_id: str, 130 | outputs: list 131 | ): 132 | 133 | response = self.__service_catalog_client.notify_update_provisioned_product_engine_workflow_result( 134 | WorkflowToken = workflow_token, 135 | RecordId = record_id, 136 | Status = Status.SUCCEEDED.name, 137 | Outputs = outputs, 138 | IdempotencyToken=str(uuid.uuid4()) 139 | ) 140 | self.__log_response(response) 141 | 142 | def notify_update_failed(self, *, 143 | workflow_token: str, 144 | record_id: str, 145 | failure_reason: str, 146 | outputs: str 147 | ): 148 | 149 | response = self.__service_catalog_client.notify_update_provisioned_product_engine_workflow_result( 150 | WorkflowToken = workflow_token, 151 | RecordId = record_id, 152 | Status = Status.FAILED.name, 153 | FailureReason = failure_reason, 154 | Outputs = outputs, 155 | IdempotencyToken=str(uuid.uuid4()) 156 | ) 157 | self.__log_response(response) 158 | 159 | def notify_terminate_succeeded(self, *, 160 | workflow_token: str, 161 | record_id: str, 162 | ): 163 | 164 | response = self.__service_catalog_client.notify_terminate_provisioned_product_engine_workflow_result( 165 | WorkflowToken = workflow_token, 166 | RecordId = record_id, 167 | Status = Status.SUCCEEDED.name, 168 | IdempotencyToken=str(uuid.uuid4()) 169 | ) 170 | self.__log_response(response) 171 | 172 | def notify_terminate_failed(self, *, 173 | workflow_token: str, 174 | record_id: str, 175 | failure_reason: str 176 | ): 177 | 178 | response = self.__service_catalog_client.notify_terminate_provisioned_product_engine_workflow_result( 179 | WorkflowToken = workflow_token, 180 | RecordId = record_id, 181 | Status = Status.FAILED.name, 182 | FailureReason = failure_reason, 183 | IdempotencyToken=str(uuid.uuid4()) 184 | ) 185 | self.__log_response(response) 186 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/test_artifact_manager.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import Mock, patch 3 | from terraform_runner.artifact_manager import download_artifact, ROLE_SESSION_NAME 4 | 5 | 6 | class TestArtifactManager(unittest.TestCase): 7 | 8 | @patch('terraform_runner.artifact_manager.boto3.client') 9 | @patch('tarfile.open') 10 | @patch('terraform_runner.artifact_manager.glob') 11 | def test_download_artifact_happy_path(self, mock_glob, mock_tarfile_open, mock_client): 12 | # arrange 13 | mock_sts = Mock() 14 | mock_s3 = Mock() 15 | mock_client.side_effect = [mock_sts, mock_s3] 16 | mock_credentials = { 17 | 'AccessKeyId': 'access-key', 18 | 'SecretAccessKey': 'secret-key', 19 | 'SessionToken': 'session-token' 20 | } 21 | mock_assume_role_response = {'Credentials': mock_credentials} 22 | mock_sts.assume_role.return_value = mock_assume_role_response 23 | 24 | launch_role_arn = 'launch-role-arn' 25 | artifact_bucket = 'artifact-bucket' 26 | artifact_key = 'artifact' 27 | artifact_path = f's3://{artifact_bucket}/{artifact_key}' 28 | workspace_dir = 'workspace/dir' 29 | local_file = f'{workspace_dir}/artifact.local' 30 | 31 | 
mock_glob.return_value = ['mock.tf'] 32 | 33 | # act 34 | download_artifact(launch_role_arn, artifact_path, workspace_dir) 35 | 36 | # assert 37 | mock_sts.assume_role.assert_called_once_with(RoleArn=launch_role_arn, 38 | RoleSessionName=ROLE_SESSION_NAME) 39 | mock_client.assert_called_with('s3', 40 | aws_access_key_id=mock_credentials['AccessKeyId'], 41 | aws_secret_access_key=mock_credentials['SecretAccessKey'], 42 | aws_session_token=mock_credentials['SessionToken']) 43 | mock_s3.download_file.assert_called_once_with(artifact_bucket, artifact_key, 44 | local_file) 45 | mock_tarfile_open.assert_called_once_with('artifact.local') 46 | 47 | @patch('terraform_runner.artifact_manager.boto3.client') 48 | @patch('tarfile.open') 49 | def test_download_artifact_tarfile_open_exception(self, mock_tarfile_open, mock_client): 50 | # arrange 51 | mock_tarfile_open.side_effect = Exception('mock exception') 52 | 53 | mock_sts = Mock() 54 | mock_s3 = Mock() 55 | mock_client.side_effect = [mock_sts, mock_s3] 56 | mock_credentials = { 57 | 'AccessKeyId': 'access-key', 58 | 'SecretAccessKey': 'secret-key', 59 | 'SessionToken': 'session-token' 60 | } 61 | mock_assume_role_response = {'Credentials': mock_credentials} 62 | mock_sts.assume_role.return_value = mock_assume_role_response 63 | 64 | launch_role_arn = 'launch-role-arn' 65 | artifact_bucket = 'artifact-bucket' 66 | artifact_key = 'artifact.tar.gz' 67 | artifact_path = f's3://{artifact_bucket}/{artifact_key}' 68 | workspace_dir = 'workspace/dir' 69 | local_file = f'{workspace_dir}/artifact.local' 70 | 71 | # act 72 | with self.assertRaises(RuntimeError) as context: 73 | download_artifact(launch_role_arn, artifact_path, workspace_dir) 74 | 75 | # assert 76 | mock_sts.assume_role.assert_called_once_with(RoleArn=launch_role_arn, 77 | RoleSessionName=ROLE_SESSION_NAME) 78 | mock_client.assert_called_with('s3', 79 | aws_access_key_id=mock_credentials['AccessKeyId'], 80 | aws_secret_access_key=mock_credentials['SecretAccessKey'], 81 | aws_session_token=mock_credentials['SessionToken']) 82 | mock_s3.download_file.assert_called_once_with(artifact_bucket, artifact_key, 83 | local_file) 84 | mock_tarfile_open.assert_called_once_with('artifact.local') 85 | self.assertEqual(context.expected, RuntimeError) 86 | self.assertEqual(context.exception.args[0], f'Could not extract files from {artifact_path}: mock exception') 87 | 88 | @patch('terraform_runner.artifact_manager.boto3.client') 89 | def test_download_artifact_path_has_no_bucket(self, mock_client): 90 | # arrange 91 | launch_role_arn = 'launch-role-arn' 92 | artifact_path = '/test-data/foo.tf' 93 | workspace_dir = 'workspace/dir' 94 | 95 | # act and assert 96 | with self.assertRaises(RuntimeError) as context: 97 | download_artifact(launch_role_arn, artifact_path, workspace_dir) 98 | self.assertEqual(context.expected, RuntimeError) 99 | self.assertTrue(context.exception.args[0].startswith(f'Invalid artifact path {artifact_path}')) 100 | 101 | @patch('terraform_runner.artifact_manager.boto3.client') 102 | def test_download_artifact_path_has_no_key(self, mock_client): 103 | # arrange 104 | launch_role_arn = 'launch-role-arn' 105 | artifact_path = 's3://test-bucket' 106 | workspace_dir = 'workspace/dir' 107 | 108 | # act and assert 109 | with self.assertRaises(RuntimeError) as context: 110 | download_artifact(launch_role_arn, artifact_path, workspace_dir) 111 | self.assertEqual(context.expected, RuntimeError) 112 | self.assertTrue(context.exception.args[0].startswith(f'Invalid artifact path {artifact_path}')) 
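    # Editorial note: the final test below simulates an artifact that downloads
    # and extracts successfully but contains no .tf files (glob returns an empty
    # list), which download_artifact surfaces as a RuntimeError.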
113 | 114 | @patch('terraform_runner.artifact_manager.boto3.client') 115 | @patch('tarfile.open') 116 | @patch('terraform_runner.artifact_manager.glob') 117 | def test_download_artifact_no_terraform_files(self, mock_glob, mock_tarfile_open, mock_client): 118 | # arrange 119 | mock_sts = Mock() 120 | mock_s3 = Mock() 121 | mock_client.side_effect = [mock_sts, mock_s3] 122 | mock_credentials = { 123 | 'AccessKeyId': 'access-key', 124 | 'SecretAccessKey': 'secret-key', 125 | 'SessionToken': 'session-token' 126 | } 127 | mock_assume_role_response = {'Credentials': mock_credentials} 128 | mock_sts.assume_role.return_value = mock_assume_role_response 129 | 130 | launch_role_arn = 'launch-role-arn' 131 | artifact_bucket = 'artifact-bucket' 132 | artifact_key = 'artifact' 133 | artifact_path = f's3://{artifact_bucket}/{artifact_key}' 134 | workspace_dir = 'workspace/dir' 135 | local_file = f'{workspace_dir}/artifact.local' 136 | 137 | mock_glob.return_value = [] 138 | 139 | # act 140 | with self.assertRaises(RuntimeError) as context: 141 | download_artifact(launch_role_arn, artifact_path, workspace_dir) 142 | 143 | # assert 144 | mock_sts.assume_role.assert_called_once_with(RoleArn=launch_role_arn, 145 | RoleSessionName=ROLE_SESSION_NAME) 146 | mock_client.assert_called_with('s3', 147 | aws_access_key_id=mock_credentials['AccessKeyId'], 148 | aws_secret_access_key=mock_credentials['SecretAccessKey'], 149 | aws_session_token=mock_credentials['SessionToken']) 150 | mock_s3.download_file.assert_called_once_with(artifact_bucket, artifact_key, 151 | local_file) 152 | mock_tarfile_open.assert_called_once_with('artifact.local') 153 | 154 | self.assertEqual(context.expected, RuntimeError) 155 | self.assertEqual(str(context.exception), 'No .tf files found. Nothing to parse. 
Make sure the root directory of the Terraform open source configuration file contains the .tf files for the root module.') 156 | 157 | 158 | if __name__ == '__main__': 159 | unittest.main() 160 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/test_notify_terminate_result.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | from unittest.mock import MagicMock, Mock, patch, ANY 3 | 4 | from botocore.exceptions import ClientError 5 | 6 | import notify_terminate_result 7 | 8 | 9 | class LambdaContext: 10 | def __init__(self, invoked_function_arn: str): 11 | self.invoked_function_arn = invoked_function_arn 12 | 13 | 14 | class TestNotifyTerminateResult(TestCase): 15 | 16 | def setUp(self: TestCase): 17 | # This is required to reset the mocks 18 | notify_terminate_result.app_config = None 19 | notify_terminate_result.service_catalog_facade = None 20 | 21 | @patch('notify_terminate_result.Configuration') 22 | @patch('notify_terminate_result.service_catalog_facade') 23 | def test_notify_succeeded_happy_path(self: TestCase, 24 | mocked_service_catalog_facade: MagicMock, 25 | mocked_configuration: MagicMock): 26 | 27 | # arrange 28 | mocked_app_config = Mock() 29 | mocked_app_config.get_region.return_value = 'us-east-1' 30 | mocked_configuration.return_value = mocked_app_config 31 | 32 | mocked_event = { 33 | "token": "token3000", 34 | "awsAccountId": "012345678910", 35 | "provisionedProductId": "pp-id", 36 | "provisionedProductName": "pp-name", 37 | "recordId": "rec-123" 38 | } 39 | 40 | lambda_context: LambdaContext = LambdaContext( 41 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyProvisionProductEngineWorkflowResult') 42 | 43 | # act 44 | notify_terminate_result.notify(mocked_event, lambda_context) 45 | 46 | # assert 47 | mocked_configuration.assert_called_once() 48 | mocked_service_catalog_facade.notify_terminate_succeeded.assert_called_once_with( 49 | workflow_token = mocked_event['token'], 50 | record_id = mocked_event['recordId'], 51 | ) 52 | 53 | @patch('notify_terminate_result.Configuration') 54 | @patch('notify_terminate_result.service_catalog_facade') 55 | def test_notify_with_tre_failure(self: TestCase, 56 | mocked_service_catalog_facade: MagicMock, 57 | mocked_configuration: MagicMock): 58 | # arrange 59 | mocked_app_config = Mock() 60 | mocked_app_config.get_region.return_value = 'us-east-1' 61 | mocked_configuration.return_value = mocked_app_config 62 | 63 | mocked_event = { 64 | "token": "token3000", 65 | "awsAccountId": "012345678910", 66 | "provisionedProductId": "pp-id", 67 | "provisionedProductName": "pp-name", 68 | "recordId": "rec-123", 69 | "error": "RuntimeError", 70 | "errorMessage": "I failed" 71 | } 72 | 73 | lambda_context: LambdaContext = LambdaContext( 74 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyProvisionProductEngineWorkflowResult') 75 | 76 | # act 77 | notify_terminate_result.notify(mocked_event, lambda_context) 78 | 79 | # assert 80 | mocked_configuration.assert_called_once() 81 | mocked_service_catalog_facade.notify_terminate_failed.assert_called_once_with( 82 | workflow_token = mocked_event['token'], 83 | record_id = mocked_event['recordId'], 84 | failure_reason = mocked_event['errorMessage'] 85 | ) 86 | 87 | @patch('notify_terminate_result.Configuration') 88 | @patch('notify_terminate_result.service_catalog_facade') 89 | def test_notify_when_succeeded_internal_error(self: TestCase, 90 | 
mocked_service_catalog_facade: MagicMock, 91 | mocked_configuration: MagicMock): 92 | # arrange 93 | mocked_app_config = Mock() 94 | mocked_app_config.get_region.return_value = 'us-east-1' 95 | mocked_configuration.return_value = mocked_app_config 96 | 97 | mocked_event = { 98 | "token": "token3000", 99 | "awsAccountId": "012345678910", 100 | "provisionedProductId": "pp-id", 101 | "provisionedProductName": "pp-name", 102 | "recordId": "rec-123" 103 | } 104 | 105 | lambda_context: LambdaContext = LambdaContext( 106 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyProvisionProductEngineWorkflowResult') 107 | 108 | error_response = { 109 | 'Error': { 110 | 'Message': 'An internal error has occurred' 111 | }, 112 | 'ResponseMetadata': { 113 | 'RequestId': 'some-random-uuid' 114 | } 115 | } 116 | mocked_client_error = ClientError( 117 | operation_name='NotifyProvisionProductEngineWorkflowResult', 118 | error_response=error_response 119 | ) 120 | mocked_service_catalog_facade.notify_terminate_succeeded.side_effect = mocked_client_error 121 | 122 | # act 123 | with self.assertRaises(ClientError) as context: 124 | notify_terminate_result.notify(mocked_event, lambda_context) 125 | 126 | # assert 127 | mocked_configuration.assert_called_once() 128 | mocked_service_catalog_facade.notify_terminate_succeeded.assert_called_once_with( 129 | workflow_token = mocked_event['token'], 130 | record_id = mocked_event['recordId'] 131 | ) 132 | 133 | self.assertEqual(context.expected, ClientError) 134 | self.assertEqual(context.exception, mocked_client_error) 135 | 136 | @patch('notify_terminate_result.Configuration') 137 | @patch('notify_terminate_result.service_catalog_facade') 138 | def test_notify_when_failed_internal_error(self: TestCase, 139 | mocked_service_catalog_facade: MagicMock, 140 | mocked_configuration: MagicMock): 141 | # arrange 142 | mocked_app_config = Mock() 143 | mocked_app_config.get_region.return_value = 'us-east-1' 144 | mocked_configuration.return_value = mocked_app_config 145 | 146 | mocked_event = { 147 | "token": "token3000", 148 | "awsAccountId": "012345678910", 149 | "provisionedProductId": "pp-id", 150 | "provisionedProductName": "pp-name", 151 | "recordId": "rec-123", 152 | "error": "RuntimeError", 153 | "errorMessage": "I failed" 154 | } 155 | 156 | lambda_context: LambdaContext = LambdaContext( 157 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyProvisionProductEngineWorkflowResult') 158 | 159 | error_response = { 160 | 'Error': { 161 | 'Message': 'An internal error has occurred' 162 | }, 163 | 'ResponseMetadata': { 164 | 'RequestId': 'some-random-uuid' 165 | } 166 | } 167 | mocked_client_error = ClientError( 168 | operation_name='NotifyProvisionProductEngineWorkflowResult', 169 | error_response=error_response 170 | ) 171 | mocked_service_catalog_facade.notify_terminate_failed.side_effect = mocked_client_error 172 | 173 | # act 174 | with self.assertRaises(ClientError) as context: 175 | notify_terminate_result.notify(mocked_event, lambda_context) 176 | 177 | # assert 178 | mocked_configuration.assert_called_once() 179 | mocked_service_catalog_facade.notify_terminate_failed.assert_called_once_with( 180 | workflow_token = mocked_event['token'], 181 | record_id = mocked_event['recordId'], 182 | failure_reason = mocked_event['errorMessage'] 183 | ) 184 | 185 | self.assertEqual(context.expected, ClientError) 186 | self.assertEqual(context.exception, mocked_client_error) 187 | 188 | 189 | if __name__ == '__main__': 190 | main() 191 | 
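# Editorial note: thanks to the __main__ guard above, this test module can be
# run directly from its own directory (with boto3/botocore installed), e.g.:
#   python3 test_notify_terminate_result.py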
-------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/test_select_worker_host.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | from unittest.mock import patch, MagicMock 3 | 4 | from botocore.exceptions import ClientError 5 | 6 | import select_worker_host 7 | 8 | 9 | class TestSelectWorkerHost(TestCase): 10 | 11 | def setUp(self): 12 | # This is required to reset the mocks 13 | select_worker_host.app_config = None 14 | select_worker_host.ec2_client = None 15 | 16 | @patch('select_worker_host.Configuration') 17 | @patch('boto3.client') 18 | def test_select_worker_host_given_no_hosts(self: TestCase, 19 | mocked_client: MagicMock, 20 | mocked_configuration: MagicMock): 21 | mocked_app_config = mocked_configuration.return_value 22 | 23 | with self.assertRaises(RuntimeError) as context: 24 | select_worker_host.select(None, None) 25 | 26 | mocked_configuration.assert_called_once() 27 | mocked_client.assert_called_once_with('ec2', config=mocked_app_config.get_boto_config()) 28 | self.assertEqual(context.expected, RuntimeError) 29 | self.assertEqual(str(context.exception), 'No usable EC2 instances found') 30 | 31 | @patch('select_worker_host.Configuration') 32 | @patch('boto3.client') 33 | def test_select_worker_host_given_one_host(self: TestCase, 34 | mocked_client: MagicMock, 35 | mocked_configuration: MagicMock): 36 | mocked_app_config = mocked_configuration.return_value 37 | mocked_ec2_client: MagicMock = mocked_client.return_value 38 | mocked_paginator: MagicMock = mocked_ec2_client.get_paginator.return_value 39 | mocked_paginator.paginate.return_value = [ 40 | { 41 | 'Reservations': [ 42 | { 43 | 'Instances': [ 44 | { 45 | 'InstanceId': 'instance-0' 46 | }, 47 | ] 48 | } 49 | ] 50 | } 51 | ] 52 | 53 | response = select_worker_host.select(None, None) 54 | 55 | mocked_configuration.assert_called_once() 56 | mocked_client.assert_called_once_with('ec2', config=mocked_app_config.get_boto_config()) 57 | mocked_ec2_client.get_paginator.assert_called_once_with('describe_instances') 58 | mocked_paginator.paginate.assert_called_once_with(Filters=[ 59 | { 60 | 'Name': 'tag:Name', 61 | 'Values': ['TerraformEngineExecutionInstance'] 62 | }, 63 | { 64 | 'Name': 'instance-state-name', 65 | 'Values': ['running'] 66 | } 67 | ]) 68 | 69 | self.assertEqual(response, {'instanceId': 'instance-0'}) 70 | 71 | @patch('select_worker_host.Configuration') 72 | @patch('boto3.client') 73 | def test_select_worker_host_given_two_hosts(self: TestCase, 74 | mocked_client: MagicMock, 75 | mocked_configuration: MagicMock): 76 | mocked_app_config = mocked_configuration.return_value 77 | mocked_ec2_client: MagicMock = mocked_client.return_value 78 | mocked_paginator: MagicMock = mocked_ec2_client.get_paginator.return_value 79 | mocked_paginator.paginate.return_value = [ 80 | { 81 | 'Reservations': [ 82 | { 83 | 'Instances': [ 84 | { 85 | 'InstanceId': 'instance-0' 86 | }, 87 | { 88 | 'InstanceId': 'instance-1' 89 | }, 90 | ] 91 | } 92 | ] 93 | } 94 | ] 95 | 96 | response = select_worker_host.select(None, None) 97 | 98 | mocked_configuration.assert_called_once() 99 | mocked_client.assert_called_once_with('ec2', config=mocked_app_config.get_boto_config()) 100 | mocked_ec2_client.get_paginator.assert_called_once_with('describe_instances') 101 | mocked_paginator.paginate.assert_called_once_with(Filters=[ 102 | { 103 | 'Name': 'tag:Name', 104 | 'Values': 
['TerraformEngineExecutionInstance'] 105 | }, 106 | { 107 | 'Name': 'instance-state-name', 108 | 'Values': ['running'] 109 | } 110 | ]) 111 | 112 | instance_id = response['instanceId'] 113 | self.assertRegex(instance_id, r'instance-\d') 114 | 115 | @patch('select_worker_host.Configuration') 116 | @patch('boto3.client') 117 | def test_select_worker_host_given_three_hosts(self: TestCase, 118 | mocked_client: MagicMock, 119 | mocked_configuration: MagicMock): 120 | mocked_app_config = mocked_configuration.return_value 121 | mocked_ec2_client: MagicMock = mocked_client.return_value 122 | mocked_paginator: MagicMock = mocked_ec2_client.get_paginator.return_value 123 | mocked_paginator.paginate.return_value = [ 124 | { 125 | 'Reservations': [ 126 | { 127 | 'Instances': [ 128 | { 129 | 'InstanceId': 'instance-0' 130 | }, 131 | { 132 | 'InstanceId': 'instance-1' 133 | }, 134 | ] 135 | } 136 | ] 137 | }, 138 | { 139 | 'Reservations': [ 140 | { 141 | 'Instances': [ 142 | { 143 | 'InstanceId': 'instance-3' 144 | }, 145 | ] 146 | } 147 | ] 148 | } 149 | ] 150 | 151 | response = select_worker_host.select(None, None) 152 | 153 | mocked_configuration.assert_called_once() 154 | mocked_client.assert_called_once_with('ec2', config=mocked_app_config.get_boto_config()) 155 | mocked_ec2_client.get_paginator.assert_called_once_with('describe_instances') 156 | mocked_paginator.paginate.assert_called_once_with(Filters=[ 157 | { 158 | 'Name': 'tag:Name', 159 | 'Values': ['TerraformEngineExecutionInstance'] 160 | }, 161 | { 162 | 'Name': 'instance-state-name', 163 | 'Values': ['running'] 164 | } 165 | ]) 166 | 167 | instance_id = response['instanceId'] 168 | self.assertRegex(instance_id, r'instance-\d') 169 | 170 | @patch('select_worker_host.Configuration') 171 | @patch('boto3.client') 172 | def test_select_worker_host_given_ec2_error(self: TestCase, 173 | mocked_client: MagicMock, 174 | mocked_configuration: MagicMock): 175 | mocked_app_config = mocked_configuration.return_value 176 | mocked_ec2_client: MagicMock = mocked_client.return_value 177 | mocked_paginator: MagicMock = mocked_ec2_client.get_paginator.return_value 178 | 179 | error_response = { 180 | 'Error': { 181 | 'Message': 'Some EC2 4XX or 5XX error' 182 | }, 183 | 'ResponseMetadata': { 184 | 'RequestId': 'some-random-uuid' 185 | } 186 | } 187 | 188 | mocked_client_error = ClientError( 189 | operation_name='describe_instances', 190 | error_response=error_response 191 | ) 192 | mocked_paginator.paginate.side_effect = mocked_client_error 193 | 194 | with self.assertRaises(ClientError) as context: 195 | select_worker_host.select(None, None) 196 | 197 | mocked_configuration.assert_called_once() 198 | mocked_client.assert_called_once_with('ec2', config=mocked_app_config.get_boto_config()) 199 | mocked_ec2_client.get_paginator.assert_called_once_with('describe_instances') 200 | mocked_paginator.paginate.assert_called_once_with(Filters=[ 201 | { 202 | 'Name': 'tag:Name', 203 | 'Values': ['TerraformEngineExecutionInstance'] 204 | }, 205 | { 206 | 'Name': 'instance-state-name', 207 | 'Values': ['running'] 208 | } 209 | ]) 210 | 211 | self.assertEqual(context.expected, ClientError) 212 | self.assertEqual(str(context.exception), str(mocked_client_error)) 213 | 214 | 215 | if __name__ == '__main__': 216 | main() 217 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/test_send_destroy_command.py: -------------------------------------------------------------------------------- 1 | import json 2 |
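# Event contract exercised below (inferred from this file's own assertions; the
# field descriptions are assumptions, not taken from the handler source):
#   instanceId           - SSM-managed EC2 worker host that runs the command
#   operation            - must be exactly TERMINATE_PROVISIONED_PRODUCT
#   provisionedProductId - Service Catalog provisioned product id
#   awsAccountId         - account owning the provisioned product
#   launchRoleArn        - IAM role the terraform_runner assumes
#   recordId             - Service Catalog record id for this operation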
from unittest import main, TestCase 3 | from unittest.mock import MagicMock, Mock, patch 4 | 5 | from botocore.exceptions import ClientError 6 | 7 | import send_destroy_command 8 | 9 | 10 | class TestSendDestroyCommand(TestCase): 11 | 12 | def setUp(self: TestCase): 13 | # This is required to reset the mocks 14 | send_destroy_command.app_config = None 15 | send_destroy_command.ssm_facade = None 16 | 17 | @patch('send_destroy_command.Configuration') 18 | @patch('send_destroy_command.os') 19 | @patch('send_destroy_command.ssm_facade') 20 | def test_send_happy_path(self: TestCase, 21 | mocked_ssm_facade: MagicMock, 22 | mocked_os: MagicMock, 23 | mocked_configuration: MagicMock): 24 | # arrange 25 | state_bucket_name = 'state-bucket-name' 26 | mocked_os.environ.__getitem__.return_value = state_bucket_name 27 | mocked_app_config = Mock() 28 | mocked_app_config.get_region.return_value = 'us-east-1' 29 | mocked_configuration.return_value = mocked_app_config 30 | mocked_ssm_facade.send_shell_command.return_value = 'command-id' 31 | 32 | mocked_event = { 33 | "instanceId": "instance-id", 34 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 35 | "provisionedProductId": "pp-id", 36 | "awsAccountId": "account-Id", 37 | "provisionedProductName": "pp-name", 38 | "recordId": "rec-id", 39 | "launchRoleArn": 'launch-role-arn', 40 | } 41 | 42 | # The indents here are weird because it needs to match the actual command including whitespace. 43 | expected_command_text = f"""runuser -l ec2-user -c 'python3 -m terraform_runner --action=destroy \ 44 | --provisioned-product-descriptor={mocked_event['awsAccountId'] + '/' + mocked_event['provisionedProductId']} \ 45 | --launch-role={mocked_event['launchRoleArn']} \ 46 | --region={mocked_app_config.get_region()} \ 47 | --terraform-state-bucket={state_bucket_name}'""" 48 | 49 | # act 50 | function_response = send_destroy_command.send(mocked_event, None) 51 | 52 | # assert 53 | mocked_configuration.assert_called_once() 54 | mocked_ssm_facade.send_shell_command.assert_called_once_with(expected_command_text, 'instance-id') 55 | self.assertEqual(function_response, {'commandId': 'command-id'}) 56 | 57 | @patch('send_destroy_command.Configuration') 58 | @patch('send_destroy_command.os') 59 | @patch('send_destroy_command.ssm_facade') 60 | def test_send_ssm_client_error(self: TestCase, 61 | mocked_ssm_facade: MagicMock, 62 | mocked_os: MagicMock, 63 | mocked_configuration: MagicMock): 64 | # arrange 65 | state_bucket_name = 'state-bucket-name' 66 | mocked_os.environ.__getitem__.return_value = state_bucket_name 67 | mocked_error_response = { 68 | 'Error': { 69 | 'Message': 'Some SSM 4XX or 5XX error' 70 | }, 71 | 'ResponseMetadata': { 72 | 'RequestId': 'some-random-uuid' 73 | } 74 | } 75 | mocked_ssm_facade.send_shell_command.side_effect = ClientError( 76 | operation_name='SendCommand', 77 | error_response=mocked_error_response 78 | ) 79 | 80 | mocked_app_config = Mock() 81 | mocked_app_config.get_region.return_value = 'us-east-1' 82 | mocked_configuration.return_value = mocked_app_config 83 | 84 | mocked_event = { 85 | "instanceId": "instanceId", 86 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 87 | "provisionedProductId": "pp-id", 88 | "awsAccountId": "account-Id", 89 | "provisionedProductName": "pp-name", 90 | "recordId": "rec-id", 91 | "launchRoleArn": "launch-role-arn", 92 | } 93 | 94 | # The indents here are weird because it needs to match the actual command including whitespace. 
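# (Each trailing backslash inside the f-string below is a Python string-literal
#  line continuation, so expected_command_text ends up as a single line; the
#  assert_called_once_with later does an exact string match against the command
#  the handler builds, which is why this indentation cannot be "prettified".)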
95 | expected_command_text = f"""runuser -l ec2-user -c 'python3 -m terraform_runner --action=destroy \ 96 | --provisioned-product-descriptor={mocked_event['awsAccountId'] + '/' + mocked_event['provisionedProductId']} \ 97 | --launch-role={mocked_event['launchRoleArn']} \ 98 | --region={mocked_app_config.get_region()} \ 99 | --terraform-state-bucket={state_bucket_name}'""" 100 | 101 | # act 102 | with self.assertRaises(ClientError) as context: 103 | send_destroy_command.send(mocked_event, None) 104 | 105 | # assert 106 | mocked_configuration.assert_called_once() 107 | mocked_ssm_facade.send_shell_command.assert_called_once_with(expected_command_text, 'instanceId') 108 | self.assertEqual(context.expected, ClientError) 109 | self.assertEqual(context.exception.response, mocked_error_response) 110 | 111 | def test_send_missing_operation(self: TestCase): 112 | # arrange 113 | mocked_event = { 114 | "instanceId": "instance-id", 115 | "provisionedProductId": "pp-id", 116 | "provisionedProductName": "pp-name", 117 | "recordId": "rec-id", 118 | "launchRoleArn": "arn", 119 | } 120 | 121 | # act 122 | with self.assertRaises(RuntimeError) as context: 123 | send_destroy_command.send(mocked_event, None) 124 | 125 | # assert 126 | self.assertEqual(context.expected, RuntimeError) 127 | self.assertEqual(str(context.exception), "operation must be provided") 128 | 129 | def test_send_invalid_operation(self: TestCase): 130 | # arrange 131 | mocked_event = { 132 | "instanceId": "instance-id", 133 | "operation": "invalid-operation", 134 | "provisionedProductId": "pp-id", 135 | "awsAccountId": "account-Id", 136 | "provisionedProductName": "pp-name", 137 | "recordId": "rec-id", 138 | "launchRoleArn": "arn", 139 | } 140 | 141 | # act 142 | with self.assertRaises(RuntimeError) as context: 143 | send_destroy_command.send(mocked_event, None) 144 | 145 | # assert 146 | self.assertEqual(context.expected, RuntimeError) 147 | self.assertEqual(str(context.exception), f"operation must be TERMINATE_PROVISIONED_PRODUCT but was {mocked_event['operation']}") 148 | 149 | def test_send_missing_provisioned_product_id(self: TestCase): 150 | # arrange 151 | mocked_event = { 152 | "instanceId": "instance-id", 153 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 154 | "awsAccountId": "account-Id", 155 | "provisionedProductName": "pp-name", 156 | "recordId": "rec-id", 157 | "launchRoleArn": "arn", 158 | } 159 | 160 | # act 161 | with self.assertRaises(RuntimeError) as context: 162 | send_destroy_command.send(mocked_event, None) 163 | 164 | # assert 165 | self.assertEqual(context.expected, RuntimeError) 166 | self.assertEqual(str(context.exception), "provisionedProductId must be provided") 167 | 168 | def test_send_missing_instance_id(self: TestCase): 169 | # arrange 170 | mocked_event = { 171 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 172 | "provisionedProductId": "pp-id", 173 | "awsAccountId": "account-Id", 174 | "provisionedProductName": "pp-name", 175 | "recordId": "rec-id", 176 | "launchRoleArn": "arn", 177 | } 178 | 179 | # act 180 | with self.assertRaises(RuntimeError) as context: 181 | send_destroy_command.send(mocked_event, None) 182 | 183 | # assert 184 | self.assertEqual(context.expected, RuntimeError) 185 | self.assertEqual(str(context.exception), "instanceId must be provided") 186 | 187 | def test_send_missing_launch_role(self: TestCase): 188 | # arrange 189 | mocked_event = { 190 | "instanceId": "instance-id", 191 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 192 | "provisionedProductId": "pp-id", 193 | 
"awsAccountId": "account-Id", 194 | "provisionedProductName": "pp-name", 195 | } 196 | 197 | # act 198 | with self.assertRaises(RuntimeError) as context: 199 | send_destroy_command.send(mocked_event, None) 200 | 201 | # assert 202 | self.assertEqual(context.expected, RuntimeError) 203 | self.assertEqual(str(context.exception), "launchRoleArn must be provided") 204 | 205 | def test_send_missing_aws_account_id(self: TestCase): 206 | # arrange 207 | mocked_event = { 208 | "instanceId": "instance-id", 209 | "operation": "TERMINATE_PROVISIONED_PRODUCT", 210 | "provisionedProductId": "pp-id", 211 | "provisionedProductName": "pp-name", 212 | "recordId": "rec-id", 213 | "launchRoleArn": "arn" 214 | } 215 | 216 | # act 217 | with self.assertRaises(RuntimeError) as context: 218 | send_destroy_command.send(mocked_event, None) 219 | 220 | # assert 221 | self.assertEqual(context.expected, RuntimeError) 222 | self.assertEqual(str(context.exception), "awsAccountId must be provided") 223 | 224 | if __name__ == '__main__': 225 | main() 226 | -------------------------------------------------------------------------------- /wrapper-scripts/terraform_runner/test_override_manager.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import json 4 | import unittest 5 | from terraform_runner import override_manager 6 | 7 | 8 | class TestOverrideManager(unittest.TestCase): 9 | TMP_WORKSPACE_DIR = '/tmp' 10 | OVERRIDE_FILES_PATTERN = '*.tf.json' 11 | 12 | def test_write_backend_override_happy_path(self): 13 | # arrange 14 | provisioned_product_descriptor = 'account-id/pp-id' 15 | state_bucket = 'state-bucket' 16 | state_region = 'us-west-2' 17 | expected_backend_override = { 18 | 'terraform': { 19 | 'backend': { 20 | 's3': { 21 | 'bucket': f'{state_bucket}', 22 | 'key': f'{provisioned_product_descriptor}', 23 | 'region': f'{state_region}' 24 | } 25 | } 26 | } 27 | } 28 | 29 | # act 30 | override_manager.write_backend_override(self.TMP_WORKSPACE_DIR, 31 | provisioned_product_descriptor, state_bucket, state_region) 32 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.BACKEND_FILE_NAME}', 'r') as json_file: 33 | actual_backend_override = json.load(json_file) 34 | 35 | # assert 36 | self.assertEqual(expected_backend_override, actual_backend_override) 37 | 38 | def test_write_provider_override_happy_path(self): 39 | # arrange 40 | provisioned_product_descriptor = 'account-id/pp-id' 41 | launch_role_arn = 'role-arn' 42 | region = 'us-east-1' 43 | tags = [{'key': 'k1', 'value': 'v1'}, {'key': 'k2', 'value': 'v2'}] 44 | expected_provider_override = { 45 | 'provider': { 46 | 'aws': { 47 | 'region': f'{region}', 48 | 'assume_role': { 49 | 'role_arn': f'{launch_role_arn}', 50 | 'session_name': f'{provisioned_product_descriptor}'.replace('/', '-') 51 | }, 52 | 'default_tags': { 53 | 'tags': {'k1': 'v1', 'k2': 'v2'} 54 | } 55 | } 56 | } 57 | } 58 | 59 | # act 60 | override_manager.write_provider_override(self.TMP_WORKSPACE_DIR, 61 | provisioned_product_descriptor, launch_role_arn, region, tags) 62 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.PROVIDER_FILE_NAME}', 'r') as json_file: 63 | actual_provider_override = json.load(json_file) 64 | 65 | # assert 66 | self.assertEqual(expected_provider_override, actual_provider_override) 67 | 68 | def test_write_provider_override_long_pp_descriptor(self): 69 | # arrange 70 | provisioned_product_descriptor = 'p' * 1000 71 | expected_session_name = 'p' * override_manager.MAX_SESSION_NAME_LENGTH 72 | 
launch_role_arn = 'role-arn' 73 | region = 'us-east-1' 74 | tags = [{'key': 'k1', 'value': 'v1'}, {'key': 'k2', 'value': 'v2'}] 75 | expected_provider_override = { 76 | 'provider': { 77 | 'aws': { 78 | 'region': f'{region}', 79 | 'assume_role': { 80 | 'role_arn': f'{launch_role_arn}', 81 | 'session_name': f'{expected_session_name}' 82 | }, 83 | 'default_tags': { 84 | 'tags': {'k1': 'v1', 'k2': 'v2'} 85 | } 86 | } 87 | } 88 | } 89 | 90 | # act 91 | override_manager.write_provider_override(self.TMP_WORKSPACE_DIR, 92 | provisioned_product_descriptor, launch_role_arn, region, tags) 93 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.PROVIDER_FILE_NAME}', 'r') as json_file: 94 | actual_provider_override = json.load(json_file) 95 | 96 | # assert 97 | self.assertEqual(expected_provider_override, actual_provider_override) 98 | 99 | def test_write_provider_override_no_tags(self): 100 | # arrange 101 | provisioned_product_descriptor = 'account-id/pp-id' 102 | launch_role_arn = 'role-arn' 103 | region = 'us-east-1' 104 | tags = None 105 | expected_provider_override = { 106 | 'provider': { 107 | 'aws': { 108 | 'region': f'{region}', 109 | 'assume_role': { 110 | 'role_arn': f'{launch_role_arn}', 111 | 'session_name': f'{provisioned_product_descriptor}'.replace('/', '-') 112 | }, 113 | 'default_tags': {'tags': {}} 114 | } 115 | } 116 | } 117 | 118 | # act 119 | override_manager.write_provider_override(self.TMP_WORKSPACE_DIR, 120 | provisioned_product_descriptor, launch_role_arn, region, tags) 121 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.PROVIDER_FILE_NAME}', 'r') as json_file: 122 | actual_provider_override = json.load(json_file) 123 | 124 | # assert 125 | self.assertEqual(expected_provider_override, actual_provider_override) 126 | 127 | def test_write_provider_override_empty_tags(self): 128 | # arrange 129 | provisioned_product_descriptor = 'account-id/pp-id' 130 | launch_role_arn = 'role-arn' 131 | region = 'us-east-1' 132 | tags = {} 133 | expected_provider_override = { 134 | 'provider': { 135 | 'aws': { 136 | 'region': f'{region}', 137 | 'assume_role': { 138 | 'role_arn': f'{launch_role_arn}', 139 | 'session_name': f'{provisioned_product_descriptor}'.replace('/', '-') 140 | }, 141 | 'default_tags': {'tags': {}} 142 | } 143 | } 144 | } 145 | 146 | # act 147 | override_manager.write_provider_override(self.TMP_WORKSPACE_DIR, 148 | provisioned_product_descriptor, launch_role_arn, region, tags) 149 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.PROVIDER_FILE_NAME}', 'r') as json_file: 150 | actual_provider_override = json.load(json_file) 151 | 152 | # assert 153 | self.assertEqual(expected_provider_override, actual_provider_override) 154 | 155 | def test_write_variable_override_happy_path(self): 156 | # arrange 157 | variables = [ 158 | {'key': 'key1', 'value': 'value1'}, 159 | {'key': 'key2', 'value': 'value2'} 160 | ] 161 | expected_variable_override = { 162 | 'variable': { 163 | 'key1': {'default': 'value1'}, 164 | 'key2': {'default': 'value2'} 165 | } 166 | } 167 | 168 | # act 169 | override_manager.write_variable_override(self.TMP_WORKSPACE_DIR, variables) 170 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.VARIABLE_FILE_NAME}', 'r') as json_file: 171 | actual_variable_override = json.load(json_file) 172 | 173 | # assert 174 | self.assertEqual(expected_variable_override, actual_variable_override) 175 | 176 | def test_write_variable_override_with_complex_type_happy_path(self): 177 | # arrange 178 | complex_type_value = { 179 | 'nested_key_1': 
'nested_value_1', 180 | 'nested_key_2': 'nested_value_2' 181 | } 182 | variables = [ 183 | {'key': 'key1', 'value': 'value1'}, 184 | {'key': 'key2', 'value': json.dumps(complex_type_value)} 185 | ] 186 | expected_variable_override = { 187 | 'variable': { 188 | 'key1': {'default': 'value1'}, 189 | 'key2': {'default': complex_type_value} 190 | } 191 | } 192 | 193 | # act 194 | override_manager.write_variable_override(self.TMP_WORKSPACE_DIR, variables) 195 | with open(f'{self.TMP_WORKSPACE_DIR}/{override_manager.VARIABLE_FILE_NAME}', 'r') as json_file: 196 | actual_variable_override = json.load(json_file) 197 | 198 | # assert 199 | self.assertEqual(expected_variable_override, actual_variable_override) 200 | 201 | def test_write_variable_override_no_variables(self): 202 | # arrange 203 | variables = None 204 | 205 | # act 206 | override_manager.write_variable_override(self.TMP_WORKSPACE_DIR, variables) 207 | 208 | # assert 209 | self.assertFalse( 210 | os.path.exists(f'{self.TMP_WORKSPACE_DIR}/{override_manager.VARIABLE_FILE_NAME}')) 211 | 212 | def test_write_variable_override_empty_variables(self): 213 | # arrange 214 | variables = {} 215 | 216 | # act 217 | override_manager.write_variable_override(self.TMP_WORKSPACE_DIR, variables) 218 | 219 | # assert 220 | self.assertFalse( 221 | os.path.exists(f'{self.TMP_WORKSPACE_DIR}/{override_manager.VARIABLE_FILE_NAME}')) 222 | 223 | def tearDown(self): 224 | # Remove temp files after each test 225 | override_files = glob.glob(f'{self.TMP_WORKSPACE_DIR}/{self.OVERRIDE_FILES_PATTERN}') 226 | for override_file in override_files: 227 | os.remove(override_file) 228 | 229 | 230 | if __name__ == '__main__': 231 | unittest.main() 232 | -------------------------------------------------------------------------------- /lambda-functions/state_machine_lambdas/test_notify_update_result.py: -------------------------------------------------------------------------------- 1 | from unittest import main, TestCase 2 | from unittest.mock import MagicMock, Mock, patch, ANY 3 | 4 | from botocore.exceptions import ClientError 5 | 6 | import notify_update_result 7 | 8 | 9 | class LambdaContext: 10 | def __init__(self, invoked_function_arn: str): 11 | self.invoked_function_arn = invoked_function_arn 12 | 13 | 14 | class TestNotifyUpdateProvisionedProductEngineWorkflowResult(TestCase): 15 | 16 | def setUp(self: TestCase): 17 | # This is required to reset the mocks 18 | notify_update_result.app_config = None 19 | notify_update_result.service_catalog_facade = None 20 | 21 | @patch('notify_update_result.Configuration') 22 | @patch('notify_update_result.service_catalog_facade') 23 | def test_notify_succeeded_happy_path(self: TestCase, 24 | mocked_service_catalog_facade: MagicMock, 25 | mocked_configuration: MagicMock): 26 | 27 | # arrange 28 | mocked_app_config = Mock() 29 | mocked_app_config.get_region.return_value = 'us-east-1' 30 | mocked_configuration.return_value = mocked_app_config 31 | 32 | mocked_event = { 33 | "token": "token3000", 34 | "awsAccountId": "012345678910", 35 | "provisionedProductId": "pp-id", 36 | "provisionedProductName": "pp-name", 37 | "recordId": "rec-123", 38 | "outputs": [ 39 | { 40 | "key": "key1", 41 | "value": "value1", 42 | "description": "desc1" 43 | }, 44 | { 45 | "key": "key2", 46 | "value": "value2" 47 | }, 48 | { 49 | "key": "key3", 50 | "value": "value3", 51 | "description": None 52 | } 53 | ] 54 | } 55 | 56 | lambda_context: LambdaContext = LambdaContext( 57 | 
'arn:aws:lambda:us-east-1:264796065659:function:NotifyUpdateProvisionedProductEngineWorkflowResult') 58 | 59 | expected_service_catalog_outputs =[ 60 | { 61 | "OutputKey": "key1", 62 | "OutputValue": "value1", 63 | "Description": "desc1" 64 | }, 65 | { 66 | "OutputKey": "key2", 67 | "OutputValue": "value2" 68 | }, 69 | { 70 | "OutputKey": "key3", 71 | "OutputValue": "value3" 72 | } 73 | ] 74 | 75 | # act 76 | notify_update_result.notify(mocked_event, lambda_context) 77 | 78 | # assert 79 | mocked_configuration.assert_called_once() 80 | mocked_service_catalog_facade.notify_update_succeeded.assert_called_once_with( 81 | workflow_token = mocked_event['token'], 82 | record_id = mocked_event['recordId'], 83 | outputs = expected_service_catalog_outputs 84 | ) 85 | 86 | @patch('notify_update_result.Configuration') 87 | @patch('notify_update_result.service_catalog_facade') 88 | def test_notify_with_tre_failure(self: TestCase, 89 | mocked_service_catalog_facade: MagicMock, 90 | mocked_configuration: MagicMock): 91 | # arrange 92 | mocked_app_config = Mock() 93 | mocked_app_config.get_region.return_value = 'us-east-1' 94 | mocked_configuration.return_value = mocked_app_config 95 | 96 | mocked_event = { 97 | "token": "token3000", 98 | "awsAccountId": "012345678910", 99 | "provisionedProductId": "pp-id", 100 | "provisionedProductName": "pp-name", 101 | "recordId": "rec-123", 102 | "error": "RuntimeError", 103 | "errorMessage": "I failed", 104 | "outputs": [ 105 | { 106 | "key": "key1", 107 | "value": "value1", 108 | "description": "desc1" 109 | } 110 | ] 111 | } 112 | 113 | expected_service_catalog_outputs = [ 114 | { 115 | 'OutputKey': 'key1', 116 | 'OutputValue': 'value1', 117 | 'Description': 'desc1' 118 | } 119 | ] 120 | 121 | lambda_context: LambdaContext = LambdaContext( 122 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyUpdateProvisionedProductEngineWorkflowResult') 123 | 124 | # act 125 | notify_update_result.notify(mocked_event, lambda_context) 126 | 127 | # assert 128 | mocked_configuration.assert_called_once() 129 | mocked_service_catalog_facade.notify_update_failed.assert_called_once_with( 130 | workflow_token = mocked_event['token'], 131 | record_id = mocked_event['recordId'], 132 | failure_reason = mocked_event['errorMessage'], 133 | outputs = expected_service_catalog_outputs 134 | ) 135 | 136 | @patch('notify_update_result.Configuration') 137 | @patch('notify_update_result.service_catalog_facade') 138 | def test_notify_when_succeeded_internal_error(self: TestCase, 139 | mocked_service_catalog_facade: MagicMock, 140 | mocked_configuration: MagicMock): 141 | # arrange 142 | mocked_app_config = Mock() 143 | mocked_app_config.get_region.return_value = 'us-east-1' 144 | mocked_configuration.return_value = mocked_app_config 145 | 146 | mocked_event = { 147 | "token": "token3000", 148 | "awsAccountId": "012345678910", 149 | "provisionedProductId": "pp-id", 150 | "provisionedProductName": "pp-name", 151 | "recordId": "rec-123", 152 | "outputs": [ 153 | { 154 | "key": "key1", 155 | "value": "value1", 156 | "description": "desc1" 157 | } 158 | ] 159 | } 160 | expected_service_catalog_outputs =[ 161 | { 162 | "OutputKey": "key1", 163 | "OutputValue": "value1", 164 | "Description": "desc1" 165 | } 166 | ] 167 | 168 | lambda_context: LambdaContext = LambdaContext( 169 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyUpdateProvisionedProductEngineWorkflowResult') 170 | 171 | error_response = { 172 | 'Error': { 173 | 'Message': 'An internal error has occurred' 174 | }, 175 | 
'ResponseMetadata': { 176 | 'RequestId': 'some-random-uuid' 177 | } 178 | } 179 | mocked_client_error = ClientError( 180 | operation_name='NotifyUpdateProvisionedProductEngineWorkflowResult', 181 | error_response=error_response 182 | ) 183 | mocked_service_catalog_facade.notify_update_succeeded.side_effect = mocked_client_error 184 | 185 | # act 186 | with self.assertRaises(ClientError) as context: 187 | notify_update_result.notify(mocked_event, lambda_context) 188 | 189 | # assert 190 | mocked_configuration.assert_called_once() 191 | mocked_service_catalog_facade.notify_update_succeeded.assert_called_once_with( 192 | workflow_token = mocked_event['token'], 193 | record_id = mocked_event['recordId'], 194 | outputs = expected_service_catalog_outputs 195 | ) 196 | 197 | self.assertEqual(context.expected, ClientError) 198 | self.assertEqual(context.exception, mocked_client_error) 199 | 200 | @patch('notify_update_result.Configuration') 201 | @patch('notify_update_result.service_catalog_facade') 202 | def test_notify_when_failed_internal_error(self: TestCase, 203 | mocked_service_catalog_facade: MagicMock, 204 | mocked_configuration: MagicMock): 205 | # arrange 206 | mocked_app_config = Mock() 207 | mocked_app_config.get_region.return_value = 'us-east-1' 208 | mocked_configuration.return_value = mocked_app_config 209 | 210 | mocked_event = { 211 | "token": "token3000", 212 | "awsAccountId": "012345678910", 213 | "provisionedProductId": "pp-id", 214 | "provisionedProductName": "pp-name", 215 | "recordId": "rec-123", 216 | "error": "RuntimeError", 217 | "errorMessage": "I failed", 218 | "outputs": [] 219 | } 220 | 221 | lambda_context: LambdaContext = LambdaContext( 222 | 'arn:aws:lambda:us-east-1:264796065659:function:NotifyUpdateProvisionedProductEngineWorkflowResult') 223 | 224 | error_response = { 225 | 'Error': { 226 | 'Message': 'An internal error has occurred' 227 | }, 228 | 'ResponseMetadata': { 229 | 'RequestId': 'some-random-uuid' 230 | } 231 | } 232 | mocked_client_error = ClientError( 233 | operation_name='NotifyUpdateProvisionedProductEngineWorkflowResult', 234 | error_response=error_response 235 | ) 236 | mocked_service_catalog_facade.notify_update_failed.side_effect = mocked_client_error 237 | 238 | # act 239 | with self.assertRaises(ClientError) as context: 240 | notify_update_result.notify(mocked_event, lambda_context) 241 | 242 | # assert 243 | mocked_configuration.assert_called_once() 244 | mocked_service_catalog_facade.notify_update_failed.assert_called_once_with( 245 | workflow_token = mocked_event['token'], 246 | record_id = mocked_event['recordId'], 247 | failure_reason = mocked_event['errorMessage'], 248 | outputs = [] 249 | ) 250 | 251 | self.assertEqual(context.expected, ClientError) 252 | self.assertEqual(context.exception, mocked_client_error) 253 | 254 | 255 | if __name__ == '__main__': 256 | main() 257 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | --------------------------------------------------------------------------------