├── .github └── workflows │ └── proton_run.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── GitHubConfiguration.yaml ├── LICENSE ├── README.md ├── aws-managed ├── lb-fargate-service │ └── v1 │ │ ├── .compatible-envs │ │ ├── instance_infrastructure │ │ ├── cloudformation.yaml │ │ └── manifest.yaml │ │ ├── pipeline_infrastructure │ │ ├── cloudformation.yaml │ │ └── manifest.yaml │ │ └── schema │ │ └── schema.yaml └── multi-svc-env │ └── v1 │ ├── infrastructure │ ├── cloudformation.yaml │ └── manifest.yaml │ └── schema │ └── schema.yaml ├── env_config.json └── lambda-vpc ├── README.md ├── sample-lambda-function-template └── v1 │ ├── .compatible-envs │ ├── instance_infrastructure │ ├── config.tf │ ├── lambda.tf │ ├── manifest.yaml │ └── outputs.tf │ ├── lambda_function.py │ └── schema │ └── schema.yaml └── sample-vpc-environment-template └── v1 ├── infrastructure ├── config.tf ├── manifest.yaml ├── outputs.tf └── vpc.tf └── schema └── schema.yaml /.github/workflows/proton_run.yml: -------------------------------------------------------------------------------- 1 | # This is a workflow created to run based on a commit made by AWS Proton 2 | # It only works if there is only one resource modified as part of the commit. 
3 | 4 | name: 'proton-run' 5 | 6 | on: 7 | pull_request: 8 | types: 9 | - opened 10 | - reopened 11 | paths: 12 | - '**/.proton/deployment-metadata.json' 13 | push: 14 | branches: 15 | - main 16 | paths: 17 | - '**/.proton/deployment-metadata.json' 18 | 19 | jobs: 20 | get-deployment-data: 21 | name: Get Deployment Data 22 | runs-on: ubuntu-latest 23 | 24 | outputs: 25 | role_arn: ${{ steps.get-data.outputs.role_arn }} 26 | environment: ${{ steps.get-data.outputs.environment }} 27 | resource_arn: ${{ steps.get-data.outputs.resource_arn }} 28 | working_directory: ${{ steps.get-data.outputs.working_directory }} 29 | deployment_id: ${{ steps.get-data.outputs.deployment_id }} 30 | target_region: ${{ steps.get-data.outputs.target_region }} 31 | proton_region: ${{ steps.get-data.outputs.proton_region }} 32 | state_bucket: ${{ steps.get-data.outputs.state_bucket }} 33 | 34 | permissions: 35 | id-token: write 36 | contents: read 37 | 38 | continue-on-error: true 39 | 40 | steps: 41 | # Checkout the repository to the GitHub Actions runner 42 | - name: Checkout 43 | uses: actions/checkout@v2 44 | 45 | - name: Get changed files 46 | id: files 47 | uses: jitterbit/get-changed-files@v1 48 | 49 | - name: Find modified resource 50 | id: find-modified 51 | run: | 52 | found=false 53 | for changed_file in ${{ steps.files.outputs.all }}; do 54 | if [[ "$changed_file" == *".proton/deployment-metadata.json" ]]; then 55 | echo "found file" 56 | if [[ "$found" == true ]]; then 57 | echo "More than one resource found to have a new deployment, I'm not sure which one to update, exiting." 
58 | exit 1 59 | fi 60 | echo "setting found to true" 61 | found=true 62 | echo "setting outputs" 63 | echo "::set-output name=deployment-metadata-path::$changed_file" 64 | fi 65 | done 66 | if [[ "$found" == false ]]; then 67 | echo "No change made to deployment-metadata.json, exiting" 68 | exit 1 69 | fi 70 | 71 | - name: Get data 72 | id: get-data 73 | run: | 74 | modified_resource_arn=$(jq -r '.resourceMetadata.arn' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 75 | echo "::set-output name=resource_arn::$modified_resource_arn" 76 | 77 | IFS=':' 78 | read -a split_arn <<< "$modified_resource_arn" 79 | proton_region=${split_arn[3]} 80 | echo "::set-output name=proton_region::$proton_region" 81 | 82 | deployment_id=$(jq -r '.deploymentId' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 83 | echo "::set-output name=deployment_id::$deployment_id" 84 | 85 | if [[ "$modified_resource_arn" == *":environment/"* ]]; then 86 | environment_name=${modified_resource_arn##*/} 87 | working_directory="$environment_name/" 88 | elif [[ "$modified_resource_arn" == *"/service-instance/"* ]]; then 89 | environment_arn=$(jq -r '.resourceMetadata.environmentArn' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 90 | environment_name=${environment_arn##*/} 91 | 92 | resource_portion=${modified_resource_arn##*:} 93 | IFS='/' 94 | read -a split_resources <<< "$resource_portion" 95 | 96 | service_name=${split_resources[1]} 97 | instance_name=${split_resources[3]} 98 | 99 | working_directory=$environment_name/$service_name-$instance_name/ 100 | elif [[ "$modified_resource_arn" == *"/pipeline"* ]]; then 101 | environment_name="pipeline" 102 | 103 | resource_portion=${modified_resource_arn##*:} 104 | IFS='/' 105 | read -a split_resources <<< "$resource_portion" 106 | 107 | service_name=${split_resources[1]} 108 | 109 | working_directory=pipeline/$service_name 110 | fi 111 | 112 | if [[ $(jq -r --arg env $environment_name 'has($env)' env_config.json) = 
"false" ]]; then 113 | echo "Missing $environment_name from env_config.json, exiting" 114 | exit 1 115 | fi 116 | 117 | echo "::set-output name=working_directory::$working_directory" 118 | echo "::set-output name=environment::$environment_name" 119 | 120 | role_arn=$(jq -r --arg env $environment_name '.[$env]["role"]' env_config.json) 121 | echo "::set-output name=role_arn::$role_arn" 122 | 123 | target_region=$(jq -r --arg env $environment_name '.[$env]["region"]' env_config.json) 124 | echo "::set-output name=target_region::$target_region" 125 | 126 | state_bucket=$(jq -r --arg env $environment_name '.[$env]["state_bucket"]' env_config.json) 127 | echo "::set-output name=state_bucket::$state_bucket" 128 | 129 | terraform: 130 | name: 'Terraform' 131 | needs: get-deployment-data 132 | runs-on: ubuntu-latest 133 | environment: ${{ needs.get-deployment-data.outputs.environment }} 134 | 135 | outputs: 136 | tf_outputs: ${{ steps.tf_get_outputs.outputs.tf_outputs }} 137 | 138 | permissions: 139 | id-token: write 140 | contents: read 141 | 142 | defaults: 143 | run: 144 | working-directory: ${{ needs.get-deployment-data.outputs.working_directory }} 145 | shell: bash # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 146 | 147 | if: needs.get-deployment-data.result == 'success' 148 | continue-on-error: true 149 | 150 | steps: 151 | # Checkout the repository to the GitHub Actions runner 152 | - name: Checkout 153 | uses: actions/checkout@v2 154 | 155 | - name: Configure AWS Credentials 156 | id: assume_role 157 | uses: aws-actions/configure-aws-credentials@v1 158 | with: 159 | aws-region: ${{ needs.get-deployment-data.outputs.target_region }} 160 | role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }} 161 | role-session-name: TF-Github-Actions 162 | 163 | # Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token 164 | - name: Setup 
Terraform 165 | id: tf_setup 166 | uses: hashicorp/setup-terraform@v1 167 | with: 168 | terraform_version: 1.0.7 169 | terraform_wrapper: false 170 | 171 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. 172 | - name: Terraform Init 173 | id: tf_init 174 | run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}" 175 | 176 | # Checks that all Terraform configuration files adhere to a canonical format 177 | - name: Terraform Format 178 | id: tf_fmt 179 | run: terraform fmt -diff -check 180 | 181 | # Generates an execution plan for Terraform 182 | - name: Terraform Plan 183 | id: tf_plan 184 | run: terraform plan -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 185 | 186 | # On push to main, build or change infrastructure according to Terraform configuration files 187 | # Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks 188 | - name: Terraform Apply 189 | id: tf_apply 190 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 191 | run: terraform apply -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 192 | 193 | - name: Terraform Get Outputs 194 | id: tf_get_outputs 195 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 196 | run: | 197 | # Get outputs as json 198 | outputs_json=$(terraform output -json) 199 | 200 | # The outputs parameters expects a list of key=keyName,valueString=value key=key2Name,valueString=value2 etc... 
201 | # So here we convert the output json into a shell array 202 | # NOTE: This will probably not play nicely with complex output objects (non primitives) 203 | 204 | formatted_outputs=( $(echo $outputs_json | jq -r "to_entries|map(\"key=\(.key),valueString=\(.value.value|tostring)\")|.[]") ) 205 | echo "::set-output name=tf_outputs::$formatted_outputs" 206 | 207 | notify-proton: 208 | name: 'Notify Proton' 209 | needs: 210 | - get-deployment-data 211 | - terraform 212 | runs-on: ubuntu-latest 213 | environment: ${{ needs.get-deployment-data.outputs.environment }} 214 | 215 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 216 | 217 | permissions: 218 | id-token: write 219 | contents: read 220 | 221 | steps: 222 | - name: Configure AWS Credentials 223 | id: assume_role 224 | uses: aws-actions/configure-aws-credentials@v1 225 | with: 226 | aws-region: ${{ needs.get-deployment-data.outputs.proton_region }} 227 | role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }} 228 | role-session-name: TF-Github-Actions-Notify-Proton 229 | 230 | # This is a temporary measure until this feature exits Public Preview 231 | - name: Install Proton Model 232 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 233 | run: | 234 | aws s3 cp s3://aws-proton-preview-public-files/model/proton-2020-07-20.normal.json . 235 | 236 | - name: Notify Proton Success 237 | id: notify_success 238 | if: needs.terraform.result == 'success' 239 | run: | 240 | # Notify proton 241 | aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status SUCCEEDED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }} --outputs ${{ needs.terraform.outputs.tf_outputs }} 242 | echo "Notify success!" 
243 | 244 | - name: Notify Proton Failure 245 | if: needs.terraform.result == 'failure' || needs.terraform.result == 'cancelled' 246 | run: | 247 | aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status FAILED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }} 248 | echo "Notify failure!" 249 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv/ 2 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 
13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. 
As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /GitHubConfiguration.yaml: -------------------------------------------------------------------------------- 1 | Parameters: 2 | FullRepoName: 3 | Type: String 4 | Description: "The organization and name of the repository within which you will be utilizing created IAM Role within GitHub Actions (e.g. org/repo_name)" 5 | 6 | Resources: 7 | Role: 8 | Type: AWS::IAM::Role 9 | # Creates the role that will be passed to Terraform to be able to deploy 10 | # infrastructure in your AWS account. You will enter this role in your 11 | # env_config.json file 12 | # Note that this role has administrator access to your account, so that 13 | # it can be used to provision any infrastructure in your templates. 
We 14 | # recommend you scope down the role permissions to the resources that will be used 15 | # in your Proton templates 16 | Properties: 17 | RoleName: ExampleGithubRole 18 | ManagedPolicyArns: [arn:aws:iam::aws:policy/AdministratorAccess] 19 | AssumeRolePolicyDocument: 20 | Statement: 21 | - Effect: Allow 22 | Action: sts:AssumeRoleWithWebIdentity 23 | Principal: 24 | Federated: !Ref GithubOidc 25 | Condition: 26 | StringLike: 27 | token.actions.githubusercontent.com:sub: !Sub repo:${FullRepoName}:* 28 | 29 | GithubOidc: 30 | # This identity provider is required to accept OpenID Connect credentials 31 | # from GitHub 32 | Type: AWS::IAM::OIDCProvider 33 | Properties: 34 | Url: https://token.actions.githubusercontent.com 35 | ThumbprintList: [6938fd4d98bab03faadb97b34396831e3780aea1] 36 | ClientIdList: 37 | - sts.amazonaws.com 38 | 39 | StateFileBucket: 40 | # This bucket will be used to store your Terraform Open Source state files 41 | Type: AWS::S3::Bucket 42 | Properties: 43 | BucketName: !Join 44 | - '' 45 | - - 'aws-proton-terraform-bucket-' 46 | - !Ref AWS::AccountId 47 | Outputs: 48 | Role: 49 | Description: "The role that will be used to provision infrastructure. Configure it per environment in your env_config.json file" 50 | Value: !GetAtt Role.Arn 51 | 52 | BucketName: 53 | Description: "Name of the bucket for our state files. Configure it per environment in your env_config.json file" 54 | Value: !Ref StateFileBucket 55 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## AWS Proton workshop - Sample Cloudformation and Terraform Templates 2 | 3 | This repository is a curated list of templates to use within AWS Proton workshop that are authored for integration with Cloudformation and [Terraform](https://www.terraform.io/). 4 | 5 | ## Security 6 | 7 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 8 | 9 | ## License 10 | 11 | This library is licensed under the MIT-0 License. See the LICENSE file. 
12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/.compatible-envs: -------------------------------------------------------------------------------- 1 | multi-svc-env:1 2 | -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/instance_infrastructure/cloudformation.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: Deploy a service on AWS Fargate, hosted in a public subnet, and accessible via a load balancer. 3 | Mappings: 4 | TaskSize: 5 | x-small: 6 | Cpu: 256 7 | Memory: 512 8 | small: 9 | Cpu: 512 10 | Memory: 1024 11 | medium: 12 | Cpu: 1024 13 | Memory: 2048 14 | large: 15 | Cpu: 2048 16 | Memory: 4096 17 | x-large: 18 | Cpu: 4096 19 | Memory: 8192 20 | Resources: 21 | # A log group for storing the stdout logs from this service's containers 22 | LogGroup: 23 | Type: AWS::Logs::LogGroup 24 | Properties: 25 | LogGroupName: '{{service.name}}/{{service_instance.name}}' 26 | 27 | # The task definition. This is a simple metadata description of what 28 | # container to run, and what resource requirements it has. 
29 | TaskDefinition: 30 | Type: AWS::ECS::TaskDefinition 31 | Properties: 32 | Family: '{{service.name}}_{{service_instance.name}}' 33 | Cpu: !FindInMap [TaskSize, '{{service_instance.inputs.task_size}}', Cpu] 34 | Memory: !FindInMap [TaskSize, '{{service_instance.inputs.task_size}}', Memory] 35 | NetworkMode: awsvpc 36 | RequiresCompatibilities: 37 | - FARGATE 38 | ExecutionRoleArn: '{{environment.outputs.ECSTaskExecutionRole}}' 39 | TaskRoleArn: !Ref "AWS::NoValue" 40 | ContainerDefinitions: 41 | - Name: '{{service_instance.name}}' 42 | Cpu: !FindInMap [TaskSize, '{{service_instance.inputs.task_size}}', Cpu] 43 | Memory: !FindInMap [TaskSize, '{{service_instance.inputs.task_size}}', Memory] 44 | Image: '{{service_instance.inputs.image}}' 45 | Environment: 46 | {% set env_vars = service_instance.inputs.env_vars.split(';') %} 47 | {% for env_var in env_vars %} 48 | {% set env_name, env_value = env_var.split('=') %} 49 | - Name: '{{ env_name|trim }}' 50 | Value: '{{ env_value|trim }}' 51 | {% endfor %} 52 | PortMappings: 53 | - ContainerPort: '{{service_instance.inputs.port}}' 54 | LogConfiguration: 55 | LogDriver: 'awslogs' 56 | Options: 57 | awslogs-create-group: 'True' 58 | awslogs-group: '{{service.name}}/{{service_instance.name}}' 59 | awslogs-region: !Ref 'AWS::Region' 60 | awslogs-stream-prefix: 'fargate/service/{{service.name}}' 61 | 62 | # The service_instance.inputs. 
The service is a resource which allows you to run multiple 63 | # copies of a type of task, and gather up their logs and metrics, as well 64 | # as monitor the number of running tasks and replace any that have crashed 65 | Service: 66 | Type: AWS::ECS::Service 67 | DependsOn: LoadBalancerRule 68 | Properties: 69 | ServiceName: '{{service.name}}_{{service_instance.name}}' 70 | Cluster: '{{environment.outputs.ClusterName}}' 71 | LaunchType: FARGATE 72 | DeploymentConfiguration: 73 | MaximumPercent: 200 74 | MinimumHealthyPercent: 75 75 | DesiredCount: '{{service_instance.inputs.desired_count}}' 76 | NetworkConfiguration: 77 | AwsvpcConfiguration: 78 | AssignPublicIp: ENABLED 79 | SecurityGroups: 80 | - '{{environment.outputs.ContainerSecurityGroup}}' 81 | Subnets: 82 | - '{{environment.outputs.PublicSubnetOne}}' 83 | - '{{environment.outputs.PublicSubnetTwo}}' 84 | - '{{environment.outputs.PublicSubnetThree}}' 85 | TaskDefinition: !Ref 'TaskDefinition' 86 | LoadBalancers: 87 | - ContainerName: '{{service_instance.name}}' 88 | ContainerPort: '{{service_instance.inputs.port}}' 89 | TargetGroupArn: !Ref 'TargetGroup' 90 | 91 | # A target group. This is used for keeping track of all the tasks, and 92 | # what IP addresses / port numbers they have. You can query it yourself, 93 | # to use the addresses yourself, but most often this target group is just 94 | # connected to an application load balancer, or network load balancer, so 95 | # it can automatically distribute traffic across all the targets. 
96 | TargetGroup: 97 | Type: AWS::ElasticLoadBalancingV2::TargetGroup 98 | Properties: 99 | TargetType: ip 100 | HealthCheckIntervalSeconds: 10 101 | HealthCheckPath: /health 102 | HealthCheckProtocol: HTTP 103 | HealthCheckTimeoutSeconds: 5 104 | HealthyThresholdCount: 2 105 | Matcher: 106 | HttpCode: 200-299 107 | # Note that the Name property has a 32 character limit, which could be 108 | # reached by using either {{service.name}}, {{service_instance.name}} 109 | # or a combination of both as we're doing here, so we truncate the name to 29 characters 110 | # plus an ellipsis different from '...' or '---' to avoid running into errors. 111 | Name: '{{(service.name~"--"~service_instance.name)|truncate(29, true, "zzz")}}' 112 | Port: '{{service_instance.inputs.port}}' 113 | Protocol: HTTP 114 | UnhealthyThresholdCount: 10 115 | VpcId: '{{environment.outputs.VpcId}}' 116 | TargetGroupAttributes: 117 | - Key: deregistration_delay.timeout_seconds 118 | Value: '5' 119 | - Key: slow_start.duration_seconds 120 | Value: '60' 121 | 122 | # Create a rule on the load balancer for routing traffic to the target group 123 | LoadBalancerRule: 124 | Type: AWS::ElasticLoadBalancingV2::ListenerRule 125 | Properties: 126 | Actions: 127 | - TargetGroupArn: !Ref 'TargetGroup' 128 | Type: 'forward' 129 | Conditions: 130 | - Field: path-pattern 131 | Values: 132 | - '*' 133 | ListenerArn: !Ref LoadBalancerListener 134 | Priority: 1 135 | 136 | EcsSecurityGroupIngressFromALB: 137 | Type: AWS::EC2::SecurityGroupIngress 138 | Properties: 139 | Description: Ingress from the ALB 140 | GroupId: '{{environment.outputs.ContainerSecurityGroup}}' 141 | IpProtocol: -1 142 | SourceSecurityGroupId: !Ref 'LoadBalancerSG' 143 | 144 | # Load balancer, hosted in public subnets that is accessible 145 | # to the public or internally depending on the scope. It is 146 | # intended to route traffic to one or more public/private 147 | # facing services. 
148 | LoadBalancerSG: 149 | Type: AWS::EC2::SecurityGroup 150 | Properties: 151 | GroupDescription: Access to the public facing load balancer 152 | VpcId: '{{environment.outputs.VpcId}}' 153 | SecurityGroupIngress: 154 | {% if 'public' == service_instance.inputs.scope %} 155 | # Allow access to ALB from anywhere on the internet 156 | - CidrIp: 0.0.0.0/0 157 | IpProtocol: -1 158 | {% else %} 159 | # Allow access only from the VPC CIDR 160 | - CidrIp: '{{environment.outputs.VpcCIDR}}' 161 | IpProtocol: -1 162 | FromPort: '{{service_instance.inputs.port}}' 163 | ToPort: '{{service_instance.inputs.port}}' 164 | {% endif %} 165 | 166 | {% if 'public' == service_instance.inputs.scope %} 167 | {% set scheme = 'internet-facing' %} 168 | {% set port = '80' %} 169 | {% else %} 170 | {% set scheme = 'internal' %} 171 | {% set port = service_instance.inputs.port %} 172 | {% endif %} 173 | LoadBalancer: 174 | Type: AWS::ElasticLoadBalancingV2::LoadBalancer 175 | Properties: 176 | Scheme: '{{scheme}}' 177 | LoadBalancerAttributes: 178 | - Key: idle_timeout.timeout_seconds 179 | Value: '30' 180 | Subnets: 181 | # The load balancer is placed into the public subnets, so that traffic 182 | # from the internet can reach the load balancer directly via the internet gateway 183 | - '{{environment.outputs.PublicSubnetOne}}' 184 | - '{{environment.outputs.PublicSubnetTwo}}' 185 | - '{{environment.outputs.PublicSubnetThree}}' 186 | SecurityGroups: [!Ref 'LoadBalancerSG'] 187 | 188 | LoadBalancerListener: 189 | Type: AWS::ElasticLoadBalancingV2::Listener 190 | DependsOn: 191 | - LoadBalancer 192 | Properties: 193 | DefaultActions: 194 | - TargetGroupArn: !Ref 'TargetGroup' 195 | Type: 'forward' 196 | LoadBalancerArn: !Ref 'LoadBalancer' 197 | Port: '{{port}}' 198 | Protocol: HTTP 199 | 200 | RecordSet: 201 | Type: AWS::Route53::RecordSet 202 | DependsOn: 203 | - LoadBalancer 204 | Properties: 205 | AliasTarget: 206 | HostedZoneId: !GetAtt LoadBalancer.CanonicalHostedZoneID 207 | DNSName: 
!GetAtt LoadBalancer.DNSName 208 | HostedZoneId: '{{environment.outputs.HostedZoneId}}' 209 | Name: '{{service.name}}.{{environment.outputs.DnsHostname}}' 210 | Type: A 211 | 212 | Outputs: 213 | ServiceEndpoint: 214 | Description: The URL to access the service 215 | Value: !Sub "http://${LoadBalancer.DNSName}" -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/instance_infrastructure/manifest.yaml: -------------------------------------------------------------------------------- 1 | infrastructure: 2 | templates: 3 | - file: "cloudformation.yaml" 4 | rendering_engine: jinja 5 | template_language: cloudformation 6 | -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/pipeline_infrastructure/cloudformation.yaml: -------------------------------------------------------------------------------- 1 | Resources: 2 | ECRRepo: 3 | Type: AWS::ECR::Repository 4 | DeletionPolicy: Retain 5 | BuildProject: 6 | Type: AWS::CodeBuild::Project 7 | Properties: 8 | Artifacts: 9 | Type: CODEPIPELINE 10 | Environment: 11 | ComputeType: BUILD_GENERAL1_SMALL 12 | Image: aws/codebuild/amazonlinux2-x86_64-standard:3.0 13 | PrivilegedMode: true 14 | Type: LINUX_CONTAINER 15 | EnvironmentVariables: 16 | - Name: repo_name 17 | Type: PLAINTEXT 18 | Value: !Ref ECRRepo 19 | - Name: service_name 20 | Type: PLAINTEXT 21 | Value: '{{service.name}}' 22 | ServiceRole: 23 | Fn::GetAtt: 24 | - PublishRole 25 | - Arn 26 | Source: 27 | BuildSpec: 28 | Fn::Join: 29 | - "" 30 | - - >- 31 | { 32 | "version": "0.2", 33 | "phases": { 34 | "install": { 35 | "runtime-versions": { 36 | "docker": 18 37 | }, 38 | "commands": [ 39 | "pip3 install --upgrade --user awscli", 40 | "echo 'f6bd1536a743ab170b35c94ed4c7c4479763356bd543af5d391122f4af852460 yq_linux_amd64' > yq_linux_amd64.sha", 41 | "wget https://github.com/mikefarah/yq/releases/download/3.4.0/yq_linux_amd64", 42 | "sha256sum -c 
yq_linux_amd64.sha", 43 | "mv yq_linux_amd64 /usr/bin/yq", 44 | "chmod +x /usr/bin/yq" 45 | ] 46 | }, 47 | "pre_build": { 48 | "commands": [ 49 | "cd $CODEBUILD_SRC_DIR", 50 | "$(aws ecr get-login --no-include-email --region $AWS_DEFAULT_REGION)", 51 | "{{ pipeline.inputs.unit_test_command }}", 52 | ] 53 | }, 54 | "build": { 55 | "commands": [ 56 | "IMAGE_REPO_NAME=$repo_name", 57 | "IMAGE_TAG=$CODEBUILD_BUILD_NUMBER", 58 | "IMAGE_ID= 59 | - Ref: AWS::AccountId 60 | - >- 61 | .dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE_REPO_NAME:$IMAGE_TAG", 62 | "docker build -t $IMAGE_REPO_NAME:$IMAGE_TAG -f {{ pipeline.inputs.dockerfile }} .", 63 | "docker tag $IMAGE_REPO_NAME:$IMAGE_TAG $IMAGE_ID;", 64 | "docker push $IMAGE_ID" 65 | ] 66 | }, 67 | "post_build": { 68 | "commands": [ 69 | "aws proton --endpoint-url https://proton.$AWS_DEFAULT_REGION.amazonaws.com --region $AWS_DEFAULT_REGION get-service --name $service_name | jq -r .service.spec > service.yaml", 70 | "yq w service.yaml 'instances[*].spec.image' \"$IMAGE_ID\" > rendered_service.yaml", 71 | "cat rendered_service.yaml" 72 | ] 73 | } 74 | }, 75 | "artifacts": { 76 | "files": [ 77 | "rendered_service.yaml" 78 | ] 79 | } 80 | } 81 | Type: CODEPIPELINE 82 | EncryptionKey: 83 | Fn::GetAtt: 84 | - PipelineArtifactsBucketEncryptionKey 85 | - Arn 86 | {% for service_instance in service_instances %} 87 | Deploy{{loop.index}}Project: 88 | Type: AWS::CodeBuild::Project 89 | Properties: 90 | Artifacts: 91 | Type: CODEPIPELINE 92 | Environment: 93 | ComputeType: BUILD_GENERAL1_SMALL 94 | Image: aws/codebuild/amazonlinux2-x86_64-standard:3.0 95 | PrivilegedMode: false 96 | Type: LINUX_CONTAINER 97 | EnvironmentVariables: 98 | - Name: service_name 99 | Type: PLAINTEXT 100 | Value: '{{service.name}}' 101 | - Name: service_instance_name 102 | Type: PLAINTEXT 103 | Value: '{{service_instance.name}}' 104 | ServiceRole: 105 | Fn::GetAtt: 106 | - DeploymentRole 107 | - Arn 108 | Source: 109 | BuildSpec: >- 110 | { 111 | "version": 
"0.2", 112 | "phases": { 113 | "build": { 114 | "commands": [ 115 | "pip3 install --upgrade --user awscli", 116 | "aws proton --endpoint-url https://proton.$AWS_DEFAULT_REGION.amazonaws.com --region $AWS_DEFAULT_REGION update-service-instance --deployment-type CURRENT_VERSION --name $service_instance_name --service-name $service_name --spec file://rendered_service.yaml" 117 | ] 118 | } 119 | } 120 | } 121 | Type: CODEPIPELINE 122 | EncryptionKey: 123 | Fn::GetAtt: 124 | - PipelineArtifactsBucketEncryptionKey 125 | - Arn 126 | {% endfor %} 127 | # This role is used to build and publish an image to ECR 128 | PublishRole: 129 | Type: AWS::IAM::Role 130 | Properties: 131 | AssumeRolePolicyDocument: 132 | Statement: 133 | - Action: sts:AssumeRole 134 | Effect: Allow 135 | Principal: 136 | Service: codebuild.amazonaws.com 137 | Version: "2012-10-17" 138 | PublishRoleDefaultPolicy: 139 | Type: AWS::IAM::Policy 140 | Properties: 141 | PolicyDocument: 142 | Statement: 143 | - Action: 144 | - logs:CreateLogGroup 145 | - logs:CreateLogStream 146 | - logs:PutLogEvents 147 | Effect: Allow 148 | Resource: 149 | - Fn::Join: 150 | - "" 151 | - - "arn:" 152 | - Ref: AWS::Partition 153 | - ":logs:" 154 | - Ref: AWS::Region 155 | - ":" 156 | - Ref: AWS::AccountId 157 | - :log-group:/aws/codebuild/ 158 | - Ref: BuildProject 159 | - Fn::Join: 160 | - "" 161 | - - "arn:" 162 | - Ref: AWS::Partition 163 | - ":logs:" 164 | - Ref: AWS::Region 165 | - ":" 166 | - Ref: AWS::AccountId 167 | - :log-group:/aws/codebuild/ 168 | - Ref: BuildProject 169 | - :* 170 | - Action: 171 | - codebuild:CreateReportGroup 172 | - codebuild:CreateReport 173 | - codebuild:UpdateReport 174 | - codebuild:BatchPutTestCases 175 | Effect: Allow 176 | Resource: 177 | Fn::Join: 178 | - "" 179 | - - "arn:" 180 | - Ref: AWS::Partition 181 | - ":codebuild:" 182 | - Ref: AWS::Region 183 | - ":" 184 | - Ref: AWS::AccountId 185 | - :report-group/ 186 | - Ref: BuildProject 187 | - -* 188 | - Action: 189 | - 
ecr:GetAuthorizationToken 190 | Effect: Allow 191 | Resource: "*" 192 | - Action: 193 | - ecr:BatchCheckLayerAvailability 194 | - ecr:CompleteLayerUpload 195 | - ecr:GetAuthorizationToken 196 | - ecr:InitiateLayerUpload 197 | - ecr:PutImage 198 | - ecr:UploadLayerPart 199 | Effect: Allow 200 | Resource: 201 | Fn::GetAtt: 202 | - ECRRepo 203 | - Arn 204 | - Action: 205 | - proton:GetService 206 | Effect: Allow 207 | Resource: "*" 208 | - Action: 209 | - s3:GetObject* 210 | - s3:GetBucket* 211 | - s3:List* 212 | - s3:DeleteObject* 213 | - s3:PutObject* 214 | - s3:Abort* 215 | Effect: Allow 216 | Resource: 217 | - Fn::GetAtt: 218 | - PipelineArtifactsBucket 219 | - Arn 220 | - Fn::Join: 221 | - "" 222 | - - Fn::GetAtt: 223 | - PipelineArtifactsBucket 224 | - Arn 225 | - /* 226 | - Action: 227 | - kms:Decrypt 228 | - kms:DescribeKey 229 | - kms:Encrypt 230 | - kms:ReEncrypt* 231 | - kms:GenerateDataKey* 232 | Effect: Allow 233 | Resource: 234 | Fn::GetAtt: 235 | - PipelineArtifactsBucketEncryptionKey 236 | - Arn 237 | - Action: 238 | - kms:Decrypt 239 | - kms:Encrypt 240 | - kms:ReEncrypt* 241 | - kms:GenerateDataKey* 242 | Effect: Allow 243 | Resource: 244 | Fn::GetAtt: 245 | - PipelineArtifactsBucketEncryptionKey 246 | - Arn 247 | Version: "2012-10-17" 248 | PolicyName: PublishRoleDefaultPolicy 249 | Roles: 250 | - Ref: PublishRole 251 | 252 | DeploymentRole: 253 | Type: AWS::IAM::Role 254 | Properties: 255 | AssumeRolePolicyDocument: 256 | Statement: 257 | - Action: sts:AssumeRole 258 | Effect: Allow 259 | Principal: 260 | Service: codebuild.amazonaws.com 261 | Version: "2012-10-17" 262 | DeploymentRoleDefaultPolicy: 263 | Type: AWS::IAM::Policy 264 | Properties: 265 | PolicyDocument: 266 | Statement: 267 | - Action: 268 | - logs:CreateLogGroup 269 | - logs:CreateLogStream 270 | - logs:PutLogEvents 271 | Effect: Allow 272 | Resource: 273 | - Fn::Join: 274 | - "" 275 | - - "arn:" 276 | - Ref: AWS::Partition 277 | - ":logs:" 278 | - Ref: AWS::Region 279 | - ":" 280 | 
- Ref: AWS::AccountId 281 | - :log-group:/aws/codebuild/Deploy*Project* 282 | - Fn::Join: 283 | - "" 284 | - - "arn:" 285 | - Ref: AWS::Partition 286 | - ":logs:" 287 | - Ref: AWS::Region 288 | - ":" 289 | - Ref: AWS::AccountId 290 | - :log-group:/aws/codebuild/Deploy*Project:* 291 | - Action: 292 | - codebuild:CreateReportGroup 293 | - codebuild:CreateReport 294 | - codebuild:UpdateReport 295 | - codebuild:BatchPutTestCases 296 | Effect: Allow 297 | Resource: 298 | Fn::Join: 299 | - "" 300 | - - "arn:" 301 | - Ref: AWS::Partition 302 | - ":codebuild:" 303 | - Ref: AWS::Region 304 | - ":" 305 | - Ref: AWS::AccountId 306 | - :report-group/Deploy*Project 307 | - -* 308 | - Action: 309 | - proton:UpdateServiceInstance 310 | - proton:GetServiceInstance 311 | Effect: Allow 312 | Resource: "*" 313 | - Action: 314 | - s3:GetObject* 315 | - s3:GetBucket* 316 | - s3:List* 317 | Effect: Allow 318 | Resource: 319 | - Fn::GetAtt: 320 | - PipelineArtifactsBucket 321 | - Arn 322 | - Fn::Join: 323 | - "" 324 | - - Fn::GetAtt: 325 | - PipelineArtifactsBucket 326 | - Arn 327 | - /* 328 | - Action: 329 | - kms:Decrypt 330 | - kms:DescribeKey 331 | Effect: Allow 332 | Resource: 333 | Fn::GetAtt: 334 | - PipelineArtifactsBucketEncryptionKey 335 | - Arn 336 | - Action: 337 | - kms:Decrypt 338 | - kms:Encrypt 339 | - kms:ReEncrypt* 340 | - kms:GenerateDataKey* 341 | Effect: Allow 342 | Resource: 343 | Fn::GetAtt: 344 | - PipelineArtifactsBucketEncryptionKey 345 | - Arn 346 | Version: "2012-10-17" 347 | PolicyName: DeploymentRoleDefaultPolicy 348 | Roles: 349 | - Ref: DeploymentRole 350 | PipelineArtifactsBucketEncryptionKey: 351 | Type: AWS::KMS::Key 352 | Properties: 353 | KeyPolicy: 354 | Statement: 355 | - Action: 356 | - kms:Create* 357 | - kms:Describe* 358 | - kms:Enable* 359 | - kms:List* 360 | - kms:Put* 361 | - kms:Update* 362 | - kms:Revoke* 363 | - kms:Disable* 364 | - kms:Get* 365 | - kms:Delete* 366 | - kms:ScheduleKeyDeletion 367 | - kms:CancelKeyDeletion 368 | - 
kms:GenerateDataKey 369 | - kms:TagResource 370 | - kms:UntagResource 371 | Effect: Allow 372 | Principal: 373 | AWS: 374 | Fn::Join: 375 | - "" 376 | - - "arn:" 377 | - Ref: AWS::Partition 378 | - ":iam::" 379 | - Ref: AWS::AccountId 380 | - :root 381 | Resource: "*" 382 | - Action: 383 | - kms:Decrypt 384 | - kms:DescribeKey 385 | - kms:Encrypt 386 | - kms:ReEncrypt* 387 | - kms:GenerateDataKey* 388 | Effect: Allow 389 | Principal: 390 | AWS: 391 | Fn::GetAtt: 392 | - PipelineRole 393 | - Arn 394 | Resource: "*" 395 | - Action: 396 | - kms:Decrypt 397 | - kms:DescribeKey 398 | - kms:Encrypt 399 | - kms:ReEncrypt* 400 | - kms:GenerateDataKey* 401 | Effect: Allow 402 | Principal: 403 | AWS: 404 | Fn::GetAtt: 405 | - PublishRole 406 | - Arn 407 | Resource: "*" 408 | - Action: 409 | - kms:Decrypt 410 | - kms:Encrypt 411 | - kms:ReEncrypt* 412 | - kms:GenerateDataKey* 413 | Effect: Allow 414 | Principal: 415 | AWS: 416 | Fn::GetAtt: 417 | - PublishRole 418 | - Arn 419 | Resource: "*" 420 | - Action: 421 | - kms:Decrypt 422 | - kms:DescribeKey 423 | Effect: Allow 424 | Principal: 425 | AWS: 426 | Fn::GetAtt: 427 | - DeploymentRole 428 | - Arn 429 | Resource: "*" 430 | - Action: 431 | - kms:Decrypt 432 | - kms:Encrypt 433 | - kms:ReEncrypt* 434 | - kms:GenerateDataKey* 435 | Effect: Allow 436 | Principal: 437 | AWS: 438 | Fn::GetAtt: 439 | - DeploymentRole 440 | - Arn 441 | Resource: "*" 442 | Version: "2012-10-17" 443 | UpdateReplacePolicy: Delete 444 | DeletionPolicy: Delete 445 | PipelineArtifactsBucket: 446 | Type: AWS::S3::Bucket 447 | Properties: 448 | VersioningConfiguration: 449 | Status: Enabled 450 | BucketEncryption: 451 | ServerSideEncryptionConfiguration: 452 | - ServerSideEncryptionByDefault: 453 | KMSMasterKeyID: 454 | Fn::GetAtt: 455 | - PipelineArtifactsBucketEncryptionKey 456 | - Arn 457 | SSEAlgorithm: aws:kms 458 | PublicAccessBlockConfiguration: 459 | BlockPublicAcls: true 460 | BlockPublicPolicy: true 461 | IgnorePublicAcls: true 462 | 
RestrictPublicBuckets: true 463 | UpdateReplacePolicy: Retain 464 | DeletionPolicy: Retain 465 | PipelineArtifactsBucketEncryptionKeyAlias: 466 | Type: AWS::KMS::Alias 467 | Properties: 468 | AliasName: 'alias/codepipeline-encryption-key-{{ service.name }}' 469 | TargetKeyId: 470 | Fn::GetAtt: 471 | - PipelineArtifactsBucketEncryptionKey 472 | - Arn 473 | UpdateReplacePolicy: Delete 474 | DeletionPolicy: Delete 475 | PipelineRole: 476 | Type: AWS::IAM::Role 477 | Properties: 478 | AssumeRolePolicyDocument: 479 | Statement: 480 | - Action: sts:AssumeRole 481 | Effect: Allow 482 | Principal: 483 | Service: codepipeline.amazonaws.com 484 | Version: "2012-10-17" 485 | PipelineRoleDefaultPolicy: 486 | Type: AWS::IAM::Policy 487 | Properties: 488 | PolicyDocument: 489 | Statement: 490 | - Action: 491 | - s3:GetObject* 492 | - s3:GetBucket* 493 | - s3:List* 494 | - s3:DeleteObject* 495 | - s3:PutObject* 496 | - s3:Abort* 497 | Effect: Allow 498 | Resource: 499 | - Fn::GetAtt: 500 | - PipelineArtifactsBucket 501 | - Arn 502 | - Fn::Join: 503 | - "" 504 | - - Fn::GetAtt: 505 | - PipelineArtifactsBucket 506 | - Arn 507 | - /* 508 | - Action: 509 | - kms:Decrypt 510 | - kms:DescribeKey 511 | - kms:Encrypt 512 | - kms:ReEncrypt* 513 | - kms:GenerateDataKey* 514 | Effect: Allow 515 | Resource: 516 | Fn::GetAtt: 517 | - PipelineArtifactsBucketEncryptionKey 518 | - Arn 519 | - Action: codestar-connections:* 520 | Effect: Allow 521 | Resource: "*" 522 | - Action: sts:AssumeRole 523 | Effect: Allow 524 | Resource: 525 | Fn::GetAtt: 526 | - PipelineBuildCodePipelineActionRole 527 | - Arn 528 | - Action: sts:AssumeRole 529 | Effect: Allow 530 | Resource: 531 | Fn::GetAtt: 532 | - PipelineDeployCodePipelineActionRole 533 | - Arn 534 | Version: "2012-10-17" 535 | PolicyName: PipelineRoleDefaultPolicy 536 | Roles: 537 | - Ref: PipelineRole 538 | Pipeline: 539 | Type: AWS::CodePipeline::Pipeline 540 | Properties: 541 | RoleArn: 542 | Fn::GetAtt: 543 | - PipelineRole 544 | - Arn 545 | 
Stages: 546 | - Actions: 547 | - ActionTypeId: 548 | Category: Source 549 | Owner: AWS 550 | Provider: CodeStarSourceConnection 551 | Version: "1" 552 | Configuration: 553 | ConnectionArn: '{{ service.repository_connection_arn }}' 554 | FullRepositoryId: '{{ service.repository_id }}' 555 | BranchName: '{{ service.branch_name }}' 556 | Name: Checkout 557 | OutputArtifacts: 558 | - Name: Artifact_Source_Checkout 559 | RunOrder: 1 560 | Name: Source 561 | - Actions: 562 | - ActionTypeId: 563 | Category: Build 564 | Owner: AWS 565 | Provider: CodeBuild 566 | Version: "1" 567 | Configuration: 568 | ProjectName: 569 | Ref: BuildProject 570 | InputArtifacts: 571 | - Name: Artifact_Source_Checkout 572 | Name: Build 573 | OutputArtifacts: 574 | - Name: BuildOutput 575 | RoleArn: 576 | Fn::GetAtt: 577 | - PipelineBuildCodePipelineActionRole 578 | - Arn 579 | RunOrder: 1 580 | Name: Build {%- for service_instance in service_instances %} 581 | - Actions: 582 | - ActionTypeId: 583 | Category: Build 584 | Owner: AWS 585 | Provider: CodeBuild 586 | Version: "1" 587 | Configuration: 588 | ProjectName: 589 | Ref: Deploy{{loop.index}}Project 590 | InputArtifacts: 591 | - Name: BuildOutput 592 | Name: Deploy 593 | RoleArn: 594 | Fn::GetAtt: 595 | - PipelineDeployCodePipelineActionRole 596 | - Arn 597 | RunOrder: 1 598 | Name: 'Deploy{{service_instance.name}}' 599 | {%- endfor %} 600 | ArtifactStore: 601 | EncryptionKey: 602 | Id: 603 | Fn::GetAtt: 604 | - PipelineArtifactsBucketEncryptionKey 605 | - Arn 606 | Type: KMS 607 | Location: 608 | Ref: PipelineArtifactsBucket 609 | Type: S3 610 | DependsOn: 611 | - PipelineRoleDefaultPolicy 612 | - PipelineRole 613 | PipelineBuildCodePipelineActionRole: 614 | Type: AWS::IAM::Role 615 | Properties: 616 | AssumeRolePolicyDocument: 617 | Statement: 618 | - Action: sts:AssumeRole 619 | Effect: Allow 620 | Principal: 621 | AWS: 622 | Fn::Join: 623 | - "" 624 | - - "arn:" 625 | - Ref: AWS::Partition 626 | - ":iam::" 627 | - Ref: AWS::AccountId 
628 | - :root 629 | Version: "2012-10-17" 630 | PipelineBuildCodePipelineActionRoleDefaultPolicy: 631 | Type: AWS::IAM::Policy 632 | Properties: 633 | PolicyDocument: 634 | Statement: 635 | - Action: 636 | - codebuild:BatchGetBuilds 637 | - codebuild:StartBuild 638 | - codebuild:StopBuild 639 | Effect: Allow 640 | Resource: 641 | Fn::GetAtt: 642 | - BuildProject 643 | - Arn 644 | Version: "2012-10-17" 645 | PolicyName: PipelineBuildCodePipelineActionRoleDefaultPolicy 646 | Roles: 647 | - Ref: PipelineBuildCodePipelineActionRole 648 | PipelineDeployCodePipelineActionRole: 649 | Type: AWS::IAM::Role 650 | Properties: 651 | AssumeRolePolicyDocument: 652 | Statement: 653 | - Action: sts:AssumeRole 654 | Effect: Allow 655 | Principal: 656 | AWS: 657 | Fn::Join: 658 | - "" 659 | - - "arn:" 660 | - Ref: AWS::Partition 661 | - ":iam::" 662 | - Ref: AWS::AccountId 663 | - :root 664 | Version: "2012-10-17" 665 | PipelineDeployCodePipelineActionRoleDefaultPolicy: 666 | Type: AWS::IAM::Policy 667 | Properties: 668 | PolicyDocument: 669 | Statement: 670 | - Action: 671 | - codebuild:BatchGetBuilds 672 | - codebuild:StartBuild 673 | - codebuild:StopBuild 674 | Effect: Allow 675 | Resource: 676 | Fn::Join: 677 | - "" 678 | - - "arn:" 679 | - Ref: AWS::Partition 680 | - ":codebuild:" 681 | - Ref: AWS::Region 682 | - ":" 683 | - Ref: AWS::AccountId 684 | - ":project/Deploy*" 685 | Version: "2012-10-17" 686 | PolicyName: PipelineDeployCodePipelineActionRoleDefaultPolicy 687 | Roles: 688 | - Ref: PipelineDeployCodePipelineActionRole 689 | Outputs: 690 | PipelineEndpoint: 691 | Description: The URL to access the pipeline 692 | Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/codesuite/codepipeline/pipelines/${Pipeline}/view?region=${AWS::Region}" -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/pipeline_infrastructure/manifest.yaml: 
-------------------------------------------------------------------------------- 1 | infrastructure: 2 | templates: 3 | - file: "cloudformation.yaml" 4 | rendering_engine: jinja 5 | template_language: cloudformation 6 | -------------------------------------------------------------------------------- /aws-managed/lb-fargate-service/v1/schema/schema.yaml: -------------------------------------------------------------------------------- 1 | schema: 2 | format: 3 | openapi: "3.0.0" 4 | service_input_type: "LoadBalancedServiceInput" 5 | pipeline_input_type: "PipelineInputs" 6 | 7 | types: 8 | LoadBalancedServiceInput: 9 | type: object 10 | description: "Input properties for a loadbalanced Fargate service" 11 | properties: 12 | port: 13 | type: number 14 | description: "The port to route traffic to" 15 | default: 80 16 | minimum: 0 17 | maximum: 65535 18 | desired_count: 19 | type: number 20 | description: "The default number of Fargate tasks you want running" 21 | default: 1 22 | minimum: 1 23 | task_size: 24 | type: string 25 | description: "The size of the task you want to run" 26 | enum: ["x-small", "small", "medium", "large", "x-large"] 27 | default: "x-small" 28 | scope: 29 | type: string 30 | description: "If the service will be public or private" 31 | enum: ["public", "private"] 32 | default: "private" 33 | image: 34 | type: string 35 | description: "The name/url of the container image" 36 | default: "public.ecr.aws/aws-containers/proton-demo-image:2d7f777" 37 | minLength: 1 38 | maxLength: 200 39 | env_vars: 40 | type: string 41 | description: "The Docker environment variables to use" 42 | default: "ENV_NAME_1=ENV_VALUE_1;ENV_NAME_2=ENV_VALUE_2" 43 | minLength: 1 44 | maxLength: 200 45 | PipelineInputs: 46 | type: object 47 | description: "Pipeline input properties" 48 | properties: 49 | dockerfile: 50 | type: string 51 | description: "The location of the Dockerfile to build" 52 | default: "Dockerfile" 53 | minLength: 1 54 | maxLength: 100 55 | unit_test_command: 
56 | type: string 57 | description: "The command to run to unit test the application code" 58 | default: "echo 'add your unit test command here'" 59 | minLength: 1 60 | maxLength: 200 61 | -------------------------------------------------------------------------------- /aws-managed/multi-svc-env/v1/infrastructure/cloudformation.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: Simple Lambda based HTTP service template 4 | 5 | Mappings: 6 | # The VPC and subnet configuration is passed in via the environment spec. 7 | SubnetConfig: 8 | VPC: 9 | CIDR: '{{ environment.inputs.vpc_cidr}}' # customization parameter 10 | DNS: 11 | Hostname: '{{ environment.inputs.dns_hostname}}' # customization parameter 12 | 13 | Resources: 14 | VPC: 15 | Type: AWS::EC2::VPC 16 | Properties: 17 | EnableDnsSupport: true 18 | EnableDnsHostnames: true 19 | CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] 20 | 21 | PublicSubnetOne: 22 | Type: AWS::EC2::Subnet 23 | Properties: 24 | AvailabilityZone: 25 | Fn::Select: 26 | - 0 27 | - Fn::GetAZs: {Ref: 'AWS::Region'} 28 | VpcId: !Ref 'VPC' 29 | CidrBlock: !Select [ 0, !Cidr [ !FindInMap ['SubnetConfig', 'VPC', 'CIDR'], 3, 8 ]] 30 | MapPublicIpOnLaunch: true 31 | 32 | PublicSubnetTwo: 33 | Type: AWS::EC2::Subnet 34 | Properties: 35 | AvailabilityZone: 36 | Fn::Select: 37 | - 1 38 | - Fn::GetAZs: {Ref: 'AWS::Region'} 39 | VpcId: !Ref 'VPC' 40 | CidrBlock: !Select [ 1, !Cidr [ !FindInMap ['SubnetConfig', 'VPC', 'CIDR'], 3, 8 ]] 41 | MapPublicIpOnLaunch: true 42 | 43 | PublicSubnetThree: 44 | Type: AWS::EC2::Subnet 45 | Properties: 46 | AvailabilityZone: 47 | Fn::Select: 48 | - 2 49 | - Fn::GetAZs: {Ref: 'AWS::Region'} 50 | VpcId: !Ref 'VPC' 51 | CidrBlock: !Select [ 2, !Cidr [ !FindInMap ['SubnetConfig', 'VPC', 'CIDR'], 3, 8 ]] 52 | MapPublicIpOnLaunch: true 53 | 54 | # Setup networking resources for the 
public subnets. Containers 55 | # in the public subnets have public IP addresses and the routing table 56 | # sends network traffic via the internet gateway. 57 | InternetGateway: 58 | Type: AWS::EC2::InternetGateway 59 | GatewayAttachement: 60 | Type: AWS::EC2::VPCGatewayAttachment 61 | Properties: 62 | VpcId: !Ref 'VPC' 63 | InternetGatewayId: !Ref 'InternetGateway' 64 | PublicRouteTable: 65 | Type: AWS::EC2::RouteTable 66 | Properties: 67 | VpcId: !Ref 'VPC' 68 | PublicRoute: 69 | Type: AWS::EC2::Route 70 | DependsOn: GatewayAttachement 71 | Properties: 72 | RouteTableId: !Ref 'PublicRouteTable' 73 | DestinationCidrBlock: '0.0.0.0/0' 74 | GatewayId: !Ref 'InternetGateway' 75 | PublicSubnetOneRouteTableAssociation: 76 | Type: AWS::EC2::SubnetRouteTableAssociation 77 | Properties: 78 | SubnetId: !Ref PublicSubnetOne 79 | RouteTableId: !Ref PublicRouteTable 80 | PublicSubnetTwoRouteTableAssociation: 81 | Type: AWS::EC2::SubnetRouteTableAssociation 82 | Properties: 83 | SubnetId: !Ref PublicSubnetTwo 84 | RouteTableId: !Ref PublicRouteTable 85 | PublicSubnetThreeRouteTableAssociation: 86 | Type: AWS::EC2::SubnetRouteTableAssociation 87 | Properties: 88 | SubnetId: !Ref PublicSubnetThree 89 | RouteTableId: !Ref PublicRouteTable 90 | 91 | # ECS Resources 92 | ECSCluster: 93 | Type: AWS::ECS::Cluster 94 | 95 | # A security group for the containers we will run in Fargate. 96 | # Rules are added to this security group based on what ingress you 97 | # add for the cluster. 98 | ContainerSecurityGroup: 99 | Type: AWS::EC2::SecurityGroup 100 | Properties: 101 | GroupDescription: Access to the ECS Containers 102 | VpcId: !Ref 'VPC' 103 | 104 | # This is a role to allows ECS container agent makes calls to 105 | # the Amazon ECS API on your behalf. 
106 | ECSTaskExecutionRole: 107 | Type: AWS::IAM::Role 108 | Properties: 109 | AssumeRolePolicyDocument: 110 | Statement: 111 | - Effect: Allow 112 | Principal: 113 | Service: 114 | - ecs.amazonaws.com 115 | - ecs-tasks.amazonaws.com 116 | Action: ['sts:AssumeRole'] 117 | Path: / 118 | ManagedPolicyArns: 119 | - 'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy' 120 | 121 | DnsHostedZone: 122 | Type: AWS::Route53::HostedZone 123 | Properties: 124 | Name: !FindInMap ['SubnetConfig', 'DNS', 'Hostname'] 125 | HostedZoneConfig: 126 | Comment: Private hosted zone 127 | VPCs: 128 | - VPCId: !Ref VPC 129 | VPCRegion: !Ref AWS::Region 130 | 131 | # These output values are will be available to service templates, given the 132 | # the 'environment' namespace, for example, environment.outputs.ClusterName. 133 | Outputs: 134 | ContainerSecurityGroup: 135 | Description: Container Security group 136 | Value: !Ref 'ContainerSecurityGroup' 137 | ClusterName: 138 | Description: The name of the ECS cluster 139 | Value: !Ref 'ECSCluster' 140 | ECSTaskExecutionRole: 141 | Description: The ARN of the ECS task execution role 142 | Value: !GetAtt 'ECSTaskExecutionRole.Arn' 143 | VpcId: 144 | Description: The ID of the VPC that this stack is deployed in 145 | Value: !Ref 'VPC' 146 | PublicSubnetOne: 147 | Description: Public subnet one 148 | Value: !Ref 'PublicSubnetOne' 149 | PublicSubnetTwo: 150 | Description: Public subnet two 151 | Value: !Ref 'PublicSubnetTwo' 152 | PublicSubnetThree: 153 | Description: Public subnet three 154 | Value: !Ref 'PublicSubnetThree' 155 | HostedZoneId: 156 | Description: The ID of the hosted zone 157 | Value: !Ref 'DnsHostedZone' 158 | VpcCIDR: 159 | Description: The VPC CIDR 160 | Value: '{{ environment.inputs.vpc_cidr }}' 161 | DnsHostname: 162 | Description: The DNS hostname of the hosted zone 163 | Value: '{{ environment.inputs.dns_hostname }}' -------------------------------------------------------------------------------- 
/aws-managed/multi-svc-env/v1/infrastructure/manifest.yaml: -------------------------------------------------------------------------------- 1 | infrastructure: 2 | templates: 3 | - file: "cloudformation.yaml" 4 | rendering_engine: jinja 5 | template_language: cloudformation 6 | -------------------------------------------------------------------------------- /aws-managed/multi-svc-env/v1/schema/schema.yaml: -------------------------------------------------------------------------------- 1 | schema: # required 2 | format: # required 3 | openapi: "3.0.0" # required 4 | # required defined by administrator 5 | environment_input_type: "PublicEnvironmentInput" 6 | types: # required 7 | # defined by administrator 8 | PublicEnvironmentInput: 9 | type: object 10 | description: "Input properties for my environment" 11 | properties: 12 | vpc_cidr: # parameter 13 | type: string 14 | description: "The CIDR range for your VPC" 15 | default: 10.0.0.0/16 16 | pattern: ([0-9]{1,3}\.){3}[0-9]{1,3}($|/(16|24)) 17 | dns_hostname: # parameter 18 | type: string 19 | description: "The hostname for the Route53 Hosted Zone" 20 | default: protonworkshop.hosted.local 21 | -------------------------------------------------------------------------------- /env_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "REPLACE_ME": { 3 | "role": "arn:aws:iam::REPLACE_ME:role/REPLACE_ME", 4 | "region": "us-east-1", 5 | "state_bucket":"REPLACE_ME" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /lambda-vpc/README.md: -------------------------------------------------------------------------------- 1 | ## Terraform VPC + Lambda Sample 2 | 3 | This directory contains two sample templates authored using Terraform. One template is an AWS Proton Environment Template that will construct a VPC. The second template is an AWS Proton Service Template that will construct a Lambda function inside of that VPC. 
4 | 5 | If you need help deploying this, head to [aws-samples/aws-proton-terraform-github-actions-sample](https://github.com/aws-samples/aws-proton-terraform-github-actions-sample) where we provide a walkthrough for running Self-managed provisioning options for executing Terraform through AWS Proton! 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /lambda-vpc/sample-lambda-function-template/v1/.compatible-envs: -------------------------------------------------------------------------------- 1 | sample-vpc-environment-template:1 2 | -------------------------------------------------------------------------------- /lambda-vpc/sample-lambda-function-template/v1/instance_infrastructure/config.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 3.0" 6 | } 7 | } 8 | 9 | backend "s3" {} 10 | } 11 | 12 | # Configure the AWS Provider 13 | provider "aws" {} 14 | 15 | variable "aws_region" { 16 | type = string 17 | } 18 | -------------------------------------------------------------------------------- /lambda-vpc/sample-lambda-function-template/v1/instance_infrastructure/lambda.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "iam_for_lambda" { 2 | name = "iam_for_lambda" 3 | 4 | managed_policy_arns = ["arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"] 5 | 6 | assume_role_policy = <