├── .chglog ├── CHANGELOG.tpl.md └── config.yml ├── .config ├── .checkov.yml └── .terraform-docs.yml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ ├── documentation_improvements.yml │ ├── feature_request.yml │ ├── maintenance.yml │ └── rfc.yml ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── release-drafter.yml ├── semantic.yml └── workflows │ ├── changelog.yml │ ├── checkov.yml │ ├── codeql-analysis.yml │ ├── dependency-review.yml │ ├── docs.yml │ ├── jenkins-deployment.yml │ ├── ossf_scorecard.yml │ ├── release-drafter.yml │ ├── tf-test-on-comment-modules-perforce.yml │ └── trivy.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── THIRD-PARTY-LICENSES ├── assets ├── ansible-playbooks │ └── perforce │ │ └── p4-server │ │ ├── README.md │ │ ├── commit_playbook.yml │ │ └── replica_playbook.yml ├── jenkins-pipelines │ ├── README.md │ ├── delete_oldest_snapshot.groovy │ ├── gamelift_sdk.groovy │ ├── godot.groovy │ ├── multiplatform_build.groovy │ └── ue5_build_pipeline.groovy └── packer │ ├── build-agents │ ├── linux │ │ ├── README.md │ │ ├── amazon-linux-2023-arm64.pkr.hcl │ │ ├── amazon-linux-2023-x86_64.pkr.hcl │ │ ├── create_swap.service │ │ ├── create_swap.sh │ │ ├── example.pkrvars.hcl │ │ ├── fsx_automounter.py │ │ ├── fsx_automounter.service │ │ ├── install_common.al2023.sh │ │ ├── install_common.ubuntu.sh │ │ ├── install_mold.sh │ │ ├── install_octobuild.al2023.x86_64.sh │ │ ├── install_octobuild.ubuntu.x86_64.sh │ │ ├── install_sccache.sh │ │ ├── mount_ephemeral.service │ │ ├── mount_ephemeral.sh │ │ ├── octobuild.conf │ │ ├── sccache.service │ │ ├── ubuntu-jammy-22.04-amd64-server.pkr.hcl │ │ └── ubuntu-jammy-22.04-arm64-server.pkr.hcl │ └── windows │ │ ├── README.md │ │ ├── base_setup.ps1 │ │ ├── example.pkrvars.hcl │ │ ├── install_vs_tools.ps1 │ │ ├── setup_jenkins_agent.ps1 │ │ ├── userdata.ps1 │ │ 
└── windows.pkr.hcl │ └── perforce │ └── p4-server │ ├── README.md │ ├── example.pkrvars.hcl │ ├── p4_configure.sh │ ├── p4_setup.sh │ ├── perforce_arm64.pkr.hcl │ └── perforce_x86.pkr.hcl ├── docs ├── Dockerfile ├── assets │ ├── dockerfiles.md │ ├── index.md │ └── packer │ │ └── index.md ├── changelog.md ├── contributing.md ├── getting-started.md ├── index.md ├── media │ ├── diagrams │ │ ├── jenkins-module-architecture.drawio │ │ ├── perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png │ │ ├── perforce-module-architectures.drawio │ │ ├── perforce_complete_example.drawio │ │ ├── teamcity-server-architecture.drawio │ │ ├── unreal-cloud-ddc-infra.drawio │ │ ├── unreal-cloud-ddc-single-region.drawio │ │ └── unreal-engine-horde-architecture.drawio │ ├── images │ │ ├── access-jenkins.png │ │ ├── helix-auth-service-architecture.png │ │ ├── helix-core-architecture.png │ │ ├── helix-swarm-architecture.jpg │ │ ├── helix-swarm-architecture.png │ │ ├── jenkins-admin-password.png │ │ ├── jenkins-module-architecture.png │ │ ├── perforce-complete-example.jpg │ │ ├── perforce-complete-example.png │ │ ├── teamcity-server-architecture.png │ │ ├── unreal-cloud-ddc-infra.png │ │ ├── unreal-cloud-ddc-single-region.png │ │ └── unreal-engine-horde-architecture.png │ └── logos │ │ ├── aws-logo-white.svg │ │ └── favicon.ico ├── modules │ └── index.md ├── overrides │ ├── index.html │ └── main.html ├── requirements.txt ├── security.md └── stylesheets │ ├── companies.css │ ├── extra.css │ ├── hero.css │ └── themes.css ├── mkdocs.yml ├── modules ├── README.md ├── jenkins │ ├── README.md │ ├── alb.tf │ ├── asg.tf │ ├── assets │ │ └── media │ │ │ └── diagrams │ │ │ ├── jenkins-admin-password.png │ │ │ └── jenkins-module-architecture.png │ ├── data.tf │ ├── ecs.tf │ ├── efs.tf │ ├── examples │ │ └── complete │ │ │ ├── dns.tf │ │ │ ├── local.tf │ │ │ ├── main.tf │ │ │ ├── variables.tf │ │ │ ├── versions.tf │ │ │ └── vpc.tf │ ├── fsxz.tf │ ├── iam.tf │ ├── local.tf │ ├── 
outputs.tf │ ├── s3.tf │ ├── sg.tf │ ├── variables.tf │ └── versions.tf ├── perforce │ ├── README.md │ ├── assets │ │ └── media │ │ │ └── diagrams │ │ │ ├── p4-auth-architecture.png │ │ │ ├── p4-code-review-architecture.png │ │ │ ├── p4-server-architecture.png │ │ │ └── perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png │ ├── examples │ │ ├── create-resources-complete │ │ │ ├── README.md │ │ │ ├── dns.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── security.tf │ │ │ ├── variables.tf │ │ │ ├── versions.tf │ │ │ └── vpc.tf │ │ └── p4-server-fsxn │ │ │ ├── data.tf │ │ │ ├── dns.tf │ │ │ ├── local.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ ├── versions.tf │ │ │ └── vpc.tf │ ├── lb.tf │ ├── locals.tf │ ├── main.tf │ ├── modules │ │ ├── p4-auth │ │ │ ├── README.md │ │ │ ├── alb.tf │ │ │ ├── data.tf │ │ │ ├── iam.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── sg.tf │ │ │ ├── variables.tf │ │ │ └── versions.tf │ │ ├── p4-code-review │ │ │ ├── README.md │ │ │ ├── alb.tf │ │ │ ├── data.tf │ │ │ ├── elasticache.tf │ │ │ ├── iam.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── sg.tf │ │ │ ├── variables.tf │ │ │ └── versions.tf │ │ └── p4-server │ │ │ ├── README.md │ │ │ ├── data.tf │ │ │ ├── fsxn.tf │ │ │ ├── iam.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── sg.tf │ │ │ ├── templates │ │ │ └── user_data.tftpl │ │ │ ├── variables.tf │ │ │ └── versions.tf │ ├── outputs.tf │ ├── route53.tf │ ├── sg.tf │ ├── tests │ │ ├── 01_create_resources_complete.tftest.hcl │ │ ├── 02_p4_server_fsxn.tftest.hcl │ │ └── setup │ │ │ └── ssm.tf │ ├── variables.tf │ └── versions.tf ├── teamcity │ ├── README.md │ ├── assets │ │ └── media │ │ │ └── diagrams │ │ │ └── teamcity-server-architecture.png │ ├── examples │ │ └── simple │ │ │ ├── dns.tf │ │ │ ├── local.tf │ │ │ ├── main.tf │ │ │ ├── versions.tf │ │ │ └── vpc.tf │ ├── local.tf │ ├── main.tf │ 
├── outputs.tf │ ├── variables.tf │ └── versions.tf └── unreal │ ├── horde │ ├── README.md │ ├── alb.tf │ ├── asg.tf │ ├── assets │ │ └── media │ │ │ └── diagrams │ │ │ └── unreal-engine-horde-architecture.png │ ├── config │ │ ├── agent │ │ │ ├── horde-agent.ansible.yml │ │ │ └── horde-agent.service │ │ └── ssm │ │ │ └── AnsibleRunCommand.json │ ├── docdb.tf │ ├── ecs.tf │ ├── elasticache.tf │ ├── examples │ │ └── complete │ │ │ ├── data.tf │ │ │ ├── dns.tf │ │ │ ├── main.tf │ │ │ ├── variables.tf │ │ │ ├── versions.tf │ │ │ └── vpc.tf │ ├── iam.tf │ ├── local.tf │ ├── outputs.tf │ ├── sg.tf │ ├── variables.tf │ └── versions.tf │ └── unreal-cloud-ddc │ ├── unreal-cloud-ddc-infra │ ├── README.md │ ├── assets │ │ └── media │ │ │ └── diagrams │ │ │ └── unreal-cloud-ddc-infra.png │ ├── data.tf │ ├── eks.tf │ ├── iam.tf │ ├── locals.tf │ ├── monitoring.tf │ ├── outputs.tf │ ├── s3.tf │ ├── scylla.tf │ ├── sg.tf │ ├── variables.tf │ └── versions.tf │ └── unreal-cloud-ddc-intra-cluster │ ├── README.md │ ├── assets │ └── media │ │ └── diagrams │ │ └── unreal-cloud-ddc-single-region.png │ ├── data.tf │ ├── helm.tf │ ├── iam.tf │ ├── locals.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf └── samples ├── README.md ├── simple-build-pipeline ├── README.md ├── dns.tf ├── lb.tf ├── locals.tf ├── main.tf ├── outputs.tf ├── security.tf ├── variables.tf ├── versions.tf └── vpc.tf └── unreal-cloud-ddc-single-region ├── README.md ├── assets ├── sanity_check.sh └── unreal_cloud_ddc_single_region.yaml ├── dns.tf ├── local.tf ├── main.tf ├── outputs.tf ├── providers.tf ├── variables.tf └── vpc ├── main.tf ├── outputs.tf ├── variables.tf └── versions.tf /.chglog/CHANGELOG.tpl.md: -------------------------------------------------------------------------------- 1 | {{ if .Versions -}} 2 | 3 | ## [Unreleased] 4 | 5 | {{ if .Unreleased.CommitGroups -}} 6 | {{ range .Unreleased.CommitGroups -}} 7 | ### {{ .Title }} 8 | {{ range .Commits -}} 9 | - {{ if .Scope }}**{{ .Scope }}:** {{ 
end }}{{ .Subject }} 10 | {{ end }} 11 | {{ end -}} 12 | {{ end -}} 13 | {{ end -}} 14 | 15 | {{ range .Versions }} 16 | 17 | ## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }} 18 | {{ range .CommitGroups -}} 19 | ### {{ .Title }} 20 | {{ range .Commits -}} 21 | - {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} 22 | {{ end }} 23 | {{ end -}} 24 | 25 | {{- if .RevertCommits -}} 26 | ### Reverts 27 | {{ range .RevertCommits -}} 28 | - {{ .Revert.Header }} 29 | {{ end }} 30 | {{ end -}} 31 | 32 | {{- if .MergeCommits -}} 33 | ### Pull Requests 34 | {{ range .MergeCommits -}} 35 | - {{ .Header }} 36 | {{ end }} 37 | {{ end -}} 38 | 39 | {{- if .NoteGroups -}} 40 | {{ range .NoteGroups -}} 41 | ### {{ .Title }} 42 | {{ range .Notes }} 43 | {{ .Body }} 44 | {{ end }} 45 | {{ end -}} 46 | {{ end -}} 47 | {{ end -}} 48 | 49 | {{- if .Versions }} 50 | [Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD 51 | {{ range .Versions -}} 52 | {{ if .Tag.Previous -}} 53 | [{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }} 54 | {{ end -}} 55 | {{ end -}} 56 | {{ end -}} -------------------------------------------------------------------------------- /.chglog/config.yml: -------------------------------------------------------------------------------- 1 | style: github 2 | template: CHANGELOG.tpl.md 3 | info: 4 | title: CHANGELOG 5 | repository_url: https://github.com/aws-games/cloud-game-development-toolkit 6 | options: 7 | commits: 8 | filters: 9 | Type: 10 | - feat 11 | - fix 12 | - perf 13 | - refactor 14 | - docs 15 | - chore 16 | - revert 17 | commit_groups: 18 | title_maps: 19 | feat: Features 20 | fix: Bug Fixes 21 | perf: Performance Improvements 22 | refactor: Code Refactoring 23 | header: 24 | pattern: "^(\\w*)(?:\\(([\\w\\$\\.\\-\\*\\s]*)\\))?\\:\\s(.*)$" 25 | pattern_maps: 26 | - Type 27 | - 
Scope 28 | - Subject 29 | notes: 30 | keywords: 31 | - BREAKING CHANGE 32 | -------------------------------------------------------------------------------- /.config/.checkov.yml: -------------------------------------------------------------------------------- 1 | branch: main 2 | compact: true 3 | download-external-modules: true 4 | evaluate-variables: true 5 | external-modules-download-path: .external_modules 6 | framework: 7 | - all 8 | output: cli 9 | quiet: true 10 | soft-fail-on: 11 | - CKV_GHA_7 12 | skip-framework: 13 | - dockerfile 14 | - secrets 15 | soft-fail: false 16 | -------------------------------------------------------------------------------- /.config/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | formatter: markdown table 2 | sections: 3 | show: 4 | - header 5 | - inputs 6 | - providers 7 | - modules 8 | - resources 9 | - footer 10 | output: 11 | file: README.md 12 | mode: inject 13 | template: |- 14 | 15 | {{ .Content }} 16 | 17 | sort: 18 | enabled: true 19 | by: required 20 | settings: 21 | indent: 4 22 | escape: false 23 | default: false 24 | required: false 25 | type: true 26 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners 2 | 3 | * @aws-games/cloud-game-development-toolkit-maintainers -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Report a reproducible bug to help us improve 3 | title: "Bug: TITLE" 4 | labels: ["bug", "triage"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for submitting a bug 
report. Please add as much information as possible to help us reproduce, and remove any potential sensitive data. 10 | - type: textarea 11 | id: expected_behaviour 12 | attributes: 13 | label: Expected Behaviour 14 | description: Please share details on the behaviour you expected 15 | validations: 16 | required: true 17 | - type: textarea 18 | id: current_behaviour 19 | attributes: 20 | label: Current Behaviour 21 | description: Please share details on the current issue 22 | validations: 23 | required: true 24 | - type: textarea 25 | id: solution 26 | attributes: 27 | label: Possible Solution 28 | description: If known, please suggest a potential resolution 29 | validations: 30 | required: false 31 | - type: textarea 32 | id: steps 33 | attributes: 34 | label: Steps to Reproduce 35 | description: Please share how we might be able to reproduce this issue 36 | validations: 37 | required: true 38 | - type: input 39 | id: version 40 | attributes: 41 | label: Cloud Game Development Toolkit version 42 | placeholder: "latest" 43 | value: latest 44 | validations: 45 | required: true -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Ask a question 4 | url: https://github.com/aws-games/cloud-game-development-toolkit/discussions 5 | about: Ask a question about the Cloud Game Development Toolkit -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation_improvements.yml: -------------------------------------------------------------------------------- 1 | name: Documentation improvements 2 | description: Suggest a documentation update 3 | title: "Docs: TITLE" 4 | labels: ["documentation", "triage"] 5 | body: 6 | - type: textarea 7 | id: search_area 8 | attributes: 9 | label: What were you searching in the docs? 
10 | description: Please help us understand how you looked for information that was either unclear or not available. 11 | validations: 12 | required: true 13 | - type: input 14 | id: area 15 | attributes: 16 | label: Is this related to an existing documentation section? 17 | description: Please share a link, if applicable. 18 | validations: 19 | required: false 20 | - type: textarea 21 | id: idea 22 | attributes: 23 | label: How can we improve? 24 | description: Please share your thoughts on how we can improve this experience 25 | validations: 26 | required: true 27 | - type: textarea 28 | id: suggestion 29 | attributes: 30 | label: Got a suggestion in mind? 31 | description: Please suggest a proposed update 32 | validations: 33 | required: false 34 | - type: checkboxes 35 | id: acknowledgment 36 | attributes: 37 | label: Acknowledgment 38 | options: 39 | - label: I understand the final update might be different from my proposed suggestion, or refused. 40 | required: true -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for Cloud Game Development Toolkit 3 | title: "Feature request: TITLE" 4 | labels: ["feature-request", "triage"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for taking the time to suggest an idea for Cloud Game Development Toolkit. 10 | *Note*: If you find a feature request valuable, please react with 👍 and your use case to help us understand customer demand. 
11 | - type: textarea 12 | id: problem 13 | attributes: 14 | label: Use case 15 | description: Please help us understand your use case or problem you're facing 16 | validations: 17 | required: true 18 | - type: textarea 19 | id: suggestion 20 | attributes: 21 | label: Solution/User Experience 22 | description: Please share what a good solution would look like to this use case 23 | validations: 24 | required: true 25 | - type: textarea 26 | id: alternatives 27 | attributes: 28 | label: Alternative solutions 29 | description: Please describe any alternative solutions to this use case 30 | validations: 31 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/maintenance.yml: -------------------------------------------------------------------------------- 1 | name: Maintenance 2 | description: Suggest an activity to help address governance and anything internal 3 | title: "Maintenance: TITLE" 4 | labels: ["triage", "maintenance"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for taking the time to suggest an idea for Cloud Game Development Toolkit. 10 | *Note*: If you find an issue valuable, please react with 👍 and your use case to help us understand customer demand. 11 | - type: textarea 12 | id: importance 13 | attributes: 14 | label: Why is this needed? 15 | description: Please help us understand the value so we can prioritize it accordingly 16 | validations: 17 | required: true 18 | - type: dropdown 19 | id: area 20 | attributes: 21 | label: Which area does this relate to? 
22 | multiple: true 23 | options: 24 | - Governance and Project Management 25 | - CI/GitHub Actions 26 | - Other 27 | - type: textarea 28 | id: suggestion 29 | attributes: 30 | label: Solution 31 | description: If available, please share what a good solution would look like 32 | validations: 33 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/rfc.yml: -------------------------------------------------------------------------------- 1 | name: Request for Comments (RFC) 2 | description: Feature design and detailed proposals 3 | title: "RFC: TITLE" 4 | labels: ["RFC", "triage"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for submitting a RFC. Please add as many details as possible to help further enrich this design. 10 | - type: input 11 | id: relation 12 | attributes: 13 | label: Is this related to an existing feature request or issue? 14 | description: Please share a link, if applicable 15 | - type: dropdown 16 | id: area 17 | attributes: 18 | label: What part of the Cloud Game Development Toolkit does this RFC relate to? 19 | options: 20 | - Assets 21 | - Modules 22 | - Playbooks 23 | - Samples 24 | - Other 25 | validations: 26 | required: true 27 | - type: textarea 28 | id: summary 29 | attributes: 30 | label: Summary 31 | description: Please provide an overview in one or two paragraphs 32 | validations: 33 | required: true 34 | - type: textarea 35 | id: problem 36 | attributes: 37 | label: Use case 38 | description: Please share the use case and motivation behind this proposal 39 | validations: 40 | required: true 41 | - type: textarea 42 | id: proposal 43 | attributes: 44 | label: Proposal 45 | description: Please explain the design in detail, so anyone familiar with the project could implement it 46 | placeholder: What the user experience looks like before and after this design? 
47 | validations: 48 | required: true 49 | - type: textarea 50 | id: scope 51 | attributes: 52 | label: Out of scope 53 | description: Please explain what should be considered out of scope in your proposal 54 | validations: 55 | required: true 56 | - type: textarea 57 | id: challenges 58 | attributes: 59 | label: Potential challenges 60 | description: Nothing is perfect. Please share common challenges, edge cases, and unresolved areas, along with suggestions on how to mitigate them 61 | validations: 62 | required: true 63 | - type: textarea 64 | id: integrations 65 | attributes: 66 | label: Dependencies and Integrations 67 | description: If applicable, please share whether this feature has additional dependencies, and how it might integrate with other parts of this project 68 | validations: 69 | required: false 70 | - type: textarea 71 | id: alternatives 72 | attributes: 73 | label: Alternative solutions 74 | description: Please describe any alternative solutions to this use case 75 | render: markdown 76 | validations: 77 | required: false -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Issue number:** 2 | 3 | ## Summary 4 | 5 | ### Changes 6 | 7 | > Please provide a summary of what's being changed 8 | 9 | ### User experience 10 | 11 | > Please share what the user experience looks like before and after this change 12 | 13 | ## Checklist 14 | 15 | If any items don't apply to your change, please leave them unchecked. 16 | 17 | * [ ] I have performed a self-review of this change 18 | * [ ] Changes have been tested 19 | * [ ] Changes are documented 20 | 21 | 22 | Is this a breaking change? 23 | 24 | 25 | 26 | ## Acknowledgment 27 | 28 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
29 | 30 | **Disclaimer**: We value your time and bandwidth. As such, any pull requests created might not be successful. -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | commit-message: 6 | prefix: "chore" 7 | include: "scope" 8 | schedule: 9 | interval: "daily" 10 | - package-ecosystem: "terraform" 11 | directories: 12 | - "/modules/jenkins" 13 | - "/modules/perforce/helix-authentication-service" 14 | - "/modules/perforce/helix-core" 15 | - "/modules/perforce/helix-swarm" 16 | - "/samples/simple-build-pipeline" 17 | - "/modules/unreal/unreal-cloud-ddc-intra-cluster" 18 | - "/modules/unreal/unreal-cloud-ddc-infra" 19 | - "/modules/unreal/horde" 20 | commit-message: 21 | prefix: "chore" 22 | include: "scope" 23 | groups: 24 | aws-provider: 25 | applies-to: version-updates 26 | patterns: 27 | - "hashicorp/aws" 28 | awscc-provider: 29 | applies-to: version-updates 30 | patterns: 31 | - "hashicorp/awscc" 32 | random-provider: 33 | applies-to: version-updates 34 | patterns: 35 | - "hashicorp/random" 36 | schedule: 37 | interval: "daily" 38 | - package-ecosystem: "docker" 39 | directory: "/docs" 40 | commit-message: 41 | prefix: "chore" 42 | include: "scope" 43 | schedule: 44 | interval: "daily" 45 | - package-ecosystem: "pip" 46 | directory: "/docs" 47 | commit-message: 48 | prefix: "chore" 49 | include: "scope" 50 | schedule: 51 | interval: "daily" 52 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | change-template: "* $TITLE (#$NUMBER) by @$AUTHOR" 2 | categories: 3 | - title: "⚡ Breaking Changes" 4 | labels: 5 | - "breaking-change" 6 | - title: "🌟New features and non-breaking changes" 7 | 
labels: 8 | - "feature" 9 | - title: "📜 Documentation updates" 10 | labels: 11 | - "documentation" 12 | - title: "🐛 Bug and hot fixes" 13 | labels: 14 | - "bug" 15 | - "fix" 16 | - title: "🚒 Deprecations" 17 | labels: 18 | - "deprecated" 19 | - title: "🔧 Maintenance" 20 | labels: 21 | - "internal" 22 | - "dependencies" 23 | exclude-labels: 24 | - "skip-changelog" 25 | tag-template: "v$NEXT_PATCH_VERSION" 26 | template: | 27 | ## Summary 28 | 29 | **[Human readable summary of changes]** 30 | 31 | ## Changes 32 | 33 | $CHANGES 34 | 35 | ## This release was made possible by the following contributors: 36 | 37 | $CONTRIBUTORS -------------------------------------------------------------------------------- /.github/semantic.yml: -------------------------------------------------------------------------------- 1 | # conventional commit types: https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json 2 | types: 3 | - feat 4 | - fix 5 | - docs 6 | - style 7 | - refactor 8 | - perf 9 | - test 10 | - build 11 | - ci 12 | - chore 13 | - revert 14 | - improv 15 | 16 | # Always validate the PR title 17 | # and ignore the commits to lower the entry bar for contribution 18 | # while titles make up the Release notes to ease maintenance overhead 19 | titleOnly: true -------------------------------------------------------------------------------- /.github/workflows/changelog.yml: -------------------------------------------------------------------------------- 1 | name: Update Changelog 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' # This will run the action every day at midnight 5 | workflow_dispatch: # If we need to run the action manually 6 | permissions: 7 | contents: read 8 | jobs: 9 | update-changelog: 10 | if: github.repository == 'aws-games/cloud-game-development-toolkit' 11 | concurrency: 12 | group: changelog-build 13 | permissions: 14 | contents: write # Used to create temporary branch for changelog updates 15 | pull-requests: write # Used to create PRs 
for changelog updates 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Check out code 19 | uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | token: ${{ secrets.BOT_PAT }} 23 | - name: Update Changelog 24 | env: 25 | GH_TOKEN: ${{ secrets.BOT_PAT }} 26 | FILE_TO_COMMIT: CHANGELOG.md 27 | DESTINATION_BRANCH: changelog-${{ github.run_id }} 28 | run: | 29 | git fetch --tags origin 30 | docker run --rm -v $(pwd):/workdir quay.io/git-chglog/git-chglog -o CHANGELOG.md 31 | export TODAY=$( date -u '+%Y-%m-%d' ) 32 | export MESSAGE="chore: regenerate $FILE_TO_COMMIT for $TODAY" 33 | export CONTENT=$( base64 -i $FILE_TO_COMMIT ) 34 | export BRANCH_SHA=$( gh api /repos/:owner/:repo/git/ref/heads/main | jq .object.sha | tr -d '"') 35 | gh api --method POST /repos/:owner/:repo/git/refs \ 36 | --field "ref=refs/heads/$DESTINATION_BRANCH" \ 37 | --field "sha=$BRANCH_SHA" 38 | export SHA=$( gh api -XGET /repos/:owner/:repo/contents/$FILE_TO_COMMIT -F ref=refs/heads/$DESTINATION_BRANCH | jq .sha | tr -d '"') 39 | gh api --method PUT /repos/:owner/:repo/contents/$FILE_TO_COMMIT \ 40 | --field message="$MESSAGE" \ 41 | --field content="$CONTENT" \ 42 | --field encoding="base64" \ 43 | --field branch="$DESTINATION_BRANCH" \ 44 | --field sha=$SHA 45 | gh pr create --title "chore: update changelog" --body "This is an auto-generated PR to update the changelog." 
--base main --head $DESTINATION_BRANCH 46 | -------------------------------------------------------------------------------- /.github/workflows/checkov.yml: -------------------------------------------------------------------------------- 1 | name: Checkov 2 | on: 3 | pull_request: 4 | paths: 5 | - 'modules/**' 6 | - 'samples/**' 7 | - '.github/**' 8 | workflow_dispatch: 9 | permissions: 10 | contents: read 11 | jobs: 12 | scan: 13 | permissions: 14 | contents: read 15 | security-events: write 16 | actions: read 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Checkov GitHub Action 21 | uses: bridgecrewio/checkov-action@v12 22 | with: 23 | output_format: sarif 24 | output_file_path: results.sarif 25 | config_file: .config/.checkov.yml 26 | - name: Upload SARIF file 27 | uses: github/codeql-action/upload-sarif@v3 28 | if: success() || failure() 29 | with: 30 | sarif_file: results.sarif 31 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | on: 3 | workflow_call: 4 | push: 5 | branches: main 6 | pull_request: 7 | branches: main 8 | permissions: 9 | contents: read 10 | jobs: 11 | analyze: 12 | name: Analyze 13 | runs-on: ubuntu-latest 14 | permissions: 15 | security-events: write 16 | actions: read 17 | contents: read 18 | steps: 19 | - name: Checkout repository 20 | uses: actions/checkout@v4 21 | - name: Initialize CodeQL 22 | uses: github/codeql-action/init@v3 23 | - name: Perform CodeQL Analysis 24 | uses: github/codeql-action/analyze@v3 25 | -------------------------------------------------------------------------------- /.github/workflows/dependency-review.yml: -------------------------------------------------------------------------------- 1 | name: 'Dependency Review' 2 | on: 3 | pull_request: 4 | types: [opened, synchronize, reopened] 5 | permissions: 6 
| contents: read 7 | jobs: 8 | dependency-review: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: 'Checkout Repository' 12 | uses: actions/checkout@v4 13 | - name: 'Dependency Review' 14 | uses: actions/dependency-review-action@v4 -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | #==================================================================================================== 2 | ## Process 3 | # 1. When changes are made to documentation files and pushed to the main branch, the workflow in this file is triggered. 4 | # 2. Copy the repository's contents to the runner, so the workflow can access them. 5 | # 3. Build and deploy the documentation 6 | #==================================================================================================== 7 | name: Build Docs and Publish to gh-pages 8 | on: 9 | #push: 10 | # branches: 11 | # - main 12 | # paths: 13 | # - "docs/**" 14 | # - "mkdocs.yml" 15 | # - "CHANGELOG.md" 16 | # - "**/README.md" 17 | # - ".github/workflows/docs.yml" 18 | workflow_dispatch: 19 | inputs: 20 | version: 21 | description: "Version to build and publish docs (i.e. 
v0.1.0-alpha.1, v1.0.0)" 22 | required: true 23 | type: string 24 | alias: 25 | description: "Alias to associate version (latest, stage)" 26 | required: true 27 | type: string 28 | git_ref: 29 | description: "Branch or commit ID to checkout from" 30 | required: false 31 | type: string 32 | default: main 33 | permissions: 34 | contents: read 35 | jobs: 36 | publish_docs: 37 | if: github.repository == 'aws-games/cloud-game-development-toolkit' 38 | permissions: 39 | contents: write 40 | pages: write 41 | concurrency: 42 | group: docs 43 | runs-on: ubuntu-latest 44 | env: 45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 46 | steps: 47 | - name: Checkout code 48 | uses: actions/checkout@v4 49 | with: 50 | fetch-depth: 0 51 | ref: ${{ inputs.git_ref }} 52 | - name: Git client setup 53 | run: | 54 | git config user.name github-actions[bot] 55 | git config user.email 41898282+github-actions[bot]@users.noreply.github.com 56 | - name: Setup Python 57 | uses: actions/setup-python@v5 58 | with: 59 | python-version: '3.12' 60 | - name: Deploy Docs 61 | run: make docs-deploy-github VERSION=${{ inputs.VERSION }} ALIAS=${{ inputs.ALIAS }} 62 | -------------------------------------------------------------------------------- /.github/workflows/ossf_scorecard.yml: -------------------------------------------------------------------------------- 1 | name: Scorecard supply-chain security 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | push: 6 | branches: [ "main" ] 7 | workflow_dispatch: 8 | permissions: read-all 9 | jobs: 10 | analysis: 11 | name: Scorecard analysis 12 | runs-on: ubuntu-latest 13 | permissions: 14 | security-events: write # Needed to upload the results to code-scanning dashboard. 15 | id-token: write # Needed to publish results and get a badge (see publish_results below). 16 | contents: read # Uncomment the permissions if installing in a private repository. 17 | actions: read # Uncomment the permissions if installing in a private repository. 
18 | steps: 19 | - name: "Checkout code" 20 | uses: actions/checkout@85e6279cec87321a52edac9c87bce653a07cf6c2 # v4.1.6 21 | with: 22 | persist-credentials: false 23 | - name: "Run analysis" 24 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.3.2 25 | with: 26 | results_file: results.sarif 27 | results_format: sarif 28 | publish_results: true # Publish results to OpenSSF REST API for easy access by consumers 29 | - name: "Upload artifact" 30 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 31 | with: 32 | name: SARIF file 33 | path: results.sarif 34 | retention-days: 5 35 | # Upload the results to GitHub's code scanning dashboard (optional). 36 | - name: "Upload to code-scanning" 37 | uses: github/codeql-action/upload-sarif@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 38 | with: 39 | sarif_file: results.sarif 40 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | # PROCESS 4 | # 5 | # 1. Enumerate all PRs in merged state 6 | # 2. Filter out any PR labeled `skip-changelog` 7 | # 3. 
Updates or creates a new release in Draft mode 8 | 9 | on: 10 | push: 11 | branches: 12 | - main 13 | workflow_dispatch: 14 | permissions: 15 | contents: read 16 | jobs: 17 | update_release_draft: 18 | runs-on: ubuntu-latest 19 | permissions: 20 | contents: write # create release in draft mode 21 | pull-requests: write 22 | steps: 23 | - uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 24 | env: 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 26 | -------------------------------------------------------------------------------- /.github/workflows/trivy.yml: -------------------------------------------------------------------------------- 1 | name: Trivy Scan 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | workflow_call: 8 | permissions: 9 | contents: read 10 | jobs: 11 | trivy_scan: 12 | name: Trivy Scan 13 | runs-on: ubuntu-latest 14 | permissions: 15 | security-events: write 16 | actions: read 17 | contents: read 18 | steps: 19 | - name: Checkout code 20 | uses: actions/checkout@v4 21 | - name: Run Trivy vulnerability scanner in repo mode 22 | uses: aquasecurity/trivy-action@0.31.0 23 | with: 24 | scan-type: 'repo' 25 | ignore-unfixed: true 26 | format: 'sarif' 27 | output: 'trivy-results.sarif' 28 | scanners: 'vuln, secret, config' 29 | severity: 'CRITICAL,HIGH' 30 | - name: Upload Trivy scan results to GitHub Security tab 31 | uses: github/codeql-action/upload-sarif@v3 32 | with: 33 | sarif_file: 'trivy-results.sarif' 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # terraform lock files 5 | **/.terraform.lock.hcl 6 | 7 | # .tfstate files 8 | *.tfstate 9 | *.tfstate.* 10 | 11 | # Crash log files 12 | crash.log 13 | crash.*.log 14 | 15 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 16 | # 
password, private keys, and other secrets. These should not be part of version 17 | # control as they are data points which are potentially sensitive and subject 18 | # to change depending on the environment. 19 | *.tfvars 20 | !example.tfvars 21 | *.tfvars.json 22 | 23 | # Ignore override files as they are usually used to override resources locally and so 24 | # are not checked in 25 | override.tf 26 | override.tf.json 27 | *_override.tf 28 | *_override.tf.json 29 | 30 | # Include override files you do wish to add to version control using negated pattern 31 | # !example_override.tf 32 | 33 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 34 | # example: *tfplan* 35 | 36 | # Ignore CLI configuration files 37 | .terraformrc 38 | terraform.rc 39 | *.tfbackend 40 | 41 | # Ignore terraform backend state configuration 42 | backend.tf 43 | 44 | # Ignore packer variables 45 | *.pkrvars.hcl 46 | !example.pkrvars.hcl 47 | 48 | # Ignore IDE configurations 49 | .idea 50 | *.vscode 51 | 52 | # Ignore pre-check cache 53 | .pre-commit-trivy-cache/* 54 | .external_modules 55 | 56 | # Ignore site files related to docs 57 | site/ 58 | 59 | # Other 60 | .DS_Store 61 | 62 | # Python 63 | *.venv 64 | 65 | # SSH keys 66 | id_ed25519 67 | id_ed25519.pub 68 | 69 | # drawio temp and backup files 70 | *.drawio.bkp 71 | *.drawio.dtmp 72 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gitleaks/gitleaks 3 | rev: "v8.23.1" 4 | hooks: 5 | - id: gitleaks 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.6.0 8 | hooks: 9 | - id: check-merge-conflict 10 | - id: detect-private-key 11 | - id: detect-aws-credentials 12 | args: 13 | - --allow-missing-credentials 14 | - id: end-of-file-fixer 15 | - id: trailing-whitespace 16 | - repo: 
https://github.com/bridgecrewio/checkov 17 | rev: "3.2.201" 18 | hooks: 19 | - id: checkov 20 | args: ["--config-file", ".config/.checkov.yml"] 21 | - repo: https://github.com/antonbabenko/pre-commit-terraform 22 | rev: "v1.89.1" 23 | hooks: 24 | - id: terraform_docs 25 | args: 26 | - --hook-config=--add-to-existing-file=true 27 | - --hook-config=--create-file-if-not-exist=false 28 | - --hook-config=--use-standard-markers=true 29 | - id: terraform_validate 30 | - id: terraform_fmt 31 | - id: terraform_tflint -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := help 2 | 3 | GIT_USER_NAME := $(shell git config user.name) 4 | GIT_USER_EMAIL := $(shell git config user.email) 5 | 6 | COLOR_SUPPORT := $(shell tput colors 2>/dev/null) 7 | # Define color codes if the terminal supports color 8 | ifdef COLOR_SUPPORT 9 | ifneq ($(shell tput colors),-1) 10 | RED := $(shell tput setaf 1) 11 | GREEN := $(shell tput setaf 2) 12 | CYAN := $(shell tput setaf 6) 13 | RESET := $(shell tput sgr0) 14 | endif 15 | endif 16 | 17 | .PHONY: docs-deploy-github 18 | # Deploy the docs to remote branch in github. 19 | docs-deploy-github: ## Usage: `docs-deploy-github VERSION=v1.0.0 ALIAS=latest` 20 | @if [ -z "${VERSION}" ]; then echo -e "${RED}VERSION is not set. Example: 'docs-deploy-github VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. ${RESET}"; exit 1; fi 21 | @if [ -z "${ALIAS}" ]; then echo -e "${RED}ALIAS is not set. Example: 'docs-deploy-github VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. ${RESET}"; exit 1; fi 22 | @echo -e "${GREEN}Docs version is: ${VERSION}:${ALIAS}${RESET}"; 23 | pip install -r ./docs/requirements.txt 24 | mike deploy --push --update-aliases ${VERSION} ${ALIAS} 25 | 26 | .PHONY: docs-run 27 | # Builds and runs a specific version of the docs in Docker with live reloading to support iterative development. This doesn't include the version selector in the navigation pane that ships in production. 28 | docs-run: ## Usage: `make docs-run VERSION=v1.0.0 ALIAS=latest` 29 | @if [ -z "${VERSION}" ]; then echo -e "${RED}VERSION is not set. 
Example: 'make docs-run VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. ${RESET}"; exit 1; fi 30 | @if [ -z "${ALIAS}" ]; then echo -e "${RED}ALIAS is not set. Example: 'make docs-run VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. ${RESET}"; exit 1; fi 31 | @echo -e "${GREEN}Docs version is: ${VERSION}:${ALIAS}${RESET}"; 32 | docker build --no-cache -t docs:${VERSION} ./docs/ 33 | docker run --rm -it -p 8000:8000 -v ${PWD}:/docs docs:${VERSION} 34 | 35 | .PHONY: docs-run-versioned 36 | # Builds and runs the docs in Docker using `mike` instead of `mkdocs` to run a versioned docs site locally (what we deploy to prod). `mike` doesn't support live reloading, so you'll need to rebuild the container to see changes. 37 | docs-run-versioned: ## Usage: `make docs-run-versioned VERSION=v1.0.0 ALIAS=latest` 38 | @if [ -z "${VERSION}" ]; then echo -e "${RED}VERSION is not set. Example: 'make docs-run-versioned VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. ${RESET}"; exit 1; fi 39 | @if [ -z "${ALIAS}" ]; then echo -e "${RED}ALIAS is not set. Example: 'make docs-run-versioned VERSION=v1.0.0 ALIAS=latest'. Run 'make help' for usage. 
${RESET}"; exit 1; fi 40 | @echo -e "${GREEN}Docs version is: ${VERSION}:${ALIAS}${RESET}"; 41 | docker build --no-cache -t docs:${VERSION} ./docs/ 42 | docker run --rm -it -p 8000:8000 -v ${PWD}:/docs --entrypoint /bin/sh docs:${VERSION} -c "mike serve --dev-addr=0.0.0.0:8000" 43 | 44 | .PHONY: help 45 | help: ## Display this help 46 | @echo -e "Usage: make [TARGET]\n" 47 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "${CYAN}%-30s${RESET} %s\n", $$1, $$2}' 48 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cloud Game Development Toolkit 2 | 3 | [](LICENSE) 4 | [](https://api.securityscorecards.dev/projects/github.com/aws-games/cloud-game-development-toolkit) 5 | 6 | 7 | The **Cloud Game Development Toolkit (a.k.a. CGD Toolkit)** is a collection of templates and configurations for deploying game development infrastructure and tools on AWS. 8 | 9 | The project is designed for piecemeal usage: 10 | 11 | - Already have a CI/CD pipeline deployed but need a build machine image? :white_check_mark: 12 | - Looking to migrate your Perforce server from on-premise to AWS? :white_check_mark: 13 | - Starting your new studio from the ground up and looking for pre-built templates to deploy common infrastructure? :white_check_mark: 14 | 15 | The **Toolkit** consists of three key components: 16 | 17 | | Component | Description 18 | |-|-| 19 | |**Assets**| Reusable scripts, pipeline definitions, Dockerfiles, [Packer](https://www.packer.io/) templates, [Ansible](https://github.com/ansible/ansible) Playbooks to configure workloads after deployment, and other resources that might prove useful or are dependencies of any of the modules. 
20 | |**Modules**| Highly configurable and extensible [Terraform](https://www.terraform.io/) modules for simplified deployment of key game development infrastructure on AWS with best-practices by default. 21 | |**Samples**| Complete Terraform configurations for expedited studio setup that demonstrate module usage and integration with other AWS services. 22 | 23 | ## Getting Started 24 | 25 | **[📖 Documentation](https://aws-games.github.io/cloud-game-development-toolkit/latest/)** | **[💻 Contribute to the Project](https://aws-games.github.io/cloud-game-development-toolkit/latest/contributing.html)** | **[💬 Ask Questions](https://github.com/aws-games/cloud-game-development-toolkit/discussions/)** | **[🚧 Roadmap](https://github.com/orgs/aws-games/projects/1/views/1)** 26 | 27 | ## Security 28 | 29 | If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or [email AWS security directly](mailto:aws-security@amazon.com). 30 | 31 | ## License 32 | 33 | This project is licensed under the MIT-0 License. See the [LICENSE](LICENSE) file. 34 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | [](https://api.securityscorecards.dev/projects/github.com/aws-games/cloud-game-development-toolkit) [](https://www.bestpractices.dev/projects/9377) 3 | 4 | This project is maintained by members of the AWS for Games technical community within AWS (i.e. Solutions Architects, Technical Account Managers, Software Engineers) who support the gaming industry. Design decisions and tradeoffs made throughout this project reflect our experiences working with game studios to build and maintain their development infrastructure and tools in the cloud. 
We encourage contributions from the community via Pull Requests, which are manually reviewed and tested by the core maintainers of the project before they are merged. 5 | 6 | We rely on [Open Source Security (OpenSSF) Scorecard](https://securityscorecards.dev/) assessments to validate our project's security posture against a common set of open source project risks. We also self-certify our security practices against the standards defined by [OpenSSF Best Practices Badge Program](https://www.bestpractices.dev/en). The badges above are hyperlinks that you can follow to review the results of each. 7 | 8 | 9 | ## Reporting a vulnerability 10 | 11 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/) or directly via email to [aws-security@amazon.com](mailto:aws-security@amazon.com). 12 | 13 | Please do **not** create a public GitHub issue. 14 | -------------------------------------------------------------------------------- /assets/ansible-playbooks/perforce/p4-server/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Playbooks | P4 Server 2 | 3 | This page includes documentation for reusable Ansible Playbooks for game development on AWS. 
4 | 5 | Currently the project provides the following playbooks: 6 | 7 | 8 | | Playbook | Description | 9 | |----------|-------------| 10 | | P4 Server (formerly Helix Core) Commit Playbook | Sets up a server as a P4 Server Commit Server 11 | | P4 Server (formerly Helix Core) Replica Playbook | Sets up a server as a P4 Server Replica Server 12 | -------------------------------------------------------------------------------- /assets/ansible-playbooks/perforce/p4-server/commit_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Run mkrep.sh on a host and do the live backup 3 | hosts: localhost 4 | vars_prompt: 5 | - name: replica_hostname 6 | prompt: "Enter the replica hostname" 7 | private: no 8 | 9 | tasks: 10 | - name: Execute mkrep.sh script with parameters 11 | become: yes 12 | ansible.builtin.command: 13 | cmd: "/hxdepots/p4/common/bin/mkrep.sh -os -i 1 -t fr -s awseuwest1 -r {{ replica_hostname }}" 14 | 15 | - name: Change to perforce user and source vars, then run live_checkpoint.sh 16 | become: yes 17 | become_user: perforce 18 | ansible.builtin.shell: | 19 | source /p4/common/bin/p4_vars 1 && /p4/common/bin/live_checkpoint.sh 1 20 | 21 | - name: Add export to /etc/exports 22 | become: yes 23 | ansible.builtin.lineinfile: 24 | path: /etc/exports 25 | line: "/hxdepots/p4/1/checkpoints *(ro,no_all_squash)" 26 | state: present 27 | create: yes 28 | 29 | - name: Restart and enable NFS server 30 | become: yes 31 | ansible.builtin.systemd: 32 | name: nfs-server 33 | state: restarted 34 | enabled: yes 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /assets/ansible-playbooks/perforce/p4-server/replica_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set up NFS client and handle checkpoints 3 | hosts: localhost 4 | vars_prompt: 5 | - name: ec2_private_dns 6 | prompt: "Enter the 
server private DNS name" 7 | private: no 8 | - name: checkpoint_file_name 9 | prompt: "Enter the checkpoint file name" 10 | private: no 11 | 12 | tasks: 13 | - name: Mount NFS share to /mnt 14 | become: yes 15 | ansible.builtin.mount: 16 | path: /mnt 17 | src: '{{ ec2_private_dns }}:/hxdepots/p4/1/checkpoints' 18 | fstype: nfs 19 | opts: ro 20 | state: mounted 21 | 22 | - name: Find the newest file in /mnt 23 | become: yes 24 | become_user: perforce 25 | ansible.builtin.shell: | 26 | cd /mnt 27 | newest_file=$(ls -Art | tail -n 1) 28 | if [ -z "$newest_file" ]; then 29 | echo "No files found in /mnt." 30 | exit 1 31 | else 32 | echo "The newest file in /mnt is: $newest_file" 33 | # Proceed with copying the file to /hxdepots/p4/1/checkpoints 34 | cp -a "/mnt/$newest_file" /hxdepots/p4/1/checkpoints/ 35 | fi 36 | register: find_newest_file 37 | 38 | - name: Copy checkpoint files from /mnt to /hxdepots/p4/1/checkpoints 39 | become: yes 40 | ansible.builtin.command: 41 | cmd: "cp -a /mnt/* /hxdepots/p4/1/checkpoints/" 42 | become_user: perforce 43 | 44 | - name: Create server.id file 45 | become: yes 46 | ansible.builtin.shell: 47 | cmd: "echo p4d_fr_awseuwest1 > /p4/1/root/server.id" 48 | become_user: perforce 49 | 50 | - name: Trust the remote server 51 | become: yes 52 | ansible.builtin.shell: 53 | cmd: "p4 ssl:{{ ec2_private_dns }}:1666 trust" 54 | become_user: perforce 55 | 56 | - name: Login to remote server with admin account 57 | become: yes 58 | ansible.builtin.shell: 59 | cmd: "p4 -p ssl:{{ ec2_private_dns }}:1666 login -a < /p4/common/config/.p4passwd.p4_1.admin" 60 | become_user: perforce 61 | 62 | - name: Login to remote server with service account 63 | become: yes 64 | ansible.builtin.shell: 65 | cmd: "p4 -p ssl:{{ ec2_private_dns }}:1666 login svc_p4d_fr_awseuwest1" 66 | become_user: perforce 67 | 68 | - name: Load the checkpoint using the newest file from /mnt 69 | become: yes 70 | ansible.builtin.shell: | 71 | newest_file=$(ls -Art /mnt | tail -n 1) 72 
| nohup load_checkpoint.sh "/p4/1/checkpoints/$newest_file" -i 1 -y < /dev/null > /dev/null 2>&1 & 73 | become_user: perforce 74 | when: find_newest_file.stdout is defined and find_newest_file.stdout != '' 75 | -------------------------------------------------------------------------------- /assets/jenkins-pipelines/delete_oldest_snapshot.groovy: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | node { 4 | label 'linux' 5 | } 6 | } 7 | parameters { 8 | string(name: 'FSX_VOLUME_ID', defaultValue: '', description: 'FSx volume ID of the volume to delete the oldest snapshot from') 9 | } 10 | stages { 11 | stage('Validate Pipeline') { 12 | steps { 13 | script { 14 | if (env.FSX_VOLUME_ID == null || env.FSX_VOLUME_ID.length() <= 0) { 15 | throw new Exception("FSX_VOLUME_ID parameter/environment variable not set.") 16 | } 17 | } 18 | } 19 | } 20 | stage('Delete') { 21 | steps { 22 | script { 23 | env.source_path = sh(returnStdout: true, script:''' 24 | # Get the latest available snapshot that was created more than 7 days ago 25 | SNAPSHOTID=\$(aws fsx describe-snapshots --filters "Name=volume-id,Values=$FSX_VOLUME_ID" --query "sort_by(Snapshots,&CreationTime)[?CreationTime<='$(date +%Y-%m-%dT23:59:59.999999+23:59 -d "7 days ago")' && Lifecycle == 'AVAILABLE']" --output json | jq -r '.[0].SnapshotId') 26 | 27 | # Delete the snapshot, if found 28 | if [ "$SNAPSHOTID" != "null" ]; then 29 | aws fsx delete-snapshot --snapshot-id $SNAPSHOTID 30 | 31 | # Wait until the snapshot is deleted 32 | i=0 33 | while [ $i -ne 20 ]; do 34 | i=$(($i+1)) 35 | STATUS=$(aws fsx describe-snapshots --snapshot-ids $SNAPSHOTID --output text --query 'Snapshots[0].Lifecycle' || true) 36 | # if the status is no longer 'AVAILABLE' or 'DELETING', break out: 37 | if [ "$STATUS" = "AVAILABLE" ] || [ "$STATUS" = "DELETING" ]; then 38 | sleep 2 39 | else 40 | echo "done deleting snapshot" 41 | break; 42 | fi 43 | done; 44 | else 45 | echo 
"failed to delete snapshot in time" 46 | fi 47 | ''').trim() 48 | } 49 | } 50 | } 51 | } 52 | } -------------------------------------------------------------------------------- /assets/jenkins-pipelines/multiplatform_build.groovy: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent none 3 | 4 | stages { 5 | stage('Prepare') { 6 | stages { 7 | stage('GetTime') { 8 | agent { label 'linux && ubuntu-jammy-22.04 && x86' } 9 | steps { 10 | script { 11 | env.current_time = sh(returnStdout: true, script:''' 12 | echo The current time is \$(date +%s) 13 | ''') 14 | println env.current_time 15 | } 16 | } 17 | } 18 | } 19 | } 20 | stage('Build') { 21 | parallel { 22 | stage('Ubuntu Linux x86_64') { 23 | agent { label 'linux && ubuntu-jammy-22.04 && x86_64' } 24 | options { 25 | timeout(time: 2, unit: 'MINUTES') 26 | } 27 | steps { 28 | sh 'echo Hello from $(uname -a). Context from previous stage: $current_time' 29 | } 30 | } 31 | stage('Ubuntu Linux aarch64') { 32 | agent { label 'linux && ubuntu-jammy-22.04 && aarch64' } 33 | options { 34 | timeout(time: 2, unit: 'MINUTES') 35 | } 36 | steps { 37 | sh 'echo Hello from $(uname -a). Context from previous stage: $current_time' 38 | } 39 | } 40 | stage('Amazon Linux x86_64') { 41 | agent { label 'linux && amazonlinux-2023 && x86_64' } 42 | options { 43 | timeout(time: 2, unit: 'MINUTES') 44 | } 45 | steps { 46 | sh 'echo Hello from $(uname -a). Context from previous stage: $current_time' 47 | } 48 | } 49 | stage('Amazon Linux aarch64') { 50 | agent { label 'linux && amazonlinux-2023 && aarch64' } 51 | options { 52 | timeout(time: 2, unit: 'MINUTES') 53 | } 54 | steps { 55 | sh 'echo Hello from $(uname -a). Context from previous stage: $current_time' 56 | } 57 | } 58 | stage('Windows') { 59 | agent { label 'windows && x86' } 60 | options { 61 | timeout(time: 2, unit: 'MINUTES') 62 | } 63 | steps { 64 | echo 'Hello from Windows. 
Context from previous stage: ' + env.current_time 65 | } 66 | } 67 | } 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/create_swap.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run create_swap.sh script at boot 3 | Before=multi-user.target 4 | 5 | [Service] 6 | Type=oneshot 7 | ExecStart=/bin/bash /opt/create_swap.sh 8 | User=root 9 | Group=root 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/create_swap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | fallocate -l 1G -x /swapfile 3 | chmod 600 /swapfile 4 | mkswap /swapfile 5 | swapon /swapfile 6 | echo "/swapfile none swap sw 0 0" | tee -a /etc/fstab -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/example.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | /***************************** 2 | * Networking Configuration 3 | 4 | If vpc_id and subnet_id are null Packer will attempt to use 5 | the default vpc and subnet for the region. If you do not have 6 | a default VPC in the target region, you'll need to provide a 7 | VPC and a subnet. 8 | *****************************/ 9 | region = "us-west-2" # DEFAULT 10 | vpc_id = "PLACEHOLDER" 11 | subnet_id = "PLACEHOLDER" 12 | associate_public_ip_address = true # DEFAULT 13 | ssh_interface = "public_ip" # DEFAULT 14 | 15 | /***************************** 16 | * Software Configuration 17 | The provided public key will be added to the authorized SSH keys. 18 | Jenkins can then use the associated private key to access instances 19 | provisioned off of this AMI. 
20 | *****************************/ 21 | public_key = "" 22 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/fsx_automounter.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run fsx_automounter script at boot 3 | Before=multi-user.target 4 | 5 | [Service] 6 | Type=oneshot 7 | ExecStart=/opt/fsx_automounter.py 8 | User=root 9 | Group=root 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_common.al2023.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Install common tools on Amazon Linux 2023, architecture-independent. 3 | # These common tools are necessary for Jenkins Agents, and to build/install various other software. 4 | # Core common tools: 5 | # git 6 | # curl 7 | # jq 8 | # unzip 9 | # AWS CLI 10 | # AWS Systems Manager Agent 11 | # Amazon Corretto 12 | # mount.nfs (already installed on Amazon Linux) 13 | # python3 14 | # python3-pip 15 | # python3-requests 16 | # boto3 17 | # botocore 18 | # dos2unix 19 | # clang 20 | # scons 21 | # cmake3 22 | 23 | cloud-init status --wait 24 | echo "Updating packages..." 25 | sudo yum update -y 26 | echo "Installing packages..." 
27 | sudo yum -y groupinstall "Development Tools" 28 | sudo dnf install -y awscli java-11-amazon-corretto-headless java-11-amazon-corretto-devel libarchive libarchive-devel unzip cmake python3 python3-pip python3-requests clang lld git openssl libcurl-devel openssl-devel uuid-devel zlib-devel pulseaudio-libs-devel jq freetype-devel libsndfile-devel python3 jq libX11-devel libXcursor-devel libXinerama-devel mesa-libGL-devel mesa-libGLU-devel libudev-devel libXi-devel libXrandr-devel dos2unix 29 | sudo pip install boto3 botocore scons 30 | if [ "$(uname -p)" == "x86_64" ]; then 31 | sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm 32 | fi 33 | if [ "$(uname -p)" == "aarch64" ]; then 34 | sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_arm64/amazon-ssm-agent.rpm 35 | fi 36 | sudo systemctl enable amazon-ssm-agent 37 | sudo systemctl start amazon-ssm-agent 38 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_common.ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Install common tools on Ubuntu, architecture-independent. 3 | # These common tools are necessary for Jenkins Agents, and to build/install various other software. 
4 | # Core common tools: 5 | # git 6 | # curl 7 | # jq 8 | # unzip 9 | # AWS CLI 10 | # AWS Systems Manager Agent 11 | # Amazon Corretto 12 | # mount.nfs 13 | # python3 14 | # python3-pip 15 | # python3-requests 16 | # python3-botocore 17 | # boto3 18 | # dos2unix 19 | # clang 20 | # scons 21 | # cmake3 22 | 23 | cloud-init status --wait 24 | wget -O - https://apt.corretto.aws/corretto.key | sudo gpg --dearmor -o /usr/share/keyrings/corretto-keyring.gpg && \ 25 | echo "deb [signed-by=/usr/share/keyrings/corretto-keyring.gpg] https://apt.corretto.aws stable main" | sudo tee /etc/apt/sources.list.d/corretto.list 26 | echo "Updating apt.." 27 | sudo apt-get -o DPkg::Lock::Timeout=180 update -y 28 | echo "Installing packages..." 29 | sudo apt-get -o DPkg::Lock::Timeout=180 install -y nfs-common libarchive-tools unzip cmake build-essential python3 python3-pip python3-requests python3-botocore clang lld git openssl libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev scons jq libsdl2-mixer-dev libsdl2-image-dev libsdl2-dev libfreetype-dev libsndfile1-dev libopenal-dev python3 jq libx11-dev libxcursor-dev libxinerama-dev libgl1-mesa-dev libglu-dev libasound2-dev libudev-dev libxi-dev libxrandr-dev java-11-amazon-corretto-jdk dos2unix 30 | sudo pip install boto3 31 | echo "Installing AWS cli..." 32 | curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" 33 | unzip awscliv2.zip 34 | sudo ./aws/install 35 | sudo snap install amazon-ssm-agent --classic -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_mold.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Install the mold linker on Linux (any OS), architecture-independent 3 | # Requires common tools to be installed first. 4 | echo "Installing mold..." 
5 | curl -s -L https://github.com/rui314/mold/releases/download/v2.31.0/mold-2.31.0-$(uname -m)-linux.tar.gz | sudo tar -xvzf - --strip-components=1 -C /usr 6 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_octobuild.al2023.x86_64.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Compile and octobuild on Amazon Linux 2023, x86_64 only 3 | # (not tested on aarch64 at the moment) 4 | # Requires common tools to be installed first. 5 | # Will install Rust and cargo as well 6 | sudo yum update -y 7 | echo "Installing Rust..." 8 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 9 | . "$HOME/.cargo/env" 10 | cd $(mktemp -d) 11 | git clone https://github.com/octobuild/octobuild.git . 12 | cargo install --path . -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_octobuild.ubuntu.x86_64.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Install octobuild on Ubuntu, x86_64 only 3 | # (octobuild does not seem to have packages available for aarch64 at the moment) 4 | # Requires common tools to be installed first. 
5 | sudo apt-get -o DPkg::Lock::Timeout=180 update -y 6 | sudo NEEDRESTART_MODE=a DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=180 install -y apt-transport-https 7 | curl -1sLf 'https://dl.cloudsmith.io/public/octobuild/octobuild/setup.deb.sh' | sudo -E bash 8 | sudo NEEDRESTART_MODE=a DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=180 install -y octobuild 9 | sudo mkdir -p /etc/octobuild 10 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/install_sccache.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Install sccache on Linux (any OS), architecture-independent 3 | # Requires common tools to be installed first. 4 | # This script does not set up a service or anything to automatically start it! 5 | cd $(mktemp -d) 6 | curl -s -L "https://github.com/mozilla/sccache/releases/download/v0.5.3/sccache-v0.5.3-$(uname -m)-unknown-linux-musl.tar.gz" | tar xvzf - 7 | sudo cp sccache*/sccache /usr/bin/ 8 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/mount_ephemeral.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run mount_ephemeral.sh script at boot 3 | Before=multi-user.target 4 | 5 | [Service] 6 | Type=oneshot 7 | ExecStart=/bin/bash /opt/mount_ephemeral.sh 8 | User=root 9 | Group=root 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/mount_ephemeral.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if lsblk /dev/nvme1n1 ; then 4 | echo "/dev/nvme1n1 exists" 5 | if [ $(lsblk --json -fs /dev/nvme1n1 | jq -r ".blockdevices[0].fstype") != "xfs" ] ; then 6 | echo "/dev/nvme1n1 is NOT xfs - 
formatting..." 7 | mkfs -t xfs /dev/nvme1n1 8 | fi 9 | 10 | if [ $(lsblk --json /dev/nvme1n1 | jq -r "[.blockdevices[0].mountpoints[] | select(. != null)] | length") -eq "0" ] ; then 11 | echo "/dev/nvme1n1 is not mounted - mounting..." 12 | mount /dev/nvme1n1 /tmp 13 | chmod 777 /tmp 14 | fi 15 | fi 16 | -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/octobuild.conf: -------------------------------------------------------------------------------- 1 | cache: /mnt/fsx_cache/octobuild_cache 2 | cache_limit_mb: 128000 -------------------------------------------------------------------------------- /assets/packer/build-agents/linux/sccache.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sccache server 3 | Wants=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | Environment=SCCACHE_IDLE_TIMEOUT=0 8 | Environment=SCCACHE_NO_DAEMON=1 9 | Environment=SCCACHE_START_SERVER=1 10 | Environment=SCCACHE_LOG=debug 11 | Environment=SCCACHE_DIR=/mnt/fsx_cache/sccache 12 | ExecStart=/usr/bin/sccache 13 | 14 | [Install] 15 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /assets/packer/build-agents/windows/README.md: -------------------------------------------------------------------------------- 1 | # Packer Templates for Unreal Engine Windows Build Agents 2 | 3 | The following template builds a Windows based AMI capable of Unreal Engine 5.4 compilation jobs. Please customize it to your needs. 4 | 5 | ## Usage 6 | 7 | This Amazon Machine Image is provisioned using the Windows Server 2022 base operating system. It installs all required tooling for Unreal Engine 5 compilation by default. 
Please consult [the release notes for Unreal Engine 5.4](https://dev.epicgames.com/documentation/en-us/unreal-engine/unreal-engine-5.4-release-notes#platformsdkupgrades) for details on what tools are used for compiling this version of the engine. 8 | 9 | The only required variable for building this Amazon Machine Image is a public SSH key. 10 | 11 | ``` bash 12 | packer build windows.pkr.hcl \ 13 | -var "public_key=" 14 | ``` 15 | 16 | ???+ Note 17 | The above command assumes you are running `packer` from the `/assets/packer/build-agents/windows` directory. 18 | 19 | You will then want to upload the private SSH key to AWS Secrets Manager so that the Jenkins orchestration service can use it to connect to this build agent. 20 | 21 | ``` bash 22 | aws secretsmanager create-secret \ 23 | --name JenkinsPrivateSSHKey \ 24 | --description "Private SSH key for Jenkins build agent access." \ 25 | --secret-string "" \ 26 | --tags 'Key=jenkins:credentials:type,Value=sshUserPrivateKey' 'Key=jenkins:credentials:username,Value=jenkins' 27 | ``` 28 | 29 | Take note of the output of this CLI command. You will need the ARN later. 30 | 31 | Currently this AMI is designed to work with our Jenkins module. This is why it creates a `jenkins` user and the associated SSH username for the key you upload is that same `jenkins` user. 
[Expanded customization of this AMI is currently on the Cloud Game Development Toolkit roadmap.](https://github.com/orgs/aws-games/projects/1/views/1?pane=issue&itemId=74515666) 32 | 33 | ## Installed Tooling 34 | 35 | - Chocolatey package manager 36 | - OpenJDK used by Jenkins agents 37 | - Git 38 | - OpenSSH 39 | - Python3 40 | - Botocore 41 | - Boto3 42 | - Client for Network File System (NFS) 43 | - Windows Development Kit and Debugging Tools 44 | - Visual Studio 2022 Build Tools 45 | - VCTools Workload; Include Recommended 46 | - ManagedDesktopBuild Tools; Include Recommended 47 | - MSVC v143 - VS 2022 C++ x64/x86 build tools 48 | - Microsoft.Net.Component.4.6.2TargetingPack 49 | 50 | Consult the [Visual Studio Build Tools component directory](https://learn.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2022) for details. 51 | -------------------------------------------------------------------------------- /assets/packer/build-agents/windows/base_setup.ps1: -------------------------------------------------------------------------------- 1 | function Write($message) { 2 | Write-Output $message 3 | } 4 | 5 | try { 6 | # Download Chocolatey 7 | Write "Installing Chocolatey" 8 | $chocInstall = (New-Object System.Net.WebClient).DownloadString("https://chocolatey.org/install.ps1") 9 | Out-File -FilePath ./chocInstall.ps1 -InputObject $chocInstall 10 | powershell.exe -File ./chocInstall.ps1 11 | $env:path = "$env:path;C:\ProgramData\Chocolatey\bin" 12 | Import-Module C:\ProgramData\chocolatey\helpers\chocolateyInstaller.psm1 13 | } 14 | catch { 15 | Write "Failed to install Chocolatey" 16 | } 17 | 18 | try { 19 | # Java Runtime for Jenkins 20 | Write "Installing Git" 21 | choco install -y --no-progress git 22 | } 23 | catch { 24 | Write "Failed to install Git" 25 | } 26 | 27 | try { 28 | # Installing OpenSSH Server 29 | Write "Installing OpenSSH and setting service" 30 | Add-WindowsCapability -Online -Name 
OpenSSH.Server~~~~0.0.1.0 31 | Set-Service -Name sshd -StartupType 'Automatic' 32 | } 33 | catch { 34 | Write "Failed to install OpenSSH" 35 | } 36 | 37 | try { 38 | # Installing Client for NFS 39 | Write "Installing Client for NFS" 40 | Install-WindowsFeature NFS-Client 41 | } 42 | catch { 43 | Write "Failed to install Client for NFS" 44 | } 45 | 46 | try { 47 | # Python 48 | Write "Installing Python, botocore, boto3" 49 | choco install -y --no-progress python 50 | refreshenv 51 | pip install botocore boto3 52 | } 53 | catch { 54 | Write "Failed to install Python, botocore, boto3" 55 | } 56 | 57 | try { 58 | Write Get-Disk | Where-Object partitionstyle -EQ 'raw' 59 | Get-Disk | Where-Object partitionstyle -EQ \"raw\" | Initialize-Disk -PartitionStyle GPT -PassThru | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem NTFS -NewFileSystemLabel \"Data Drive\" -Confirm:$false 60 | } 61 | catch { 62 | Write "Failed to mount drives" 63 | } 64 | RefreshEnv 65 | -------------------------------------------------------------------------------- /assets/packer/build-agents/windows/example.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | /***************************** 2 | * Networking Configuration 3 | 4 | If vpc_id and subnet_id are null Packer will attempt to use 5 | the default vpc and subnet for the region. If you do not have 6 | a default VPC in the target region, you'll need to provide a 7 | VPC and a subnet. 8 | *****************************/ 9 | region = "us-west-2" # DEFAULT 10 | vpc_id = "PLACEHOLDER" 11 | subnet_id = "PLACEHOLDER" 12 | associate_public_ip_address = true # DEFAULT 13 | ssh_interface = "public_ip" # DEFAULT 14 | 15 | /***************************** 16 | * Instance Configuration 17 | 18 | The instance_type and root_volume_size allow you to configure 19 | the defaults for your AMI. 
If you are installing a significant 20 | amount of software and tooling onto your instance we recommend 21 | expanding the root_volume_size accordingly. 22 | 23 | For reference, when building Unreal Engine 5.4, we expand the 24 | root_volume_size to 256 to accomodate the Visual Studio Build 25 | Tools. We then mount an external volume or filesystem to instances 26 | to store the Unreal Engine content. 27 | *****************************/ 28 | instance_type = "c6a.4xlarge" # DEFAULT 29 | root_volume_size = 256 # DEFAULT 30 | 31 | /***************************** 32 | * Software Configuration 33 | 34 | The install_vs_tools variable will provison the AMI with 35 | the Visual Studio Build Tools, workloads and components required 36 | for Unreal Engine 5.4 builds. 37 | 38 | The setup_jenkins_agent variable will create a Jenkins user on the 39 | AMI, and install the latest version of Java for usage with Jenkins. 40 | 41 | The provided public key will be added to the authorized SSH keys. 42 | Jenkins can then use the associated private key to access instances 43 | provisioned off of this AMI. 44 | *****************************/ 45 | setup_jenkins_agent = true 46 | install_vs_tools = true 47 | public_key = "" 48 | -------------------------------------------------------------------------------- /assets/packer/build-agents/windows/install_vs_tools.ps1: -------------------------------------------------------------------------------- 1 | function Write($message) { 2 | Write-Output $message 3 | } 4 | 5 | try { 6 | # Downloads Debugging Tools for Windows 7 | # This is required for the PDBCOPY.exe which is not available through vs_installer 8 | # Ref: https://forums.unrealengine.com/t/installed-build-fails-trying-to-run-pdbcopy-exe/88759/19 9 | Write "Installing Debugging Tools for Windows..." 
10 | 11 | $WDK_DOWNLOAD_LINK = "https://go.microsoft.com/fwlink/?linkid=2249371" 12 | $WDK_DESTINATION = "C:\\Users\\Administrator\\Downloads\\wdksetup.exe" 13 | 14 | Invoke-WebRequest -Uri $WDK_DOWNLOAD_LINK -OutFile $WDK_DESTINATION 15 | Start-Process -FilePath $WDK_DESTINATION -ArgumentList "/q" -Wait -PassThru 16 | 17 | Write "Windows Development Kit Installed successfully." 18 | } 19 | catch { 20 | Write "Debugging Tools for Windows installation failed." 21 | } 22 | finally { 23 | Remove-Item -Path $WDK_DESTINATION 24 | } 25 | 26 | try { 27 | Write "Installing Visual Studio 2022 Build Tools" 28 | choco install -y --no-progress visualstudio2022buildtools --package-parameters " --passive --locale en-US --add Microsoft.VisualStudio.Workload.VCTools;includeRecommended --add Microsoft.VisualStudio.Workload.ManagedDesktopBuildTools;includeRecommended --add Microsoft.VisualStudio.Component.VC.14.38.17.8.x86.x64 --add Microsoft.Net.Component.4.6.2.TargetingPack" 29 | 30 | } 31 | catch { 32 | Write "Failed to install Visual Studio 2022 Build Tools" 33 | } -------------------------------------------------------------------------------- /assets/packer/build-agents/windows/setup_jenkins_agent.ps1: -------------------------------------------------------------------------------- 1 | function Write($message) { 2 | Write-Output $message 3 | } 4 | 5 | try { 6 | # Create Jenkins user and add to administrative group 7 | Write "Creating Jenkins User" 8 | New-LocalUser -Name "jenkins" -AccountNeverExpires -Description "jenkins" -NoPassword 9 | Add-LocalGroupMember -Group "Administrators" -Member "jenkins" 10 | } 11 | catch { 12 | Write "Failed to create jenkins user" 13 | } 14 | 15 | try { 16 | # Java Runtime for Jenkins 17 | Write "Installing Java for Jenkins agents" 18 | choco install -y --no-progress openjdk 19 | } 20 | catch { 21 | Write "Failed to install java sdk" 22 | } 23 | -------------------------------------------------------------------------------- 
/assets/packer/build-agents/windows/userdata.ps1: -------------------------------------------------------------------------------- 1 | 2 | 3 | Write-Output "Running User Data Script" 4 | 5 | Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore 6 | 7 | # Don't set this before Set-ExecutionPolicy as it throws an error 8 | $ErrorActionPreference = "stop" 9 | 10 | # Remove HTTP listener 11 | Remove-Item -Path WSMan:\Localhost\listener\listener* -Recurse 12 | 13 | Set-Item WSMan:\localhost\MaxTimeoutms 1800000 14 | Set-Item WSMan:\localhost\Service\Auth\Basic $true 15 | 16 | $Cert = New-SelfSignedCertificate -CertstoreLocation Cert:\LocalMachine\My -DnsName "packer" 17 | New-Item -Path WSMan:\LocalHost\Listener -Transport HTTPS -Address * -CertificateThumbPrint $Cert.Thumbprint -Force 18 | 19 | # WinRM 20 | Write-Output "Setting up WinRM" 21 | 22 | cmd.exe /c winrm quickconfig -q 23 | cmd.exe /c winrm set "winrm/config" '@{MaxTimeoutms="1800000"}' 24 | cmd.exe /c winrm set "winrm/config/winrs" '@{MaxMemoryPerShellMB="1024"}' 25 | cmd.exe /c winrm set "winrm/config/service" '@{AllowUnencrypted="true"}' 26 | cmd.exe /c winrm set "winrm/config/client" '@{AllowUnencrypted="true"}' 27 | cmd.exe /c winrm set "winrm/config/service/auth" '@{Basic="true"}' 28 | cmd.exe /c winrm set "winrm/config/client/auth" '@{Basic="true"}' 29 | cmd.exe /c winrm set "winrm/config/service/auth" '@{CredSSP="true"}' 30 | cmd.exe /c winrm set "winrm/config/listener?Address=*+Transport=HTTPS" "@{Port=`"5986`";Hostname=`"packer`";CertificateThumbprint=`"$($Cert.Thumbprint)`"}" 31 | cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes 32 | cmd.exe /c netsh firewall add portopening TCP 5986 "Port 5986" 33 | cmd.exe /c net stop winrm 34 | cmd.exe /c sc config winrm start= auto 35 | cmd.exe /c net start winrm 36 | 37 | 38 | -------------------------------------------------------------------------------- 
/assets/packer/perforce/p4-server/example.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | region = "" 2 | vpc_id = "" 3 | # You will need access to this subnet from the provisioning machine 4 | subnet_id = "" 5 | profile = "DEFAULT" -------------------------------------------------------------------------------- /assets/packer/perforce/p4-server/perforce_arm64.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | amazon = { 4 | version = ">= 0.0.2" 5 | source = "github.com/hashicorp/amazon" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | timestamp = regex_replace(timestamp(), "[- TZ:]", "") 12 | ami_prefix = "p4_al2023" 13 | } 14 | 15 | variable "region" { 16 | type = string 17 | default = null 18 | } 19 | 20 | variable "vpc_id" { 21 | type = string 22 | default = null 23 | } 24 | 25 | variable "subnet_id" { 26 | type = string 27 | default = null 28 | } 29 | 30 | variable "associate_public_ip_address" { 31 | type = bool 32 | default = true 33 | } 34 | 35 | variable "ssh_interface" { 36 | type = string 37 | default = "public_ip" 38 | } 39 | 40 | source "amazon-ebs" "al2023" { 41 | region = var.region 42 | ami_name = "${local.ami_prefix}-${local.timestamp}" 43 | instance_type = "t4g.medium" 44 | 45 | vpc_id = var.vpc_id 46 | subnet_id = var.subnet_id 47 | 48 | associate_public_ip_address = var.associate_public_ip_address 49 | ssh_interface = var.ssh_interface 50 | 51 | source_ami_filter { 52 | filters = { 53 | name = "al2023-ami-2023.*" 54 | architecture = "arm64" 55 | root-device-type = "ebs" 56 | virtualization-type = "hvm" 57 | } 58 | most_recent = true 59 | owners = ["amazon"] 60 | } 61 | 62 | ssh_username = "ec2-user" 63 | } 64 | 65 | build { 66 | name = "P4_SDP_AWS" 67 | sources = [ 68 | "source.amazon-ebs.al2023" 69 | ] 70 | 71 | provisioner "shell" { 72 | inline = [ 73 | "cloud-init status --wait", 74 | "sudo dnf install -y git sendmail 
nfs-utils s-nail unzip cronie" 75 | ] 76 | } 77 | 78 | provisioner "shell" { 79 | script = "${path.root}/p4_setup.sh" 80 | execute_command = "sudo sh {{.Path}}" 81 | } 82 | 83 | provisioner "file" { 84 | source = "${path.root}/p4_configure.sh" 85 | destination = "/tmp/p4_configure.sh" 86 | } 87 | 88 | provisioner "shell" { 89 | inline = ["mkdir -p /home/ec2-user/gpic_scripts", 90 | "sudo mv /tmp/p4_configure.sh /home/ec2-user/gpic_scripts" 91 | ] 92 | } 93 | 94 | provisioner "shell" { 95 | inline = ["sudo chmod +x /home/ec2-user/gpic_scripts/p4_configure.sh"] 96 | } 97 | 98 | } 99 | -------------------------------------------------------------------------------- /assets/packer/perforce/p4-server/perforce_x86.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | amazon = { 4 | version = ">= 0.0.2" 5 | source = "github.com/hashicorp/amazon" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | timestamp = regex_replace(timestamp(), "[- TZ:]", "") 12 | ami_prefix = "p4_al2023" 13 | } 14 | 15 | variable "region" { 16 | type = string 17 | default = null 18 | } 19 | 20 | variable "vpc_id" { 21 | type = string 22 | default = null 23 | } 24 | 25 | variable "subnet_id" { 26 | type = string 27 | default = null 28 | } 29 | 30 | variable "associate_public_ip_address" { 31 | type = bool 32 | default = true 33 | } 34 | 35 | variable "ssh_interface" { 36 | type = string 37 | default = "public_ip" 38 | } 39 | 40 | source "amazon-ebs" "al2023" { 41 | region = var.region 42 | ami_name = "${local.ami_prefix}-${local.timestamp}" 43 | instance_type = "t3.medium" 44 | 45 | vpc_id = var.vpc_id 46 | subnet_id = var.subnet_id 47 | 48 | associate_public_ip_address = var.associate_public_ip_address 49 | ssh_interface = var.ssh_interface 50 | 51 | source_ami_filter { 52 | filters = { 53 | name = "al2023-ami-2023.*" 54 | architecture = "x86_64" 55 | root-device-type = "ebs" 56 | virtualization-type = "hvm" 57 | } 58 | most_recent = 
true 59 | owners = ["amazon"] 60 | } 61 | 62 | ssh_username = "ec2-user" 63 | } 64 | 65 | build { 66 | name = "P4_SDP_AWS" 67 | sources = [ 68 | "source.amazon-ebs.al2023" 69 | ] 70 | 71 | provisioner "shell" { 72 | inline = [ 73 | "cloud-init status --wait", 74 | "sudo dnf install -y git sendmail nfs-utils s-nail unzip cronie" 75 | ] 76 | } 77 | 78 | provisioner "shell" { 79 | script = "${path.root}/p4_setup.sh" 80 | execute_command = "sudo sh {{.Path}}" 81 | } 82 | 83 | provisioner "file" { 84 | source = "${path.root}/p4_configure.sh" 85 | destination = "/tmp/p4_configure.sh" 86 | } 87 | 88 | provisioner "shell" { 89 | inline = ["mkdir -p /home/ec2-user/gpic_scripts", 90 | "sudo mv /tmp/p4_configure.sh /home/ec2-user/gpic_scripts" 91 | ] 92 | } 93 | 94 | provisioner "shell" { 95 | inline = ["sudo chmod +x /home/ec2-user/gpic_scripts/p4_configure.sh"] 96 | } 97 | 98 | } 99 | -------------------------------------------------------------------------------- /docs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM squidfunk/mkdocs-material:9.6.14 2 | COPY requirements.txt ./ 3 | ARG VERSION ALIAS 4 | ENV VERSION=$VERSION ALIAS=$ALIAS 5 | RUN pip install -U -r requirements.txt 6 | WORKDIR /docs -------------------------------------------------------------------------------- /docs/assets/dockerfiles.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Dockerfiles 3 | description: Re-usable Dockerfile assets for game development on AWS 4 | --- 5 | 6 | # Dockerfiles 7 | 8 | Coming soon. 
9 | -------------------------------------------------------------------------------- /docs/assets/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Assets 3 | description: Re-usable assets for game development on AWS 4 | --- 5 | 6 | # Assets 7 | 8 | **Assets** are reusable scripts, pipeline definitions, Dockerfiles, Packer templates, and other resources that might prove useful or are dependencies of any of the modules. 9 | 10 | !!! info 11 | **Don't see an asset listed?** Create a [feature request](https://github.com/aws-games/cloud-game-development-toolkit/issues/new?assignees=&labels=feature-request&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE) for a new asset or learn [how to contribute new assets to the project](../../CONTRIBUTING.md) 12 | 13 | | Asset Type | Description | 14 | | :--------------------------------------------------------------- | :- | 15 | | [:simple-packer: __Packer Templates__](./packer/index.md) | Packer templates provide an easy way to build machine images for commonly used game dev infrastructure. Currently the project includes Packer templates for UE5 build agents for Linux and Windows, as well as a Packer template for building a Perforce Helix Core version control AMI. | 16 | | [:simple-jenkins: __Jenkins Pipelines__](../../assets/jenkins-pipelines/README.md) | Jenkins Pipelines for common game dev automation workflows | 17 | | [:simple-ansible: __Ansible Playbooks__](../../assets/ansible-playbooks/perforce/p4-server/README.md) | Automation scripts for reusable system level configurations. Unlike Packer templates, you can use these to add new functionality to existing EC2 instances. | 18 | | [:simple-docker: __Dockerfiles (Coming Soon!)__](./dockerfiles.md) | Dockerfiles for creating Docker images of commonly used game dev infrastructure. 
These are primarily used in scenarios where there aren't openly available pre-built images that address a use case, or significant customization is needed that warrants building an image | 19 | -------------------------------------------------------------------------------- /docs/assets/packer/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Packer Templates 3 | description: Packer Templates for game development on AWS 4 | --- 5 | 6 | # Packer Templates 7 | 8 | [Packer](https://www.packer.io/) is a tool for simplifying and automating Amazon Machine Image (AMI) creation with code. It enables developers to create identical images for multiple platforms. The Packer templates provided in the Cloud Game Development Toolkit can be used to provision EC2 instances with common development tools preinstalled. 9 | 10 | !!! info 11 | **Don't see a Packer template that solves your needs?** Create a [feature request](https://github.com/aws-games/cloud-game-development-toolkit/issues/new?assignees=&labels=feature-request&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE) for a new template or learn [how to contribute new assets to the project](../../../CONTRIBUTING.md) 12 | 13 | | Template | Description | 14 | | :--------------------------------------------------------------- | :- | 15 | | [:simple-linux: __Linux Build Agents__](../../../assets/packer/build-agents/linux/README.md) | Provision C++ compilation machines on Amazon Linux 2023 and Ubuntu machines on both x86 and ARM based architectures with useful tools like compiler caches such as [Octobuild](https://github.com/octobuild/octobuild) preinstalled.| 16 | | [:material-microsoft-windows-classic: __Windows Build Agents__](../../../assets/packer/build-agents/windows/README.md) | Create Windows 2022 based instances capable of Unreal Engine compilation out of the box. 
| 17 | | [:simple-perforce: __P4 Server (formerly Helix Core)__](../../../assets/packer/perforce/p4-server/README.md) | An Amazon Machine Image used for provisioning P4 Server on AWS. This AMI is required for deployment of the [Perforce module](../../../modules/perforce/README.md) | 18 | -------------------------------------------------------------------------------- /docs/changelog.md: -------------------------------------------------------------------------------- 1 | --8<-- "CHANGELOG.md" 2 | -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | --8<-- "CONTRIBUTING.md" 2 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | Welcome to the **Cloud Game Development Toolkit 4 | **. There are a number of ways to use this repository depending on your development needs. This guide will introduce some of the key features of the project, and provide detailed instructions for deploying your game studio on AWS. 5 | 6 | ## Introduction to Repository Structure 7 | 8 | ### Assets 9 | 10 | An _asset_ is a singular template, script, or automation document that may prove useful in isolation. Currently, the **Toolkit** contains three types of 11 | _assets_: [Ansible playbooks](./assets/ansible-playbooks/perforce/p4-server/README.md), [Jenkins pipelines](./assets/jenkins-pipelines/README.md), and [Packer templates](./docs/assets/packer/index.md). Each of these 12 | _assets_ can be used in isolation. 13 | 14 | For more information about _assets_, consult the [detailed documentation](./docs/assets/index.md). 
15 | 16 | ### Modules 17 | 18 | A 19 | _module_ is a reusable [Terraform](https://www.terraform.io/) configuration encapsulating all of the resources needed to deploy a particular workload on AWS. These modules are highly configurable through variables, and provide necessary outputs for building interconnected architectures. We recommend reviewing the [Terraform module documentation](https://developer.hashicorp.com/terraform/language/modules) if you are unfamiliar with this concept. Modules are designed for you to depend on in your own Terraform modules, and we don't expect you to have to make any modifications to them; that said, if a module doesn't meet your needs, please raise an issue! 20 | 21 | For more information about _modules_, consult the [detailed documentation](./docs/modules/index.md). 22 | 23 | ### Samples 24 | 25 | A _sample_ is a complete reference architecture that stitches together [modules](./docs/modules/index.md) and first-party AWS services. A 26 | _sample_ is deployed with Terraform, and is the best way to get started with the **Cloud Game Development Toolkit 27 | **. Samples are designed for you to copy from and modify as needed to suit your architecture and needs. 28 | 29 | > **Note:** 30 | > Because samples may deploy resources that have unique name constraints, we cannot guarantee that two different samples can be deployed into the same AWS account without modifying either of the samples to integrate shared infrastructure or resolve conflicts. If you're interested in using functionality from multiple samples, we recommend that you use them as reference material to base your own infrastructure off of. 31 | 32 | For more information about _samples_, consult the [detailed documentation](./samples/README.md). 33 | 34 | If you're new to the project, we recommend starting by deploying one of the samples, such as the [Simple Build Pipeline](../samples/simple-build-pipeline/README.md). 
35 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | template: index.html 3 | title: Cloud Game Development Toolkit 4 | hide: [navigation] 5 | hero: 6 | title: Tools and best practices for deploying game development infrastructure on AWS 7 | subtitle: A collection of modular templates and configurations, built by AWS for the game development community. 8 | getting_started_button: Getting Started 9 | source_button: Source Code 10 | features: 11 | - title: Assets 12 | #image: media/images/assets.svg 13 | link: assets/index.html 14 | description: Foundational resources such as image templates, configurations scripts, and CI/CD pipeline definitions for game development. 15 | - title: Modules 16 | #image: media/images/modules.svg 17 | link: modules/index.html 18 | description: Terraform Modules for deploying common game dev workloads with best-practices by default. 19 | - title: Samples 20 | #image: media/images/samples.svg 21 | link: samples/index.html 22 | description: Opinionated reference implementations to address common use cases for expedited game studio setup and battle-tested scenarios from the community. 23 | companies: 24 | title: 25 | list: 26 | --- 27 | 28 | # Welcome to the Cloud Game Development Toolkit 29 | 30 | !!! info 31 | **This project is under active development and community contributions are welcomed!**. If you would like to see something in this repository please create a feature request in the Issues tab. If you'd like to contribute, raise a pull request. You'll find our contribution guidelines [here](https://aws-games.github.io/cloud-game-development-toolkit/latest/contributing.html). 32 | 33 | The **Cloud Game Development Toolkit (a.k.a. CGD Toolkit)** is a collection of templates and configurations for deploying game development infrastructure and tools on AWS. 
Below are the key tenets driving the project's focus:
48 | -------------------------------------------------------------------------------- /docs/media/diagrams/perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/diagrams/perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png -------------------------------------------------------------------------------- /docs/media/images/access-jenkins.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/access-jenkins.png -------------------------------------------------------------------------------- /docs/media/images/helix-auth-service-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/helix-auth-service-architecture.png -------------------------------------------------------------------------------- /docs/media/images/helix-core-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/helix-core-architecture.png -------------------------------------------------------------------------------- /docs/media/images/helix-swarm-architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/helix-swarm-architecture.jpg 
-------------------------------------------------------------------------------- /docs/media/images/helix-swarm-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/helix-swarm-architecture.png -------------------------------------------------------------------------------- /docs/media/images/jenkins-admin-password.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/jenkins-admin-password.png -------------------------------------------------------------------------------- /docs/media/images/jenkins-module-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/jenkins-module-architecture.png -------------------------------------------------------------------------------- /docs/media/images/perforce-complete-example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/perforce-complete-example.jpg -------------------------------------------------------------------------------- /docs/media/images/perforce-complete-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/perforce-complete-example.png -------------------------------------------------------------------------------- 
/docs/media/images/teamcity-server-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/teamcity-server-architecture.png -------------------------------------------------------------------------------- /docs/media/images/unreal-cloud-ddc-infra.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/unreal-cloud-ddc-infra.png -------------------------------------------------------------------------------- /docs/media/images/unreal-cloud-ddc-single-region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/unreal-cloud-ddc-single-region.png -------------------------------------------------------------------------------- /docs/media/images/unreal-engine-horde-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/images/unreal-engine-horde-architecture.png -------------------------------------------------------------------------------- /docs/media/logos/aws-logo-white.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 9 | 10 | 31 | 32 | 34 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /docs/media/logos/favicon.ico: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/docs/media/logos/favicon.ico -------------------------------------------------------------------------------- /docs/overrides/index.html: -------------------------------------------------------------------------------- 1 | {% extends "main.html" %} 2 | 3 | {% block hero %} 4 | 5 | 6 | 7 | 8 | 14 | 15 | 16 | {{ page.meta.hero.title }} 17 | {{ page.meta.hero.subtitle }} 18 | 23 | {{ page.meta.hero.getting_started_button }} 24 | 25 | {{ page.meta.hero.source_button }} 30 | 31 | 32 | 33 | 34 | 35 | {% endblock %} 36 | 37 | {% block content %} 38 | 39 | {% for feature in page.meta.features %} 40 | 41 | 42 | 43 | {{ feature.title }} 44 | 45 | {{ feature.description }} 46 | 47 | {% endfor %} 48 | 49 | {{ super() }} 50 | {% endblock %} 51 | -------------------------------------------------------------------------------- /docs/overrides/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block outdated %} 4 | You're not viewing the latest version. 5 | 6 | Click here to go to latest. 
7 | 8 | {% endblock %} 9 | 10 | 11 | {% block extrahead %} 12 | 13 | 14 | {% set title = config.site_name %} 15 | 16 | {% if page and page.title and not page.is_homepage %} 17 | {% set title = config.site_name ~ " - " ~ page.title | striptags %} 18 | {% endif %} 19 | 20 | {% endblock %} 21 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs>=1.4.0 2 | mkdocs-material>=9.0.0 3 | pymdown-extensions>=9.0 4 | mike==2.1.3 5 | mkdocs-git-revision-date-plugin>=0.3.2 6 | mkdocs-open-in-new-tab>=1.0.2 7 | mkdocs-same-dir>=0.1.3 8 | mkdocs-redirects>=v1.2.2 9 | -------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | --8<-- "SECURITY.md" 2 | -------------------------------------------------------------------------------- /docs/stylesheets/companies.css: -------------------------------------------------------------------------------- 1 | @keyframes slider { 2 | 0% { 3 | transform: translateX(calc(100% - 100vw)); 4 | } 5 | 50% { 6 | transform: translateX(0); 7 | } 8 | 100% { 9 | transform: translateX(calc(100% - 100vw)); 10 | } 11 | } 12 | 13 | .companies { 14 | display: flex; 15 | flex-direction: row-reverse; 16 | white-space: nowrap; 17 | overflow: hidden; 18 | margin: 2rem auto; 19 | } 20 | 21 | .companies-title { 22 | text-transform: uppercase; 23 | } 24 | 25 | .companies-slider { 26 | white-space: nowrap; 27 | animation: 150s linear infinite slider; 28 | } 29 | 30 | .companies .company { 31 | height: 3rem; 32 | opacity: 0.6; 33 | margin: 0 1rem; 34 | filter: grayscale() brightness(25%) invert(0); 35 | } 36 | 37 | .companies .company:hover { 38 | opacity: 1; 39 | } 40 | 41 | [data-md-color-scheme="slate"] .companies .company { 42 | filter: grayscale() brightness(25%) invert(1); 43 | } 44 | 45 | 
.companies .company img { 46 | height: 100%; 47 | max-height: 2.5em; 48 | } 49 | -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | .md-grid { 2 | max-width: 90vw 3 | } 4 | 5 | .md-typeset table td:first-child { 6 | white-space: nowrap; 7 | } 8 | 9 | .video-wrapper { 10 | position: relative; 11 | padding-bottom: 56.25%; /* 16:9 aspect ratio */ 12 | height: 0; 13 | overflow: hidden; 14 | max-width: 100%; 15 | } 16 | 17 | .video-wrapper iframe { 18 | position: absolute; 19 | top: 0; 20 | left: 0; 21 | width: 100%; 22 | height: 100%; 23 | } 24 | -------------------------------------------------------------------------------- /docs/stylesheets/hero.css: -------------------------------------------------------------------------------- 1 | [data-md-color-scheme="default"] .hero-container { 2 | padding: 4rem 0; 3 | background: linear-gradient( 4 | 180deg, 5 | var(--md-primary-fg-color), 6 | var(--mdl-accent-fg-color) 99%, 7 | var(--md-default-bg-color) 0 8 | ); 9 | background-repeat: no-repeat; 10 | background-position: bottom; 11 | background-size: contain; 12 | } 13 | 14 | [data-md-color-scheme="slate"] .hero-container { 15 | padding: 4rem 0; 16 | background: linear-gradient( 17 | 180deg, 18 | var(--md-primary-fg-color), 19 | var(--md-accent-fg-color) 99%, 20 | var(--md-default-bg-color) 0 21 | ); 22 | background-repeat: no-repeat; 23 | background-position: bottom; 24 | background-size: contain; 25 | } 26 | 27 | .mdx-hero { 28 | margin-bottom: 4rem; 29 | } 30 | 31 | .mdx-hero .md-button { 32 | margin-top: 0.5rem; 33 | margin-right: 0.5rem; 34 | } 35 | 36 | .mdx-hero .md-button--primary { 37 | color: var(--md-primary-fg-color--light); 38 | background-color: var(--md-accent-fg-color); 39 | border-color: var(--md-accent-fg-color); 40 | } 41 | 42 | .mdx-hero .md-button--primary:hover { 43 | color: var(--md-accent-fg-color); 44 | 
background-color: transparent; 45 | border-color: var(--md-accent-fg-color); 46 | } 47 | 48 | .mdx-hero__image { 49 | display: flex; 50 | justify-content: center; 51 | align-items: flex-start; 52 | } 53 | 54 | .mdx-hero__content { 55 | text-align: center; 56 | } 57 | 58 | .mdx-hero__content h1 { 59 | margin-bottom: 1rem; 60 | font-weight: 700; 61 | color: currentColor; 62 | } 63 | 64 | .mdx-features { 65 | display: grid; 66 | grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); 67 | grid-gap: 3rem; 68 | margin-bottom: 3rem; 69 | } 70 | 71 | .mdx-features img { 72 | height: 160px; 73 | object-fit: fixed; 74 | } 75 | 76 | @media screen and (min-width: 960px) { 77 | .md-sidebar--secondary { 78 | display: none; 79 | } 80 | 81 | .mdx-hero { 82 | display: flex; 83 | align-items: stretch; 84 | } 85 | 86 | .mdx-hero__image { 87 | order: 1; 88 | width: 50%; 89 | } 90 | 91 | .mdx-hero__content { 92 | width: 50%; 93 | margin-top: 5rem; 94 | padding-bottom: 20vh; 95 | text-align: left; 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /docs/stylesheets/themes.css: -------------------------------------------------------------------------------- 1 | [data-md-color-scheme="default"] { 2 | --md-primary-fg-color: rgb(35, 47, 63); 3 | --md-primary-fg-color--light: rgb(255, 255, 255); 4 | --md-primary-fg-color--dark: rgb(35, 47, 63); 5 | --md-accent-fg-color: rgb(255, 153, 0); 6 | --md-accent-fg-color--light: rgb(255, 153, 0); 7 | --md-accent-fg-color--dark: rgb(255, 153, 0); 8 | --md-typeset-a-color: var(--md-accent-fg-color); 9 | } 10 | 11 | [data-md-color-scheme="slate"] { 12 | --md-primary-fg-color: rgb(35, 47, 63); 13 | --md-primary-fg-color--light: rgb(255, 255, 255); 14 | --md-primary-fg-color--dark: rgb(35, 47, 63); 15 | --md-accent-fg-color: rgb(255, 153, 0); 16 | --md-accent-fg-color--light: rgb(255, 153, 0); 17 | --md-accent-fg-color--dark: rgb(255, 153, 0); 18 | --md-typeset-a-color: var(--md-accent-fg-color); 19 | 
} 20 | 21 | [data-md-color-scheme="slate"] .md-button { 22 | color: var(--md-accent-fg-color); 23 | } 24 | 25 | [data-md-color-scheme="default"] .md-button { 26 | color: var(--md-accent-fg-color); 27 | } 28 | -------------------------------------------------------------------------------- /modules/jenkins/asg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_launch_template" "jenkins_build_farm_launch_template" { 2 | for_each = var.build_farm_compute 3 | 4 | name_prefix = "${local.name_prefix}-${each.key}-bf-" 5 | description = "${each.key} build farm launch template." 6 | 7 | image_id = each.value.ami 8 | instance_type = each.value.instance_type 9 | ebs_optimized = each.value.ebs_optimized 10 | 11 | vpc_security_group_ids = [aws_security_group.jenkins_build_farm_sg.id] 12 | 13 | metadata_options { 14 | http_endpoint = "enabled" 15 | http_tokens = "required" 16 | http_put_response_hop_limit = 1 17 | instance_metadata_tags = "enabled" 18 | } 19 | iam_instance_profile { 20 | arn = aws_iam_instance_profile.build_farm_instance_profile.arn 21 | } 22 | } 23 | 24 | resource "aws_autoscaling_group" "jenkins_build_farm_asg" { 25 | for_each = aws_launch_template.jenkins_build_farm_launch_template 26 | 27 | name = "${local.name_prefix}-${each.key}-build-farm" 28 | #TODO: parameterize zones for ASG. Currently deploys to same zones as Jenkins service. 
29 | vpc_zone_identifier = var.build_farm_subnets 30 | 31 | launch_template { 32 | id = each.value.id 33 | version = "$Latest" 34 | } 35 | 36 | # These values are controlled by the EC2 Fleet plugin 37 | min_size = 0 38 | max_size = 1 39 | 40 | tag { 41 | key = "ASG" 42 | value = each.key 43 | propagate_at_launch = true 44 | } 45 | 46 | tag { 47 | key = "Name" 48 | value = "${local.name_prefix}-${each.key}-build-farm" 49 | propagate_at_launch = true 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /modules/jenkins/assets/media/diagrams/jenkins-admin-password.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/jenkins/assets/media/diagrams/jenkins-admin-password.png -------------------------------------------------------------------------------- /modules/jenkins/assets/media/diagrams/jenkins-module-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/jenkins/assets/media/diagrams/jenkins-module-architecture.png -------------------------------------------------------------------------------- /modules/jenkins/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "current" {} 2 | data "aws_region" "current" {} 3 | 4 | # If cluster name is provided use a data source to access existing resource 5 | data "aws_ecs_cluster" "jenkins_cluster" { 6 | count = var.cluster_name != null ? 
1 : 0 7 | cluster_name = var.cluster_name 8 | } 9 | 10 | # Get target VPC 11 | data "aws_vpc" "build_farm_vpc" { 12 | id = var.vpc_id 13 | } 14 | -------------------------------------------------------------------------------- /modules/jenkins/efs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Filesystem 3 | ################################################################################ 4 | 5 | # File system for Jenkins 6 | resource "aws_efs_file_system" "jenkins_efs_file_system" { 7 | creation_token = "${local.name_prefix}-efs-file-system" 8 | performance_mode = var.jenkins_efs_performance_mode 9 | throughput_mode = var.jenkins_efs_throughput_mode 10 | 11 | #TODO: Parameterize encryption and customer managed key creation 12 | encrypted = true 13 | 14 | lifecycle_policy { 15 | transition_to_ia = "AFTER_30_DAYS" 16 | } 17 | 18 | lifecycle_policy { 19 | transition_to_primary_storage_class = "AFTER_1_ACCESS" 20 | } 21 | #checkov:skip=CKV_AWS_184: CMK encryption not supported currently 22 | tags = merge(local.tags, { 23 | Name = "${local.name_prefix}-efs-file-system" 24 | }) 25 | } 26 | 27 | # Mount targets for Jenkins containers 28 | resource "aws_efs_mount_target" "jenkins_efs_mount_target" { 29 | count = length(var.jenkins_service_subnets) 30 | file_system_id = aws_efs_file_system.jenkins_efs_file_system.id 31 | subnet_id = var.jenkins_service_subnets[count.index] 32 | security_groups = [aws_security_group.jenkins_efs_security_group.id] 33 | } 34 | 35 | # Jenkins Home directory access point 36 | resource "aws_efs_access_point" "jenkins_efs_access_point" { 37 | file_system_id = aws_efs_file_system.jenkins_efs_file_system.id 38 | posix_user { 39 | gid = 1001 40 | uid = 1001 41 | } 42 | root_directory { 43 | path = local.jenkins_home_path 44 | creation_info { 45 | owner_gid = 1001 46 | owner_uid = 1001 47 | permissions = 755 48 | } 49 | } 50 
| tags = local.tags 51 | } 52 | 53 | resource "aws_efs_backup_policy" "policy" { 54 | count = var.enable_default_efs_backup_plan ? 1 : 0 55 | file_system_id = aws_efs_file_system.jenkins_efs_file_system.id 56 | 57 | backup_policy { 58 | status = "ENABLED" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /modules/jenkins/examples/complete/dns.tf: -------------------------------------------------------------------------------- 1 | ########################################## 2 | # Route53 Hosted Zone for FQDN 3 | ########################################## 4 | data "aws_route53_zone" "root" { 5 | name = var.fully_qualified_domain_name 6 | private_zone = false 7 | } 8 | 9 | resource "aws_route53_record" "jenkins" { 10 | zone_id = data.aws_route53_zone.root.id 11 | name = "jenkins.${data.aws_route53_zone.root.name}" 12 | type = "A" 13 | alias { 14 | name = module.jenkins.jenkins_alb_dns_name 15 | zone_id = module.jenkins.jenkins_alb_zone_id 16 | evaluate_target_health = true 17 | } 18 | } 19 | 20 | ########################################## 21 | # Jenkins Certificate Management 22 | ########################################## 23 | 24 | resource "aws_acm_certificate" "jenkins" { 25 | domain_name = "jenkins.${data.aws_route53_zone.root.name}" 26 | validation_method = "DNS" 27 | 28 | tags = { 29 | environment = "dev" 30 | } 31 | 32 | lifecycle { 33 | create_before_destroy = true 34 | } 35 | } 36 | 37 | resource "aws_route53_record" "jenkins_cert" { 38 | for_each = { 39 | for dvo in aws_acm_certificate.jenkins.domain_validation_options : dvo.domain_name => { 40 | name = dvo.resource_record_name 41 | record = dvo.resource_record_value 42 | type = dvo.resource_record_type 43 | } 44 | } 45 | 46 | allow_overwrite = true 47 | name = each.value.name 48 | records = [each.value.record] 49 | ttl = 60 50 | type = each.value.type 51 | zone_id = data.aws_route53_zone.root.id 52 | } 53 | 54 | resource "aws_acm_certificate_validation" 
"jenkins" { 55 | timeouts { 56 | create = "15m" 57 | } 58 | certificate_arn = aws_acm_certificate.jenkins.arn 59 | validation_record_fqdns = [for record in aws_route53_record.jenkins_cert : record.fqdn] 60 | } 61 | -------------------------------------------------------------------------------- /modules/jenkins/examples/complete/local.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | 4 | locals { 5 | 6 | build_farm_compute = { 7 | example_builders : { 8 | ami = "ami-066784287e358dad1" // Amazon Linux 2023 (64-bit x86) 9 | instance_type = "t3.medium" 10 | } 11 | } 12 | 13 | build_farm_fsx_openzfs_storage = { 14 | cache : { 15 | storage_type = "SSD" 16 | throughput_capacity = 160 17 | storage_capacity = 256 18 | deployment_type = "MULTI_AZ_1" 19 | route_table_ids = [aws_route_table.private_rt.id] 20 | } 21 | } 22 | 23 | # VPC Configuration 24 | vpc_cidr_block = "10.0.0.0/16" 25 | public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 26 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 27 | 28 | tags = { 29 | environment = "cgd" 30 | } 31 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 32 | } 33 | -------------------------------------------------------------------------------- /modules/jenkins/examples/complete/main.tf: -------------------------------------------------------------------------------- 1 | # tflint-ignore: terraform_required_version 2 | 3 | ########################################## 4 | # Shared ECS Cluster for Services 5 | ########################################## 6 | 7 | resource "aws_ecs_cluster" "jenkins_cluster" { 8 | name = "jenkins-cluster" 9 | 10 | setting { 11 | name = "containerInsights" 12 | value = "enabled" 13 | } 14 | } 15 | 16 | resource "aws_ecs_cluster_capacity_providers" "providers" { 17 | cluster_name = aws_ecs_cluster.jenkins_cluster.name 18 | 19 | capacity_providers = ["FARGATE"] 20 | 21 | 
default_capacity_provider_strategy { 22 | base = 1 23 | weight = 100 24 | capacity_provider = "FARGATE" 25 | } 26 | } 27 | 28 | ########################################## 29 | # Jenkins 30 | ########################################## 31 | 32 | module "jenkins" { 33 | source = "../.." 34 | 35 | cluster_name = aws_ecs_cluster.jenkins_cluster.name 36 | vpc_id = aws_vpc.jenkins_vpc.id 37 | jenkins_alb_subnets = aws_subnet.public_subnets[*].id 38 | jenkins_service_subnets = aws_subnet.private_subnets[*].id 39 | existing_security_groups = [] 40 | internal = false 41 | certificate_arn = aws_acm_certificate.jenkins.arn 42 | jenkins_agent_secret_arns = var.jenkins_agent_secret_arns 43 | create_ec2_fleet_plugin_policy = true 44 | debug = true 45 | enable_jenkins_alb_deletion_protection = false 46 | enable_default_efs_backup_plan = false 47 | 48 | # Build Farms 49 | build_farm_subnets = aws_subnet.private_subnets[*].id 50 | 51 | build_farm_compute = local.build_farm_compute 52 | 53 | build_farm_fsx_openzfs_storage = local.build_farm_fsx_openzfs_storage 54 | # Artifacts 55 | artifact_buckets = { 56 | builds : { 57 | name = "game-builds" 58 | enable_force_destroy = true 59 | 60 | tags = { 61 | Name = "game-builds" 62 | } 63 | }, 64 | } 65 | 66 | depends_on = [aws_ecs_cluster.jenkins_cluster, aws_acm_certificate_validation.jenkins] 67 | } 68 | -------------------------------------------------------------------------------- /modules/jenkins/examples/complete/variables.tf: -------------------------------------------------------------------------------- 1 | variable "fully_qualified_domain_name" { 2 | type = string 3 | description = "A fully qualified domain name (FQDN) to be used for jenkins. 
A record will be created on the hosted zone with the following patterns 'jenkins.'" 4 | } 5 | 6 | variable "jenkins_agent_secret_arns" { 7 | type = list(string) 8 | description = "A list of secretmanager ARNs (wildcards allowed) that contain any secrets which need to be accessed by the Jenkins service." 9 | default = [] 10 | } 11 | -------------------------------------------------------------------------------- /modules/jenkins/examples/complete/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.89.0" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/jenkins/fsxz.tf: -------------------------------------------------------------------------------- 1 | resource "aws_fsx_openzfs_file_system" "jenkins_build_farm_fsxz_file_system" { 2 | for_each = var.build_farm_fsx_openzfs_storage 3 | 4 | deployment_type = each.value.deployment_type 5 | preferred_subnet_id = var.build_farm_subnets[0] 6 | 7 | subnet_ids = var.build_farm_subnets 8 | route_table_ids = each.value.route_table_ids 9 | 10 | storage_capacity = each.value.storage_capacity 11 | throughput_capacity = each.value.throughput_capacity 12 | 13 | security_group_ids = [aws_security_group.jenkins_build_storage_sg.id] 14 | 15 | root_volume_configuration { 16 | data_compression_type = "LZ4" 17 | read_only = false 18 | record_size_kib = 128 19 | copy_tags_to_snapshots = true 20 | nfs_exports { 21 | client_configurations { 22 | clients = data.aws_vpc.build_farm_vpc.cidr_block 23 | options = ["async", "rw", "crossmnt"] 24 | } 25 | } 26 | } 27 | 28 | skip_final_backup = true 29 | automatic_backup_retention_days = 7 30 | copy_tags_to_backups = true 31 | copy_tags_to_volumes = true 32 | daily_automatic_backup_start_time = "06:00" 33 | #checkov:skip=CKV_AWS_203: CMK encryption not supported 
currently 34 | tags = merge(local.tags, each.value.tags, { 35 | Name = "${var.project_prefix}-${each.key}" 36 | }) 37 | } 38 | 39 | resource "aws_fsx_openzfs_volume" "jenkins_build_farm_fsxz_volume" { 40 | for_each = aws_fsx_openzfs_file_system.jenkins_build_farm_fsxz_file_system 41 | 42 | name = "${var.project_prefix}-${each.key}" 43 | parent_volume_id = aws_fsx_openzfs_file_system.jenkins_build_farm_fsxz_file_system[each.key].root_volume_id 44 | 45 | copy_tags_to_snapshots = true 46 | 47 | data_compression_type = "LZ4" 48 | 49 | nfs_exports { 50 | client_configurations { 51 | clients = data.aws_vpc.build_farm_vpc.cidr_block 52 | options = ["async", "rw", "crossmnt"] 53 | } 54 | } 55 | 56 | tags = merge(local.tags, each.value.tags, { 57 | Name = "${var.project_prefix}-${each.key}" 58 | }) 59 | } 60 | -------------------------------------------------------------------------------- /modules/jenkins/local.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | jenkins_image = "jenkins/jenkins:lts-jdk17" 3 | jenkins_home_path = "/var/jenkins_home" 4 | name_prefix = "${var.project_prefix}-${var.name}" 5 | 6 | tags = merge(var.tags, { 7 | "environment" = var.environment 8 | }) 9 | } 10 | -------------------------------------------------------------------------------- /modules/jenkins/outputs.tf: -------------------------------------------------------------------------------- 1 | output "service_security_group_id" { 2 | description = "Security group associated with the ECS service hosting jenkins" 3 | value = aws_security_group.jenkins_service_sg.id 4 | } 5 | 6 | output "alb_security_group_id" { 7 | description = "Security group associated with the Jenkins load balancer" 8 | value = var.create_application_load_balancer ? 
aws_security_group.jenkins_alb_sg[0].id : null 9 | } 10 | 11 | output "build_farm_security_group_id" { 12 | description = "Security group associated with the build farm autoscaling groups" 13 | value = aws_security_group.jenkins_build_farm_sg.id 14 | } 15 | 16 | output "jenkins_alb_dns_name" { 17 | description = "The DNS name of the Jenkins application load balancer." 18 | value = var.create_application_load_balancer ? aws_lb.jenkins_alb[0].dns_name : null 19 | } 20 | 21 | output "jenkins_alb_zone_id" { 22 | description = "The zone ID of the Jenkins ALB." 23 | value = var.create_application_load_balancer ? aws_lb.jenkins_alb[0].zone_id : null 24 | } 25 | 26 | output "service_target_group_arn" { 27 | value = aws_lb_target_group.jenkins_alb_target_group.arn 28 | description = "The ARN of the Jenkins service target group" 29 | } 30 | -------------------------------------------------------------------------------- /modules/jenkins/s3.tf: -------------------------------------------------------------------------------- 1 | # - Random String to prevent naming conflicts - 2 | resource "random_string" "artifact_buckets" { 3 | length = 4 4 | special = false 5 | upper = false 6 | } 7 | 8 | 9 | resource "aws_s3_bucket" "artifact_buckets" { 10 | #checkov:skip=CKV2_AWS_61: Lifecycle configuration not currently supported 11 | #checkov:skip=CKV_AWS_21: Versioning configurable through variables 12 | #checkov:skip=CKV_AWS_144: Cross-region replication not currently supported 13 | #checkov:skip=CKV_AWS_145: KMS encryption with CMK not currently supported 14 | #checkov:skip=CKV_AWS_18: S3 access logs not necessary 15 | #checkov:skip=CKV2_AWS_62: Event notifications not necessary 16 | for_each = var.artifact_buckets 17 | bucket = "${var.project_prefix}-${each.value.name}-${random_string.artifact_buckets.result}" 18 | force_destroy = each.value.enable_force_destroy 19 | 20 | tags = merge( 21 | { 22 | "environment" = var.environment 23 | }, 24 | each.value.tags, 25 | ) 26 | } 27 | 28 | 
resource "aws_s3_bucket_versioning" "artifact_bucket_versioning" { 29 | for_each = var.artifact_buckets 30 | bucket = aws_s3_bucket.artifact_buckets[each.key].id 31 | versioning_configuration { 32 | status = each.value.enable_versioning ? "Enabled" : "Disabled" 33 | } 34 | } 35 | 36 | resource "aws_s3_bucket_public_access_block" "artifacts_bucket_public_block" { 37 | for_each = var.artifact_buckets 38 | bucket = aws_s3_bucket.artifact_buckets[each.key].id 39 | 40 | block_public_acls = true 41 | block_public_policy = true 42 | ignore_public_acls = true 43 | restrict_public_buckets = true 44 | } 45 | -------------------------------------------------------------------------------- /modules/jenkins/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.7.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/perforce/assets/media/diagrams/p4-auth-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/perforce/assets/media/diagrams/p4-auth-architecture.png -------------------------------------------------------------------------------- /modules/perforce/assets/media/diagrams/p4-code-review-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/perforce/assets/media/diagrams/p4-code-review-architecture.png -------------------------------------------------------------------------------- 
/modules/perforce/assets/media/diagrams/p4-server-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/perforce/assets/media/diagrams/p4-server-architecture.png -------------------------------------------------------------------------------- /modules/perforce/assets/media/diagrams/perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/perforce/assets/media/diagrams/perforce-arch-cdg-toolkit-terraform-aws-perforce-full-arch-route53-dns.png -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/dns.tf: -------------------------------------------------------------------------------- 1 | ########################################## 2 | # Fetch Shared NLB DNS Name and Zone ID 3 | ########################################## 4 | data "aws_lb" "shared_services_nlb" { 5 | arn = module.terraform-aws-perforce.shared_network_load_balancer_arn 6 | 7 | depends_on = [module.terraform-aws-perforce] 8 | } 9 | 10 | ########################################## 11 | # Fetch Route53 Public Hosted Zone for FQDN 12 | ########################################## 13 | data "aws_route53_zone" "root" { 14 | name = var.route53_public_hosted_zone_name 15 | private_zone = false 16 | } 17 | 18 | ########################################## 19 | # Perforce External (Public) DNS 20 | ########################################## 21 | # Route all external web service traffic (e.g. 
auth.perforce.example.com, review.perforce.example.com) to the Public NLB 22 | resource "aws_route53_record" "external_perforce_web_services" { 23 | zone_id = data.aws_route53_zone.root.id 24 | name = "*.${local.p4_server_fully_qualified_domain_name}" 25 | type = "A" 26 | alias { 27 | name = data.aws_lb.shared_services_nlb.dns_name 28 | zone_id = data.aws_lb.shared_services_nlb.zone_id 29 | evaluate_target_health = true 30 | } 31 | } 32 | 33 | # Route external web service traffic to the public EIP of the P4 Server 34 | resource "aws_route53_record" "external_perforce_p4_server" { 35 | #checkov:skip=CKV2_AWS_23: Attached to EIP public IP 36 | zone_id = data.aws_route53_zone.root.id 37 | name = "perforce.${data.aws_route53_zone.root.name}" 38 | type = "A" 39 | ttl = 300 40 | records = [module.terraform-aws-perforce.p4_server_eip_public_ip] 41 | } 42 | 43 | 44 | ########################################## 45 | # P4 Code Review Certificate Management 46 | ########################################## 47 | resource "aws_acm_certificate" "perforce" { 48 | domain_name = "*.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 49 | 50 | validation_method = "DNS" 51 | 52 | #checkov:skip=CKV2_AWS_71: Wildcard is necessary for this domain 53 | 54 | tags = { 55 | environment = "dev" 56 | } 57 | 58 | lifecycle { 59 | create_before_destroy = true 60 | } 61 | } 62 | 63 | resource "aws_route53_record" "perforce_cert" { 64 | for_each = { 65 | for dvo in aws_acm_certificate.perforce.domain_validation_options : dvo.domain_name => { 66 | name = dvo.resource_record_name 67 | record = dvo.resource_record_value 68 | type = dvo.resource_record_type 69 | } 70 | } 71 | 72 | allow_overwrite = true 73 | name = each.value.name 74 | records = [each.value.record] 75 | ttl = 60 76 | type = each.value.type 77 | zone_id = data.aws_route53_zone.root.id 78 | } 79 | 80 | resource "aws_acm_certificate_validation" "perforce" { 81 | certificate_arn = aws_acm_certificate.perforce.arn 82 | 
validation_record_fqdns = [for record in aws_route53_record.perforce_cert : record.fqdn] 83 | 84 | 85 | lifecycle { 86 | create_before_destroy = true 87 | } 88 | timeouts { 89 | create = "15m" 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/locals.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | locals { 4 | project_prefix = "cgd" 5 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 6 | 7 | # Subdomains 8 | perforce_subdomain = "perforce" 9 | p4_auth_subdomain = "auth" 10 | p4_code_review_subdomain = "review" 11 | 12 | # P4 Server Domain 13 | p4_server_fully_qualified_domain_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 14 | 15 | # P4Auth Domain 16 | p4_auth_fully_qualified_domain_name = "${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 17 | 18 | # P4 Code Review 19 | p4_code_review_fully_qualified_domain_name = "${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 20 | 21 | 22 | # Amazon Certificate Manager (ACM) 23 | certificate_arn = aws_acm_certificate.perforce.arn 24 | 25 | 26 | # VPC Configuration 27 | vpc_cidr_block = "10.0.0.0/16" 28 | public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 29 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 30 | 31 | tags = { 32 | environment = "dev" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/main.tf: -------------------------------------------------------------------------------- 1 | module "terraform-aws-perforce" { 2 | source = "../../" 3 | 4 | # - Shared - 5 | project_prefix = local.project_prefix 6 | vpc_id = aws_vpc.perforce_vpc.id 7 | 8 | create_route53_private_hosted_zone 
= true 9 | route53_private_hosted_zone_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 10 | certificate_arn = local.certificate_arn 11 | existing_security_groups = [aws_security_group.allow_my_ip.id] 12 | shared_alb_subnets = aws_subnet.private_subnets[*].id 13 | shared_nlb_subnets = aws_subnet.public_subnets[*].id 14 | 15 | # - P4 Server Configuration - 16 | p4_server_config = { 17 | # General 18 | name = "p4-server" 19 | fully_qualified_domain_name = local.p4_server_fully_qualified_domain_name 20 | 21 | # Compute 22 | lookup_existing_ami = false 23 | enable_auto_ami_creation = true 24 | p4_server_type = "p4d_commit" 25 | 26 | # Storage 27 | depot_volume_size = 128 28 | metadata_volume_size = 32 29 | logs_volume_size = 32 30 | 31 | # Networking & Security 32 | instance_subnet_id = aws_subnet.public_subnets[0].id 33 | existing_security_groups = [aws_security_group.allow_my_ip.id] 34 | } 35 | 36 | # - P4Auth Configuration - 37 | p4_auth_config = { 38 | # General 39 | name = "p4-auth" 40 | fully_qualified_domain_name = local.p4_auth_fully_qualified_domain_name 41 | existing_security_groups = [aws_security_group.allow_my_ip.id] 42 | debug = true # optional to use for debugging. Default is false if omitted 43 | deregistration_delay = 0 44 | service_subnets = aws_subnet.private_subnets[*].id 45 | # Allow ECS tasks to be immediately deregistered from target group. Helps to prevent race conditions during `terraform destroy` 46 | } 47 | 48 | 49 | # - P4 Code Review Configuration - 50 | p4_code_review_config = { 51 | name = "p4-code-review" 52 | fully_qualified_domain_name = local.p4_code_review_fully_qualified_domain_name 53 | existing_security_groups = [aws_security_group.allow_my_ip.id] 54 | debug = true # optional to use for debugging. Default is false if omitted 55 | deregistration_delay = 0 56 | service_subnets = aws_subnet.private_subnets[*].id 57 | # Allow ECS tasks to be immediately deregistered from target group. 
Helps to prevent race conditions during `terraform destroy` 58 | 59 | # Configuration 60 | enable_sso = true 61 | } 62 | } 63 | 64 | # placeholder since provider is "required" by the module 65 | provider "netapp-ontap" { 66 | connection_profiles = [ 67 | { 68 | name = "null" 69 | hostname = "null" 70 | username = "null" 71 | password = "null" 72 | } 73 | ] 74 | } 75 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/outputs.tf: -------------------------------------------------------------------------------- 1 | output "p4_server_connection_string" { 2 | value = "ssl:${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}:1666" 3 | description = "The connection string for the P4 Server. Set your P4PORT environment variable to this value." 4 | } 5 | 6 | output "p4_code_review_url" { 7 | value = "https://${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 8 | description = "The URL for the P4 Code Review service." 9 | } 10 | 11 | output "p4_auth_admin_url" { 12 | value = "https://${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}/admin" 13 | description = "The URL for the P4Auth service admin page." 
14 | } 15 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "allow_my_ip" { 2 | name = "allow_my_ip" 3 | description = "Allow inbound traffic from my IP" 4 | vpc_id = aws_vpc.perforce_vpc.id 5 | 6 | tags = { 7 | Name = "allow_my_ip" 8 | } 9 | } 10 | 11 | data "http" "my_ip" { 12 | url = "https://api.ipify.org" 13 | } 14 | 15 | resource "aws_vpc_security_group_ingress_rule" "allow_https" { 16 | security_group_id = aws_security_group.allow_my_ip.id 17 | description = "Allow HTTPS traffic from Kevon." 18 | from_port = 443 19 | to_port = 443 20 | ip_protocol = "tcp" 21 | cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" 22 | } 23 | resource "aws_vpc_security_group_ingress_rule" "allow_http" { 24 | security_group_id = aws_security_group.allow_my_ip.id 25 | description = "Allow HTTP traffic from Kevon." 26 | from_port = 80 27 | to_port = 80 28 | ip_protocol = "tcp" 29 | cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" 30 | } 31 | 32 | resource "aws_vpc_security_group_ingress_rule" "allow_icmp" { 33 | security_group_id = aws_security_group.allow_my_ip.id 34 | description = "Allow ICMP traffic from Kevon." 35 | from_port = -1 36 | to_port = -1 37 | ip_protocol = "icmp" 38 | cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" 39 | } 40 | resource "aws_vpc_security_group_ingress_rule" "allow_perforce" { 41 | security_group_id = aws_security_group.allow_my_ip.id 42 | description = "Allow Perforce traffic from Kevon." 
43 | from_port = 1666 44 | to_port = 1666 45 | ip_protocol = "tcp" 46 | cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" 47 | } 48 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/variables.tf: -------------------------------------------------------------------------------- 1 | variable "route53_public_hosted_zone_name" { 2 | description = "The name of your existing Route53 Public Hosted Zone. This is required to create the ACM certificate and Route53 records." 3 | type = string 4 | } 5 | -------------------------------------------------------------------------------- /modules/perforce/examples/create-resources-complete/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | awscc = { 10 | source = "hashicorp/awscc" 11 | version = "1.34.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.7.1" 16 | } 17 | http = { 18 | source = "hashicorp/http" 19 | version = "3.5.0" 20 | } 21 | netapp-ontap = { 22 | source = "NetApp/netapp-ontap" 23 | version = "2.1.0" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/dns.tf: -------------------------------------------------------------------------------- 1 | ########################################## 2 | # Route53 Hosted Zone for FQDN 3 | ########################################## 4 | data "aws_route53_zone" "root" { 5 | name = var.route53_public_hosted_zone_name 6 | private_zone = 
false 7 | } 8 | 9 | # Route all external Helix Core traffic to P4 Server 10 | resource "aws_route53_record" "external_p4_server" { 11 | #checkov:skip=CKV2_AWS_23: Route53 Record associated with P4 Server EIP 12 | zone_id = data.aws_route53_zone.root.zone_id 13 | name = "perforce.${data.aws_route53_zone.root.name}" 14 | type = "A" 15 | ttl = 300 16 | records = [module.perforce.p4_server_eip_public_ip] 17 | } 18 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/local.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | locals { 4 | project_prefix = "cgd" 5 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 6 | 7 | # Subdomains 8 | perforce_subdomain = "perforce" 9 | 10 | # P4 Server Domain 11 | p4_server_fully_qualified_domain_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 12 | 13 | # VPC Configuration 14 | vpc_cidr_block = "10.0.0.0/16" 15 | public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 16 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 17 | 18 | tags = { 19 | environment = "dev" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/outputs.tf: -------------------------------------------------------------------------------- 1 | output "amazon_fsxn_filesystem" { 2 | value = aws_fsx_ontap_file_system.p4_server_fs.id 3 | description = "FSxN filesystem ID" 4 | } 5 | 6 | output "p4_server_connection_string" { 7 | value = "ssl:perforce.${var.route53_public_hosted_zone_name}:1666" 8 | description = "The connection string for the Helix Core server. Set your P4PORT environment variable to this value." 
9 | } 10 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/variables.tf: -------------------------------------------------------------------------------- 1 | variable "route53_public_hosted_zone_name" { 2 | description = "The name of your existing Route53 Public Hosted Zone. This is required to create the ACM certificate and Route53 records." 3 | type = string 4 | } 5 | 6 | variable "fsxn_password" { 7 | description = "Admin password to be used with FSxN" 8 | type = string 9 | } 10 | 11 | variable "fsxn_aws_profile" { 12 | description = "AWS profile for managing FSxN" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /modules/perforce/examples/p4-server-fsxn/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | awscc = { 10 | source = "hashicorp/awscc" 11 | version = "1.34.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.7.1" 16 | } 17 | netapp-ontap = { 18 | source = "NetApp/netapp-ontap" 19 | version = "2.1.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/perforce/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # shared ECS cluster configuration 3 | create_shared_ecs_cluster = (var.existing_ecs_cluster_name == null && 4 | (var.p4_auth_config != null || var.p4_code_review_config != null)) 5 | } 6 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-auth/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | # If cluster name is provided use a data 
source to access existing resource 4 | data "aws_ecs_cluster" "cluster" { 5 | count = var.cluster_name != null ? 1 : 0 6 | cluster_name = var.cluster_name 7 | } 8 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-auth/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | image = "perforce/helix-auth-svc" # cannot change this until the Perforce Helix Authentication Service Image is updated to use the new naming for P4Auth 3 | name_prefix = "${var.project_prefix}-${var.name}" 4 | data_volume_name = "helix-auth-config" # cannot change this until the Perforce Helix Authentication Service Image is updated to use the new naming for P4Auth 5 | data_path = "/var/has" # cannot change this until the Perforce Helix Authentication Service Image is updated to use the new naming for P4Auth 6 | } 7 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-auth/outputs.tf: -------------------------------------------------------------------------------- 1 | output "service_security_group_id" { 2 | value = aws_security_group.ecs_service.id 3 | description = "Security group associated with the ECS service running P4Auth" 4 | } 5 | 6 | output "alb_security_group_id" { 7 | value = var.create_application_load_balancer ? aws_security_group.alb[0].id : null 8 | description = "Security group associated with the P4Auth load balancer" 9 | } 10 | 11 | output "cluster_name" { 12 | value = var.cluster_name != null ? var.cluster_name : aws_ecs_cluster.cluster[0].name 13 | description = "Name of the ECS cluster hosting P4Auth" 14 | } 15 | 16 | output "alb_dns_name" { 17 | value = var.create_application_load_balancer ? aws_lb.alb[0].dns_name : null 18 | description = "The DNS name of the P4Auth ALB" 19 | } 20 | 21 | output "alb_zone_id" { 22 | value = var.create_application_load_balancer ? 
aws_lb.alb[0].zone_id : null 23 | description = "The hosted zone ID of the P4Auth ALB" 24 | } 25 | 26 | output "target_group_arn" { 27 | value = aws_lb_target_group.alb_target_group.arn 28 | description = "The service target group for P4Auth" 29 | } 30 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-auth/sg.tf: -------------------------------------------------------------------------------- 1 | ######################################## 2 | # ALB Security Group 3 | ######################################## 4 | # Load Balancer Security Group (attached to ALB) 5 | resource "aws_security_group" "alb" { 6 | #checkov:skip=CKV2_AWS_5: Attached to ALB on creation 7 | count = var.create_application_load_balancer ? 1 : 0 8 | name = "${local.name_prefix}-alb" 9 | vpc_id = var.vpc_id 10 | description = "${local.name_prefix} ALB Security Group" 11 | tags = merge(var.tags, 12 | { 13 | Name = "${local.name_prefix}-alb" 14 | } 15 | ) 16 | } 17 | 18 | # Outbound access from ALB to Containers 19 | resource "aws_vpc_security_group_egress_rule" "alb_outbound_to_ecs_service" { 20 | count = var.create_application_load_balancer ? 
1 : 0 21 | security_group_id = aws_security_group.alb[0].id 22 | description = "Allow outbound traffic from ALB to ${local.name_prefix} ECS service" 23 | referenced_security_group_id = aws_security_group.ecs_service.id 24 | from_port = var.container_port 25 | to_port = var.container_port 26 | ip_protocol = "tcp" 27 | } 28 | 29 | ######################################## 30 | # ECS Service Security Group 31 | ######################################## 32 | # Service Security Group (attached to containers) 33 | resource "aws_security_group" "ecs_service" { 34 | name = "${local.name_prefix}-service" 35 | vpc_id = var.vpc_id 36 | description = "${local.name_prefix} service Security Group" 37 | tags = merge(var.tags, 38 | { 39 | Name = "${local.name_prefix}-service" 40 | } 41 | ) 42 | } 43 | 44 | # Inbound access to Containers from ALB 45 | resource "aws_vpc_security_group_ingress_rule" "ecs_service_inbound_from_alb" { 46 | count = var.create_application_load_balancer ? 1 : 0 47 | security_group_id = aws_security_group.ecs_service.id 48 | description = "Allow inbound traffic from ${local.name_prefix} ALB to ${local.name_prefix} service" 49 | referenced_security_group_id = aws_security_group.alb[0].id 50 | from_port = var.container_port 51 | to_port = var.container_port 52 | ip_protocol = "tcp" 53 | } 54 | 55 | # Outbound access from Containers to Internet (IPV4) 56 | resource "aws_vpc_security_group_egress_rule" "ecs_service_outbound_to_internet_ipv4" { 57 | security_group_id = aws_security_group.ecs_service.id 58 | description = "Allow outbound traffic from ${local.name_prefix} service to internet (ipv4)" 59 | cidr_ipv4 = "0.0.0.0/0" 60 | ip_protocol = "-1" # semantically equivalent to all ports 61 | } 62 | 63 | # Outbound access from Containers to Internet (IPV6) 64 | resource "aws_vpc_security_group_egress_rule" "ecs_service_outbound_to_internet_ipv6" { 65 | security_group_id = aws_security_group.ecs_service.id 66 | description = "Allow outbound traffic from 
${local.name_prefix} service to internet (ipv6)" 67 | cidr_ipv6 = "::/0" 68 | ip_protocol = "-1" # semantically equivalent to all ports 69 | } 70 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-auth/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | awscc = { 10 | source = "hashicorp/awscc" 11 | version = "1.34.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.7.1" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-code-review/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | # If cluster name is provided use a data source to access existing resource 4 | data "aws_ecs_cluster" "cluster" { 5 | count = var.cluster_name != null ? 1 : 0 6 | cluster_name = var.cluster_name 7 | } 8 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-code-review/elasticache.tf: -------------------------------------------------------------------------------- 1 | # Subnet Group for Horde Elasticache 2 | resource "aws_elasticache_subnet_group" "subnet_group" { 3 | count = var.existing_redis_connection != null ? 0 : 1 4 | name = "${local.name_prefix}-elasticache-subnet-group" 5 | subnet_ids = var.subnets 6 | } 7 | 8 | # Single Node Elasticache Cluster for P4 Code Review 9 | resource "aws_elasticache_cluster" "cluster" { 10 | count = var.existing_redis_connection != null ? 
0 : 1 11 | cluster_id = "${local.name_prefix}-elasticache-redis-cluster" 12 | engine = "redis" 13 | node_type = var.elasticache_node_type 14 | num_cache_nodes = var.elasticache_node_count 15 | parameter_group_name = local.elasticache_redis_parameter_group_name 16 | engine_version = local.elasticache_redis_engine_version 17 | port = local.elasticache_redis_port 18 | security_group_ids = [aws_security_group.elasticache[0].id] 19 | subnet_group_name = aws_elasticache_subnet_group.subnet_group[0].name 20 | 21 | tags = merge(var.tags, 22 | { 23 | Name = "${local.name_prefix}-elasticache-redis-cluster" 24 | } 25 | ) 26 | } 27 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-code-review/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | image = "perforce/helix-swarm" # cannot change this until the Perforce Helix Swarm Image is updated to use the new naming for P4 Code Review 3 | name_prefix = "${var.project_prefix}-${var.name}" 4 | data_volume_name = "helix-swarm-data" # cannot change this until the Perforce Helix Swarm Image is updated to use the new naming for P4 Code Review 5 | data_path = "/opt/perforce/swarm/data" # cannot change this until the Perforce Helix Swarm Image is updated to use the new naming for P4 Code Review 6 | 7 | elasticache_redis_port = 6379 8 | elasticache_redis_engine_version = "7.0" 9 | elasticache_redis_parameter_group_name = "default.redis7" 10 | 11 | } 12 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-code-review/outputs.tf: -------------------------------------------------------------------------------- 1 | output "service_security_group_id" { 2 | value = aws_security_group.ecs_service.id 3 | description = "Security group associated with the ECS service running P4 Code Review" 4 | } 5 | 6 | output "alb_security_group_id" { 7 | value = 
var.create_application_load_balancer ? aws_security_group.alb[0].id : null 8 | description = "Security group associated with the P4 Code Review load balancer" 9 | } 10 | 11 | output "cluster_name" { 12 | value = var.cluster_name != null ? var.cluster_name : aws_ecs_cluster.cluster[0].name 13 | description = "Name of the ECS cluster hosting P4 Code Review" 14 | } 15 | 16 | output "alb_dns_name" { 17 | value = var.create_application_load_balancer ? aws_lb.alb[0].dns_name : null 18 | description = "The DNS name of the P4 Code Review ALB" 19 | } 20 | 21 | output "alb_zone_id" { 22 | value = var.create_application_load_balancer ? aws_lb.alb[0].zone_id : null 23 | description = "The hosted zone ID of the P4 Code Review ALB" 24 | } 25 | 26 | output "target_group_arn" { 27 | value = aws_lb_target_group.alb_target_group.arn 28 | description = "The service target group for P4 Code Review" 29 | } 30 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-code-review/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.7.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/data.tf: -------------------------------------------------------------------------------- 1 | # Lookup of subnet that module creates 2 | data "aws_subnet" "instance_subnet" { 3 | id = var.instance_subnet_id 4 | } 5 | 6 | # Conditionally fetch exist P4 Server AMI that unless using the auto-generated AMI 7 | data "aws_ami" "existing_server_ami" { 8 | most_recent = true 9 | name_regex = "p4_al2023" 10 | owners = ["self"] 11 | 12 | filter { 13 | name = "name" 14 | values = ["p4_al2023*"] 15 | } 16 | 17 | filter { 18 
| name = "root-device-type" 19 | values = ["ebs"] 20 | } 21 | 22 | filter { 23 | name = "virtualization-type" 24 | values = ["hvm"] 25 | } 26 | filter { 27 | name = "architecture" 28 | values = [var.instance_architecture] 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/iam.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "p4_server" { 2 | length = 4 3 | special = false 4 | upper = false 5 | } 6 | 7 | # EC2 Trust Relationship 8 | data "aws_iam_policy_document" "ec2_trust_relationship" { 9 | statement { 10 | effect = "Allow" 11 | actions = ["sts:AssumeRole"] 12 | principals { 13 | type = "Service" 14 | identifiers = ["ec2.amazonaws.com"] 15 | } 16 | } 17 | } 18 | 19 | # Grants permissions for P4 Server instance to fetch super user credentials from Secrets Manager 20 | data "aws_iam_policy_document" "default_policy" { 21 | statement { 22 | effect = "Allow" 23 | actions = [ 24 | "secretsmanager:ListSecrets", 25 | "secretsmanager:ListSecretVersionIds", 26 | "secretsmanager:GetRandomPassword", 27 | "secretsmanager:GetSecretValue", 28 | "secretsmanager:DescribeSecret", 29 | "secretsmanager:BatchGetSecretValue" 30 | ] 31 | resources = compact([ 32 | var.super_user_password_secret_arn == null ? awscc_secretsmanager_secret.super_user_password[0].secret_id : var.super_user_password_secret_arn, 33 | var.super_user_username_secret_arn == null ? awscc_secretsmanager_secret.super_user_username[0].secret_id : var.super_user_username_secret_arn, 34 | var.storage_type == "FSxN" && var.protocol == "ISCSI" ? var.fsxn_password : null 35 | ]) 36 | } 37 | } 38 | 39 | resource "aws_iam_policy" "default_policy" { 40 | name = "${local.name_prefix}-default-policy" 41 | description = "Policy granting permissions for P4 Server to access Secrets Manager." 
42 | policy = data.aws_iam_policy_document.default_policy.json 43 | 44 | tags = merge(local.tags, 45 | { 46 | Name = "${local.name_prefix}-default-policy" 47 | } 48 | ) 49 | } 50 | 51 | # Instance Role 52 | resource "aws_iam_role" "default_role" { 53 | count = var.create_default_role ? 1 : 0 54 | name = "${local.name_prefix}-default-role" 55 | assume_role_policy = data.aws_iam_policy_document.ec2_trust_relationship.json 56 | 57 | tags = merge(local.tags, 58 | { 59 | Name = "${local.name_prefix}-default-role" 60 | } 61 | ) 62 | } 63 | 64 | resource "aws_iam_role_policy_attachment" "default_role_ssm_managed_instance_core" { 65 | count = var.create_default_role ? 1 : 0 66 | role = aws_iam_role.default_role[0].name 67 | policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" 68 | } 69 | resource "aws_iam_role_policy_attachment" "default_role_default_policy" { 70 | count = var.create_default_role ? 1 : 0 71 | role = aws_iam_role.default_role[0].name 72 | policy_arn = aws_iam_policy.default_policy.arn 73 | } 74 | 75 | # Instance Profile 76 | resource "aws_iam_instance_profile" "instance_profile" { 77 | name = "${local.name_prefix}-${var.name}-${random_string.p4_server.result}-instance-profile" 78 | role = var.custom_role != null ? 
var.custom_role : aws_iam_role.default_role[0].name 79 | 80 | tags = merge(local.tags, 81 | { 82 | Name = "${local.name_prefix}-default-instance-profile" 83 | } 84 | ) 85 | } 86 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_prefix = "${var.project_prefix}-${var.name}" 3 | p4_server_az = data.aws_subnet.instance_subnet.availability_zone 4 | tags = merge( 5 | { 6 | "environment" = var.environment 7 | }, 8 | var.tags, 9 | ) 10 | } 11 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/outputs.tf: -------------------------------------------------------------------------------- 1 | output "eip_public_ip" { 2 | value = var.internal ? null : aws_eip.server_eip[0].public_ip 3 | description = "The public IP of your P4 Server instance." 4 | } 5 | 6 | output "eip_id" { 7 | value = var.internal ? null : aws_eip.server_eip[0].id 8 | description = "The ID of the Elastic IP associated with your P4 Server instance." 9 | } 10 | 11 | output "security_group_id" { 12 | value = var.create_default_sg ? aws_security_group.default_security_group[0].id : null 13 | description = "The default security group of your P4 Server instance." 14 | } 15 | 16 | output "super_user_password_secret_arn" { 17 | value = (var.super_user_password_secret_arn == null ? 18 | awscc_secretsmanager_secret.super_user_password[0].secret_id : 19 | var.super_user_password_secret_arn) 20 | description = "The ARN of the AWS Secrets Manager secret holding your P4 Server super user's password." 21 | } 22 | 23 | output "super_user_username_secret_arn" { 24 | value = (var.super_user_username_secret_arn == null ? 
25 | awscc_secretsmanager_secret.super_user_username[0].secret_id : 26 | var.super_user_username_secret_arn) 27 | description = "The ARN of the AWS Secrets Manager secret holding your P4 Server super user's username." 28 | } 29 | 30 | output "instance_id" { 31 | value = aws_instance.server_instance.id 32 | description = "Instance ID for the P4 Server instance" 33 | } 34 | 35 | output "private_ip" { 36 | value = aws_instance.server_instance.private_ip 37 | description = "Private IP for the P4 Server instance" 38 | } 39 | 40 | output "lambda_link_name" { 41 | value = (var.storage_type == "FSxN" && var.protocol == "ISCSI" ? 42 | aws_lambda_function.lambda_function[0].function_name : null) 43 | description = "Lambda function name for the FSxN Link" 44 | } 45 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/sg.tf: -------------------------------------------------------------------------------- 1 | ########################################## 2 | # Default SG 3 | ########################################## 4 | resource "aws_security_group" "default_security_group" { 5 | count = var.create_default_sg ? 1 : 0 6 | #checkov:skip=CKV2_AWS_5:SG is attached to FSxZ file systems 7 | 8 | vpc_id = var.vpc_id 9 | name = "${local.name_prefix}-instance" 10 | description = "Security group for P4 Server machines." 11 | tags = merge(local.tags, 12 | { 13 | Name = "${local.name_prefix}-instance" 14 | } 15 | ) 16 | } 17 | 18 | # P4 Server --> Internet 19 | # Allows P4 Server to send outbound traffic to the Internet 20 | resource "aws_vpc_security_group_egress_rule" "server_internet" { 21 | count = var.create_default_sg ? 1 : 0 22 | security_group_id = aws_security_group.default_security_group[0].id 23 | cidr_ipv4 = "0.0.0.0/0" 24 | ip_protocol = -1 25 | description = "Allows P4 Server to send outbound traffic to the Internet." 
26 | } 27 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/templates/user_data.tftpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DEPOT_VOLUME_NAME=${depot_volume_name} 3 | METADATA_VOLUME_NAME=${metadata_volume_name} 4 | LOGS_VOLUME_NAME=${logs_volume_name} 5 | /home/ec2-user/gpic_scripts/p4_configure.sh \ 6 | --hx_logs $LOGS_VOLUME_NAME \ 7 | --hx_metadata $METADATA_VOLUME_NAME \ 8 | --hx_depots $DEPOT_VOLUME_NAME \ 9 | --p4d_type ${p4_server_type} \ 10 | --username ${username_secret} \ 11 | --password ${password_secret} \ 12 | %{ if fqdn != "" ~} 13 | --fqdn ${fqdn} \ 14 | %{ endif ~} 15 | %{ if auth_url != "" ~} 16 | --auth ${auth_url} \ 17 | %{ endif ~} 18 | %{ if is_fsxn ~} 19 | --fsxn_password ${fsxn_password} \ 20 | --fsxn_svm_name ${fsxn_svm_name} \ 21 | --fsxn_management_ip ${fsxn_management_ip} \ 22 | %{ endif ~} 23 | --case_sensitive ${case_sensitive} \ 24 | --unicode ${unicode} \ 25 | --selinux ${selinux} \ 26 | --plaintext ${plaintext} 27 | -------------------------------------------------------------------------------- /modules/perforce/modules/p4-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | awscc = { 10 | source = "hashicorp/awscc" 11 | version = "1.34.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.7.1" 16 | } 17 | netapp-ontap = { 18 | source = "NetApp/netapp-ontap" 19 | version = "2.1.0" 20 | } 21 | null = { 22 | source = "hashicorp/null" 23 | version = "3.2.4" 24 | } 25 | local = { 26 | source = "hashicorp/local" 27 | version = "2.5.2" 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /modules/perforce/route53.tf: 
--------------------------------------------------------------------------------
##########################################
# Perforce Internal (Private) DNS
##########################################
resource "aws_route53_zone" "perforce_private_hosted_zone" {
  # Plain boolean condition (the previous `!= false` comparison was redundant
  # and inconsistent with the conditions used by the records below).
  count = var.create_route53_private_hosted_zone ? 1 : 0
  name  = var.route53_private_hosted_zone_name
  #checkov:skip=CKV2_AWS_38: Hosted zone is private (vpc association)
  #checkov:skip=CKV2_AWS_39: Query logging disabled by design
  vpc {
    vpc_id = var.vpc_id
  }
}

# Route all internal web service traffic (e.g. auth.perforce.example.com, review.perforce.example.com) to the Private ALB
resource "aws_route53_record" "internal_perforce_web_services" {
  count = var.create_shared_application_load_balancer && var.create_route53_private_hosted_zone ? 1 : 0
  # Use the documented `zone_id` attribute consistently (the file previously
  # mixed `.id` and `.zone_id`; both resolve to the hosted zone ID).
  zone_id = aws_route53_zone.perforce_private_hosted_zone[0].zone_id
  name    = "*.${aws_route53_zone.perforce_private_hosted_zone[0].name}"
  type    = "A"
  alias {
    name                   = aws_lb.perforce_web_services[0].dns_name
    zone_id                = aws_lb.perforce_web_services[0].zone_id
    evaluate_target_health = true
  }
}

# Route all internal P4 Server traffic to the instance (zone apex -> instance
# private IP; the wildcard record above covers the web-service subdomains).
resource "aws_route53_record" "internal_p4_server" {
  count   = var.p4_server_config != null && var.create_route53_private_hosted_zone ? 1 : 0
  zone_id = aws_route53_zone.perforce_private_hosted_zone[0].zone_id
  name    = aws_route53_zone.perforce_private_hosted_zone[0].name
  type    = "A"
  records = [module.p4_server[0].private_ip]
  ttl     = 300

  #checkov:skip=CKV2_AWS_23: Route53 A record is necessary for this example deployment
}
--------------------------------------------------------------------------------
/modules/perforce/tests/01_create_resources_complete.tftest.hcl:
--------------------------------------------------------------------------------
# Fetch relevant values from SSM Parameter Store
run "setup" {
  command = plan
  module {
    source = "./tests/setup"
  }
}

run "unit_test" {
  command = plan

  variables {
    route53_public_hosted_zone_name = run.setup.route53_public_hosted_zone_name
  }
  module {
    source = "./examples/create-resources-complete"
  }
}

# Unused until error handling/retry logic is improved in Terraform test
# https://github.com/hashicorp/terraform/issues/36846#issuecomment-2820247524
# run "e2e_test" {
#   command = apply
#   module {
#     source = "./examples/create-resources-complete"
#   }
# }
--------------------------------------------------------------------------------
/modules/perforce/tests/02_p4_server_fsxn.tftest.hcl:
--------------------------------------------------------------------------------
# Fetch relevant values from SSM Parameter Store
run "setup" {
  command = plan
  module {
    source = "./tests/setup"
  }
}

run "unit_test" {
  command = plan

  variables {
    route53_public_hosted_zone_name = run.setup.route53_public_hosted_zone_name
    fsxn_password                   = run.setup.fsxn_password
    fsxn_aws_profile                = run.setup.fsxn_aws_profile
  }
  module {
    source = "./examples/p4-server-fsxn"
  }
}

# Unused until error handling/retry logic is improved in Terraform test
# https://github.com/hashicorp/terraform/issues/36846#issuecomment-2820247524
# (comment prefix normalized to a single `#` to match 01_create_resources_complete.tftest.hcl)
# run "e2e_test" {
#   command = apply
#   module {
#     source = "./examples/p4-server-fsxn"
#   }
# }
--------------------------------------------------------------------------------
/modules/perforce/tests/setup/ssm.tf:
--------------------------------------------------------------------------------
# Fetch relevant values from SSM Parameter Store so test runs do not need
# hard-coded account-specific values.
data "aws_ssm_parameter" "route53_public_hosted_zone_name" {
  name = "/cloud-game-development-toolkit/modules/perforce/route53-public-hosted-zone-name"
}
data "aws_ssm_parameter" "fsxn_password" {
  name = "/cloud-game-development-toolkit/modules/perforce/fsxn-password"
}
data "aws_ssm_parameter" "fsxn_aws_profile" {
  name = "/cloud-game-development-toolkit/modules/perforce/fsxn-aws-profile"
}

# SSM parameter values are sensitive by default; `nonsensitive()` plus an
# explicit `sensitive = false` lets the test framework pass them as plain
# variables to the example configurations.
output "route53_public_hosted_zone_name" {
  value     = nonsensitive(data.aws_ssm_parameter.route53_public_hosted_zone_name.value)
  sensitive = false
}
output "fsxn_password" {
  value     = nonsensitive(data.aws_ssm_parameter.fsxn_password.value)
  sensitive = false
}
output "fsxn_aws_profile" {
  value     = nonsensitive(data.aws_ssm_parameter.fsxn_aws_profile.value)
  sensitive = false
}
--------------------------------------------------------------------------------
/modules/perforce/versions.tf:
--------------------------------------------------------------------------------
terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.97.0"
    }
    awscc = {
      source  = "hashicorp/awscc"
      version = "1.34.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.7.1"
    }
    null = {
      source  = "hashicorp/null"
      version = "3.2.4"
    }
    local = {
      source  = "hashicorp/local"
      version = "2.5.2"
    }
  }
}
--------------------------------------------------------------------------------
/modules/teamcity/assets/media/diagrams/teamcity-server-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/teamcity/assets/media/diagrams/teamcity-server-architecture.png
--------------------------------------------------------------------------------
/modules/teamcity/examples/simple/dns.tf:
--------------------------------------------------------------------------------
variable "root_domain_name" {
  type        = string
  description = "The root domain name for the Hosted Zone where the TeamCity record should be created."
}

##########################################
# Route53 Hosted Zone for Root
##########################################
data "aws_route53_zone" "root" {
  name         = var.root_domain_name
  private_zone = false
}

# Create a record in the Hosted Zone for the TeamCity server
resource "aws_route53_record" "teamcity" {
  # `zone_id` used consistently instead of mixing `.id` and `.zone_id`.
  zone_id = data.aws_route53_zone.root.zone_id
  name    = "teamcity.${data.aws_route53_zone.root.name}"
  type    = "A"

  alias {
    name                   = module.teamcity.external_alb_dns_name
    zone_id                = module.teamcity.external_alb_zone_id
    evaluate_target_health = false
  }
}

# Create a certificate for the TeamCity server
resource "aws_acm_certificate" "teamcity" {
  domain_name       = "teamcity.${data.aws_route53_zone.root.name}"
  validation_method = "DNS"

  tags = {
    Environment = "test"
  }
  lifecycle {
    create_before_destroy = true
  }
}

# DNS validation records for the certificate above.
resource "aws_route53_record" "teamcity_cert" {
  for_each = {
    for dvo in aws_acm_certificate.teamcity.domain_validation_options : dvo.domain_name => {
      name   = dvo.resource_record_name
      record = dvo.resource_record_value
      type   = dvo.resource_record_type
    }
  }

  allow_overwrite = true
  name            = each.value.name
  records         = [each.value.record]
  ttl             = 60
  type            = each.value.type
  zone_id         = data.aws_route53_zone.root.zone_id
}

resource "aws_acm_certificate_validation" "teamcity" {
  timeouts {
    create = "15m"
  }
  certificate_arn         = aws_acm_certificate.teamcity.arn
  validation_record_fqdns = [for record in aws_route53_record.teamcity_cert : record.fqdn]
}
--------------------------------------------------------------------------------
/modules/teamcity/examples/simple/local.tf:
--------------------------------------------------------------------------------
data "aws_availability_zones" "available" {}

locals {
  vpc_cidr_block       = "10.0.0.0/16"
  public_subnet_cidrs  = ["10.0.1.0/24", "10.0.2.0/24"]
  private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"]
  azs                  = slice(data.aws_availability_zones.available.names, 0, 2)
  tags                 = {}
}
--------------------------------------------------------------------------------
/modules/teamcity/examples/simple/main.tf:
--------------------------------------------------------------------------------
module "teamcity" {
  source              = "../../"
  vpc_id              = aws_vpc.teamcity_vpc.id
  service_subnets     = aws_subnet.private_subnets[*].id
  alb_subnets         = aws_subnet.public_subnets[*].id
  alb_certificate_arn = aws_acm_certificate.teamcity.arn

  # Wait for DNS validation to complete before attaching the certificate to
  # the ALB listener; attaching an unvalidated certificate fails the apply.
  # (Same pattern as the Unreal Horde complete example in this repository.)
  depends_on = [aws_acm_certificate_validation.teamcity]
}
--------------------------------------------------------------------------------
/modules/teamcity/examples/simple/versions.tf:
--------------------------------------------------------------------------------
terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.89.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.5.1"
    }
  }
}
-------------------------------------------------------------------------------- /modules/teamcity/examples/simple/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "teamcity_vpc" { 2 | cidr_block = local.vpc_cidr_block 3 | tags = merge(local.tags, 4 | { 5 | Name = "teamcity-vpc" 6 | } 7 | ) 8 | enable_dns_hostnames = true 9 | #checkov:skip=CKV2_AWS_11: VPC flow logging disabled by design 10 | } 11 | 12 | # Set default SG to restrict all traffic 13 | resource "aws_default_security_group" "default" { 14 | vpc_id = aws_vpc.teamcity_vpc.id 15 | } 16 | 17 | resource "aws_subnet" "public_subnets" { 18 | count = length(local.public_subnet_cidrs) 19 | vpc_id = aws_vpc.teamcity_vpc.id 20 | cidr_block = element(local.public_subnet_cidrs, count.index) 21 | availability_zone = element(local.azs, count.index) 22 | 23 | tags = merge(local.tags, 24 | { 25 | Name = "pub-subnet-${count.index + 1}" 26 | } 27 | ) 28 | } 29 | 30 | resource "aws_subnet" "private_subnets" { 31 | count = length(local.private_subnet_cidrs) 32 | vpc_id = aws_vpc.teamcity_vpc.id 33 | cidr_block = element(local.private_subnet_cidrs, count.index) 34 | availability_zone = element(local.azs, count.index) 35 | 36 | tags = merge(local.tags, 37 | { 38 | Name = "pvt-subnet-${count.index + 1}" 39 | } 40 | ) 41 | } 42 | 43 | ########################################## 44 | # Internet Gateway 45 | ########################################## 46 | 47 | resource "aws_internet_gateway" "igw" { 48 | vpc_id = aws_vpc.teamcity_vpc.id 49 | tags = merge(local.tags, 50 | { 51 | Name = "teamcity-igw" 52 | } 53 | ) 54 | } 55 | 56 | ########################################## 57 | # Route Tables & NAT Gateway 58 | ########################################## 59 | 60 | resource "aws_route_table" "public_rt" { 61 | vpc_id = aws_vpc.teamcity_vpc.id 62 | 63 | # public route to the internet 64 | route { 65 | cidr_block = "0.0.0.0/0" 66 | gateway_id = aws_internet_gateway.igw.id 
67 | } 68 | 69 | tags = merge(local.tags, 70 | { 71 | Name = "teamcity-public-rt" 72 | } 73 | ) 74 | } 75 | 76 | resource "aws_route_table_association" "public_rt_asso" { 77 | count = length(aws_subnet.public_subnets) 78 | route_table_id = aws_route_table.public_rt.id 79 | subnet_id = aws_subnet.public_subnets[count.index].id 80 | } 81 | 82 | resource "aws_eip" "nat_gateway_eip" { 83 | depends_on = [aws_internet_gateway.igw] 84 | #checkov:skip=CKV2_AWS_19:EIP associated with NAT Gateway through association ID 85 | tags = merge(local.tags, 86 | { 87 | Name = "teamcity-nat-eip" 88 | } 89 | ) 90 | } 91 | 92 | resource "aws_route_table" "private_rt" { 93 | vpc_id = aws_vpc.teamcity_vpc.id 94 | 95 | tags = merge(local.tags, 96 | { 97 | Name = "teamcity-private-rt" 98 | } 99 | ) 100 | } 101 | 102 | # route to the internet through NAT gateway 103 | resource "aws_route" "private_rt_nat_gateway" { 104 | route_table_id = aws_route_table.private_rt.id 105 | destination_cidr_block = "0.0.0.0/0" 106 | nat_gateway_id = aws_nat_gateway.nat_gateway.id 107 | } 108 | 109 | resource "aws_route_table_association" "private_rt_asso" { 110 | count = length(aws_subnet.private_subnets) 111 | route_table_id = aws_route_table.private_rt.id 112 | subnet_id = aws_subnet.private_subnets[count.index].id 113 | } 114 | 115 | resource "aws_nat_gateway" "nat_gateway" { 116 | allocation_id = aws_eip.nat_gateway_eip.id 117 | subnet_id = aws_subnet.public_subnets[0].id 118 | tags = merge(local.tags, 119 | { 120 | Name = "teamcity-nat" 121 | } 122 | ) 123 | } 124 | -------------------------------------------------------------------------------- /modules/teamcity/local.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_prefix = "teamcity" 3 | tags = merge(var.tags, { 4 | "environment" = var.environment 5 | }) 6 | 7 | # Database information 8 | database_connection_string = var.database_connection_string != null ? 
var.database_connection_string : "jdbc:postgresql://${aws_rds_cluster.teamcity_db_cluster[0].endpoint}/teamcity" 9 | database_master_username = var.database_master_username != null ? var.database_master_username : aws_rds_cluster.teamcity_db_cluster[0].master_username 10 | database_master_password = var.database_master_password != null ? var.database_master_password : null 11 | 12 | # Docker image to use for TeamCity Server 13 | image = "jetbrains/teamcity-server" 14 | 15 | # EFS information 16 | efs_file_system_id = var.efs_id != null ? var.efs_id : aws_efs_file_system.teamcity_efs_file_system[0].id 17 | efs_file_system_arn = var.efs_id != null ? data.aws_efs_file_system.efs_file_system[0].arn : aws_efs_file_system.teamcity_efs_file_system[0].arn 18 | efs_access_point_id = var.efs_access_point_id != null ? var.efs_access_point_id : aws_efs_access_point.teamcity_efs_data_access_point[0].id 19 | 20 | # TeamCity Server Information 21 | # Set environment variables 22 | base_env = [ 23 | { 24 | name = "TEAMCITY_DB_URL" 25 | value = local.database_connection_string 26 | }, 27 | { 28 | name = "TEAMCITY_DB_USER" 29 | value = local.database_master_username 30 | }, 31 | { 32 | name = "TEAMCITY_DATA_PATH" 33 | value = "/data/teamcity_server/datadir" 34 | } 35 | ] 36 | # Define password environment variable if provided 37 | password_env = local.database_master_password != null ? [ 38 | { 39 | name = "TEAMCITY_DB_PASSWORD" 40 | value = local.database_master_password 41 | } 42 | ] : [] 43 | } 44 | data "aws_region" "current" {} 45 | 46 | # Data source to look up existing EFS file system if ID is provided 47 | data "aws_efs_file_system" "efs_file_system" { 48 | count = var.efs_id != null ? 
1 : 0 49 | file_system_id = var.efs_id 50 | } 51 | -------------------------------------------------------------------------------- /modules/teamcity/outputs.tf: -------------------------------------------------------------------------------- 1 | output "external_alb_dns_name" { 2 | value = var.create_external_alb ? aws_lb.teamcity_external_lb[0].dns_name : null 3 | description = "DNS endpoint of Application Load Balancer (ALB)" 4 | } 5 | 6 | output "external_alb_zone_id" { 7 | value = var.create_external_alb ? aws_lb.teamcity_external_lb[0].zone_id : null 8 | description = "Zone ID for internet facing load balancer" 9 | } 10 | -------------------------------------------------------------------------------- /modules/teamcity/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.9" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.89.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.5.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/unreal/horde/assets/media/diagrams/unreal-engine-horde-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/unreal/horde/assets/media/diagrams/unreal-engine-horde-architecture.png -------------------------------------------------------------------------------- /modules/unreal/horde/config/agent/horde-agent.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description= Horde Agent Service 3 | 4 | [Service] 5 | ExecStart=dotnet /home/Horde/HordeAgent.dll 6 | WorkingDirectory=/home/Horde/ 7 | Restart=always 8 | RestartSec=5 9 | SyslogIdentifier=horde-agent 10 | 
StandardOutput=append:/home/Horde/agent-service.log 11 | StandardError=append:/home/Horde/agent-service.err 12 | User=Horde 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /modules/unreal/horde/docdb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_docdb_subnet_group" "horde" { 2 | count = var.database_connection_string == null ? 1 : 0 3 | 4 | name = "${var.name}-docdb-subnet-group" 5 | subnet_ids = var.unreal_horde_service_subnets 6 | 7 | tags = { 8 | Name = "Horde DocumentDB subnet group" 9 | } 10 | } 11 | 12 | resource "aws_docdb_cluster_parameter_group" "horde" { 13 | count = var.database_connection_string == null ? 1 : 0 14 | 15 | family = "docdb5.0" 16 | name = "${var.name}-docdb-parameter-group" 17 | description = "Horde DocumentDb cluster parameter group" 18 | #checkov:skip=CKV_AWS_104:Audit logs will be enabled through variable 19 | 20 | parameter { 21 | name = "tls" 22 | value = "enabled" 23 | } 24 | } 25 | 26 | resource "aws_docdb_cluster_instance" "horde" { 27 | count = var.database_connection_string == null ? var.docdb_instance_count : 0 28 | 29 | identifier = "${var.name}-docdb-${count.index}" 30 | cluster_identifier = aws_docdb_cluster.horde[0].id 31 | instance_class = var.docdb_instance_class 32 | } 33 | 34 | resource "aws_docdb_cluster" "horde" { 35 | count = var.database_connection_string == null ? 
1 : 0 36 | 37 | #checkov:skip=CKV_AWS_182:CMK encryption not currently supported 38 | #checkov:skip=CKV_AWS_85:Logging will be enabled by variable 39 | cluster_identifier = "${var.name}-docdb-cluster" 40 | engine = "docdb" 41 | master_username = var.docdb_master_username 42 | master_password = var.docdb_master_password 43 | backup_retention_period = var.docdb_backup_retention_period 44 | preferred_backup_window = var.docdb_preferred_backup_window 45 | skip_final_snapshot = var.docdb_skip_final_snapshot 46 | 47 | db_cluster_parameter_group_name = aws_docdb_cluster_parameter_group.horde[0].name 48 | db_subnet_group_name = aws_docdb_subnet_group.horde[0].name 49 | vpc_security_group_ids = [aws_security_group.unreal_horde_docdb_sg[0].id] 50 | storage_encrypted = var.docdb_storage_encrypted 51 | } 52 | -------------------------------------------------------------------------------- /modules/unreal/horde/elasticache.tf: -------------------------------------------------------------------------------- 1 | # Subnet Group for Horde Elasticache 2 | resource "aws_elasticache_subnet_group" "horde" { 3 | count = var.redis_connection_config == null ? 1 : 0 4 | name = "${var.name}-elasticache-subnet-group" 5 | subnet_ids = var.unreal_horde_service_subnets 6 | } 7 | 8 | # Single Node Elasticache Cluster for Horde 9 | resource "aws_elasticache_cluster" "horde" { 10 | count = var.redis_connection_config == null ? 
1 : 0 11 | cluster_id = "${var.name}-elasticache-redis-cluster" 12 | engine = "redis" 13 | node_type = var.elasticache_node_type 14 | num_cache_nodes = var.elasticache_node_count 15 | parameter_group_name = local.elasticache_redis_parameter_group_name 16 | engine_version = local.elasticache_redis_engine_version 17 | port = local.elasticache_redis_port 18 | security_group_ids = [aws_security_group.unreal_horde_elasticache_sg[0].id] 19 | subnet_group_name = aws_elasticache_subnet_group.horde[0].name 20 | 21 | snapshot_retention_limit = var.elasticache_snapshot_retention_limit 22 | } 23 | -------------------------------------------------------------------------------- /modules/unreal/horde/examples/complete/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "ubuntu_noble_amd" { 2 | most_recent = true 3 | filter { 4 | name = "name" 5 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 6 | } 7 | 8 | filter { 9 | name = "virtualization-type" 10 | values = ["hvm"] 11 | } 12 | 13 | filter { 14 | name = "root-device-type" 15 | values = ["ebs"] 16 | } 17 | 18 | filter { 19 | name = "architecture" 20 | values = [ 21 | "x86_64" 22 | ] 23 | } 24 | 25 | owners = ["amazon"] 26 | } 27 | -------------------------------------------------------------------------------- /modules/unreal/horde/examples/complete/dns.tf: -------------------------------------------------------------------------------- 1 | ########################################## 2 | # Route53 Hosted Zone for Root 3 | ########################################## 4 | data "aws_route53_zone" "root" { 5 | name = var.root_domain_name 6 | private_zone = false 7 | } 8 | 9 | resource "aws_route53_record" "unreal_engine_horde_external" { 10 | zone_id = data.aws_route53_zone.root.id 11 | name = "horde.${data.aws_route53_zone.root.name}" 12 | type = "A" 13 | #checkov:skip=CKV2_AWS_23:The attached resource is managed by CGD Toolkit 14 | alias { 15 | name 
= module.unreal_engine_horde.external_alb_dns_name 16 | zone_id = module.unreal_engine_horde.external_alb_zone_id 17 | evaluate_target_health = true 18 | } 19 | } 20 | 21 | ########################################## 22 | # Internal Hosted Zone for Unreal Horde 23 | ########################################## 24 | 25 | resource "aws_route53_zone" "unreal_engine_horde_private_zone" { 26 | name = "horde.${var.root_domain_name}" 27 | #checkov:skip=CKV2_AWS_38: Hosted zone is private (vpc association) 28 | #checkov:skip=CKV2_AWS_39: Query logging disabled by design 29 | vpc { 30 | vpc_id = aws_vpc.unreal_engine_horde_vpc.id 31 | } 32 | } 33 | 34 | resource "aws_route53_record" "unreal_engine_horde_internal" { 35 | zone_id = aws_route53_zone.unreal_engine_horde_private_zone.zone_id 36 | name = aws_route53_zone.unreal_engine_horde_private_zone.name 37 | type = "A" 38 | alias { 39 | name = module.unreal_engine_horde.internal_alb_dns_name 40 | zone_id = module.unreal_engine_horde.internal_alb_zone_id 41 | evaluate_target_health = true 42 | } 43 | } 44 | 45 | resource "aws_acm_certificate" "unreal_engine_horde" { 46 | domain_name = "horde.${data.aws_route53_zone.root.name}" 47 | validation_method = "DNS" 48 | 49 | tags = { 50 | environment = "dev" 51 | } 52 | 53 | lifecycle { 54 | create_before_destroy = true 55 | } 56 | } 57 | 58 | 59 | resource "aws_route53_record" "unreal_engine_horde_cert" { 60 | for_each = { 61 | for dvo in aws_acm_certificate.unreal_engine_horde.domain_validation_options : dvo.domain_name => { 62 | name = dvo.resource_record_name 63 | record = dvo.resource_record_value 64 | type = dvo.resource_record_type 65 | } 66 | } 67 | 68 | allow_overwrite = true 69 | name = each.value.name 70 | records = [each.value.record] 71 | ttl = 60 72 | type = each.value.type 73 | zone_id = data.aws_route53_zone.root.id 74 | } 75 | 76 | resource "aws_acm_certificate_validation" "unreal_engine_horde" { 77 | timeouts { 78 | create = "15m" 79 | } 80 | certificate_arn = 
aws_acm_certificate.unreal_engine_horde.arn 81 | validation_record_fqdns = [for record in aws_route53_record.unreal_engine_horde_cert : record.fqdn] 82 | } 83 | -------------------------------------------------------------------------------- /modules/unreal/horde/examples/complete/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | locals { 4 | vpc_cidr_block = "10.0.0.0/16" 5 | public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 6 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 7 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 8 | tags = {} 9 | } 10 | 11 | module "unreal_engine_horde" { 12 | source = "../../" 13 | unreal_horde_service_subnets = aws_subnet.private_subnets[*].id 14 | unreal_horde_external_alb_subnets = aws_subnet.public_subnets[*].id # External ALB used by developers 15 | unreal_horde_internal_alb_subnets = aws_subnet.private_subnets[*].id # Internal ALB used by agents 16 | vpc_id = aws_vpc.unreal_engine_horde_vpc.id 17 | certificate_arn = aws_acm_certificate.unreal_engine_horde.arn 18 | github_credentials_secret_arn = var.github_credentials_secret_arn 19 | tags = local.tags 20 | 21 | agents = { 22 | ubuntu-x86 = { 23 | ami = data.aws_ami.ubuntu_noble_amd.id 24 | instance_type = "c7a.large" 25 | min_size = 2 26 | max_size = 5 27 | block_device_mappings = [ 28 | { 29 | device_name = "/dev/sda1" 30 | ebs = { 31 | volume_size = 64 32 | } 33 | } 34 | ] 35 | } 36 | } 37 | 38 | fully_qualified_domain_name = "horde.${var.root_domain_name}" 39 | 40 | depends_on = [aws_acm_certificate_validation.unreal_engine_horde] 41 | } 42 | -------------------------------------------------------------------------------- /modules/unreal/horde/examples/complete/variables.tf: -------------------------------------------------------------------------------- 1 | variable "root_domain_name" { 2 | type = string 3 | description = "The root domain name for the 
Hosted Zone where the Horde record should be created." 4 | } 5 | 6 | variable "github_credentials_secret_arn" { 7 | type = string 8 | description = "The ARN of the Github credentials secret that should be used for pulling the Unreal Horde container from the Epic Games Github organization." 9 | } 10 | -------------------------------------------------------------------------------- /modules/unreal/horde/examples/complete/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.69.0" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/unreal/horde/iam.tf: -------------------------------------------------------------------------------- 1 | ############################################# 2 | # IAM Roles for Unreal Engine Horde Module 3 | ############################################# 4 | 5 | data "aws_iam_policy_document" "ecs_tasks_trust_relationship" { 6 | statement { 7 | effect = "Allow" 8 | actions = ["sts:AssumeRole"] 9 | principals { 10 | type = "Service" 11 | identifiers = ["ecs-tasks.amazonaws.com"] 12 | } 13 | } 14 | } 15 | 16 | data "aws_iam_policy_document" "unreal_horde_default_policy" { 17 | count = var.create_unreal_horde_default_policy ? 
1 : 0 18 | # ECS 19 | statement { 20 | sid = "ECSExec" 21 | effect = "Allow" 22 | actions = [ 23 | "ssmmessages:OpenDataChannel", 24 | "ssmmessages:OpenControlChannel", 25 | "ssmmessages:CreateDataChannel", 26 | "ssmmessages:CreateControlChannel", 27 | ] 28 | resources = [ 29 | "*" 30 | ] 31 | } 32 | # Elasticache 33 | statement { 34 | sid = "ElasticacheConnect" 35 | effect = "Allow" 36 | actions = [ 37 | "elasticache:Connect" 38 | ] 39 | resources = [ 40 | aws_elasticache_cluster.horde[0].arn, 41 | ] 42 | } 43 | } 44 | 45 | resource "aws_iam_policy" "unreal_horde_default_policy" { 46 | count = var.create_unreal_horde_default_policy ? 1 : 0 47 | 48 | name = "${var.project_prefix}-unreal_horde-default-policy" 49 | description = "Policy granting permissions for Unreal Horde." 50 | policy = data.aws_iam_policy_document.unreal_horde_default_policy[0].json 51 | } 52 | 53 | resource "aws_iam_role" "unreal_horde_default_role" { 54 | count = var.create_unreal_horde_default_role ? 1 : 0 55 | 56 | name = "${var.project_prefix}-unreal_horde-default-role" 57 | assume_role_policy = data.aws_iam_policy_document.ecs_tasks_trust_relationship.json 58 | 59 | managed_policy_arns = [ 60 | aws_iam_policy.unreal_horde_default_policy[0].arn 61 | ] 62 | 63 | tags = local.tags 64 | } 65 | 66 | data "aws_iam_policy_document" "unreal_horde_secrets_manager_policy" { 67 | statement { 68 | effect = "Allow" 69 | actions = [ 70 | "secretsmanager:GetSecretValue", 71 | ] 72 | resources = [ 73 | var.github_credentials_secret_arn 74 | ] 75 | } 76 | } 77 | 78 | resource "aws_iam_policy" "unreal_horde_secrets_manager_policy" { 79 | name = "${var.project_prefix}-unreal-horde-secrets-manager-policy" 80 | description = "Policy granting permissions for Unreal Horde task execution role to access SSM." 
81 | policy = data.aws_iam_policy_document.unreal_horde_secrets_manager_policy.json 82 | } 83 | 84 | resource "aws_iam_role" "unreal_horde_task_execution_role" { 85 | name = "${var.project_prefix}-unreal_horde-task-execution-role" 86 | 87 | assume_role_policy = data.aws_iam_policy_document.ecs_tasks_trust_relationship.json 88 | managed_policy_arns = ["arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy", aws_iam_policy.unreal_horde_secrets_manager_policy.arn] 89 | } 90 | -------------------------------------------------------------------------------- /modules/unreal/horde/local.tf: -------------------------------------------------------------------------------- 1 | # - Random Strings to prevent naming conflicts - 2 | resource "random_string" "unreal_horde" { 3 | length = 4 4 | special = false 5 | upper = false 6 | } 7 | 8 | data "aws_region" "current" {} 9 | 10 | locals { 11 | image = "ghcr.io/epicgames/horde-server:latest-bundled" 12 | name_prefix = "${var.project_prefix}-${var.name}" 13 | tags = merge(var.tags, { 14 | "environment" = var.environment 15 | }) 16 | 17 | elasticache_redis_port = 6379 18 | elasticache_redis_engine_version = "7.0" 19 | elasticache_redis_parameter_group_name = "default.redis7" 20 | 21 | elasticache_connection_strings = [for node in aws_elasticache_cluster.horde[0].cache_nodes : "${node.address}:${node.port}"] 22 | 23 | redis_connection_config = var.redis_connection_config != null ? var.redis_connection_config : join(",", local.elasticache_connection_strings) 24 | database_connection_string = var.database_connection_string != null ? 
var.database_connection_string : "mongodb://${var.docdb_master_username}:${var.docdb_master_password}@${aws_docdb_cluster.horde[0].endpoint}:27017/?tls=true&tlsCAFile=/app/config/global-bundle.pem&replicaSet=rs0&readPreference=secondaryPreferred&retryWrites=false" 25 | 26 | horde_service_env = [for config in [ 27 | { 28 | name = "Horde__authMethod" 29 | value = var.auth_method 30 | }, 31 | { 32 | name = "Horde__oidcAuthority" 33 | value = var.oidc_authority 34 | }, 35 | { 36 | name = "Horde__oidcAudience", 37 | value = var.oidc_audience 38 | }, 39 | { 40 | name = "Horde__oidcClientId" 41 | value = var.oidc_client_id 42 | }, 43 | { 44 | name = "Horde__oidcClientSecret" 45 | value = var.oidc_client_secret 46 | }, 47 | { 48 | name = "Horde__oidcSigninRedirect" 49 | value = var.oidc_signin_redirect 50 | }, 51 | { 52 | name = "Horde__adminClaimType" 53 | value = var.admin_claim_type 54 | }, 55 | { 56 | name = "Horde__adminClaimValue" 57 | value = var.admin_claim_value 58 | }, 59 | { 60 | name = "Horde__enableNewAgentsByDefault", 61 | value = tostring(var.enable_new_agents_by_default) 62 | }, 63 | { 64 | name = "ASPNETCORE_ENVIRONMENT" 65 | value = var.environment 66 | } 67 | ] : config.value != null ? config : null] 68 | } 69 | -------------------------------------------------------------------------------- /modules/unreal/horde/outputs.tf: -------------------------------------------------------------------------------- 1 | output "external_alb_dns_name" { 2 | value = var.create_external_alb ? aws_lb.unreal_horde_external_alb[0].dns_name : null 3 | } 4 | 5 | output "external_alb_zone_id" { 6 | value = var.create_external_alb ? aws_lb.unreal_horde_external_alb[0].zone_id : null 7 | } 8 | 9 | output "external_alb_sg_id" { 10 | value = var.create_external_alb ? aws_security_group.unreal_horde_external_alb_sg[0].id : null 11 | } 12 | 13 | output "internal_alb_dns_name" { 14 | value = var.create_internal_alb ? 
aws_lb.unreal_horde_internal_alb[0].dns_name : null 15 | } 16 | 17 | output "internal_alb_zone_id" { 18 | value = var.create_internal_alb ? aws_lb.unreal_horde_internal_alb[0].zone_id : null 19 | } 20 | 21 | output "internal_alb_sg_id" { 22 | value = var.create_internal_alb ? aws_security_group.unreal_horde_internal_alb_sg[0].id : null 23 | } 24 | 25 | output "service_security_group_id" { 26 | value = aws_security_group.unreal_horde_sg.id 27 | } 28 | 29 | output "agent_security_group_id" { 30 | value = length(var.agents) > 0 ? aws_security_group.unreal_horde_agent_sg[0].id : null 31 | } 32 | -------------------------------------------------------------------------------- /modules/unreal/horde/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.89.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "3.7.1" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/assets/media/diagrams/unreal-cloud-ddc-infra.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/assets/media/diagrams/unreal-cloud-ddc-infra.png -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "scylla_ami" { 2 | most_recent = true 3 | owners = ["797456418907", "158855661827"] 4 | filter { 5 | name = "name" 6 | values = [var.scylla_ami_name] 7 | } 8 | filter { 9 | name = "architecture" 10 | values = 
[var.scylla_architecture] 11 | } 12 | } 13 | 14 | # Get the latest Amazon Linux 2023 AMI 15 | data "aws_ami" "amazon_linux" { 16 | most_recent = true 17 | owners = ["amazon"] 18 | 19 | filter { 20 | name = "name" 21 | values = ["al2023-ami-2023*"] 22 | } 23 | filter { 24 | name = "architecture" 25 | values = [var.scylla_architecture] 26 | } 27 | 28 | filter { 29 | name = "virtualization-type" 30 | values = ["hvm"] 31 | } 32 | 33 | filter { 34 | name = "root-device-type" 35 | values = ["ebs"] 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_name" { 2 | value = aws_eks_cluster.unreal_cloud_ddc_eks_cluster.name 3 | description = "Name of the EKS Cluster" 4 | } 5 | 6 | output "cluster_endpoint" { 7 | value = aws_eks_cluster.unreal_cloud_ddc_eks_cluster.endpoint 8 | description = "EKS Cluster Endpoint" 9 | } 10 | 11 | output "cluster_arn" { 12 | value = aws_eks_cluster.unreal_cloud_ddc_eks_cluster.arn 13 | description = "ARN of the EKS Cluster" 14 | } 15 | 16 | output "s3_bucket_id" { 17 | value = aws_s3_bucket.unreal_ddc_s3_bucket.id 18 | description = "Bucket to be used for the Unreal Cloud DDC assets" 19 | } 20 | 21 | output "oidc_provider_arn" { 22 | value = aws_iam_openid_connect_provider.unreal_cloud_ddc_oidc_provider.arn 23 | description = "OIDC provider for the EKS Cluster" 24 | } 25 | 26 | output "cluster_certificate_authority_data" { 27 | value = aws_eks_cluster.unreal_cloud_ddc_eks_cluster.certificate_authority[0].data 28 | description = "Public key for the EKS Cluster" 29 | } 30 | 31 | output "peer_security_group_id" { 32 | value = aws_security_group.scylla_security_group.id 33 | description = "ID of the Peer Security Group" 34 | } 35 | 36 | output "scylla_ips" { 37 | value = 
tolist(concat([aws_instance.scylla_ec2_instance_seed[0].private_ip], flatten(aws_instance.scylla_ec2_instance_other_nodes[*].private_ip))) 38 | description = "IPs of the Scylla EC2 instances" 39 | } 40 | 41 | output "nvme_node_group_label" { 42 | value = var.nvme_node_group_label 43 | description = "Label for the NVME node group" 44 | } 45 | 46 | output "worker_node_group_label" { 47 | value = var.worker_node_group_label 48 | description = "Label for the Worker node group" 49 | } 50 | 51 | output "system_node_group_label" { 52 | value = var.system_node_group_label 53 | description = "Label for the System node group" 54 | } 55 | 56 | output "external_alb_dns_name" { 57 | value = var.create_scylla_monitoring_stack && var.create_application_load_balancer ? aws_lb.scylla_monitoring_alb[0].dns_name : null 58 | description = "DNS endpoint of Application Load Balancer (ALB)" 59 | } 60 | 61 | output "external_alb_zone_id" { 62 | value = var.create_scylla_monitoring_stack && var.create_application_load_balancer ? 
aws_lb.scylla_monitoring_alb[0].zone_id : null 63 | description = "Zone ID for internet facing load balancer" 64 | } 65 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/scylla.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Scylla Instance Profile 3 | ################################################################################ 4 | 5 | resource "aws_iam_instance_profile" "scylla_instance_profile" { 6 | name = "${local.name_prefix}-scylladb-instance-profile" 7 | role = aws_iam_role.scylla_role.name 8 | } 9 | ################################################################################ 10 | # Scylla Instances 11 | ################################################################################ 12 | resource "aws_instance" "scylla_ec2_instance_seed" { 13 | count = length([var.scylla_subnets[0]]) 14 | 15 | ami = data.aws_ami.scylla_ami.id 16 | instance_type = var.scylla_instance_type 17 | vpc_security_group_ids = [aws_security_group.scylla_security_group.id] 18 | monitoring = true 19 | 20 | subnet_id = element(var.scylla_subnets, count.index) 21 | 22 | user_data = local.scylla_user_data_primary_node 23 | user_data_replace_on_change = true 24 | ebs_optimized = true 25 | 26 | iam_instance_profile = aws_iam_instance_profile.scylla_instance_profile.name 27 | 28 | root_block_device { 29 | encrypted = true 30 | volume_type = "gp3" 31 | throughput = var.scylla_db_throughput 32 | volume_size = var.scylla_db_storage 33 | } 34 | 35 | metadata_options { 36 | http_endpoint = "enabled" 37 | http_put_response_hop_limit = 1 38 | http_tokens = "required" 39 | } 40 | 41 | tags = merge(var.tags, 42 | { 43 | Name = "${local.name_prefix}-scylla-db" 44 | } 45 | ) 46 | } 47 | 48 | resource "aws_instance" "scylla_ec2_instance_other_nodes" { 49 | count = 
length(var.scylla_subnets) - 1 50 | 51 | ami = data.aws_ami.scylla_ami.id 52 | instance_type = var.scylla_instance_type 53 | vpc_security_group_ids = [aws_security_group.scylla_security_group.id] 54 | monitoring = true 55 | 56 | subnet_id = element(var.scylla_subnets, count.index + 1) 57 | 58 | user_data = local.scylla_user_data_other_nodes 59 | user_data_replace_on_change = true 60 | ebs_optimized = true 61 | 62 | iam_instance_profile = aws_iam_instance_profile.scylla_instance_profile.name 63 | 64 | root_block_device { 65 | encrypted = true 66 | volume_type = "gp3" 67 | throughput = var.scylla_db_throughput 68 | volume_size = var.scylla_db_storage 69 | } 70 | 71 | metadata_options { 72 | http_endpoint = "enabled" 73 | http_put_response_hop_limit = 1 74 | http_tokens = "required" 75 | } 76 | 77 | tags = merge(var.tags, 78 | { 79 | Name = "${local.name_prefix}-scylla-db" 80 | } 81 | ) 82 | } 83 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-infra/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.10.3" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">=5.89.0" 7 | } 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.6" 11 | } 12 | 13 | random = { 14 | source = "hashicorp/random" 15 | version = "3.5.1" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/assets/media/diagrams/unreal-cloud-ddc-single-region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-games/cloud-game-development-toolkit/60f8177855dfecdede6bf3ddd60c04a0a0906ad7/modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/assets/media/diagrams/unreal-cloud-ddc-single-region.png 
-------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | 4 | data "aws_eks_cluster" "unreal_cloud_ddc_cluster" { 5 | name = var.cluster_name 6 | } 7 | 8 | data "aws_s3_bucket" "unreal_cloud_ddc_bucket" { 9 | bucket = var.s3_bucket_id 10 | } 11 | 12 | data "aws_iam_openid_connect_provider" "oidc_provider" { 13 | arn = var.cluster_oidc_provider_arn 14 | } 15 | 16 | data "aws_lb" "unreal_cloud_ddc_load_balancer" { 17 | depends_on = [helm_release.unreal_cloud_ddc] 18 | name = "cgd-unreal-cloud-ddc" 19 | } 20 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/helm.tf: -------------------------------------------------------------------------------- 1 | module "eks_blueprints_all_other_addons" { 2 | #checkov:skip=CKV_TF_1:Upstream commit hash not being checked. This will be broken out in the future. 
3 | #checkov:skip=CKV_AWS_356:Upstream requirement for Load Balancer Controller 4 | #checkov:skip=CKV_AWS_111:Upstream requirement for Load Balancer Controller 5 | source = "git::https://github.com/aws-ia/terraform-aws-eks-blueprints-addons.git?ref=a9963f4a0e168f73adb033be594ac35868696a91" 6 | 7 | eks_addons = { 8 | coredns = { 9 | most_recent = true 10 | } 11 | kube-proxy = { 12 | most_recent = true 13 | } 14 | vpc-cni = { 15 | most_recent = true 16 | } 17 | aws-ebs-csi-driver = { 18 | most_recent = true 19 | service_account_role_arn = aws_iam_role.ebs_csi_iam_role.arn 20 | } 21 | } 22 | 23 | 24 | cluster_name = data.aws_eks_cluster.unreal_cloud_ddc_cluster.name 25 | cluster_endpoint = data.aws_eks_cluster.unreal_cloud_ddc_cluster.endpoint 26 | cluster_version = data.aws_eks_cluster.unreal_cloud_ddc_cluster.version 27 | oidc_provider_arn = data.aws_iam_openid_connect_provider.oidc_provider.arn 28 | 29 | enable_aws_load_balancer_controller = true 30 | enable_aws_cloudwatch_metrics = true 31 | enable_cert_manager = var.enable_certificate_manager 32 | 33 | cert_manager_route53_hosted_zone_arns = var.certificate_manager_hosted_zone_arn 34 | 35 | 36 | tags = { 37 | Environment = var.cluster_name 38 | } 39 | } 40 | 41 | resource "kubernetes_namespace" "unreal_cloud_ddc" { 42 | depends_on = [module.eks_blueprints_all_other_addons] 43 | metadata { 44 | name = var.unreal_cloud_ddc_namespace 45 | } 46 | } 47 | 48 | resource "kubernetes_service_account" "unreal_cloud_ddc_service_account" { 49 | depends_on = [kubernetes_namespace.unreal_cloud_ddc] 50 | metadata { 51 | name = var.unreal_cloud_ddc_service_account_name 52 | namespace = var.unreal_cloud_ddc_namespace 53 | labels = { aws-usage : "application" } 54 | annotations = { "eks.amazonaws.com/role-arn" : aws_iam_role.unreal_cloud_ddc_sa_iam_role.arn } 55 | } 56 | automount_service_account_token = true 57 | } 58 | 59 | 60 | 61 | ################################################################################ 62 | # Helm 63 
| ################################################################################ 64 | resource "aws_ecr_pull_through_cache_rule" "unreal_cloud_ddc_ecr_pull_through_cache_rule" { 65 | ecr_repository_prefix = "github" 66 | upstream_registry_url = "ghcr.io" 67 | credential_arn = var.ghcr_credentials_secret_manager_arn 68 | } 69 | 70 | resource "helm_release" "unreal_cloud_ddc" { 71 | name = "unreal-cloud-ddc" 72 | chart = "unreal-cloud-ddc" 73 | repository = "oci://${data.aws_caller_identity.current.account_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com/github/epicgames" 74 | namespace = var.unreal_cloud_ddc_namespace 75 | version = "${var.unreal_cloud_ddc_version}+helm" 76 | reset_values = true 77 | depends_on = [ 78 | kubernetes_service_account.unreal_cloud_ddc_service_account, 79 | kubernetes_namespace.unreal_cloud_ddc, 80 | aws_ecr_pull_through_cache_rule.unreal_cloud_ddc_ecr_pull_through_cache_rule 81 | ] 82 | values = var.unreal_cloud_ddc_helm_values 83 | } 84 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_prefix = "${var.project_prefix}-${var.name}" 3 | } 4 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "unreal_cloud_ddc_load_balancer_name" { 2 | value = data.aws_lb.unreal_cloud_ddc_load_balancer.dns_name 3 | } 4 | 5 | output "unreal_cloud_ddc_load_balancer_zone_id" { 6 | value = data.aws_lb.unreal_cloud_ddc_load_balancer.zone_id 7 | } 8 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/variables.tf: 
-------------------------------------------------------------------------------- 1 | ######################################## 2 | # GENERAL CONFIGURATION 3 | ######################################## 4 | 5 | variable "name" { 6 | description = "Unreal Cloud DDC Workload Name" 7 | type = string 8 | default = "unreal-cloud-ddc" 9 | validation { 10 | condition = length(var.name) > 1 && length(var.name) <= 50 11 | error_message = "The defined 'name' has too many characters. This can cause deployment failures for AWS resources with smaller character limits. Please reduce the character count and try again." 12 | } 13 | } 14 | 15 | variable "project_prefix" { 16 | type = string 17 | description = "The project prefix for this workload. This is appended to the beginning of most resource names." 18 | default = "cgd" 19 | } 20 | 21 | variable "tags" { 22 | type = map(any) 23 | default = { 24 | "ModuleBy" = "CGD-Toolkit" 25 | "ModuleName" = "Unreal DDC" 26 | "IaC" = "Terraform" 27 | } 28 | description = "Tags to apply to resources." 29 | } 30 | 31 | variable "cluster_name" { 32 | type = string 33 | description = "Name of the EKS Cluster" 34 | } 35 | variable "cluster_oidc_provider_arn" { 36 | type = string 37 | description = "ARN of the OIDC Provider from EKS Cluster" 38 | } 39 | 40 | variable "s3_bucket_id" { 41 | type = string 42 | description = "ID of the S3 Bucket for Unreal Cloud DDC to use" 43 | } 44 | 45 | variable "unreal_cloud_ddc_namespace" { 46 | type = string 47 | description = "Namespace for Unreal Cloud DDC" 48 | default = "unreal-cloud-ddc" 49 | } 50 | 51 | variable "unreal_cloud_ddc_helm_values" { 52 | type = list(string) 53 | description = "List of YAML files for Unreal Cloud DDC" 54 | default = [] 55 | } 56 | 57 | variable "ghcr_credentials_secret_manager_arn" { 58 | type = string 59 | description = "Arn for credentials stored in secret manager. Needs to be prefixed with 'ecr-pullthroughcache/' to be compatible with ECR pull through cache." 
60 | validation { 61 | condition = length(regexall("ecr-pullthroughcache/", var.ghcr_credentials_secret_manager_arn)) > 0 62 | error_message = "Needs to be prefixed with 'ecr-pullthroughcache/' to be compatible with ECR pull through cache." 63 | } 64 | } 65 | 66 | variable "oidc_credentials_secret_manager_arn" { 67 | type = string 68 | description = "Arn for oidc credentials stored in secret manager." 69 | default = null 70 | } 71 | 72 | variable "unreal_cloud_ddc_version" { 73 | type = string 74 | description = "Version of the Unreal Cloud DDC Helm chart." 75 | default = "1.2.0" 76 | } 77 | 78 | variable "unreal_cloud_ddc_service_account_name" { 79 | type = string 80 | description = "Name of Unreal Cloud DDC service account." 81 | default = "unreal-cloud-ddc-sa" 82 | } 83 | 84 | variable "certificate_manager_hosted_zone_arn" { 85 | type = list(string) 86 | description = "ARN of the Certificate Manager for Ingress." 87 | default = [] 88 | } 89 | 90 | variable "enable_certificate_manager" { 91 | type = bool 92 | description = "Enable Certificate Manager for Ingress. Required for TLS termination." 93 | default = false 94 | validation { 95 | condition = var.enable_certificate_manager ? length(var.certificate_manager_hosted_zone_arn) > 0 : true 96 | error_message = "Certificate Manager hosted zone ARN is required." 
97 | } 98 | } 99 | -------------------------------------------------------------------------------- /modules/unreal/unreal-cloud-ddc/unreal-cloud-ddc-intra-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.10.3" 3 | required_providers { 4 | kubernetes = { 5 | source = "hashicorp/kubernetes" 6 | version = ">=2.33.0" 7 | } 8 | helm = { 9 | source = "hashicorp/helm" 10 | version = ">=2.16.0" 11 | } 12 | aws = { 13 | source = "hashicorp/aws" 14 | version = ">=5.73.0" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /samples/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Samples represent a reference implementation that can be copied, modified and deployed to solve for a specific use-case or workload. These are Terraform configurations and integrations with other common AWS workloads and services. Each sample will provide its own documentation and instructions that follows the template below: 4 | 5 | #### 1) Predeployment 6 | 7 | In the **predeployment** phase the user is instructed to provision or take note of any necessary pre-existing resources. Creating SSL certificates or keypairs, provisioning Amazon Machine Images (AMIs) with Packer, or documenting existing resource IDs and names all fall into this phase. 8 | 9 | #### 2) Deployment 10 | 11 | In the **deployment** phase the user is instructed to run `terraform apply` on one or more Terraform configurations with the appropriate variables. 12 | 13 | #### 3) Postdeployment 14 | 15 | Finally, the **postdeployment** phase includes any Ansible playbooks or remote execution instructions for configuring the applications that have been deployed. These may be automated or manual steps. 
-------------------------------------------------------------------------------- /samples/simple-build-pipeline/locals.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | data "aws_ami" "ubuntu_noble_amd" { 4 | most_recent = true 5 | filter { 6 | name = "name" 7 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | 15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | 20 | filter { 21 | name = "architecture" 22 | values = [ 23 | "x86_64" 24 | ] 25 | } 26 | 27 | owners = ["amazon"] 28 | } 29 | 30 | 31 | locals { 32 | project_prefix = "cgd" 33 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 34 | 35 | # Subdomains 36 | perforce_subdomain = "perforce" 37 | p4_auth_subdomain = "auth" 38 | p4_code_review_subdomain = "review" 39 | jenkins_subdomain = "jenkins" 40 | 41 | # P4 Server Domain 42 | p4_server_fully_qualified_domain_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 43 | 44 | # P4Auth Domain 45 | p4_auth_fully_qualified_domain_name = "${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 46 | 47 | # P4 Code Review 48 | p4_code_review_fully_qualified_domain_name = "${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" 49 | 50 | # Jenkins Domain 51 | jenkins_fully_qualified_domain_name = "${local.jenkins_subdomain}.${var.route53_public_hosted_zone_name}" 52 | 53 | # Jenkins and Build Farm Configurations 54 | jenkins_agent_secret_arns = [] 55 | 56 | build_farm_compute = { 57 | ubuntu_builders : { 58 | ami = data.aws_ami.ubuntu_noble_amd.image_id 59 | instance_type = "t3a.small" 60 | } 61 | } 62 | 63 | build_farm_fsx_openzfs_storage = { 64 | /* Example Configuration 65 | cache : { 66 | storage_type = "SSD" 67 | 
throughput_capacity = 160 68 | storage_capacity = 256 69 | deployment_type = "MULTI_AZ_1" 70 | route_table_ids = [aws_route_table.private_rt.id] 71 | } 72 | workspace : { 73 | storage_type = "SSD" 74 | throughput_capacity = 160 75 | storage_capacity = 564 76 | deployment_type = "MULTI_AZ_1" 77 | route_table_ids = [aws_route_table.private_rt.id] 78 | } 79 | */ 80 | } 81 | 82 | # VPC Configuration 83 | vpc_cidr_block = "10.0.0.0/16" 84 | public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 85 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 86 | 87 | tags = { 88 | environment = "dev" 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /samples/simple-build-pipeline/outputs.tf: -------------------------------------------------------------------------------- 1 | output "p4_server_connection_string" { 2 | value = "ssl:${local.p4_server_fully_qualified_domain_name}:1666" 3 | description = "The connection string for the P4 Server. Set your P4PORT environment variable to this value." 4 | } 5 | 6 | output "p4_code_review_url" { 7 | value = "https://${local.p4_code_review_fully_qualified_domain_name}" 8 | description = "The URL for the P4 Code Review service." 9 | } 10 | 11 | output "p4_auth_admin_url" { 12 | value = "https://${local.p4_auth_fully_qualified_domain_name}/admin" 13 | description = "The URL for the P4Auth service admin page." 14 | } 15 | 16 | output "jenkins_url" { 17 | value = "https://${local.jenkins_fully_qualified_domain_name}" 18 | description = "The URL for the Jenkins service." 19 | } 20 | -------------------------------------------------------------------------------- /samples/simple-build-pipeline/variables.tf: -------------------------------------------------------------------------------- 1 | variable "route53_public_hosted_zone_name" { 2 | type = string 3 | description = "The fully qualified domain name of your existing Route53 Hosted Zone." 
4 | } 5 | -------------------------------------------------------------------------------- /samples/simple-build-pipeline/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "5.97.0" 8 | } 9 | netapp-ontap = { 10 | source = "NetApp/netapp-ontap" 11 | version = "2.1.0" 12 | } 13 | http = { 14 | source = "hashicorp/http" 15 | version = "3.5.0" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/assets/sanity_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script pulls the Cloud DDC NLB DNS record and the bearer token secret value from AWS. 4 | # It then curls the Cloud DDC API to put a blob, and then curls the Cloud DDC API to get that same blob. 5 | # It compares the two to ensure that the Cloud DDC service is running as expected. 
6 | 7 | # Query AWS for the unreal-cloud-ddc NLB DNS name and save it to a local variable 8 | unreal_cloud_ddc_nlb_dns_name=$(aws elbv2 describe-load-balancers --names unreal-cloud-ddc --query 'LoadBalancers[*].DNSName' --output text) 9 | 10 | # Query AWS for the bearer token secret value and save it to a local variable 11 | bearer_token_secret_value=$(aws secretsmanager get-secret-value --secret-id unreal-cloud-ddc-token --query 'SecretString' --output text) 12 | 13 | echo "********************" 14 | echo "*Putting test data:*" 15 | echo "********************" 16 | echo "" 17 | # Curl the Cloud DDC API to PUT a "test" data blob 18 | curl http://$unreal_cloud_ddc_nlb_dns_name/api/v1/refs/ddc/default/00000000000000000000000000000000000000aa -X PUT --data 'test' -H 'content-type: application/octet-stream' -H 'X-Jupiter-IoHash: 4878CA0425C739FA427F7EDA20FE845F6B2E46BA' -i -H "Authorization: ServiceAccount $bearer_token_secret_value" 19 | echo "" 20 | echo "" 21 | echo "********************" 22 | echo "*Getting test data:*" 23 | echo "********************" 24 | echo "" 25 | # Curl the Cloud DDC API to GET the "test" data blob back 26 | curl http://$unreal_cloud_ddc_nlb_dns_name/api/v1/refs/ddc/default/00000000000000000000000000000000000000aa.json -i -H "Authorization: ServiceAccount $bearer_token_secret_value" 27 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/dns.tf: -------------------------------------------------------------------------------- 1 | variable "route53_public_hosted_zone_name" { 2 | type = string 3 | description = "The root domain name for the Hosted Zone where the ScyllaDB monitoring record should be created." 
4 | } 5 | 6 | ########################################## 7 | # Route53 Hosted Zone for Root 8 | ########################################## 9 | data "aws_route53_zone" "root" { 10 | name = var.route53_public_hosted_zone_name 11 | private_zone = false 12 | } 13 | 14 | # Create a record in the Hosted Zone for the scylla_monitoring server 15 | resource "aws_route53_record" "unreal_cloud_ddc" { 16 | depends_on = [module.unreal_cloud_ddc_infra, module.unreal_cloud_ddc_intra_cluster] 17 | zone_id = data.aws_route53_zone.root.id 18 | name = "ddc.${data.aws_route53_zone.root.name}" 19 | type = "A" 20 | 21 | alias { 22 | name = module.unreal_cloud_ddc_intra_cluster.unreal_cloud_ddc_load_balancer_name 23 | zone_id = module.unreal_cloud_ddc_intra_cluster.unreal_cloud_ddc_load_balancer_zone_id 24 | evaluate_target_health = false 25 | } 26 | } 27 | 28 | ########################################## 29 | # Route53 Hosted Zone for Monitoring 30 | ########################################## 31 | 32 | # Create a record in the Hosted Zone for the scylla_monitoring server 33 | resource "aws_route53_record" "scylla_monitoring" { 34 | zone_id = data.aws_route53_zone.root.id 35 | name = "monitoring.ddc.${data.aws_route53_zone.root.name}" 36 | type = "A" 37 | 38 | alias { 39 | name = module.unreal_cloud_ddc_infra.external_alb_dns_name 40 | zone_id = module.unreal_cloud_ddc_infra.external_alb_zone_id 41 | evaluate_target_health = false 42 | } 43 | } 44 | 45 | # Create a certificate for the scylla_monitoring server 46 | resource "aws_acm_certificate" "scylla_monitoring" { 47 | domain_name = "monitoring.ddc.${data.aws_route53_zone.root.name}" 48 | validation_method = "DNS" 49 | 50 | tags = { 51 | Environment = "test" 52 | } 53 | lifecycle { 54 | create_before_destroy = true 55 | } 56 | } 57 | 58 | resource "aws_route53_record" "scylla_monitoring_cert" { 59 | for_each = { 60 | for dvo in aws_acm_certificate.scylla_monitoring.domain_validation_options : dvo.domain_name => { 61 | name = 
dvo.resource_record_name 62 | record = dvo.resource_record_value 63 | type = dvo.resource_record_type 64 | } 65 | } 66 | 67 | allow_overwrite = true 68 | name = each.value.name 69 | records = [each.value.record] 70 | ttl = 60 71 | type = each.value.type 72 | zone_id = data.aws_route53_zone.root.id 73 | } 74 | 75 | resource "aws_acm_certificate_validation" "scylla_monitoring" { 76 | timeouts { 77 | create = "15m" 78 | } 79 | certificate_arn = aws_acm_certificate.scylla_monitoring.arn 80 | validation_record_fqdns = [for record in aws_route53_record.scylla_monitoring_cert : record.fqdn] 81 | } 82 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/local.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | exclude_zone_ids = ["usw2-lax1-az1", "usw2-lax1-az2", "usw2-hnl1-az1", "usw2-las1-az1", "usw2-den1-az1"] 3 | } 4 | 5 | data "aws_region" "current" {} 6 | 7 | data "aws_ecr_authorization_token" "token" {} 8 | 9 | data "aws_caller_identity" "current" {} 10 | 11 | locals { 12 | azs = slice(data.aws_availability_zones.available.names, 0, 2) 13 | 14 | tags = { 15 | Environment = "cgd" 16 | Application = "unreal-cloud-ddc" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/outputs.tf: -------------------------------------------------------------------------------- 1 | output "unreal_ddc_url" { 2 | value = aws_route53_record.unreal_cloud_ddc.name 3 | } 4 | 5 | output "monitoring_url" { 6 | value = aws_route53_record.scylla_monitoring.name 7 | } 8 | 9 | output "unreal_cloud_ddc_bearer_token_arn" { 10 | value = awscc_secretsmanager_secret.unreal_cloud_ddc_token.id 11 | } 12 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/providers.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 5.89.0" 6 | } 7 | 8 | random = { 9 | source = "hashicorp/random" 10 | version = "3.5.1" 11 | } 12 | kubernetes = { 13 | source = "hashicorp/kubernetes" 14 | version = ">= 2.24.0" 15 | } 16 | helm = { 17 | source = "hashicorp/helm" 18 | version = ">= 2.9.0" 19 | } 20 | http = { 21 | source = "hashicorp/http" 22 | version = ">= 3.4.5" 23 | } 24 | awscc = { 25 | source = "hashicorp/awscc" 26 | version = ">= 1.26.0" 27 | } 28 | } 29 | required_version = ">= 1.10.3" 30 | } 31 | 32 | provider "kubernetes" { 33 | host = module.unreal_cloud_ddc_infra.cluster_endpoint 34 | cluster_ca_certificate = base64decode(module.unreal_cloud_ddc_infra.cluster_certificate_authority_data) 35 | exec { 36 | api_version = "client.authentication.k8s.io/v1beta1" 37 | command = "aws" 38 | args = ["eks", "get-token", "--cluster-name", module.unreal_cloud_ddc_infra.cluster_name, "--output", "json"] 39 | } 40 | } 41 | 42 | provider "helm" { 43 | kubernetes { 44 | host = module.unreal_cloud_ddc_infra.cluster_endpoint 45 | cluster_ca_certificate = base64decode(module.unreal_cloud_ddc_infra.cluster_certificate_authority_data) 46 | exec { 47 | api_version = "client.authentication.k8s.io/v1beta1" 48 | command = "aws" 49 | args = ["eks", "get-token", "--cluster-name", module.unreal_cloud_ddc_infra.cluster_name, "--output", "json"] 50 | } 51 | } 52 | registry { 53 | url = "oci://${data.aws_caller_identity.current.account_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com" 54 | username = data.aws_ecr_authorization_token.token.user_name 55 | password = data.aws_ecr_authorization_token.token.password 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "github_credential_arn" { 2 | type = string 3 | sensitive = true 4 | description = "Github Credential ARN" 5 | } 6 | 7 | variable "allow_my_ip" { 8 | type = bool 9 | default = true 10 | description = "Automatically add your IP to the security groups allowing access to the Unreal DDC and SycllaDB Monitoring load balancers" 11 | } 12 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "private_subnet_ids" { 2 | value = aws_subnet.private_subnets[*].id 3 | } 4 | 5 | output "public_subnet_ids" { 6 | value = aws_subnet.public_subnets[*].id 7 | } 8 | 9 | output "vpc_id" { 10 | value = aws_vpc.unreal_cloud_ddc_vpc.id 11 | } 12 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_cidr" { 2 | type = string 3 | description = "VPC CIDR Block" 4 | } 5 | 6 | variable "additional_tags" { 7 | default = {} 8 | description = "Additional resource tags" 9 | type = map(string) 10 | } 11 | 12 | variable "private_subnets_cidrs" { 13 | type = list(string) 14 | description = "Private Subnet CIDR Range" 15 | } 16 | 17 | variable "public_subnets_cidrs" { 18 | type = list(string) 19 | description = "Public Subnet CIDR Range" 20 | } 21 | 22 | variable "availability_zones" { 23 | type = list(string) 24 | description = "Availability Zones" 25 | } 26 | -------------------------------------------------------------------------------- /samples/unreal-cloud-ddc-single-region/vpc/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.5" 3 | required_providers { 4 | 
aws = { 5 | source = "hashicorp/aws" 6 | version = ">=5.69.0" 7 | } 8 | } 9 | } 10 | --------------------------------------------------------------------------------
{{ page.meta.hero.subtitle }}
{{ feature.description }}