├── .gitattributes ├── .github └── workflows │ └── main.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── ansible ├── README.md ├── ansible.cfg ├── apply ├── envs │ ├── dev-example │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── monitoring.yml │ │ │ └── playground.yml │ │ └── hosts │ ├── prod │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── dev-desktop.yml │ │ │ ├── monitoring.yml │ │ │ ├── playground.yml │ │ │ └── rustc-perf.yml │ │ ├── host_vars │ │ │ ├── crater-aws-1.infra.rust-lang.org.yml │ │ │ ├── crater-gcp-1.infra.rust-lang.org.yml │ │ │ ├── crater-gcp-2.infra.rust-lang.org.yml │ │ │ ├── crater-gcp-3.infra.rust-lang.org.yml │ │ │ └── crater-gcp-4.infra.rust-lang.org.yml │ │ └── hosts │ └── staging │ │ ├── group_vars │ │ ├── all.yml │ │ ├── dev-desktop.yml │ │ └── docs-rs-builder.yml │ │ └── hosts ├── group_vars │ └── all.yml ├── playbooks │ ├── bastion.yml │ ├── crater-server.yml │ ├── dev-desktop.yml │ ├── docs-rs-builder.yml │ ├── monitoring.yml │ ├── playground.yml │ └── rustc-perf.yml ├── requirements.txt ├── requirements.yml └── roles │ ├── backup │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── backup-restic.service │ │ ├── backup-restic.timer │ │ └── backup.py │ ├── bastion │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── sshd_config │ ├── common │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── ssh-keys │ │ │ ├── _master.pub │ │ │ ├── acrichto.pub │ │ │ ├── aidanhs.pub │ │ │ ├── guillaumegomez.pub │ │ │ ├── jdn.pub │ │ │ ├── joshua.pub │ │ │ ├── kobzol.pub │ │ │ ├── marcoieni.pub │ │ │ ├── nemo157.pub │ │ │ ├── oli-obk.pub │ │ │ ├── onur.pub │ │ │ ├── pietro.pub │ │ │ ├── rylev.pub │ │ │ ├── shep.pub │ │ │ ├── simulacrum.pub │ │ │ ├── syphar.pub │ │ │ └── technetos.pub │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── apt.yml │ │ ├── backup.yml │ │ ├── cleanup.yml │ │ ├── main.yml │ │ 
├── metrics.yml │ │ ├── networking.yml │ │ ├── papertrail.yml │ │ ├── services.yml │ │ ├── ssh.yml │ │ └── users.yml │ └── templates │ │ ├── metrics │ │ ├── node_exporter-firewall.sh │ │ └── node_exporter.service │ │ ├── networking │ │ ├── etchosts │ │ ├── firewall.service │ │ └── firewall.sh │ │ ├── papertrail │ │ └── rsyslog.conf │ │ ├── ssh │ │ └── sshd_config │ │ └── users │ │ └── sudoers-passwordless-sudo │ ├── dev-desktop │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── git-credential-dev-desktop │ │ ├── git-credential-dev-desktop-inner │ │ ├── podman │ │ │ └── storage.conf │ │ ├── scripts │ │ │ ├── clean.sh │ │ │ ├── detach_merged_prs.sh │ │ │ ├── help.sh │ │ │ ├── init.sh │ │ │ ├── init_git.py │ │ │ ├── link_rust.sh │ │ │ ├── new_worktree.sh │ │ │ ├── set_defaults.sh │ │ │ ├── setup_rust.sh │ │ │ ├── setup_rustup.sh │ │ │ └── status.sh │ │ ├── skel │ │ │ └── config.toml │ │ └── team_login │ │ │ ├── .gitignore │ │ │ ├── Cargo.lock │ │ │ ├── Cargo.toml │ │ │ └── src │ │ │ └── main.rs │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── cleanup.yml │ │ ├── dependencies.yml │ │ ├── fix_llvm_55575.yml │ │ ├── github.yml │ │ ├── main.yml │ │ ├── motd.yml │ │ ├── oom.yml │ │ ├── podman.yml │ │ ├── quota.yml │ │ ├── scripts.yml │ │ ├── services.yml │ │ ├── team_login.yml │ │ ├── user_configuration.yml │ │ └── usermod.yml │ └── templates │ │ ├── 10-perf-event-paranoid.conf │ │ ├── clean-unused-checkouts.sh │ │ ├── cron_cleanup_disk_space.j2 │ │ ├── cron_team_login.j2 │ │ ├── firewall.sh │ │ ├── gitconfig │ │ ├── motd_rules │ │ ├── motd_sshd_config │ │ └── sudoers │ ├── docker │ ├── README.md │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── containers.yml │ │ ├── install.yml │ │ ├── main.yml │ │ └── update-images.yml │ └── templates │ │ ├── containers │ │ └── container.service │ │ ├── firewall.sh │ │ └── update-images │ │ ├── aws-credentials │ │ ├── docker-config.json │ │ ├── docker-images-pull.service │ │ ├── docker-images-update.service │ │ ├── 
docker-images-update.timer │ │ ├── sudoers │ │ └── update.sh │ ├── docs-rs-builder │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── builder.service │ │ ├── daemon.json │ │ └── sudoers │ ├── letsencrypt │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── install.yml │ │ ├── main.yml │ │ ├── pebble.yml │ │ └── renewer.yml │ └── templates │ │ ├── after-renew.sh │ │ ├── pebble │ │ ├── certs │ │ │ ├── localhost │ │ │ │ ├── cert.pem │ │ │ │ └── key.pem │ │ │ ├── pebble.minica.key.pem │ │ │ └── pebble.minica.pem │ │ ├── config.json │ │ └── pebble.service │ │ ├── renew-ssl-certs.service │ │ ├── renew-ssl-certs.sh │ │ └── renew-ssl-certs.timer │ ├── monitoring-server │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── alertmanager.yml │ │ ├── grafana.yml │ │ ├── main.yml │ │ ├── prometheus-ecs-discovery.yml │ │ └── prometheus.yml │ └── templates │ │ ├── alertmanager │ │ ├── alertmanager.service │ │ └── alertmanager.yml │ │ ├── grafana │ │ ├── grafana.ini │ │ └── provisioning │ │ │ └── datasources │ │ │ └── prometheus.yml │ │ ├── prometheus-ecs-discovery │ │ └── prometheus-ecs-discovery.service │ │ └── prometheus │ │ ├── prometheus.service │ │ ├── prometheus.yml │ │ └── rules.yml │ ├── nginx │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── after-ssl-renew.sh │ │ ├── firewall.sh │ │ ├── nginx.conf │ │ └── override.conf │ ├── playground │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── containerd-override.conf │ │ ├── daemon.json │ │ ├── docker-override.conf │ │ ├── gc.sh │ │ ├── playground-gc.service │ │ ├── playground-gc.timer │ │ ├── playground-update.service │ │ ├── playground-update.timer │ │ ├── playground.service │ │ ├── playground.slice │ │ ├── sudoers │ │ └── 
update.sh │ └── postgresql │ ├── README.md │ ├── defaults │ └── main.yml │ ├── tasks │ ├── backup.yml │ ├── databases.yml │ ├── main.yml │ └── setup.yml │ └── templates │ └── backup │ ├── generate.sh │ └── manifest.json ├── aws-creds.py ├── aws-psql.py ├── aws-rollback.py ├── github-actions ├── README.md └── upload-docker-image │ ├── README.md │ ├── action.yml │ ├── dist │ └── index.js │ ├── index.js │ ├── package-lock.json │ └── package.json ├── invalidate-dev-static-stable.sh ├── release-scripts ├── promote-release.py └── tag-cargo.sh ├── run-task.sh ├── setup-deploy-keys ├── Cargo.toml └── src │ ├── deploy.rs │ └── main.rs ├── terraform ├── README.md ├── bastion │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ ├── firewall.tf │ └── instance.tf ├── bors │ ├── .terraform.lock.hcl │ ├── _config.auto.tfvars │ ├── _terraform.tf │ ├── app.tf │ └── repositories.tf ├── crater │ ├── .terraform.lock.hcl │ ├── README.md │ ├── agent.tf │ ├── ci.tf │ ├── instance.tf │ ├── main.tf │ ├── startup-script.sh │ └── update.sh ├── crates-io-heroku-metrics │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ └── app.tf ├── dev-desktops │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ ├── aws-region │ │ ├── _terraform.tf │ │ ├── instances.tf │ │ └── vpc.tf │ ├── dns.tf │ └── regions.tf ├── discord-mods-bot │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ ├── ci.tf │ └── deployment.tf ├── dns-delegation │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ └── main.tf ├── dns │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _shared.tf │ ├── _terraform.tf │ ├── areweasyncyet.rs.tf │ ├── arewewebyet.org.tf │ ├── crates.io.tf │ ├── cratesio.com.tf │ ├── docsrs.com.tf │ ├── impl │ │ ├── main.tf │ │ └── variables.tf │ ├── rustaceans.org.tf │ └── rustconf.com.tf ├── docs-rs │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ ├── cloudfront.tf │ ├── rds.tf │ ├── s3.tf │ ├── static-cloudfront.tf │ └── web-server.tf ├── domain-redirects │ ├── .terraform.lock.hcl │ ├── README.md │ 
├── _terraform.tf │ ├── impl │ │ ├── main.tf │ │ └── variables.tf │ └── redirects.tf ├── fastly-exporter │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ └── main.tf ├── monitorbot │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ └── app.tf ├── playground │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ ├── artifacts.tf │ ├── cloudfront.tf │ └── instance.tf ├── rds-databases │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ ├── databases.tf │ └── instance.tf ├── releases │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ ├── environments.tf │ ├── impl │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── promote-release.tf │ │ ├── storage.tf │ │ └── variables.tf │ ├── keys.tf │ ├── lambdas │ │ └── start-release │ │ │ └── index.py │ ├── promote-release-ci.tf │ └── start-release.tf ├── rust-forge │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ └── main.tf ├── rust-log-analyzer │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ └── app.tf ├── rustc-perf │ ├── .terraform.lock.hcl │ ├── _terraform.tf │ ├── ci.tf │ ├── dns.tf │ ├── main.tf │ └── s3.tf ├── shared │ ├── .terraform.lock.hcl │ ├── ci-mirrors-access.tf │ ├── cloudfront-policies.tf │ ├── ec2.tf │ ├── github-actions-oidc.tf │ ├── main.tf │ ├── modules │ │ ├── acm-certificate │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── ecr-repo │ │ │ ├── README.md │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── ecs-app │ │ │ ├── ci.tf │ │ │ ├── deployment.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── ecs-service │ │ │ ├── load-balancer.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── ecs-task │ │ │ ├── iam.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── efs-filesystem │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── gha-iam-user │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── gha-oidc-role │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ 
├── lambda │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── pack.py │ │ │ └── variables.tf │ │ ├── static-website │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ └── vpc │ │ │ ├── gateways.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── private.tf │ │ │ ├── public.tf │ │ │ ├── untrusted.tf │ │ │ └── variables.tf │ ├── outputs.tf │ ├── s3.tf │ ├── services.tf │ ├── services │ │ ├── ecs-cluster │ │ │ ├── load-balancer.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ └── triagebot │ │ │ ├── ci.tf │ │ │ ├── main.tf │ │ │ └── variables.tf │ ├── static-websites.tf │ ├── terraform-state.tf │ └── vpc.tf ├── team-members-access │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _shared.tf │ ├── _terraform.tf │ ├── _users.tf │ ├── crates-io.tf │ ├── docs-rs.tf │ ├── foundation.tf │ ├── infra-admins.tf │ ├── infra-deploy-playground.tf │ ├── infra-deploy-staging-dev-desktop.tf │ ├── infra-team.tf │ ├── mods-discord.tf │ └── rustc-perf.tf ├── team-members-datadog │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _data.tf │ ├── _terraform.tf │ ├── crater.tf │ ├── crates-io.tf │ ├── foundation-board.tf │ ├── foundation.tf │ ├── infra-admins.tf │ ├── infra.tf │ └── users.tf └── team-members-fastly │ ├── .terraform.lock.hcl │ ├── README.md │ ├── _terraform.tf │ └── users.tf ├── terragrunt ├── README.md ├── accounts │ ├── bors-prod │ │ ├── account.json │ │ ├── app │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── dns-zone │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── bors-staging │ │ ├── account.json │ │ ├── app │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── dns-zone │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ 
├── ci-prod │ │ ├── account.json │ │ ├── ci-runners │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── ci-staging │ │ ├── account.json │ │ ├── ci-runners │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── crates-io-prod │ │ ├── account.json │ │ ├── crates-io-logs │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── deployed-ref │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── crates-io-staging │ │ ├── account.json │ │ ├── crates-io-logs │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── dev-desktops-prod │ │ ├── account.json │ │ ├── azure-provider.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── resource-group │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── westeurope │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── westus2 │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── docs-rs-staging │ │ ├── account.json │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── dns-zone │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── docs-rs │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── ecs-cluster │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── vpc │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── legacy │ │ ├── README.md │ │ ├── 
account.json │ │ ├── crates-io-prod │ │ │ ├── crates-io │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ │ └── deployed-ref │ │ ├── crates-io-staging │ │ │ └── crates-io │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── datadog-fastly │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── gha-self-hosted-images │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── releases-dev │ │ │ └── release-distribution │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ ├── releases-prod │ │ │ ├── deployed-ref │ │ │ └── release-distribution │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ ├── rustc-ci-prod │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── rustc-ci-staging │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── rustup-dev │ │ │ └── win-rustup-rs │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ ├── rustup-prod │ │ │ ├── deployed-ref │ │ │ ├── rustup │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ │ └── win-rustup-rs │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ ├── metrics-initiative-prod │ │ ├── account.json │ │ ├── datadog-aws │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ ├── grafana │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ │ └── wiz │ │ │ ├── .terraform.lock.hcl │ │ │ └── terragrunt.hcl │ └── root │ │ ├── account.json │ │ ├── aws-organization │ │ ├── .terraform.lock.hcl │ │ └── terragrunt.hcl │ │ ├── datadog-aws │ │ ├── .terraform.lock.hcl │ │ └── terragrunt.hcl │ │ └── wiz │ │ ├── .terraform.lock.hcl │ │ └── terragrunt.hcl ├── import-state.py ├── modules │ ├── acm-certificate │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── aws-lambda │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── pack.py │ │ └── 
variables.tf │ ├── aws-organization │ │ ├── _terraform.tf │ │ ├── accounts.tf │ │ ├── groups.tf │ │ ├── sso-account-assignment │ │ │ └── main.tf │ │ └── users.tf │ ├── bastion │ │ ├── README.md │ │ ├── firewall.tf │ │ ├── instance.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── bors │ │ └── main.tf │ ├── ci-runners │ │ ├── README.md │ │ ├── codebuild.tf │ │ ├── github_connection.tf │ │ ├── iam.tf │ │ ├── main.tf │ │ └── variables.tf │ ├── codebuild-project │ │ ├── main.tf │ │ ├── project.tf │ │ └── variables.tf │ ├── crates-io-logs │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── crates-io │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── certificate.tf │ │ ├── cloudfront-functions │ │ │ └── static-router.js │ │ ├── cloudfront-index.tf │ │ ├── cloudfront-static.tf │ │ ├── cloudfront-webapp.tf │ │ ├── compute-static │ │ │ ├── .cargo │ │ │ │ └── config.toml │ │ │ ├── .gitignore │ │ │ ├── Cargo.lock │ │ │ ├── Cargo.toml │ │ │ ├── README.md │ │ │ ├── bin │ │ │ │ └── terraform-external-build.sh │ │ │ ├── fastly.toml │ │ │ ├── rust-toolchain.toml │ │ │ └── src │ │ │ │ ├── config.rs │ │ │ │ ├── log_line.rs │ │ │ │ └── main.rs │ │ ├── fastly-iam-role.tf │ │ ├── fastly-static.tf │ │ ├── iam.tf │ │ ├── s3-index.tf │ │ ├── s3-logs.tf │ │ └── s3-static.tf │ ├── datadog-aws │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── datadog-fastly │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── dev-desktops-azure │ │ ├── _terraform.tf │ │ ├── firewall.tf │ │ ├── instances.tf │ │ └── vpc.tf │ ├── dns-zone │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── docs-rs │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── builder.tf │ │ ├── cloudfront.tf │ │ ├── rds.tf │ │ ├── s3.tf │ │ ├── static-cloudfront.tf │ │ ├── variables.tf │ │ └── web-server.tf │ ├── ecr-repo │ │ ├── README.md │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── 
variables.tf │ ├── ecs-app │ │ ├── ci.tf │ │ ├── deployment.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── ecs-cluster │ │ ├── _terraform.tf │ │ ├── cluster.tf │ │ ├── load-balancer.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── ecs-service │ │ ├── load-balancer.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── ecs-task │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── roles.tf │ │ └── variables.tf │ ├── fastly-tls-subscription │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── gha-iam-user │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── gha-oidc-role │ │ └── main.tf │ ├── gha-self-hosted-images │ │ ├── _terraform.tf │ │ ├── cdn.tf │ │ ├── ci.tf │ │ └── storage.tf │ ├── grafana │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── release-distribution │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── certificate.tf │ │ ├── cloudfront-doc.tf │ │ ├── cloudfront-static.tf │ │ ├── data.tf │ │ ├── fastly-iam-role.tf │ │ ├── fastly-log-format.tftpl │ │ ├── fastly-static.tf │ │ ├── lambdas │ │ │ ├── doc-router │ │ │ │ └── index.js │ │ │ └── static-router │ │ │ │ └── index.js │ │ ├── locals.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── resource-group │ │ ├── _terraform.tf │ │ └── resource-group.tf │ ├── rustc-ci │ │ ├── _terraform.tf │ │ ├── artifacts.tf │ │ ├── caches.tf │ │ └── github_env.tf │ ├── rustup │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── certificate.tf │ │ ├── cloudfront.tf │ │ ├── dns.tf │ │ ├── s3.tf │ │ └── variables.tf │ ├── static-website │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── vpc │ │ ├── _terraform.tf │ │ ├── gateways.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── private.tf │ │ ├── public.tf │ │ ├── untrusted.tf │ │ └── variables.tf │ ├── win-rustup-rs │ │ ├── README.md │ │ ├── _terraform.tf │ │ ├── certificate.tf │ │ ├── data.tf │ │ ├── dns.tf │ │ ├── lambdas │ │ │ └── viewer-request │ │ │ │ └── index.js │ │ ├── main.tf │ │ └── 
variables.tf │ └── wiz │ │ ├── _terraform.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── role.tf │ │ └── variables.tf ├── terragrunt-locals.py └── terragrunt.hcl └── with-rust-key.sh /.gitattributes: -------------------------------------------------------------------------------- 1 | github-actions/*/dist/*.js linguist-generated=true 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /ansible/.venv 3 | /ansible/envs/dev 4 | /terraform/shared/modules/lambda/packages 5 | /terraform/rds-databases/.forward-ports-cache-*.json 6 | /terragrunt/modules/aws-lambda/packages 7 | .terraform 8 | node_modules 9 | __pycache__ 10 | *.py[co] 11 | terragrunt-generated-*.tf 12 | .terragrunt-cache 13 | 14 | .aws-creds-mfa-arn 15 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | # Opt-in to the new feature resolver introduced in Rust 1.51 and Edition 2021. 3 | # https://doc.rust-lang.org/cargo/reference/resolver.html#resolver-versions 4 | resolver = "2" 5 | members = [ 6 | "setup-deploy-keys", 7 | "ansible/roles/dev-desktop/files/team_login", 8 | ] 9 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | # Message that will be printed when "{{ ansible_managed }}" is present in a 4 | # template. There is no other purpose for this. 5 | ansible_managed = This file is managed by Ansible. Only change it through the rust-lang/simpleinfra repository. 6 | 7 | # Automatically discover any Python interpreter on the target system. 
8 | ansible_python_interpreter = auto 9 | 10 | # Disable the useless retry files 11 | retry_files_enabled = False 12 | 13 | 14 | [ssh_connection] 15 | 16 | # Use fewer SSH connections, speeding up applying changes. 17 | pipelining = True 18 | 19 | # Reuse SSH connections between runs. 20 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 21 | -------------------------------------------------------------------------------- /ansible/envs/dev-example/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | vars_papertrail_url: null 4 | 5 | vars_letsencrypt_dummy_certs: true 6 | -------------------------------------------------------------------------------- /ansible/envs/dev-example/group_vars/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | vars_backup_password: passw0rd 4 | vars_backup_env: {} 5 | vars_postgresql_grafana_password: passw0rd 6 | vars_grafana_github_oauth_id: aaaaaaaaaaaaaaaaaaaa 7 | vars_grafana_github_oauth_secret: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 8 | 9 | vars_prometheus_monitorbot_secret: secret 10 | 11 | vars_grafana_domain: example.com 12 | vars_grafana_admin_password: passw0rd 13 | 14 | vars_backup_repository: /tmp/backups 15 | vars_alertmanager_receiver_zulip_infra: http://0.0.0.0 16 | vars_alertmanager_receiver_zulip_docsrs: http://0.0.0.0 17 | -------------------------------------------------------------------------------- /ansible/envs/dev-example/group_vars/playground.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | vars_playground_s3_bucket: bucket-name 4 | 5 | vars_playground_aws: 6 | access_key_id: THIS_IS_NOT_A_VALID_TOKEN 7 | secret_access_key: THIS_IS_NOT_A_VALID_TOKEN 8 | vars_playground_env_github_token: THIS_IS_NOT_A_VALID_TOKEN 9 | -------------------------------------------------------------------------------- /ansible/envs/dev-example/hosts: 
-------------------------------------------------------------------------------- 1 | [bastion] 2 | # bastion-server.local 3 | 4 | [monitoring] 5 | # monitoring-server.local 6 | -------------------------------------------------------------------------------- /ansible/envs/prod/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_all: "{{ lookup('aws_ssm', '/prod/ansible/all/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_datadog_api_key: "{{ ssm_all['datadog-api-key'] }}" 7 | 8 | vars_papertrail_url: "{{ ssm_all['papertrail-url'] }}" 9 | 10 | vars_letsencrypt_dummy_certs: false 11 | -------------------------------------------------------------------------------- /ansible/envs/prod/group_vars/dev-desktop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_dev_desktop: "{{ lookup('aws_ssm', '/prod/ansible/dev-desktop/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_extra_sudo_users: [] 7 | 8 | vars_github_app_id: 196781 9 | vars_github_app_private_key: "{{ ssm_dev_desktop['github-app-private-key'] }}" 10 | -------------------------------------------------------------------------------- /ansible/envs/prod/group_vars/playground.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_playground: "{{ lookup('aws_ssm', '/prod/ansible/playground/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_extra_sudo_users: 7 | - shep 8 | 9 | vars_playground_s3_bucket: rust-playground-artifacts 10 | 11 | vars_playground_env_github_token: "{{ ssm_playground['github-token'] }}" 12 | -------------------------------------------------------------------------------- 
/ansible/envs/prod/group_vars/rustc-perf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | vars_extra_sudo_users: 4 | - "kobzol" 5 | -------------------------------------------------------------------------------- /ansible/envs/prod/host_vars/crater-aws-1.infra.rust-lang.org.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_crater_aws_1: "{{ lookup('aws_ssm', '/prod/ansible/crater-aws-1/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_crater_token: "{{ ssm_crater_aws_1['crater-token'] }}" 7 | vars_crater_threads: 16 8 | -------------------------------------------------------------------------------- /ansible/envs/prod/host_vars/crater-gcp-1.infra.rust-lang.org.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_crater_gcp_1: "{{ lookup('aws_ssm', '/prod/ansible/crater-gcp-1/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_crater_token: "{{ ssm_crater_gcp_1['crater-token'] }}" 7 | vars_crater_threads: 45 8 | -------------------------------------------------------------------------------- /ansible/envs/prod/host_vars/crater-gcp-2.infra.rust-lang.org.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_crater_gcp_2: "{{ lookup('aws_ssm', '/prod/ansible/crater-gcp-2/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_crater_token: "{{ ssm_crater_gcp_2['crater-token'] }}" 7 | vars_crater_threads: 45 8 | -------------------------------------------------------------------------------- /ansible/envs/prod/host_vars/crater-gcp-3.infra.rust-lang.org.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_crater_gcp_3: "{{ lookup('aws_ssm', '/prod/ansible/crater-gcp-3/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_crater_token: "{{ ssm_crater_gcp_3['crater-token'] }}" 7 | vars_crater_threads: 45 8 | -------------------------------------------------------------------------------- /ansible/envs/prod/host_vars/crater-gcp-4.infra.rust-lang.org.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_crater_gcp_4: "{{ lookup('aws_ssm', '/prod/ansible/crater-gcp-4/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_crater_token: "{{ ssm_crater_gcp_4['crater-token'] }}" 7 | vars_crater_threads: 45 8 | -------------------------------------------------------------------------------- /ansible/envs/prod/hosts: -------------------------------------------------------------------------------- 1 | [bastion] 2 | bastion.infra.rust-lang.org 3 | 4 | [monitoring] 5 | monitoring.infra.rust-lang.org 6 | 7 | [playground] 8 | play-2.infra.rust-lang.org 9 | 10 | [dev-desktop] 11 | dev-desktop-eu-1.infra.rust-lang.org 12 | dev-desktop-eu-2.infra.rust-lang.org 13 | dev-desktop-us-1.infra.rust-lang.org 14 | dev-desktop-us-2.infra.rust-lang.org 15 | 16 | [crater-server] 17 | crater.infra.rust-lang.org 18 | 19 | [rustc-perf] 20 | rustc-perf-one.infra.rust-lang.org 21 | -------------------------------------------------------------------------------- /ansible/envs/staging/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_all: "{{ lookup('aws_ssm', '/staging/ansible/all/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_datadog_api_key: 
"{{ ssm_all['datadog-api-key'] }}" 7 | 8 | # Do not log to Papertrail in the staging environment. 9 | vars_papertrail_url: null 10 | 11 | # Staging instances have access to the internet, so we can generate 12 | # certificates from them without any problem. 13 | vars_letsencrypt_dummy_certs: false 14 | -------------------------------------------------------------------------------- /ansible/envs/staging/group_vars/dev-desktop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Fetch all the group params from AWS SSM 4 | ssm_dev_desktop: "{{ lookup('aws_ssm', '/staging/ansible/dev-desktop/', region='us-west-1', shortnames=true, bypath=true, recursive=true) }}" 5 | 6 | vars_extra_sudo_users: 7 | - oli-obk 8 | 9 | vars_github_app_id: 196783 10 | vars_github_app_private_key: "{{ ssm_dev_desktop['github-app-private-key'] }}" 11 | -------------------------------------------------------------------------------- /ansible/envs/staging/group_vars/docs-rs-builder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | sha: "{{ lookup('aws_ssm', '/docs-rs/builder/sha') }}" 4 | vars_repository_sha: "{{ sha | ternary(sha, 'HEAD') }}" -------------------------------------------------------------------------------- /ansible/envs/staging/hosts: -------------------------------------------------------------------------------- 1 | [dev-desktop] 2 | dev-desktop-staging.infra.rust-lang.org 3 | 4 | [bastion] 5 | bastion.docs-rs-staging.rust-lang.net 6 | -------------------------------------------------------------------------------- /ansible/playbooks/dev-desktop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: dev-desktop 4 | become: yes 5 | become_user: root 6 | 7 | roles: 8 | 9 | - role: common 10 | papertrail_url: "{{ vars_papertrail_url }}" 11 | collect_metrics_from: "{{ global_collect_metrics_from }}" 12 | sudo_users: "{{ 
global_sudo_users + vars_extra_sudo_users }}" 13 | allow_ssh_extra_groups: dev-desktop-allow-ssh 14 | 15 | - role: datadog.datadog 16 | vars: 17 | datadog_api_key: "{{ vars_datadog_api_key }}" 18 | datadog_site: "datadoghq.com" 19 | 20 | datadog_config: 21 | tags: 22 | - "env:{{ vars_environment }}" 23 | - "service:dev-desktops" 24 | process_config: 25 | enabled: "true" 26 | 27 | - role: dev-desktop 28 | -------------------------------------------------------------------------------- /ansible/playbooks/docs-rs-builder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: docs-rs-builder 4 | become: yes 5 | become_user: root 6 | 7 | roles: 8 | 9 | - role: common 10 | papertrail_url: "{{ vars_papertrail_url }}" 11 | collect_metrics_from: "{{ global_collect_metrics_from }}" 12 | sudo_users: "{{ global_sudo_users }}" 13 | 14 | - role: docs-rs-builder 15 | -------------------------------------------------------------------------------- /ansible/playbooks/rustc-perf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: rustc-perf 4 | become: yes 5 | become_user: root 6 | 7 | roles: 8 | - role: common 9 | sudo_users: "{{ global_sudo_users + vars_extra_sudo_users }}" 10 | 11 | - role: datadog.datadog 12 | vars: 13 | datadog_api_key: "{{ vars_datadog_api_key }}" 14 | datadog_site: "datadoghq.com" 15 | 16 | datadog_config: 17 | tags: 18 | - "env:{{ vars_environment }}" 19 | - "service:rustc-perf" 20 | process_config: 21 | enabled: "true" 22 | -------------------------------------------------------------------------------- /ansible/requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==8.7.0 2 | boto3==1.26.94 3 | passlib==1.7.4 4 | -------------------------------------------------------------------------------- /ansible/requirements.yml: 
-------------------------------------------------------------------------------- 1 | # Requirements for Ansible 2 | # 3 | # Dependencies are installed from Ansible Galaxy. See the documentation for more information: 4 | # https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#installing-multiple-roles-from-a-file 5 | --- 6 | - src: datadog.datadog 7 | -------------------------------------------------------------------------------- /ansible/roles/backup/README.md: -------------------------------------------------------------------------------- 1 | # `backup` role 2 | 3 | This role setups a timer to backup the data on the machine with [restic]. 4 | 5 | [restic]: https://restic.net 6 | -------------------------------------------------------------------------------- /ansible/roles/backup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # See this role's README for documentation about these defaults. 4 | env: {} 5 | -------------------------------------------------------------------------------- /ansible/roles/backup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: reload-systemd 4 | command: systemctl daemon-reload 5 | -------------------------------------------------------------------------------- /ansible/roles/backup/templates/backup-restic.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=backup with restic 7 | 8 | [Service] 9 | Type=oneshot 10 | ExecStart=/usr/local/bin/backup-restic 11 | 12 | WorkingDirectory=/home/local-backup 13 | User=local-backup 14 | Group=local-backup 15 | -------------------------------------------------------------------------------- /ansible/roles/backup/templates/backup-restic.timer: -------------------------------------------------------------------------------- 1 | # 2 | # {{ 
ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=automatic restic backup 7 | 8 | [Timer] 9 | OnCalendar={{ interval }} 10 | Persistent=true 11 | 12 | [Install] 13 | WantedBy=timers.target 14 | -------------------------------------------------------------------------------- /ansible/roles/bastion/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: reload-ssh 4 | service: 5 | name: ssh 6 | state: restarted 7 | -------------------------------------------------------------------------------- /ansible/roles/bastion/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install the psql client 4 | apt: 5 | name: postgresql-client 6 | state: present 7 | 8 | - name: allow forwarding the PGPASSWORD environment variable with ssh 9 | template: 10 | src: sshd_config 11 | dest: /etc/ssh/sshd_config.d/bastion 12 | mode: 0700 13 | 14 | notify: 15 | - reload-ssh 16 | -------------------------------------------------------------------------------- /ansible/roles/bastion/templates/sshd_config: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | # Allow our tooling to forward the database password used by psql inside the 6 | # bastion instance. This prevents leaking the password to other users. 7 | AcceptEnv PGPASSWORD 8 | -------------------------------------------------------------------------------- /ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # See this role's README for documentation about these defaults. 
4 | unprivileged_users: [] 5 | sudo_users: [] 6 | collect_metrics_from: [] 7 | avoid_removing_docker_users: [] 8 | papertrail_url: null 9 | allow_ssh_extra_groups: "" 10 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/_master.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdGoRV9XPamZwqCMr4uk1oHWPnknzwOOSjuRBnu++WRkn7TtCM4ndDfqtKnvzlX5mzPhdvO1KKx1K8TiJ3wiq7WS4AFLGKQmPHWjg8qxGW7x4S8DHrb4ctmaujZ1+XCNSK3nsCl1lLW8DOrRlKbfeHIAllbMBZxIRmQ+XICVvhKAmSmxzTmYC8tBqvqQprG/uIuKonjLxL/ljtBxXBNECXl/JFCYG0AsB0aiuiMVeHLVzMiEppQ7YP/5Ml1Rpmn6h0dDzFtoD7xenroS98BIQF5kQWhakHbtWcNMz7DVFghWgi9wYr0gtoIshhqWYorC4yJq6HGXd0qdNHuLWNz39h buildbot-west-slave-key 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/guillaumegomez.pub: -------------------------------------------------------------------------------- 1 | ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJuAyCPfA0YvzSaBlRiMlW8iwAparFXhSKQiY2ptgiLR/h/QFoml1+tfi0oc0hobQb2mbHW1ysG6P8uhVVM//zQ= imperio@imperio-ThinkPad-P15-Gen-1 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/joshua.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCq58SNpVf4jk1Vs5W7Ur4yap/9vSyuKSIo+ZmBHtMwt0sRxqlpaCm7SgxOcggyItzCRgjJd4X4fi5MNufDD8HB26YPO7fC6VJha+CccBX+y67xjGxWEJW8ERSnyD5T6Pf+g9kWQCov6IJreKS7tn0j/rH4GW9EVB+T4G7T4b6anmSz49L8AII/rYDYCYcKU+JSl6fQGrliZP12NZJCW9T5nOhqTBEQBnIrTuUeD2Jdb39jloQ4Xob3UPld63ciWWIOWPF/TL4B3ILeAAz5+yljtI0H5jXmLNkN2noOitZp+9hdD/w7gpcn6WQeQV9hQJRwNhvx0xXfZsnsqkn1Qrvd joshua@debian-acer 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/kobzol.pub: 
-------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC6HWdu310gT8khULTEG1EEx/gLUztZiUxapcTUtFclK berykubik@gmail.com 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/marcoieni.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBrmhY/PexymDRulNHnF1L89GLS9sa/RfDU0kd6NjIX3 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/nemo157.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM2xzZhKE1TzvJmhxT/q4v7vPC1zd9+OOjjaY/GUp73P 2 | sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIP7NDZk2lJA+PejK0BiIgzRPX80THcMXzepuxHNiCNdlAAAABHNzaDo= 3 | sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIE5gbS2iAoFj4gbPQrvb/YIUN9WL91CKPPeT8brQ71omAAAABHNzaDo= 4 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/onur.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9zaqu2ovOhuFxEpaqkCjq04Lb9FaLRJa7avWPXQaSaHJJQVXH0jv25j+jl3X8rvF/Q640YLXucs/tcV58yj7EnFNA7aoVLN+9iugFKfSOK9tF3f1qs8VPfRROWay8gjdAvIZaGaj2trKVCKZjAouNACY3zM1Igs4Cqrtv0y8/X7p7yWSoKAp8IFGX877iamx6U3znUiE2fAe67oURT2NyOV7oh3bf+7WGTM+fi9oBxsWDNei+KQWNSmEurFfOyX9DgbQ8/84rS855qV8XJ7t11A1TwYr8/xr1WuzawDGc01gQKw4WqFREJHt4FbtD6vMSx4oIm/Rs/bnNRXh4wrj3 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/shep.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCiqtpfx4sjF5qVLP11O/y/xOpgsyPVsw56GzDfDidOZpBnXg92Fl2gK/7jNctL7YIvyr93z2O5W74Yz1GkU6Nudvc3X7wqzbt/w9lKK+xbi00i3Z+2MLQ4p5iAHqdLboYJJb2S0SUfeBrxVy2dJdTw8lV2BiWwJuckBHm86/fCJbKv4ZEmf0YuyfB5aTeiz3lRTeJAlIlbBG7EPggj+XVVPD3s466X15Pvi6fBMrM8586VNLV0xf9hRHjNqk6VSzZbvl0i0pF671GH/SFrlGLebs+0M9cThR71UPzOT4XOP5jawKJ192jjKvAxPkt0hIj3I8JXGzFO7+fP+0zzOKoL 2 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAzkQtcsKybO9+wfLbo7QHCKJAevTvGtRH/w9I8O9DhN 3 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/simulacrum.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIINX/C1IquKxu3HDm2AVUPQIzd3fbaJdbb7+4d/Z26oP 2 | -------------------------------------------------------------------------------- /ansible/roles/common/files/ssh-keys/syphar.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaFaEoHU+J/0x5KRrWwEG9P0drMfPhT9/s8oX5xvbr5hvcvhenhEfufVQr/S5hAOZ5VUJTR34z6l83FD/Tjo1evo+O5clZ6uO+GAUA7QKtoLZFqV+ICn1/IFp90HZw3NY4K0m3AgGyJfIf9AQbAxioz2MnZAENX5w/RvjWxh/Q46qojvgZU9rZaNKkIrjl4zYRpTQcwVmPp08JLXO2KRUqiap7hTrjUyRUUcjkRcJajhQXHOqo+IU3x0jtEIBo/1OQkHqB0Oa4Gw/iYDFBBDNDc1LzF0A135Is3bx5Sgl1cis3u+wZhYo2aK713vBFWRInRVkW/GedaiF8twkOuS/ZdJNbCG0Slpan+zuwWz1R+i/fUrCtAqknG32FhxdbA4Hv+HGpYVH/q07uAjlJOkpNJ3bj75YHMICnjm5phRVPLAbAySx6m1nL59s9JNXr99KcSnXx1ALXAtmFNgknMf5BJ8jLD40g4e4+g7VKYFKi4G+vRlzgUlO/oX0Xzumd1DpMPmAf+EEQLckNjqRyH8nFfw0M4ko6/kP2kmaQRREXiOgJzFR56SHfOlOnZfuNS7BeXm39vm69ZjwW7NlJjrhP9mrzx2FNBBmU7hSLIzD/dCX0K9xRldmX2S7nTijWxTFA/3gttMw1Db/Utp9dVrvjCRKqgLLdK2atn5qS9ypumw== 2 | -------------------------------------------------------------------------------- /ansible/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: reload-ssh 4 | service: 5 | name: ssh 6 | state: restarted 7 | 
8 | - name: reload-firewall 9 | service: 10 | name: firewall 11 | state: restarted 12 | 13 | - name: restart-node_exporter 14 | service: 15 | name: node_exporter 16 | state: restarted 17 | when: collect_metrics_from|length > 0 18 | 19 | - name: restart-rsyslog 20 | service: 21 | name: rsyslog 22 | state: restarted 23 | 24 | - name: reload-systemd 25 | shell: systemctl daemon-reload 26 | 27 | - name: reboot 28 | reboot: 29 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/apt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: update apt cache 4 | apt: 5 | update_cache: true 6 | cache_valid_time: 14400 # 4 hours 7 | 8 | - name: install apt packages 9 | apt: 10 | name: 11 | - aptitude # needed by ansible itself 12 | - ca-certificates 13 | - htop 14 | - iptables 15 | - openssh-server 16 | - python3 17 | - python3-apt 18 | - vim 19 | - rsyslog-gnutls # needed for papertrail 20 | state: present 21 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create backup user 4 | user: 5 | name: local-backup 6 | state: present 7 | system: true 8 | 9 | - name: create backup config dir 10 | file: 11 | path: /etc/backup.d 12 | state: directory 13 | mode: 0750 14 | owner: root 15 | group: local-backup 16 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: disable sudo access of the ubuntu user 4 | file: 5 | name: /etc/sudoers.d/90-cloud-init-users 6 | state: absent 7 | # Avoid being locked out of the instance before the playbook ends 8 | when: ansible_user|default("not-ubuntu") != "ubuntu" 9 | 
-------------------------------------------------------------------------------- /ansible/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: apt.yml 4 | - include_tasks: users.yml 5 | - include_tasks: ssh.yml 6 | - include_tasks: networking.yml 7 | - include_tasks: backup.yml 8 | - include_tasks: metrics.yml 9 | - include_tasks: papertrail.yml 10 | - include_tasks: cleanup.yml 11 | - include_tasks: services.yml 12 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/networking.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: set the system hostname 4 | hostname: 5 | name: "{{ inventory_hostname }}" 6 | notify: reboot 7 | 8 | - name: upload /etc/hosts 9 | template: 10 | src: networking/etchosts 11 | dest: /etc/hosts 12 | 13 | - name: create the firewall config dir 14 | file: 15 | path: /etc/firewall 16 | mode: 0755 17 | state: directory 18 | 19 | - name: upload main firewall script 20 | template: 21 | src: networking/firewall.sh 22 | dest: /usr/local/sbin/firewall 23 | mode: 0755 24 | 25 | notify: 26 | - reload-firewall 27 | 28 | - name: upload firewall service 29 | template: 30 | src: networking/firewall.service 31 | dest: /etc/systemd/system/firewall.service 32 | 33 | notify: 34 | - reload-systemd 35 | - reload-firewall 36 | 37 | - name: enable firewall 38 | service: 39 | name: firewall 40 | enabled: true 41 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/papertrail.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: upload papertrail rsyslog configuration 4 | template: 5 | src: papertrail/rsyslog.conf 6 | dest: /etc/rsyslog.d/00-papertrail.conf 7 | when: papertrail_url is not none 8 | notify: 9 | - restart-rsyslog 10 | 11 | - name: 
remove papertrail rsyslog configuration
6 | 7 | - name: create the /etc/ssh/sshd_config.d directory 8 | file: 9 | path: /etc/ssh/sshd_config.d 10 | state: directory 11 | mode: 0700 12 | 13 | - name: upload sshd configuration 14 | template: 15 | src: ssh/sshd_config 16 | dest: /etc/ssh/sshd_config 17 | 18 | notify: reload-ssh 19 | 20 | - name: create authorized keys directory 21 | file: 22 | path: /etc/ssh/authorized_keys 23 | state: directory 24 | mode: 0755 25 | 26 | - name: upload users ssh keys 27 | copy: 28 | content: | 29 | {{ lookup('file', 'ssh-keys/' + item + '.pub') }} 30 | {{ lookup('file', 'ssh-keys/_master.pub') }} 31 | dest: "/etc/ssh/authorized_keys/{{ item }}" 32 | mode: 0444 33 | 34 | loop: "{{ unprivileged_users + sudo_users }}" 35 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/metrics/node_exporter-firewall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # {{ ansible_managed }} 4 | # 5 | 6 | {% for ip in collect_metrics_from %} 7 | cmd4 -A public_input_tcp -p tcp -s {{ ip | quote }} --dport 9100 -j ACCEPT 8 | {% else %} 9 | # Intentionally left blank. 
10 | {% endfor %} 11 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/metrics/node_exporter.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=node_exporter: export Prometheus metrics about this instance 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/local/bin/node_exporter --collector.systemd 11 | User=node_exporter 12 | Group=node_exporter 13 | Nice=-15 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/networking/etchosts: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | 127.0.0.1 localhost {{ inventory_hostname }} 6 | ::1 localhost ip6-localhost ip6-loopback 7 | ff02::1 ip6-allnodes 8 | ff02::2 ip6-allrouters 9 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/networking/firewall.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=Firewall initialization script 7 | After=network.target 8 | 9 | [Service] 10 | Type=oneshot 11 | KillMode=none 12 | ExecStart=/usr/local/sbin/firewall 13 | ExecStop=/usr/local/sbin/firewall --reset 14 | RemainAfterExit=yes 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/papertrail/rsyslog.conf: -------------------------------------------------------------------------------- 1 | # Configure TLS for Papertrail 2 | $DefaultNetstreamDriverCAFile /etc/ssl/certs/ca-certificates.crt 3 | $ActionSendStreamDriver gtls 4 | $ActionSendStreamDriverMode 1 5 | 
$ActionSendStreamDriverAuthMode x509/name 6 | $ActionSendStreamDriverPermittedPeer *.papertrailapp.com 7 | 8 | # Remove unwanted lines from the logs 9 | :rawmsg, contains, "pam_unix(cron:session):" ~ 10 | :rawmsg, contains, "pam_unix(sudo:session):" ~ 11 | 12 | # Send the chosen facilities to papertrail 13 | auth,authpriv.* @@{{ papertrail_url }} 14 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/ssh/sshd_config: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | Port 22 6 | HostKey /etc/ssh/ssh_host_rsa_key 7 | HostKey /etc/ssh/ssh_host_ecdsa_key 8 | HostKey /etc/ssh/ssh_host_ed25519_key 9 | 10 | SyslogFacility AUTH 11 | LogLevel INFO 12 | 13 | UsePAM yes 14 | LoginGraceTime 120 15 | StrictModes yes 16 | 17 | PermitRootLogin no 18 | PermitEmptyPasswords no 19 | 20 | PasswordAuthentication no 21 | PubkeyAuthentication yes 22 | ChallengeResponseAuthentication no 23 | AuthorizedKeysFile /etc/ssh/authorized_keys/%u 24 | 25 | IgnoreRhosts yes 26 | HostbasedAuthentication no 27 | 28 | X11Forwarding no 29 | PrintMotd no 30 | PrintLastLog yes 31 | TCPKeepAlive yes 32 | 33 | AcceptEnv LANG LC_* 34 | 35 | Subsystem sftp /usr/lib/openssh/sftp-server 36 | 37 | AllowGroups allow-ssh {{ allow_ssh_extra_groups }} 38 | 39 | {# The `Include` directive is not supported on older Ubuntu versions #} 40 | {% if ansible_distribution_release not in ["xenial", "bionic"] %} 41 | Include /etc/ssh/sshd_config.d/* 42 | {% endif %} 43 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/users/sudoers-passwordless-sudo: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | %passwordless-sudo ALL=(ALL:ALL) NOPASSWD: ALL 6 | -------------------------------------------------------------------------------- 
/ansible/roles/dev-desktop/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vars_rustup_version: "1.27.1" 3 | vars_rustup_checksum: "32a680a84cf76014915b3f8aa44e3e40731f3af92cd45eb0fcc6264fd257c428" 4 | 5 | vars_team_login_path: "/root/team_login" 6 | allow_ssh_extra_groups: "dev-desktop-allow-ssh" 7 | 8 | # Filesystem quota per user in GB 9 | vars_user_quota_gb: 150 10 | 11 | # Prototype user whose file system quota will be copied to new user accounts 12 | vars_user_quota_prototype_user: "quota-prototype" 13 | 14 | vars_user_config: 15 | - username: gh-Alexendoo 16 | shell: /usr/bin/zsh 17 | - username: gh-jdno 18 | shell: /usr/bin/zsh 19 | - username: gh-WaffleLapkin 20 | shell: /usr/bin/fish 21 | - username: gh-vincenzopalazzo 22 | shell: /usr/bin/zsh 23 | - username: gh-thomcc 24 | shell: /usr/bin/fish 25 | - username: gh-jieyouxu 26 | shell: /usr/bin/fish 27 | - username: gh-syphar 28 | shell: /usr/bin/fish 29 | - username: gh-jhpratt 30 | shell: /usr/bin/zsh 31 | - username: gh-fee1-dead 32 | shell: /usr/bin/fish 33 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/git-credential-dev-desktop: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | IFS=$'\n\t' 5 | 6 | # `setuid` doesn't work in scripts, so we allow every user to `sudo` into the 7 | # `github-app-credentials` user to execute the actual script. 
8 | sudo -u github-app-credentials /usr/local/bin/git-credential-dev-desktop-inner $@ 9 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/podman/storage.conf: -------------------------------------------------------------------------------- 1 | [storage] 2 | driver = "overlay" 3 | runroot = "/run/containers/storage" 4 | graphroot = "/var/lib/containers/storage" 5 | 6 | [storage.options.overlay] 7 | mount_program = "/usr/bin/fuse-overlayfs" 8 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rm -rf ~/rust*/build 4 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/detach_merged_prs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | for d in ~/rust* 4 | do 5 | cd $d 6 | echo $d 7 | # if the fast forward is successful, this branch is merged, so we can kill it 8 | git pull upstream master --ff-only && git checkout --detach && git submodule update --init --recursive 9 | cd .. 
10 | done 11 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/help.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "# Available scripts for managing your Rust checkouts" 4 | echo "setup_rustup.sh | first time setup, you should only have to execute this once on a new machine" 5 | echo "status.sh | list the branches and git status of all copies of the Rust repo" 6 | echo "new_worktree.sh | creates a worktree (shallow copy of the main git checkout of Rust, sharing the .git folder)" 7 | echo "detach_merged_prs.sh | invokes \"git pull --fast-forward-only\" on all worktrees and detaches those that are equal to the \"master\" branch" 8 | echo "" 9 | echo "# Rarer commands:" 10 | echo "set_defaults.sh | connects the global config.toml with all worktrees. Use this when your setup is broken" 11 | echo "setup_rust.sh | Clone your fork of rust-lang/rust, compile, and then link it" 12 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Enable strict mode for Bash 4 | # http://redsymbol.net/articles/unofficial-bash-strict-mode/ 5 | set -euo pipefail 6 | IFS=$'\n\t' 7 | 8 | init_git.py 9 | setup_rustup.sh 10 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/new_worktree.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | 5 | cd ~ 6 | N=$(ls | grep -E -e "rust[0-9]+" | wc -l) 7 | echo $N 8 | pushd rust 9 | git worktree add --detach ../rust$N 10 | popd 11 | pushd rust$N 12 | git fetch upstream 13 | git checkout upstream/master 14 | ln -s ../config.toml 15 | popd 16 | 17 | link_rust.sh 18 | 
-------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/set_defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Enable strict mode for Bash 4 | # http://redsymbol.net/articles/unofficial-bash-strict-mode/ 5 | set -euo pipefail 6 | IFS=$'\n\t' 7 | 8 | for D in rust*; do 9 | if [ -d "${D}" ]; then 10 | pushd "${D}" || exit 11 | if [[ ! -f config.toml ]]; then 12 | ln -s ~/config.toml . 13 | fi 14 | popd || exit 15 | fi 16 | done 17 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/setup_rust.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Enable strict mode for Bash 4 | # http://redsymbol.net/articles/unofficial-bash-strict-mode/ 5 | set -euo pipefail 6 | IFS=$'\n\t' 7 | 8 | username=$(id -u -n) 9 | gh_name=${username#"gh-"} 10 | 11 | set -x 12 | 13 | if [[ ! -d "rust" ]]; then 14 | # Using https instead of git urls because vscode only handles login on push/pull 15 | git clone "https://github.com/${gh_name}/rust.git" 16 | fi 17 | 18 | pushd rust 19 | 20 | if ! git remote | grep upstream; then 21 | git remote add upstream https://github.com/rust-lang/rust.git 22 | fi 23 | 24 | git fetch upstream 25 | git checkout upstream/master 26 | popd 27 | 28 | set_defaults.sh 29 | link_rust.sh 30 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/setup_rustup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Enable strict mode for Bash 4 | # http://redsymbol.net/articles/unofficial-bash-strict-mode/ 5 | set -euo pipefail 6 | IFS=$'\n\t' 7 | 8 | # Check if rustup is already installed and exit if that's the case. 
9 | if command -v rustup &>/dev/null; then 10 | rustup --version 11 | exit 0 12 | fi 13 | 14 | echo "Installing rustup..." 15 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 16 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/scripts/status.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd ~ 4 | 5 | for d in rust* 6 | do 7 | if ! [ -d $d ]; then 8 | continue 9 | fi 10 | cd $d 11 | echo $d 12 | git status --short --branch --untracked-files=no --ignore-submodules --no-ahead-behind --no-renames 13 | cd .. 14 | done 15 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/skel/config.toml: -------------------------------------------------------------------------------- 1 | changelog-seen = 2 2 | 3 | # Change this to the profile of your choice 4 | profile = "compiler" 5 | 6 | [rust] 7 | # Build with debug assertions so we get useful RUSTC_LOG 8 | # and notice debug assertions failing. 
9 | debug = true 10 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/team_login/.gitignore: -------------------------------------------------------------------------------- 1 | target -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/files/team_login/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Oli Scherer "] 3 | edition = "2021" 4 | name = "team_login" 5 | version = "0.1.0" 6 | 7 | [dependencies] 8 | miniserde = "0.1" 9 | curl = "0.4" 10 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart-firewall 3 | service: 4 | name: firewall 5 | state: restarted 6 | 7 | - name: reboot-machine 8 | reboot: 9 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy cleanup script 3 | template: 4 | src: clean-unused-checkouts.sh 5 | dest: /etc/cron.cleanup_disk_space 6 | owner: root 7 | group: root 8 | mode: 0744 9 | 10 | - name: Set up the cleanup cron job 11 | template: 12 | src: cron_cleanup_disk_space.j2 13 | dest: /etc/cron.d/cleanup_disk_space 14 | # if the cron job is running right now, keep retrying until it finishes 15 | register: cleanup_cron_result 16 | until: cleanup_cron_result is not failed 17 | retries: 10 18 | delay: 5 19 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: oom.yml 4 | - include_tasks: dependencies.yml 5 | - 
include_tasks: podman.yml 6 | - include_tasks: quota.yml 7 | - include_tasks: user_configuration.yml 8 | - include_tasks: usermod.yml 9 | with_items: "{{ vars_user_config }}" 10 | when: vars_user_config is not none and vars_user_config | length > 0 11 | - include_tasks: cleanup.yml 12 | - include_tasks: team_login.yml 13 | - include_tasks: github.yml 14 | - include_tasks: motd.yml 15 | - include_tasks: scripts.yml 16 | - include_tasks: fix_llvm_55575.yml 17 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/motd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Enable message of the day 4 | template: 5 | src: motd_sshd_config 6 | dest: /etc/ssh/sshd_config.d/motd 7 | 8 | - name: Find all existing motd scripts 9 | find: 10 | paths: /etc/update-motd.d/ 11 | patterns: "*" 12 | register: files_to_delete 13 | 14 | - name: Disable automatically generated message of the day 15 | file: 16 | path: "{{ item.path }}" 17 | state: absent 18 | with_items: "{{ files_to_delete.files }}" 19 | 20 | - name: Stop and disable news service 21 | service: 22 | name: "motd-news" 23 | state: stopped 24 | 25 | - name: Stop and disable news service timer 26 | ansible.builtin.systemd: 27 | name: motd-news.timer 28 | state: stopped 29 | enabled: no 30 | 31 | - name: Set up message of the day 32 | template: 33 | src: motd_rules 34 | dest: /etc/motd 35 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/oom.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # earlyoom kills processes using too much memory before they can cause trouble. 3 | - name: Install earlyoom 4 | ansible.builtin.apt: 5 | name: earlyoom 6 | state: present 7 | 8 | # The staging instance is so small that earlyoom prevents Ansible from executing 9 | # the playbook successfully. 
10 | - name: Disable earlyoom on staging 11 | ansible.builtin.service: 12 | name: earlyoom 13 | enabled: no 14 | state: stopped 15 | when: ansible_hostname == "dev-desktop-staging" 16 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/podman.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # These tasks follow the instructions for running Podman without root privileges 4 | # https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md 5 | 6 | - name: Install podman 7 | package: 8 | name: podman 9 | state: present 10 | 11 | - name: Install podman-docker 12 | package: 13 | name: podman-docker 14 | state: present 15 | 16 | # Required for user-space networking 17 | - name: Install slirp4netns 18 | package: 19 | name: slirp4netns 20 | state: present 21 | 22 | # Recommended instead of the default VFS file system 23 | - name: Install fuse-overlayfs 24 | package: 25 | name: fuse-overlayfs 26 | state: present 27 | 28 | - name: Copy global configuration file for storage driver 29 | copy: 30 | src: podman/storage.conf 31 | dest: /etc/containers/storage.conf 32 | owner: root 33 | group: root 34 | mode: 0644 35 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/scripts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set up the convenience files for managing worktrees 4 | copy: 5 | src: scripts/ 6 | dest: /usr/local/bin/ 7 | mode: a+x 8 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Disable the apport service 4 | ansible.builtin.systemd: 5 | enabled: false 6 | state: stopped 7 | name: apport.service 8 | # Not all of our hosts 
actually have this; just ignore it if it fails.
31 | src: sudoers 32 | dest: /etc/sudoers.d/dev-desktop 33 | mode: 0440 34 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/tasks/usermod.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check user {{ item.username }} exists 3 | ansible.builtin.getent: 4 | database: passwd 5 | key: "{{ item.username }}" 6 | register: user_exists 7 | ignore_errors: true 8 | 9 | - name: Set the user shell to {{ item.shell }} 10 | ansible.builtin.user: 11 | name: "{{ item.username }}" 12 | shell: "{{ item.shell }}" 13 | when: user_exists is succeeded 14 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/10-perf-event-paranoid.conf: -------------------------------------------------------------------------------- 1 | # From the Linux kernel documentation: 2 | # https://www.kernel.org/doc/Documentation/sysctl/kernel.txt 3 | # 4 | # Controls use of the performance events system by unprivileged 5 | # users (without CAP_SYS_ADMIN). The default value is 2. 
6 | # 7 | # -1: Allow use of (almost) all events by all users 8 | # Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK 9 | # >=0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN 10 | # Disallow raw tracepoint access by users without CAP_SYS_ADMIN 11 | # >=1: Disallow CPU event access by users without CAP_SYS_ADMIN 12 | # >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN 13 | # 14 | # Ubuntu has two more settings, which have been reverse-engineered here: 15 | # https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do 16 | kernel.perf_event_paranoid = 2 17 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/cron_cleanup_disk_space.j2: -------------------------------------------------------------------------------- 1 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 2 | 0 0 * * * root /etc/cron.cleanup_disk_space 3 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/cron_team_login.j2: -------------------------------------------------------------------------------- 1 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 2 | */5 * * * * root /etc/cron.team_login --user-quota-gb={{ vars_user_quota_gb }} 3 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/firewall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # {{ ansible_managed }} 4 | # 5 | 6 | cmd -A public_input_udp -p udp --dport "60000:61000" -j ACCEPT 7 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/gitconfig: -------------------------------------------------------------------------------- 1 | [credential] 2 | helper = dev-desktop 3 | UseHttpPath = true 4 | 
-------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/motd_rules: -------------------------------------------------------------------------------- 1 | By accessing and making use of this Cloud Compute Program resource, you are agreeing that you have both read and will abide by its official Access Agreement. The Access Agreement can be found at https://foundation.rust-lang.org/policies/cloud-compute-program/. 2 | 3 | Documentation on how to set up and use this machine can be found at: https://forge.rust-lang.org/infra/docs/dev-desktop.html 4 | -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/motd_sshd_config: -------------------------------------------------------------------------------- 1 | PrintMotd yes -------------------------------------------------------------------------------- /ansible/roles/dev-desktop/templates/sudoers: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | ALL ALL=(github-app-credentials:github-app-credentials) NOPASSWD: /usr/local/bin/git-credential-dev-desktop-inner 6 | -------------------------------------------------------------------------------- /ansible/roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: reload-systemd 4 | shell: systemctl daemon-reload 5 | 6 | - name: restart-firewall 7 | service: 8 | name: firewall 9 | state: restarted 10 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install docker 4 | apt: 5 | name: 6 | - docker.io 7 | state: present 8 | 9 | # Install AWS to pull images from ECR 10 | - name: Install aws (Ubuntu < 24) 11 | apt: 12 | name: awscli 
13 | state: present 14 | when: ansible_distribution_version is version('24', '<') 15 | - name: Install aws (Ubuntu >= 24) 16 | community.general.snap: 17 | name: aws-cli 18 | classic: true 19 | state: present 20 | when: ansible_distribution_version is version('24', '>=') 21 | 22 | - name: unmask docker.service 23 | systemd: 24 | name: docker.service 25 | masked: false 26 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: install.yml 4 | - include_tasks: update-images.yml 5 | - include_tasks: containers.yml 6 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/firewall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # {{ ansible_managed }} 4 | # 5 | 6 | {% set container = containers[item] -%} 7 | {% set expose = container.expose|default({}) %} 8 | {% for host, inside in expose.items() %} 9 | cmd4 -A public_input_tcp -p tcp --dport {{ host }} -j ACCEPT 10 | {% else %} 11 | # Intentionally left blank. 
12 | {% endfor %} 13 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/aws-credentials: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | {% if images.aws_credentials %} 6 | [default] 7 | role_arn = {{ images.aws_credentials.role_arn }} 8 | source_profile = user 9 | 10 | [user] 11 | aws_access_key_id = {{ images.aws_credentials.access_key_id }} 12 | aws_secret_access_key = {{ images.aws_credentials.secret_access_key }} 13 | {% endif %} 14 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/docker-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "credsStore": "" 3 | } 4 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/docker-images-pull.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=pull docker images 7 | After=docker.service 8 | Requires=docker.service 9 | 10 | [Service] 11 | Type=oneshot 12 | RemainAfterExit=yes 13 | ExecStart=/home/docker-update-images/update.sh 14 | 15 | WorkingDirectory=/home/docker-update-images 16 | Environment=HOME=/home/docker-update-images 17 | Environment=NO_CONTAINER_RESTART=1 18 | 19 | User=docker-update-images 20 | Group=docker 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/docker-images-update.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=update docker images 7 | After=docker.service 8 | Requires=docker.service 9 
| 10 | [Service] 11 | Type=oneshot 12 | ExecStart=/home/docker-update-images/update.sh 13 | 14 | WorkingDirectory=/home/docker-update-images 15 | Environment=HOME=/home/docker-update-images 16 | 17 | User=docker-update-images 18 | Group=docker 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/docker-images-update.timer: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=docker images update 7 | 8 | [Timer] 9 | OnBootSec={{ images.update_every }} 10 | OnUnitActiveSec={{ images.update_every }} 11 | 12 | [Install] 13 | WantedBy=timers.target 14 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/update-images/sudoers: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | docker-update-images ALL= NOPASSWD: {% for container in containers %}/bin/systemctl restart container-{{ container }}.service{% if not loop.last %},{% endif %}{% endfor %} 6 | -------------------------------------------------------------------------------- /ansible/roles/docs-rs-builder/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | vars_mountpoint_file_path: /builder.fs 4 | vars_mountpoint_size: 53687091200 5 | vars_mountpoint_path: /mnt/builder 6 | 7 | vars_user: builder 8 | vars_group: "{{ vars_user }}" 9 | vars_home_path: "/home/{{ vars_user }}" 10 | 11 | vars_repository_url: https://github.com/rust-lang/docs.rs 12 | vars_repository_sha: HEAD 13 | 14 | vars_checkout_path: "{{ vars_home_path }}/builder" 15 | vars_working_dir: "{{ vars_checkout_path }}" 16 | vars_executable_path: "{{ vars_working_dir }}/target/release/cratesfyi" 17 | 18 | vars_toolchain: nightly 19 
| 20 | vars_database_url: "{{ lookup('aws_ssm', '/docs-rs/database-url') }}" 21 | vars_s3_bucket: docs-rs-storage-519825364412 22 | -------------------------------------------------------------------------------- /ansible/roles/docs-rs-builder/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart-docker 4 | systemd: 5 | name: docker 6 | state: restarted 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /ansible/roles/docs-rs-builder/templates/builder.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=The docs.rs Builder 7 | 8 | [Service] 9 | Environment=TMPDIR={{ vars_mountpoint_path }} 10 | Environment=DOCSRS_PREFIX={{ vars_mountpoint_path }} 11 | Environment=DOCSRS_RUSTWIDE_WORKSPACE={{ vars_mountpoint_path }} 12 | Environment=DOCSRS_LOG=debug 13 | Environment=DOCSRS_DATABASE_URL={{ vars_database_url }} 14 | Environment=DOCSRS_DOCKER_IMAGE=ghcr.io/rust-lang/crates-build-env/linux 15 | Environment=DOCSRS_STORAGE_BACKEND=s3 16 | Environment=S3_REGION=us-east-1 17 | Environment=DOCSRS_S3_BUCKET={{ vars_s3_bucket }} 18 | Environment=RUST_BACKTRACE=1 19 | Environment=DOCSRS_TOOLCHAIN={{ vars_toolchain }} 20 | 21 | LimitNOFILE=20000 # Consider removing now that we upload zip files 22 | Restart=on-failure 23 | WorkingDirectory={{ vars_working_dir }} 24 | ExecStart={{ vars_executable_path }} start-build-server 25 | 26 | [Install] 27 | WantedBy=multi-user.target 28 | -------------------------------------------------------------------------------- /ansible/roles/docs-rs-builder/templates/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "storage-driver": "overlay2" 3 | } 4 | -------------------------------------------------------------------------------- 
/ansible/roles/docs-rs-builder/templates/sudoers: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | builder ALL= NOPASSWD: /bin/systemctl stop builder.service 6 | builder ALL= NOPASSWD: /bin/systemctl start builder.service 7 | builder ALL= NOPASSWD: /bin/systemctl restart builder.service 8 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dummy_certs: false 4 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart-pebble 4 | service: 5 | name: pebble 6 | state: restarted 7 | 8 | - name: reload-systemd 9 | command: systemctl daemon-reload 10 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: pebble.yml 4 | when: dummy_certs 5 | - include_tasks: install.yml 6 | - include_tasks: renewer.yml 7 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/templates/after-renew.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | IFS=$'\n\t' 4 | 5 | BASE="/etc/ssl/letsencrypt/after-renew.d" 6 | 7 | for file in $(ls "${BASE}"); do 8 | if [[ -x "${BASE}/${file}" ]]; then 9 | "${BASE}/${file}" 10 | fi 11 | done 12 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/templates/pebble/config.json: -------------------------------------------------------------------------------- 1 | { 
2 | "pebble": { 3 | "listenAddress": "0.0.0.0:14000", 4 | "managementListenAddress": "0.0.0.0:15000", 5 | "certificate": "/etc/pebble/certs/localhost/cert.pem", 6 | "privateKey": "/etc/pebble/certs/localhost/key.pem", 7 | "httpPort": 5002, 8 | "tlsPort": 5001, 9 | "ocspResponderURL": "" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/templates/pebble/pebble.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=pebble: dummy ACME certificate authority 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/local/bin/pebble -config /etc/pebble/config.json 11 | User=pebble 12 | Group=pebble 13 | Environment=PEBBLE_VA_NOSLEEP=1 14 | Environment=PEBBLE_VA_ALWAYS_VALID=1 15 | Environment=PEBBLE_WFE_NONCEREJECT=0 16 | Environment=PEBBLE_AUTHZREUSE=100 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/templates/renew-ssl-certs.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Renew SSL certificates 3 | 4 | [Service] 5 | Type=oneshot 6 | ExecStart=/usr/local/bin/renew-ssl-certs 7 | User=ssl-renew 8 | Group=ssl-read-keys 9 | 10 | # Create /var/run/acme-challenges when the unit starts and delete it 11 | # afterwards. It will be owned by the user and group defined above. 12 | RuntimeDirectory=acme-challenges 13 | RuntimeDirectoryMode=0755 14 | 15 | # Sometimes the renew can fail, such as when the Let's Encrypt servers 16 | # are overloaded. Give it a little break and try again. 
17 | Restart=on-failure 18 | RestartSec=1m 19 | -------------------------------------------------------------------------------- /ansible/roles/letsencrypt/templates/renew-ssl-certs.timer: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Renew SSL certificates each week 3 | 4 | [Timer] 5 | OnCalendar=weekly 6 | Persistent=true 7 | 8 | # Add a bit of randomness to avoid hitting the Let's Encrypt servers 9 | # at the exact same time the entire world is doing this. 10 | RandomizedDelaySec=5m 11 | 12 | [Install] 13 | WantedBy=timers.target 14 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # See this role's README for documentation about these defaults. 4 | prometheus_scrape_interval: 15s 5 | prometheus_retention: 15d 6 | prometheus_scrape: [] 7 | prometheus_rule_groups: [] 8 | grafana_github_teams: [] 9 | alertmanager_resolve_timeout: 5m 10 | alertmanager_route: {} 11 | alertmanager_receivers: [] 12 | alertmanager_inhibit_rules: [] 13 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart-prometheus-ecs-discovery 4 | service: 5 | name: prometheus-ecs-discovery 6 | state: restarted 7 | 8 | - name: restart-prometheus 9 | service: 10 | name: prometheus 11 | state: restarted 12 | 13 | - name: restart-alertmanager 14 | service: 15 | name: alertmanager 16 | state: restarted 17 | 18 | - name: restart-grafana 19 | service: 20 | name: grafana-server 21 | state: restarted 22 | 23 | - name: reload-systemd 24 | shell: systemctl daemon-reload 25 | -------------------------------------------------------------------------------- 
/ansible/roles/monitoring-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: prometheus-ecs-discovery.yml 4 | - include_tasks: prometheus.yml 5 | - include_tasks: alertmanager.yml 6 | - include_tasks: grafana.yml 7 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/alertmanager/alertmanager.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=alertmanager: alerting for prometheus 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/local/bin/alertmanager --config.file=/etc/alertmanager/alertmanager.yml --storage.path=/var/lib/alertmanager 11 | User=alertmanager 12 | Group=alertmanager 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | global: 6 | resolve_timeout: {{ alertmanager_resolve_timeout }} 7 | 8 | route: {{ alertmanager_route | to_json }} 9 | 10 | receivers: {{ alertmanager_receivers | to_json }} 11 | 12 | inhibit_rules: {{ alertmanager_inhibit_rules | to_json }} 13 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/grafana/provisioning/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | apiVersion: 1 6 | 7 | deleteDatasources: 8 | - name: prometheus 9 | 10 | datasources: 11 | # Local prometheus server 12 | - name: prometheus 13 | type: prometheus 14 | access: proxy 15 | url: http://localhost:9090 16 | isDefault: true 17 | 
jsonData: 18 | timeInterval: {{ prometheus_scrape_interval }} 19 | editable: false 20 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/prometheus-ecs-discovery/prometheus-ecs-discovery.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=discover list of hosts for Prometheus on AWS ECS 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/local/bin/prometheus-ecs-discovery --config.write-to /var/lib/prometheus-ecs-discovery/ecs_file_sd.yml 11 | User=prometheus-ecs-discovery 12 | Group=prometheus-ecs-discovery 13 | Environment=AWS_REGION=us-west-1 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | 18 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/prometheus/prometheus.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=prometheus: metrics scraper and aggregator 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/local/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus --storage.tsdb.retention.time={{ prometheus_retention }} 11 | User=prometheus 12 | Group=prometheus 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | global: 6 | scrape_interval: {{ prometheus_scrape_interval }} 7 | evaluation_interval: 15s 8 | 9 | alerting: 10 | alertmanagers: 11 | - static_configs: 12 | - targets: 13 | - localhost:9093 14 | 15 | rule_files: 16 | - 
/etc/prometheus/rules.yml 17 | - /etc/prometheus/rules.d/*.yml 18 | 19 | scrape_configs: {{ prometheus_scrape | to_json }} 20 | -------------------------------------------------------------------------------- /ansible/roles/monitoring-server/templates/prometheus/rules.yml: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | groups: {{ prometheus_rule_groups | to_json }} 6 | -------------------------------------------------------------------------------- /ansible/roles/nginx/README.md: -------------------------------------------------------------------------------- 1 | # `nginx` role 2 | 3 | This role installs and configures the nginx web server on the instance. It 4 | requires the [letsencrypt](../letsencrypt/README.md) role to be listed before 5 | this role as well. 6 | 7 | ## Configuration 8 | 9 | ```yaml 10 | - role: nginx 11 | 12 | # The number of worker connections. [optional] 13 | # https://nginx.org/en/docs/ngx_core_module.html#worker_connections 14 | worker_connections: 123 15 | 16 | # Configures reverse proxies with HTTPS termination. [optional] 17 | proxied: 18 | # The domain to proxy from 19 | - domain: subdomain.example.com 20 | # The destination to proxy to 21 | to: http://localhost:8000 22 | # Additional `location` directives to proxy, beyond the default `/` location [optional] 23 | extra_locations: 24 | # The location to respond to 25 | - path: /my/awesome/location 26 | # The URL to proxy to 27 | to: http:127.0.0.1:9999/something 28 | ``` 29 | -------------------------------------------------------------------------------- /ansible/roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # See this role's README for documentation about these defaults. 
4 | worker_connections: 768 5 | proxied: {} 6 | -------------------------------------------------------------------------------- /ansible/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: reload-nginx 4 | service: 5 | name: nginx 6 | state: reloaded 7 | 8 | - name: restart-firewall 9 | service: 10 | name: firewall 11 | state: restarted 12 | -------------------------------------------------------------------------------- /ansible/roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install nginx 4 | apt: 5 | name: nginx 6 | state: present 7 | 8 | - name: upload nginx.conf 9 | template: 10 | src: nginx.conf 11 | dest: /etc/nginx/nginx.conf 12 | notify: 13 | - reload-nginx 14 | 15 | - name: upload firewall rules for nginx 16 | template: 17 | src: firewall.sh 18 | dest: /etc/firewall/nginx.sh 19 | mode: 0750 20 | notify: 21 | - restart-firewall 22 | 23 | - name: upload nginx's after ssl renew script 24 | template: 25 | src: after-ssl-renew.sh 26 | dest: /etc/ssl/letsencrypt/after-renew.d 27 | mode: 0750 28 | 29 | - name: create systemd override file 30 | file: 31 | path: /etc/systemd/system/nginx.service.d 32 | state: directory 33 | 34 | - name: create systemd override file 35 | template: 36 | src: override.conf 37 | dest: /etc/systemd/system/nginx.service.d/override.conf 38 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/after-ssl-renew.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # {{ ansible_managed }} 4 | # 5 | 6 | systemctl reload nginx.service 7 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/firewall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 
3 | # {{ ansible_managed }} 4 | # 5 | 6 | cmd -A public_input_tcp -p tcp --dport 80 -j ACCEPT 7 | cmd -A public_input_tcp -p tcp --dport 443 -j ACCEPT 8 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/override.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | # This assumes that the NGINX instance will usually be used as an 3 | # upstream proxy. Each incoming connection takes one FD for the client 4 | # and one FD for the proxy. We add a few extra FDs to account for 5 | # things like config and log files. 6 | LimitNOFILE={{ (worker_connections * 2) + 32 }} 7 | -------------------------------------------------------------------------------- /ansible/roles/playground/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart-docker 4 | systemd: 5 | name: docker 6 | state: restarted 7 | daemon_reload: true 8 | 9 | - name: restart-containerd 10 | systemd: 11 | name: containerd 12 | state: restarted 13 | daemon_reload: true 14 | 15 | - name: start-playground-update 16 | systemd: 17 | name: playground-update 18 | state: started 19 | daemon_reload: true 20 | 21 | - name: restart-playground-update-timer 22 | systemd: 23 | name: playground-update.timer 24 | state: restarted 25 | daemon_reload: true 26 | 27 | - name: start-playground-gc 28 | systemd: 29 | name: playground-gc 30 | state: started 31 | daemon_reload: true 32 | 33 | - name: restart-playground-gc-timer 34 | systemd: 35 | name: playground-gc.timer 36 | state: restarted 37 | daemon_reload: true 38 | 39 | - name: restart-playground 40 | systemd: 41 | name: playground 42 | state: restarted 43 | daemon_reload: true 44 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/containerd-override.conf: 
-------------------------------------------------------------------------------- 1 | [Service] 2 | Slice=playground.slice 3 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics-addr": "127.0.0.1:9323", 3 | "cgroup-parent": "playground.slice", 4 | "log-driver": "local", 5 | "storage-driver": "overlay2" 6 | } 7 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/docker-override.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Slice=playground.slice 3 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/gc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # {{ ansible_managed }} 5 | # 6 | 7 | # {% raw %} 8 | 9 | set -euv -o pipefail 10 | 11 | # How long a container must be running to be killed. 12 | # Number of seconds. 
13 | MAX_TIME=3600 14 | 15 | now=$(date "+%s") 16 | to_kill=() 17 | 18 | readarray -t container_ids < <(docker ps --format '{{ .ID }}' --no-trunc) 19 | 20 | while read -r id started_at; do 21 | started_at=$(date --date "${started_at}" "+%s") 22 | running_time=$((now - started_at)) 23 | 24 | if [[ "${running_time}" -gt "${MAX_TIME}" ]]; then 25 | to_kill+=("${id}") 26 | fi 27 | done < <(docker inspect "${container_ids[@]}" --format '{{ .ID }} {{ .State.StartedAt }}') 28 | 29 | if [[ ${#to_kill[@]} -gt 0 ]]; then 30 | docker kill "${to_kill[@]}" 31 | fi 32 | 33 | # {% endraw %} 34 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground-gc.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=Garbage collect dead playground containers 7 | 8 | [Service] 9 | Type=oneshot 10 | ExecStart={{ vars_playground_gc_path }} 11 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground-gc.timer: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description = Garbage collect playground containers every 15 minutes 7 | 8 | [Timer] 9 | OnBootSec = 15min 10 | OnUnitActiveSec = 15min 11 | 12 | [Install] 13 | WantedBy = timers.target 14 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground-update.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=Update the playground 7 | 8 | [Service] 9 | Type=oneshot 10 | ExecStart={{ vars_playground_update_path }} 11 | 12 | WorkingDirectory={{ vars_playground_home_path }} 13 | 14 | {% if vars_playground_aws is defined %} 15 | 
Environment=AWS_ACCESS_KEY_ID={{ vars_playground_aws['access_key_id'] }} 16 | Environment=AWS_SECRET_ACCESS_KEY={{ vars_playground_aws['secret_access_key'] }} 17 | {% endif %} 18 | 19 | User={{ vars_playground_user }} 20 | Group={{ vars_playground_group }} 21 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground-update.timer: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description = Update the playground every hour 7 | 8 | [Timer] 9 | OnBootSec = 10min 10 | OnUnitActiveSec = 1h 11 | 12 | [Install] 13 | WantedBy = timers.target 14 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground.service: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | [Unit] 6 | Description=The Rust Playground 7 | 8 | [Service] 9 | Slice=playground.slice 10 | 11 | Restart=on-failure 12 | 13 | Environment=TMPDIR={{ vars_playground_mountpoint_path }} 14 | Environment=RUST_LOG=info 15 | Environment=PLAYGROUND_CORS_ENABLED={{ vars_playground_env_cors_enabled }} 16 | Environment=PLAYGROUND_GITHUB_TOKEN={{ vars_playground_env_github_token }} 17 | Environment=PLAYGROUND_UI_ADDRESS={{ vars_playground_env_ui_address }} 18 | Environment=PLAYGROUND_UI_PORT={{ vars_playground_env_ui_port }} 19 | Environment=PLAYGROUND_UI_ROOT={{ vars_playground_env_ui_root_path }} 20 | Environment=PLAYGROUND_SERVER_AXUM=1 21 | 22 | WorkingDirectory={{ vars_playground_artifacts_path }} 23 | 24 | ExecStart={{ vars_playground_executable_path }} 25 | 26 | LimitNOFILE={{ vars_playground_number_connections }} 27 | 28 | [Install] 29 | WantedBy=multi-user.target 30 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/playground.slice: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Resource management group for playground processes 3 | Before=slices.target 4 | -------------------------------------------------------------------------------- /ansible/roles/playground/templates/sudoers: -------------------------------------------------------------------------------- 1 | # 2 | # {{ ansible_managed }} 3 | # 4 | 5 | playground ALL= NOPASSWD: /bin/systemctl stop playground.service 6 | playground ALL= NOPASSWD: /bin/systemctl start playground.service 7 | -------------------------------------------------------------------------------- /ansible/roles/postgresql/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # See this role's README for documentation about these defaults. 4 | users: {} 5 | databases: {} 6 | -------------------------------------------------------------------------------- /ansible/roles/postgresql/tasks/databases.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create database users 4 | postgresql_user: 5 | name: "{{ item }}" 6 | password: "{{ users[item].password|default('') }}" 7 | state: present 8 | 9 | loop: "{{ users.keys() | list }}" 10 | become_user: postgres 11 | 12 | 13 | - name: create databases 14 | postgresql_db: 15 | name: "{{ item }}" 16 | owner: "{{ databases[item].owner }}" 17 | encoding: "{{ databases[item].encoding }}" 18 | state: present 19 | 20 | loop: "{{ databases.keys() | list }}" 21 | become_user: postgres 22 | -------------------------------------------------------------------------------- /ansible/roles/postgresql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: setup.yml 4 | - include_tasks: databases.yml 5 | - include_tasks: backup.yml 6 | 
-------------------------------------------------------------------------------- /ansible/roles/postgresql/tasks/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install postgresql 4 | apt: 5 | name: 6 | - postgresql 7 | - python-psycopg2 # needed by ansible 8 | - python3-psycopg2 # needed by ansible 9 | state: present 10 | -------------------------------------------------------------------------------- /ansible/roles/postgresql/templates/backup/generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | DATABASES="{% for db in databases.keys() %}{{ db }}{% endfor %}" 5 | SAVE_TO="/tmp/postgresql-backups" 6 | 7 | # Create the destination directory 8 | rm -rf "${SAVE_TO}" 9 | mkdir "${SAVE_TO}" 10 | 11 | # Ensure no one can read the destination directory content 12 | chmod 0700 "${SAVE_TO}" 13 | 14 | # Dump all the databases 15 | for db in ${DATABASES}; do 16 | pg_dump "${db}" | gzip > "${SAVE_TO}/${db}.sql.gz" 17 | done 18 | -------------------------------------------------------------------------------- /ansible/roles/postgresql/templates/backup/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "postgresql", 3 | "path": "/tmp/postgresql-backups", 4 | "before-script": "/usr/local/bin/generate-postgresql-dumps" 5 | } 6 | -------------------------------------------------------------------------------- /github-actions/README.md: -------------------------------------------------------------------------------- 1 | # GitHub Actions 2 | 3 | This directory contains some shared [GitHub Actions][docs] used on CIs managed 4 | by the Rust Infrastructure team. There are no stability guarantees for these 5 | actions, since they're supposed to only be used in infra managed by us. 6 | 7 | * [**upload-docker-image**](upload-docker-image): upload a Docker image to ECR. 
8 | * [**static-websites**](static-websites): deploy a directory to GitHub Pages. 9 | 10 | [docs]: https://help.github.com/en/articles/about-actions 11 | -------------------------------------------------------------------------------- /github-actions/upload-docker-image/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "upload-docker-image", 3 | "version": "1.0.0", 4 | "description": "Upload a Docker image to rust-lang's ECR registry.", 5 | "main": "index.js", 6 | "scripts": { 7 | "build": "node_modules/.bin/ncc build --minify index.js" 8 | }, 9 | "author": "", 10 | "license": "MIT", 11 | "dependencies": { 12 | "@actions/core": "^1.2.6", 13 | "@actions/exec": "^1.0.1", 14 | "aws-sdk": "^2.542.0", 15 | "base64-js": "^1.3.1" 16 | }, 17 | "devDependencies": { 18 | "@zeit/ncc": "^0.20.5" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /invalidate-dev-static-stable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | curl https://dev-static.rust-lang.org/dist/channel-rust-1.35.0.toml > channel-rust-stable.toml 4 | aws s3 cp ./channel-rust-stable.toml s3://dev-static-rust-lang-org/dist/ 5 | # E30AO2GXMDY230 is dev-static.rust-lang.org distribution ID 6 | aws cloudfront create-invalidation \ 7 | --distribution-id E30AO2GXMDY230 \ 8 | --paths /dist/channel-rust-stable.toml 9 | rm channel-rust-stable.toml 10 | echo "dev-static is published hourly, or you can manually trigger a run if you have RCS access" 11 | -------------------------------------------------------------------------------- /release-scripts/tag-cargo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | if [ ! 
-e x.py ]; then 6 | echo "Should be run from a rust-lang/rust checkout" 7 | exit 1 8 | fi 9 | 10 | SIMPLEINFRA_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.." 11 | 12 | git fetch git@github.com:rust-lang/rust 13 | CURRENT_STABLE=`git ls-remote -q git@github.com:rust-lang/rust stable | awk '{ print $1 }'` 14 | git checkout "$CURRENT_STABLE" 15 | 16 | git submodule update --init -- src/tools/cargo 17 | 18 | cd src/tools/cargo 19 | 20 | ./publish.py 21 | CARGO_VERSION=$(cargo read-manifest | jq -r .version) 22 | "$SIMPLEINFRA_DIR/with-rust-key.sh" git tag -m "$CARGO_VERSION release" -u 108F66205EAEB0AAA8DD5E1C85AB96E6FA1BE5FE "$CARGO_VERSION" 23 | -------------------------------------------------------------------------------- /setup-deploy-keys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "setup-deploy-keys" 3 | version = "0.1.0" 4 | authors = ["Alex Crichton "] 5 | edition = "2021" 6 | 7 | [[bin]] 8 | name = "deploy" 9 | path = "src/deploy.rs" 10 | 11 | [dependencies] 12 | clap = { version = "4", features = ["derive", "env"] } 13 | chrono = "0.4" 14 | reqwest = { version = "0.11", features = ["blocking", "json"] } 15 | serde = { version = "1.0", features = ["derive"] } 16 | serde_json = "1.0" 17 | base64 = "0.13" 18 | -------------------------------------------------------------------------------- /terraform/bastion/README.md: -------------------------------------------------------------------------------- 1 | # Bastion server configuration 2 | 3 | This directory contains the Terraform configuration to deploy the bastion 4 | server on the us-west-1 AWS region, inside our main production VPC. 5 | 6 | * [Bastion configuration on the forge][docs] 7 | 8 | [docs]: https://forge.rust-lang.org/infra/docs/bastion.html 9 | 10 | ## Configuration overview 11 | 12 | ### `firewall.tf` 13 | 14 | Configuration for the security group protecting our bastion server. 
If you want 15 | to tweak who can connect to the bastion this is the place to look into! 16 | 17 | ### `instance.tf` 18 | 19 | Creation of the bastion's EC2 instance. If you need to tweak the instance specs 20 | this is the place to look for! 21 | 22 | ### `_terraform.tf` 23 | 24 | Terraform boilerplate. 25 | -------------------------------------------------------------------------------- /terraform/bastion/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | dns = { 12 | source = "hashicorp/dns" 13 | version = "~> 3.4" 14 | } 15 | } 16 | 17 | backend "s3" { 18 | bucket = "rust-terraform" 19 | key = "simpleinfra/bastion.tfstate" 20 | region = "us-west-1" 21 | dynamodb_table = "terraform-state-lock" 22 | encrypt = true 23 | } 24 | } 25 | 26 | provider "aws" { 27 | region = "us-west-1" 28 | } 29 | 30 | data "terraform_remote_state" "shared" { 31 | backend = "s3" 32 | config = { 33 | bucket = "rust-terraform" 34 | key = "simpleinfra/shared.tfstate" 35 | region = "us-west-1" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /terraform/bors/_config.auto.tfvars: -------------------------------------------------------------------------------- 1 | domain_name = "bors.rust-lang.org" 2 | legacy_domain_names = ["buildbot2.rust-lang.org"] 3 | github_org = "rust-lang" 4 | 5 | # bors-name = "repository-name-inside-rust-lang" 6 | repositories = { 7 | rust = "rust" 8 | } 9 | -------------------------------------------------------------------------------- /terraform/crater/README.md: -------------------------------------------------------------------------------- 1 | # Crater 2 | 3 | Currently this is the only resource managed by terraform with Google Cloud auth 4 | required. 
You'll want to install [gcloud] and authenticate; see the linked 5 | guide for reference. 6 | 7 | [gcloud]: https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/getting_started#configuring-the-provider 8 | -------------------------------------------------------------------------------- /terraform/crater/ci.tf: -------------------------------------------------------------------------------- 1 | // This file contains the configuration for the rust-lang/crater's CI. 2 | 3 | resource "aws_iam_user" "ci" { 4 | name = "ci--rust-lang--crater" 5 | } 6 | 7 | resource "aws_iam_access_key" "ci" { 8 | user = aws_iam_user.ci.name 9 | } 10 | 11 | resource "aws_iam_user_policy_attachment" "ci_push" { 12 | user = aws_iam_user.ci.name 13 | policy_arn = module.ecr.policy_push_arn 14 | } 15 | 16 | resource "aws_iam_user_policy_attachment" "ci_pull" { 17 | user = aws_iam_user.ci.name 18 | policy_arn = module.ecr.policy_pull_arn 19 | } 20 | -------------------------------------------------------------------------------- /terraform/crates-io-heroku-metrics/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | github = { 12 | source = "integrations/github" 13 | version = "~> 6.2.3" 14 | } 15 | } 16 | 17 | backend "s3" { 18 | bucket = "rust-terraform" 19 | key = "simpleinfra/crates-io-heroku-metrics.tfstate" 20 | region = "us-west-1" 21 | dynamodb_table = "terraform-state-lock" 22 | encrypt = true 23 | } 24 | } 25 | 26 | data "terraform_remote_state" "shared" { 27 | backend = "s3" 28 | config = { 29 | bucket = "rust-terraform" 30 | key = "simpleinfra/shared.tfstate" 31 | region = "us-west-1" 32 | } 33 | } 34 | 35 | provider "aws" { 36 | region = "us-west-1" 37 | } 38 | -------------------------------------------------------------------------------- /terraform/crates-io-heroku-metrics/app.tf: -------------------------------------------------------------------------------- 1 | // ECS deployment and CI integration of crates-io-heroku-metrics. 2 | 3 | module "crates_io_heroku_metrics" { 4 | source = "../shared/modules/ecs-app" 5 | cluster_config = data.terraform_remote_state.shared.outputs.ecs_cluster_config 6 | 7 | env = "prod" 8 | name = "crates-io-heroku-metrics" 9 | repo = "rust-lang/crates-io-heroku-metrics" 10 | 11 | cpu = 1024 12 | memory = 2048 13 | tasks_count = 1 14 | platform_version = "1.4.0" 15 | 16 | secrets = { 17 | PASSWORD_DRAIN = "/prod/crates-io-heroku-metrics/password-drain" 18 | PASSWORD_METRICS = "/prod/crates-io-heroku-metrics/password-metrics" 19 | } 20 | 21 | expose_http = { 22 | container_port = 80 23 | domains = ["crates-io-heroku-metrics.infra.rust-lang.org"] 24 | 25 | prometheus = null 26 | 27 | health_check_path = "/health" 28 | health_check_interval = 60 29 | health_check_timeout = 15 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /terraform/dev-desktops/_terraform.tf: 
-------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/dev-desktop.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | 35 | provider "aws" { 36 | alias = "eu-central-1" 37 | region = "eu-central-1" 38 | } 39 | 40 | provider "aws" { 41 | alias = "us-east-1" 42 | region = "us-east-1" 43 | } 44 | -------------------------------------------------------------------------------- /terraform/dev-desktops/aws-region/_terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 5.64" 6 | } 7 | } 8 | } 9 | 10 | variable "instances" { 11 | type = map(object({ 12 | instance_type = string 13 | instance_arch = string 14 | storage = number 15 | })) 16 | } 17 | -------------------------------------------------------------------------------- /terraform/dev-desktops/dns.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "rust_lang_org" { 2 | name = "rust-lang.org" 3 | } 4 | 5 | resource "aws_route53_record" "dev_desktop_eu_2" { 6 | zone_id = data.aws_route53_zone.rust_lang_org.id 7 | name = "dev-desktop-eu-2.infra.rust-lang.org" 8 | type = "CNAME" 9 | records = ["dev-desktop-eu-2.westeurope.cloudapp.azure.com"] 10 | ttl = 60 11 | } 12 | 13 | resource "aws_route53_record" 
"dev_desktop_us_2" { 14 | zone_id = data.aws_route53_zone.rust_lang_org.id 15 | name = "dev-desktop-us-2.infra.rust-lang.org" 16 | type = "CNAME" 17 | records = ["dev-desktop-us-2.westus2.cloudapp.azure.com"] 18 | ttl = 60 19 | } 20 | -------------------------------------------------------------------------------- /terraform/dev-desktops/regions.tf: -------------------------------------------------------------------------------- 1 | module "aws_eu_central_1" { 2 | source = "./aws-region" 3 | providers = { 4 | aws = aws.eu-central-1 5 | } 6 | 7 | instances = { 8 | "dev-desktop-staging" = { 9 | instance_type = "t3a.micro" 10 | instance_arch = "amd64" 11 | storage = 250 12 | } 13 | "dev-desktop-eu-1" = { 14 | instance_type = "c6g.8xlarge" 15 | instance_arch = "arm64" 16 | storage = 2000 17 | } 18 | } 19 | } 20 | 21 | module "aws_us_east_1" { 22 | source = "./aws-region" 23 | providers = { 24 | aws = aws.us-east-1 25 | } 26 | 27 | instances = { 28 | "dev-desktop-us-1" = { 29 | instance_type = "c7g.8xlarge" 30 | instance_arch = "arm64" 31 | storage = 2000 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /terraform/discord-mods-bot/README.md: -------------------------------------------------------------------------------- 1 | # Discord moderation bot 2 | 3 | This directory contains the Terraform configuration for the [Discord Moderation 4 | Bot][bot-source] deployment and CI. 5 | 6 | * [How to interact with our Terraform configuration](../README.md) 7 | * [Documentation on the Forge][forge] 8 | 9 | [forge]: https://forge.rust-lang.org/infra/docs/discord-mods-bot.html 10 | [bot-source]: https://github.com/rust-lang/discord-mods-bot 11 | 12 | ## Configuration overview 13 | 14 | ### `deployment.tf` 15 | 16 | Deployment of the bot in the production ECS cluster. This is the place to look 17 | for if you need to tweak the environment the bot runs in. 
18 | 19 | ### `ci.tf` 20 | 21 | Definition of the ECR repository storing the containers built by CI, and the 22 | user CI uses to authenticate to the repository. 23 | 24 | ### `_terraform.tf` 25 | 26 | Terraform boilerplate. 27 | -------------------------------------------------------------------------------- /terraform/discord-mods-bot/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/discord-mods-bot.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | -------------------------------------------------------------------------------- /terraform/dns-delegation/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = ">= 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/dns-delegation.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | -------------------------------------------------------------------------------- /terraform/dns/_shared.tf: -------------------------------------------------------------------------------- 1 | // Variables used by other files in this directory. 2 | 3 | locals { 4 | // List of IPv4 addresses we need to use when pointing to GitHub pages in an 5 | // A record. The list of IPs was fetched from: 6 | // 7 | // https://help.github.com/en/github/working-with-github-pages/managing-a-custom-domain-for-your-github-pages-site#configuring-an-apex-domain 8 | // 9 | github_pages_ipv4 = [ 10 | "185.199.108.153", 11 | "185.199.109.153", 12 | "185.199.110.153", 13 | "185.199.111.153", 14 | ] 15 | 16 | // MX records to use when a domain's mail is managed by Mailgun. 17 | mailgun_mx = ["10 mxa.mailgun.org", "10 mxb.mailgun.org"] 18 | 19 | // SPF record to use when a domain's mail is managed by Mailgun. 20 | mailgun_spf = "v=spf1 include:mailgun.org ~all" 21 | } 22 | -------------------------------------------------------------------------------- /terraform/dns/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/dns.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | provider "aws" { 23 | region = "us-west-1" 24 | } 25 | -------------------------------------------------------------------------------- /terraform/dns/areweasyncyet.rs.tf: -------------------------------------------------------------------------------- 1 | // DNS records for the areweasyncyet.rs domain. 2 | 3 | module "areweasyncyet_rs" { 4 | source = "./impl" 5 | 6 | domain = "areweasyncyet.rs" 7 | comment = "domain for rust-lang/areweasyncyet.rs" 8 | ttl = 300 9 | 10 | A = { 11 | "@" = local.github_pages_ipv4, # Defined in _shared.tf 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /terraform/dns/arewewebyet.org.tf: -------------------------------------------------------------------------------- 1 | // DNS records for the areweasyncyet.rs domain. 2 | 3 | module "arewewebyet_org" { 4 | source = "./impl" 5 | 6 | domain = "arewewebyet.org" 7 | comment = "domain for rust-lang/arewewebyet" 8 | ttl = 300 9 | 10 | CNAME = { 11 | "www" = ["rust-lang.github.io."], 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /terraform/dns/cratesio.com.tf: -------------------------------------------------------------------------------- 1 | // DNS records for the cratesio.com domain. 2 | // 3 | // Note that some of the records are managed by other Terraform resources, and 4 | // thus are missing from this file! 
5 | 6 | module "cratesio_com" { 7 | source = "./impl" 8 | 9 | domain = "cratesio.com" 10 | comment = "parked and reserved for future use" 11 | ttl = 300 12 | } 13 | -------------------------------------------------------------------------------- /terraform/dns/docsrs.com.tf: -------------------------------------------------------------------------------- 1 | // DNS records for the docsrs.com domain. 2 | // 3 | // Note that some of the records are managed by other Terraform resources, and 4 | // thus are missing from this file! 5 | 6 | module "docsrs_com" { 7 | source = "./impl" 8 | 9 | domain = "docsrs.com" 10 | comment = "parked and reserved for future use" 11 | ttl = 300 12 | } 13 | -------------------------------------------------------------------------------- /terraform/dns/rustaceans.org.tf: -------------------------------------------------------------------------------- 1 | // DNS records for the rustaceans.org domain. 2 | 3 | module "rustaceans_org" { 4 | source = "./impl" 5 | 6 | domain = "rustaceans.org" 7 | comment = "domain for nrc/rustaceans.org" 8 | ttl = 300 9 | 10 | A = { 11 | "@" = ["161.35.234.130"], 12 | } 13 | 14 | CNAME = { 15 | "www" = ["rustaceans.org."], 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /terraform/docs-rs/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | random = { 12 | source = "hashicorp/random" 13 | version = "~> 3.6.2" 14 | } 15 | } 16 | 17 | backend "s3" { 18 | bucket = "rust-terraform" 19 | key = "simpleinfra/docs-rs.tfstate" 20 | region = "us-west-1" 21 | dynamodb_table = "terraform-state-lock" 22 | encrypt = true 23 | } 24 | } 25 | 26 | data "terraform_remote_state" "shared" { 27 | backend = "s3" 28 | config = { 29 | bucket = "rust-terraform" 30 | key = "simpleinfra/shared.tfstate" 31 | region = "us-west-1" 32 | } 33 | } 34 | 35 | provider "aws" { 36 | region = "us-west-1" 37 | } 38 | 39 | provider "aws" { 40 | region = "us-east-1" 41 | alias = "east1" 42 | } 43 | -------------------------------------------------------------------------------- /terraform/domain-redirects/README.md: -------------------------------------------------------------------------------- 1 | # Doman redirects 2 | 3 | This directory contains the Terraform configuration for our redirects from a 4 | whole subdomain to an URL. 5 | 6 | * [How to interact with our Terraform configuration](../README.md) 7 | * [Documentation on our domain redirects setup][forge] 8 | 9 | [forge]: https://forge.rust-lang.org/infra/docs/dns.html 10 | 11 | ## Configuration overview 12 | 13 | ### `redirects.tf` 14 | 15 | Definition of the redirects we have in our infrastructure. If you need to add 16 | or tweak a redirect this is the place to look for. 17 | 18 | ### `impl/` 19 | 20 | Custom module that actually defines the Terraform resources needed to maintain 21 | a domain redirect. You should only need to tweak it if you need to change how 22 | domain redirects work. 23 | 24 | ### `_terraform.tf` 25 | 26 | Terraform boilerplate. 
27 | -------------------------------------------------------------------------------- /terraform/domain-redirects/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/domain-redirects.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | provider "aws" { 23 | region = "us-west-1" 24 | } 25 | 26 | provider "aws" { 27 | region = "us-east-1" 28 | alias = "east1" 29 | } 30 | -------------------------------------------------------------------------------- /terraform/domain-redirects/impl/variables.tf: -------------------------------------------------------------------------------- 1 | // Variables used to configure the module, set by ../redirects.tf. 2 | 3 | variable "from" { 4 | description = "List of source domains" 5 | type = list(string) 6 | } 7 | 8 | variable "to_host" { 9 | description = "Destination host of the redirect" 10 | type = string 11 | } 12 | 13 | variable "to_path" { 14 | description = "Destination path of the redirect" 15 | type = string 16 | default = "" 17 | } 18 | 19 | variable "permanent" { 20 | description = "Whether this redirect is permanent" 21 | type = bool 22 | default = false 23 | } 24 | -------------------------------------------------------------------------------- /terraform/fastly-exporter/README.md: -------------------------------------------------------------------------------- 1 | # Prometheus Exporter for Fastly 2 | 3 | This module deploys a Prometheus exporter for Fastly using the official 4 | [fastly/fastly-exporter] Docker image. The implementation uses the [`ecs-task`] 5 | and [`ecs-service`] modules to deploy the exporter to ECS. 
6 | 7 | [`ecs-service`]: ../../terragrunt/modules/ecs-service 8 | [`ecs-task`]: ../../terragrunt/modules/ecs-task 9 | [fastly/fastly-exporter]: https://github.com/fastly/fastly-exporter 10 | -------------------------------------------------------------------------------- /terraform/fastly-exporter/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/fastly-exporter.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | -------------------------------------------------------------------------------- /terraform/monitorbot/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | github = { 12 | source = "integrations/github" 13 | version = "~> 6.2.3" 14 | } 15 | random = { 16 | source = "hashicorp/random" 17 | version = "~> 3.6.2" 18 | } 19 | } 20 | 21 | backend "s3" { 22 | bucket = "rust-terraform" 23 | key = "simpleinfra/monitorbot.tfstate" 24 | region = "us-west-1" 25 | dynamodb_table = "terraform-state-lock" 26 | encrypt = true 27 | } 28 | } 29 | 30 | provider "aws" { 31 | region = "us-west-1" 32 | } 33 | 34 | data "terraform_remote_state" "shared" { 35 | backend = "s3" 36 | config = { 37 | bucket = "rust-terraform" 38 | key = "simpleinfra/shared.tfstate" 39 | region = "us-west-1" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /terraform/playground/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/playground.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | 35 | provider "aws" { 36 | alias = "us-east-1" 37 | region = "us-east-1" 38 | } 39 | -------------------------------------------------------------------------------- /terraform/rds-databases/README.md: -------------------------------------------------------------------------------- 1 | # RDS databases 2 | 3 | This module creates a `shared` RDS instance that contains three databases: 4 | 5 | - `discord-mods-bot` 6 | - `triagebot` 7 | - `rustc_perf` 8 | 9 | You can access the DB instance using [bastion]. 10 | 11 | To run `terraform` commands, use the following command 12 | to port-forward the bastion host: 13 | 14 | ```sh 15 | ssh -L localhost:57467:shared..us-west-1.rds.amazonaws.com:5432 @bastion.infra.rust-lang.org 16 | ``` 17 | 18 | Where `57467` can be any unused port on your local machine. 19 | 20 | Then you can connect to the database using `psql`: 21 | 22 | ```sh 23 | psql postgres://:@localhost:57467/ 24 | ``` 25 | 26 | You can find the full endpoint (including ``) in the 27 | AWS console. 
28 | 29 | [bastion]: https://github.com/rust-lang/infra-team/tree/master/service-catalog/bastion 30 | -------------------------------------------------------------------------------- /terraform/releases/README.md: -------------------------------------------------------------------------------- 1 | # Releases 2 | 3 | This module creates the infrastructure that publishes Rust releases. 4 | 5 | Releases are produced using the 6 | [promote-release](https://github.com/rust-lang/promote-release) 7 | tool, which runs on AWS CodeBuild. 8 | Releases are stored in the AWS S3 bucket `static-rust-lang-org`. 9 | 10 | The `start-release` lambda allows the release team to trigger `promote-release` to start the release process. 11 | 12 | This module also manages the GPG keys used to sign releases. 13 | The keys are stored in the encrypted AWS bucket `rust-release-keys`. 14 | -------------------------------------------------------------------------------- /terraform/releases/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.65" 10 | } 11 | external = { 12 | source = "hashicorp/external" 13 | version = "~> 2.3.3" 14 | } 15 | } 16 | 17 | backend "s3" { 18 | bucket = "rust-terraform" 19 | key = "simpleinfra/releases.tfstate" 20 | region = "us-west-1" 21 | dynamodb_table = "terraform-state-lock" 22 | encrypt = true 23 | } 24 | } 25 | 26 | data "terraform_remote_state" "shared" { 27 | backend = "s3" 28 | config = { 29 | bucket = "rust-terraform" 30 | key = "simpleinfra/shared.tfstate" 31 | region = "us-west-1" 32 | } 33 | } 34 | 35 | provider "aws" { 36 | region = "us-west-1" 37 | } 38 | 39 | provider "aws" { 40 | region = "us-east-1" 41 | alias = "east1" 42 | } 43 | 44 | data "aws_caller_identity" "current" {} 45 | -------------------------------------------------------------------------------- /terraform/releases/impl/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 5.65" 6 | configuration_aliases = [aws.east1] 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /terraform/releases/impl/outputs.tf: -------------------------------------------------------------------------------- 1 | output "promote_release_role_id" { 2 | value = aws_iam_role.promote_release.unique_id 3 | } 4 | 5 | output "codebuild_project_arn" { 6 | value = aws_codebuild_project.promote_release.arn 7 | } 8 | -------------------------------------------------------------------------------- /terraform/releases/impl/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "bucket" { 6 | type = string 7 | } 8 | 9 | variable "inventories_bucket_arn" { 10 | type = string 11 | } 12 | 13 | variable 
"static_domain_name" { 14 | type = string 15 | } 16 | 17 | variable "cloudfront_static_id" { 18 | description = "ID of the CloudFront distribution for the Rust releases" 19 | type = string 20 | } 21 | 22 | variable "cloudfront_doc_id" { 23 | description = "ID of the CloudFront distribution for the Rust documentation" 24 | type = string 25 | } 26 | 27 | variable "release_keys_bucket_arn" { 28 | type = string 29 | } 30 | 31 | variable "promote_release_ecr_repo" { 32 | type = object({ 33 | arn = string 34 | url = string 35 | policy_push_arn = string 36 | policy_pull_arn = string 37 | }) 38 | } 39 | 40 | variable "promote_release_cron" { 41 | type = map(string) 42 | } 43 | -------------------------------------------------------------------------------- /terraform/rust-forge/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/github-actions-roles.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-east-1" 33 | } 34 | -------------------------------------------------------------------------------- /terraform/rust-log-analyzer/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | github = { 12 | source = "integrations/github" 13 | version = "~> 6.2.3" 14 | } 15 | } 16 | 17 | backend "s3" { 18 | bucket = "rust-terraform" 19 | key = "simpleinfra/rust-log-analyzer.tfstate" 20 | region = "us-west-1" 21 | dynamodb_table = "terraform-state-lock" 22 | encrypt = true 23 | } 24 | } 25 | 26 | data "terraform_remote_state" "shared" { 27 | backend = "s3" 28 | config = { 29 | bucket = "rust-terraform" 30 | key = "simpleinfra/shared.tfstate" 31 | region = "us-west-1" 32 | } 33 | } 34 | 35 | provider "aws" { 36 | region = "us-west-1" 37 | } 38 | 39 | provider "aws" { 40 | region = "us-east-1" 41 | alias = "east1" 42 | } 43 | -------------------------------------------------------------------------------- /terraform/rustc-perf/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = ">= 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.65" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/rustc-perf.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | data "terraform_remote_state" "shared" { 23 | backend = "s3" 24 | config = { 25 | bucket = "rust-terraform" 26 | key = "simpleinfra/shared.tfstate" 27 | region = "us-west-1" 28 | } 29 | } 30 | 31 | provider "aws" { 32 | region = "us-west-1" 33 | } 34 | 35 | provider "aws" { 36 | region = "us-east-1" 37 | alias = "east1" 38 | } 39 | -------------------------------------------------------------------------------- /terraform/rustc-perf/dns.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "rust_lang_org" { 2 | name = "rust-lang.org" 3 | } 4 | 5 | resource "aws_route53_record" "legacy" { 6 | zone_id = data.aws_route53_zone.rust_lang_org.id 7 | name = "rustc-perf-legacy.infra.rust-lang.org" 8 | type = "A" 9 | records = ["159.69.58.186"] 10 | ttl = 300 11 | } 12 | 13 | resource "aws_route53_record" "one" { 14 | zone_id = data.aws_route53_zone.rust_lang_org.id 15 | name = "rustc-perf-one.infra.rust-lang.org" 16 | type = "A" 17 | records = ["144.76.186.39"] 18 | ttl = 300 19 | } 20 | -------------------------------------------------------------------------------- /terraform/shared/cloudfront-policies.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudfront_response_headers_policy" "mdbook" { 2 | name = "MdbookSitePolicy" 3 | comment = "Policy for hosted mdbook-style websites" 4 | 5 | security_headers_config { 6 | content_type_options { 7 | override = true 8 | } 9 | frame_options { 10 | frame_option = "DENY" 11 | override = true 12 | } 13 | xss_protection { 14 | protection = true 15 | 
mode_block = true 16 | override = true 17 | } 18 | referrer_policy { 19 | referrer_policy = "no-referrer" 20 | override = true 21 | } 22 | strict_transport_security { 23 | access_control_max_age_sec = 63072000 24 | override = true 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /terraform/shared/github-actions-oidc.tf: -------------------------------------------------------------------------------- 1 | // This section configures the trust relationship between GitHub Actions and 2 | // our AWS account. 3 | 4 | locals { 5 | url = "https://token.actions.githubusercontent.com" 6 | } 7 | 8 | data "tls_certificate" "github_actions" { 9 | url = local.url 10 | } 11 | 12 | resource "aws_iam_openid_connect_provider" "github_actions" { 13 | url = local.url 14 | 15 | client_id_list = ["sts.amazonaws.com"] 16 | thumbprint_list = [data.tls_certificate.github_actions.certificates.0.sha1_fingerprint] 17 | } 18 | -------------------------------------------------------------------------------- /terraform/shared/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.64" 8 | } 9 | dns = { 10 | source = "hashicorp/dns" 11 | version = "~> 3.4.1" 12 | } 13 | tls = { 14 | source = "hashicorp/tls" 15 | version = "4.0.5" 16 | } 17 | } 18 | 19 | backend "s3" { 20 | bucket = "rust-terraform" 21 | key = "simpleinfra/shared.tfstate" 22 | region = "us-west-1" 23 | dynamodb_table = "terraform-state-lock" 24 | encrypt = true 25 | } 26 | } 27 | 28 | provider "aws" { 29 | region = "us-west-1" 30 | } 31 | 32 | provider "aws" { 33 | region = "us-east-1" 34 | alias = "east1" 35 | } 36 | 37 | data "aws_caller_identity" "current" {} 38 | data "aws_canonical_user_id" "current" {} 39 | -------------------------------------------------------------------------------- 
/terraform/shared/modules/acm-certificate/outputs.tf: -------------------------------------------------------------------------------- 1 | output "arn" { 2 | value = aws_acm_certificate_validation.cert.certificate_arn 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/acm-certificate/variables.tf: -------------------------------------------------------------------------------- 1 | variable "domains" { 2 | description = "List of domain names included in the certificate" 3 | type = list(string) 4 | } 5 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecr-repo/README.md: -------------------------------------------------------------------------------- 1 | # `ecr-repo` Terraform module 2 | 3 | The `ecr-repo` Terraform module creates a repository on ECR (AWS's container 4 | registry) and two IAM Policies: 5 | 6 | * `ecr-pull-{repo name}`: allows to pull from the repository 7 | * `ecr-push-{repo name}`: allows to push to the repository 8 | 9 | The repository has a lifecycle policy configured to store only tagged images and 10 | the latest 3 untagged images: this will prevent its storage usage growing 11 | indefinitely, while still allowing rollbacks to previous images. 12 | 13 | You can find the input and output variables of this module in the 14 | `variables.tf` and `outputs.tf` files respectively. 15 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecr-repo/outputs.tf: -------------------------------------------------------------------------------- 1 | output "arn" { 2 | value = aws_ecr_repository.repo.arn 3 | description = "The ARN of the ECR repository created by this module." 4 | } 5 | 6 | output "url" { 7 | value = aws_ecr_repository.repo.repository_url 8 | description = "The URL of the ECR repository created by this module." 
9 | } 10 | 11 | output "policy_push_arn" { 12 | value = aws_iam_policy.push.arn 13 | description = "The ARN of the IAM policy allowed to push to this repository." 14 | } 15 | 16 | output "policy_pull_arn" { 17 | value = aws_iam_policy.pull.arn 18 | description = "The ARN of the IAM policy allowed to pull from this repository." 19 | } 20 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecr-repo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecs-app/outputs.tf: -------------------------------------------------------------------------------- 1 | output "role_id" { 2 | value = aws_iam_role.task.id 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecs-service/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ecs_service" "service" { 2 | name = var.name 3 | cluster = var.cluster_config.cluster_id 4 | task_definition = var.task_arn 5 | desired_count = var.tasks_count 6 | launch_type = "FARGATE" 7 | platform_version = var.platform_version 8 | 9 | deployment_minimum_healthy_percent = var.deployment_minimum_healty_percent 10 | deployment_maximum_percent = var.deployment_maximum_percent 11 | 12 | enable_ecs_managed_tags = true 13 | 14 | load_balancer { 15 | target_group_arn = aws_lb_target_group.service.arn 16 | container_name = var.http_container 17 | container_port = var.http_port 18 | } 19 | 20 | network_configuration { 21 | subnets = var.cluster_config.subnet_ids 22 | security_groups = concat( 23 | [var.cluster_config.service_security_group_id], 24 | var.additional_security_group_ids, 25 | ) 26 | assign_public_ip = false 27 | } 28 | } 29 | 
-------------------------------------------------------------------------------- /terraform/shared/modules/ecs-service/outputs.tf: -------------------------------------------------------------------------------- 1 | output "arn" { 2 | value = aws_ecs_service.service.id 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/ecs-task/outputs.tf: -------------------------------------------------------------------------------- 1 | output "arn" { 2 | description = "The ARN of the task definition created by this module." 3 | value = aws_ecs_task_definition.task.arn 4 | } 5 | 6 | output "execution_role_name" { 7 | description = "The name of the task execution role created by this module." 8 | value = aws_iam_role.task_execution.name 9 | } 10 | -------------------------------------------------------------------------------- /terraform/shared/modules/efs-filesystem/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | value = aws_efs_file_system.efs.id 3 | } 4 | 5 | output "arn" { 6 | value = aws_efs_file_system.efs.arn 7 | } 8 | 9 | output "root_policy_arn" { 10 | value = aws_iam_policy.efs_root.arn 11 | } 12 | -------------------------------------------------------------------------------- /terraform/shared/modules/efs-filesystem/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "Name of the EFS filesystem" 4 | } 5 | 6 | variable "allow_subnets" { 7 | type = list(string) 8 | description = "List of subnet IDs allowed to interact with the EFS filesystem" 9 | } 10 | 11 | variable "elastic_throughput" { 12 | type = bool 13 | description = "Whether to enable elastic throughput" 14 | } 15 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-iam-user/main.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | github = { 4 | source = "integrations/github" 5 | version = "~> 6.2.3" 6 | } 7 | } 8 | } 9 | 10 | provider "github" { 11 | owner = var.org 12 | } 13 | 14 | resource "aws_iam_user" "ci" { 15 | name = var.user_name != null ? var.user_name : "ci--${var.org}--${var.repo}" 16 | } 17 | 18 | resource "aws_iam_access_key" "ci" { 19 | user = aws_iam_user.ci.name 20 | } 21 | 22 | resource "github_actions_secret" "aws_access_key_id" { 23 | repository = var.repo 24 | secret_name = "${var.env_prefix != null ? "${var.env_prefix}_" : ""}AWS_ACCESS_KEY_ID" 25 | plaintext_value = aws_iam_access_key.ci.id 26 | } 27 | 28 | resource "github_actions_secret" "aws_secret_access_key" { 29 | repository = var.repo 30 | secret_name = "${var.env_prefix != null ? "${var.env_prefix}_" : ""}AWS_SECRET_ACCESS_KEY" 31 | plaintext_value = aws_iam_access_key.ci.secret 32 | } 33 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-iam-user/outputs.tf: -------------------------------------------------------------------------------- 1 | output "user_name" { 2 | value = aws_iam_user.ci.name 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-iam-user/variables.tf: -------------------------------------------------------------------------------- 1 | variable "org" { 2 | type = string 3 | description = "The GitHub organization where the repository lives" 4 | } 5 | 6 | variable "repo" { 7 | type = string 8 | description = "The name of the repository inside the organization" 9 | } 10 | 11 | variable "user_name" { 12 | type = string 13 | default = null 14 | description = "Custom name for the IAM user. 
If omitted a default username is generated" 15 | } 16 | 17 | variable "env_prefix" { 18 | type = string 19 | default = null 20 | description = "Prefix the environment variables in GitHub Actions should have" 21 | } 22 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-oidc-role/main.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "shared" { 2 | backend = "s3" 3 | config = { 4 | bucket = "rust-terraform" 5 | key = "simpleinfra/shared.tfstate" 6 | region = "us-west-1" 7 | } 8 | } 9 | 10 | resource "aws_iam_role" "ci_role" { 11 | name = "ci--${var.org}--${var.repo}" 12 | 13 | assume_role_policy = jsonencode({ 14 | Version = "2012-10-17" 15 | Statement = [ 16 | { 17 | Effect = "Allow" 18 | Action = "sts:AssumeRoleWithWebIdentity" 19 | Principal = { 20 | Federated = data.terraform_remote_state.shared.outputs.gha_oidc_arn 21 | } 22 | Condition = { 23 | StringEquals = { 24 | "token.actions.githubusercontent.com:sub" = (var.environment != null ? 
25 | "repo:${var.org}/${var.repo}:environment:${var.environment}" : 26 | "repo:${var.org}/${var.repo}:ref:refs/heads/${var.branch}") 27 | } 28 | } 29 | } 30 | ] 31 | }) 32 | } 33 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-oidc-role/outputs.tf: -------------------------------------------------------------------------------- 1 | output "role" { 2 | value = aws_iam_role.ci_role 3 | } 4 | -------------------------------------------------------------------------------- /terraform/shared/modules/gha-oidc-role/variables.tf: -------------------------------------------------------------------------------- 1 | variable "org" { 2 | type = string 3 | description = "The GitHub organization where the repository lives" 4 | } 5 | 6 | variable "repo" { 7 | type = string 8 | description = "The name of the repository inside the organization" 9 | } 10 | 11 | variable "branch" { 12 | type = string 13 | description = "The branch of the repository allowed to assume the role" 14 | default = null 15 | } 16 | 17 | variable "environment" { 18 | type = string 19 | description = "The GitHub environment allowed to assume the role" 20 | default = null 21 | } 22 | -------------------------------------------------------------------------------- /terraform/shared/modules/lambda/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 5.64" 6 | } 7 | } 8 | } 9 | 10 | data "aws_region" "current" {} 11 | 12 | data "external" "source_zip" { 13 | program = ["${path.module}/pack.py"] 14 | query = { 15 | source_dir = var.source_dir, 16 | destination = "${path.module}/packages/${data.aws_region.current.name}/${var.name}.zip" 17 | } 18 | } 19 | 20 | resource "aws_lambda_function" "lambda" { 21 | filename = data.external.source_zip.result.path 22 | function_name = var.name 23 | handler = var.handler 24 | role 
= var.role_arn 25 | runtime = var.runtime 26 | timeout = var.timeout_seconds 27 | publish = true 28 | 29 | source_code_hash = data.external.source_zip.result.base64sha256 30 | 31 | dynamic "environment" { 32 | for_each = length(var.environment) == 0 ? toset([]) : toset([true]) 33 | content { 34 | variables = var.environment 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /terraform/shared/modules/lambda/outputs.tf: -------------------------------------------------------------------------------- 1 | output "arn" { 2 | value = aws_lambda_function.lambda.arn 3 | } 4 | 5 | output "version_arn" { 6 | value = aws_lambda_function.lambda.qualified_arn 7 | } 8 | 9 | output "name" { 10 | value = aws_lambda_function.lambda.function_name 11 | } 12 | -------------------------------------------------------------------------------- /terraform/shared/modules/lambda/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "source_dir" { 6 | type = string 7 | } 8 | 9 | variable "handler" { 10 | type = string 11 | } 12 | 13 | variable "runtime" { 14 | type = string 15 | } 16 | 17 | variable "role_arn" { 18 | type = string 19 | } 20 | 21 | variable "environment" { 22 | type = map(string) 23 | default = {} 24 | } 25 | 26 | variable "timeout_seconds" { 27 | type = number 28 | default = 3 29 | } 30 | -------------------------------------------------------------------------------- /terraform/shared/modules/static-website/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rust-lang/simpleinfra/296e1448b608b241ec8bc254ebe6044848a338ef/terraform/shared/modules/static-website/outputs.tf -------------------------------------------------------------------------------- /terraform/shared/modules/static-website/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "domain_name" { 2 | type = string 3 | description = "Domain name of the CloudFront distribution" 4 | } 5 | 6 | variable "origin_domain_name" { 7 | type = string 8 | description = "Domain name of the origin" 9 | } 10 | 11 | variable "origin_path" { 12 | type = string 13 | default = null 14 | description = "Root path in the origin" 15 | } 16 | 17 | variable "origin_access_identity" { 18 | type = string 19 | default = null 20 | description = "Origin Access Identity to use to fetch contents from S3" 21 | } 22 | 23 | variable "response_policy_id" { 24 | type = string 25 | description = "CloudFront response headers policy ID" 26 | } 27 | -------------------------------------------------------------------------------- /terraform/shared/modules/vpc/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | resource "aws_vpc" "vpc" { 4 | cidr_block = var.ipv4_cidr 5 | assign_generated_ipv6_cidr_block = true 6 | enable_dns_hostnames = true 7 | 8 | tags = { 9 | Name = var.name 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /terraform/shared/modules/vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | value = aws_vpc.vpc.id 3 | description = "ID of the VPC" 4 | } 5 | 6 | output "cidr" { 7 | value = var.ipv4_cidr 8 | description = "CIDR of the VPC" 9 | } 10 | 11 | output "public_subnets" { 12 | value = [for count, subnet in aws_subnet.public : subnet.id] 13 | description = "IDs of the public subnets inside the VPC" 14 | } 15 | 16 | output "private_subnets" { 17 | value = [for count, subnet in aws_subnet.private : subnet.id] 18 | description = "IDs of the private subnets inside the VPC" 19 | } 20 | 21 | output "untrusted_subnets" { 22 | value = [for count, subnet in aws_subnet.untrusted : 
subnet.id] 23 | description = "IDs of the untrusted subnets inside the VPC" 24 | } 25 | -------------------------------------------------------------------------------- /terraform/shared/modules/vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "Name of the VPC" 4 | } 5 | 6 | variable "ipv4_cidr" { 7 | type = string 8 | description = "CIDR of the IPv4 address range of the VPC" 9 | } 10 | 11 | variable "public_subnets" { 12 | type = map(string) 13 | description = "Map of public subnet numbers and the associated AZ IDs" 14 | } 15 | 16 | variable "private_subnets" { 17 | type = map(string) 18 | description = "Map of private subnet numbers and the associated AZ IDs" 19 | } 20 | 21 | variable "untrusted_subnets" { 22 | type = map(string) 23 | description = "Map of untrusted subnet numbers and the associated AZ IDs" 24 | } 25 | 26 | variable "peering" { 27 | type = map(string) 28 | default = {} 29 | description = "Map of CIDR blocks to peering connection IDs" 30 | } 31 | -------------------------------------------------------------------------------- /terraform/shared/services.tf: -------------------------------------------------------------------------------- 1 | // This terraform module imports all the services from the services/ directory, 2 | // and configures them. 
3 | 4 | module "service_ecs_cluster" { 5 | source = "./services/ecs-cluster" 6 | 7 | cluster_name = "rust-ecs-prod" 8 | load_balancer_domain = "ecs-prod.infra.rust-lang.org" 9 | load_balancer_subnet_ids = module.vpc_prod.public_subnets 10 | vpc_id = module.vpc_prod.id 11 | subnet_ids = module.vpc_prod.private_subnets 12 | } 13 | 14 | module "service_triagebot" { 15 | source = "./services/triagebot" 16 | 17 | domain_name = "triagebot.infra.rust-lang.org" 18 | cluster_config = module.service_ecs_cluster.config 19 | } 20 | -------------------------------------------------------------------------------- /terraform/shared/services/ecs-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "config" { 2 | value = local.cluster_config 3 | description = "Shared configuration of the ECS cluster" 4 | } 5 | -------------------------------------------------------------------------------- /terraform/shared/services/ecs-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | type = string 3 | } 4 | 5 | variable "load_balancer_domain" { 6 | type = string 7 | } 8 | 9 | variable "load_balancer_subnet_ids" { 10 | type = list(string) 11 | } 12 | 13 | variable "vpc_id" { 14 | type = string 15 | } 16 | 17 | variable "subnet_ids" { 18 | type = list(string) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/shared/services/triagebot/variables.tf: -------------------------------------------------------------------------------- 1 | variable "domain_name" { 2 | type = string 3 | description = "Domain name hosting the triagebot application" 4 | } 5 | 6 | variable "cluster_config" { 7 | type = object({ 8 | cluster_id = string, 9 | lb_listener_arn = string, 10 | lb_dns_name = string, 11 | service_security_group_id = string, 12 | subnet_ids = list(string), 13 | vpc_id = string, 14 | }) 15 | description = 
"Shared configuration of the ECS cluster" 16 | } 17 | -------------------------------------------------------------------------------- /terraform/team-members-access/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 5.64" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/team-members-access.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | provider "aws" { 23 | region = "us-west-1" 24 | } 25 | -------------------------------------------------------------------------------- /terraform/team-members-access/infra-admins.tf: -------------------------------------------------------------------------------- 1 | // This file defines and permissions of Infrastructure Team members with admin 2 | // access. 
3 | 4 | resource "aws_iam_group" "infra_admins" { 5 | name = "infra-admins" 6 | } 7 | 8 | resource "aws_iam_group_policy_attachment" "infra_admins_manage_own_credentials" { 9 | group = aws_iam_group.infra_admins.name 10 | policy_arn = aws_iam_policy.manage_own_credentials.arn 11 | } 12 | 13 | resource "aws_iam_group_policy_attachment" "infra_admins_enforce_mfa" { 14 | group = aws_iam_group.infra_admins.name 15 | policy_arn = aws_iam_policy.enforce_mfa.arn 16 | } 17 | 18 | resource "aws_iam_group_policy" "infra_admins" { 19 | group = aws_iam_group.infra_admins.name 20 | name = "full-access" 21 | policy = jsonencode({ 22 | Version = "2012-10-17" 23 | Statement = [ 24 | { 25 | Sid = "FullAccess" 26 | Effect = "Allow" 27 | Action = "*" 28 | Resource = "*" 29 | }, 30 | ] 31 | }) 32 | } 33 | -------------------------------------------------------------------------------- /terraform/team-members-datadog/_data.tf: -------------------------------------------------------------------------------- 1 | # Fetch all available permissions 2 | data "datadog_permissions" "all" {} 3 | -------------------------------------------------------------------------------- /terraform/team-members-datadog/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | datadog = { 8 | source = "datadog/datadog" 9 | version = "3.43.1" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/team-members-datadog.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | provider "datadog" {} 23 | -------------------------------------------------------------------------------- /terraform/team-members-datadog/crater.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | crater = { 3 | "walter" = local.users.walter 4 | } 5 | } 6 | 7 | resource "datadog_role" "crater" { 8 | name = "crater" 9 | 10 | dynamic "permission" { 11 | for_each = toset([ 12 | data.datadog_permissions.all.permissions.dashboards_write, 13 | data.datadog_permissions.all.permissions.notebooks_write, 14 | data.datadog_permissions.all.permissions.api_keys_read, 15 | data.datadog_permissions.all.permissions.user_app_keys, 16 | ]) 17 | 18 | content { 19 | id = permission.value 20 | } 21 | } 22 | } 23 | 24 | resource "datadog_team" "crater" { 25 | name = "crater" 26 | description = "The team maintaining crater" 27 | handle = "crater" 28 | } 29 | 30 | resource "datadog_team_membership" "crater" { 31 | for_each = local.crater 32 | 33 | team_id = datadog_team.crater.id 34 | user_id = datadog_user.users[each.key].id 35 | } 36 | -------------------------------------------------------------------------------- /terraform/team-members-datadog/foundation-board.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | foundation_board = { 3 | "nell" = local.users.nell 4 | "peixin" = local.users.peixin 5 | "seth" = local.users.seth 6 | } 7 | } 8 | 9 | resource "datadog_role" "board_member" { 10 | name = "Board Member" 11 | 12 | dynamic "permission" { 13 | for_each = toset([ 14 | 
data.datadog_permissions.all.permissions.dashboards_write, 15 | ]) 16 | 17 | content { 18 | id = permission.value 19 | } 20 | } 21 | } 22 | 23 | resource "datadog_team" "foundation_board" { 24 | name = "Rust Foundation Board" 25 | description = "The board of the Rust Foundation" 26 | handle = "foundation-board" 27 | } 28 | 29 | resource "datadog_team_membership" "foundation_board" { 30 | for_each = local.foundation_board 31 | 32 | team_id = datadog_team.foundation_board.id 33 | user_id = datadog_user.users[each.key].id 34 | } 35 | -------------------------------------------------------------------------------- /terraform/team-members-datadog/infra-admins.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | infra_admins = { 3 | "admin" = local.users.admin 4 | "jdn" = local.users.jdn 5 | "joel" = local.users.joel 6 | "marcoieni" = local.users.marcoieni 7 | "mark" = local.users.mark 8 | "pietro" = local.users.pietro 9 | "rustfoundation" = local.users.rustfoundation 10 | } 11 | } 12 | 13 | resource "datadog_team" "infra_admins" { 14 | name = "Infrastructure Admins" 15 | description = "The infra-admins" 16 | handle = "infra-admins" 17 | } 18 | 19 | resource "datadog_team_membership" "infra_admins" { 20 | for_each = local.infra_admins 21 | 22 | team_id = datadog_team.infra_admins.id 23 | user_id = datadog_user.users[each.key].id 24 | } 25 | -------------------------------------------------------------------------------- /terraform/team-members-fastly/_terraform.tf: -------------------------------------------------------------------------------- 1 | // Configuration for Terraform itself. 
2 | 3 | terraform { 4 | required_version = "~> 1" 5 | 6 | required_providers { 7 | fastly = { 8 | source = "fastly/fastly" 9 | version = "5.13.0" 10 | } 11 | } 12 | 13 | backend "s3" { 14 | bucket = "rust-terraform" 15 | key = "simpleinfra/team-members-fastly.tfstate" 16 | region = "us-west-1" 17 | dynamodb_table = "terraform-state-lock" 18 | encrypt = true 19 | } 20 | } 21 | 22 | provider "fastly" {} 23 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-prod/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "bors-prod", 4 | "regions": [ 5 | { 6 | "region": "us-east-2" 7 | } 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-prod/app/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//bors" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | domain = "bors-prod.rust-lang.net" 12 | gh_app_id = "278306" 13 | trusted_sub = "repo:rust-lang/bors:environment:production" 14 | } 15 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-prod/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "prod" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-prod/dns-zone/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//dns-zone" 3 | } 4 | 5 | include { 6 | 
path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | name = "bors-prod.rust-lang.net" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-prod/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-staging/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "bors-staging", 4 | "regions": [ 5 | { 6 | "region": "us-east-2" 7 | } 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-staging/app/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//bors" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | domain = "bors-staging.rust-lang.net" 12 | gh_app_id = "343095" 13 | trusted_sub = "repo:rust-lang/bors:environment:staging" 14 | } 15 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-staging/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "staging" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-staging/dns-zone/terragrunt.hcl: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//dns-zone" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | name = "bors-staging.rust-lang.net" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/bors-staging/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-prod/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "ci-prod", 4 | "regions": [ 5 | { 6 | "region": "us-east-2" 7 | } 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-prod/ci-runners/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/ci-runners" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | code_connection_name = "rust-lang-prod-gh-connection" 12 | repository = "rust-lang/rust" 13 | } 14 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-prod/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "prod" # ci-prod is a production account: match bors-prod/crates-io-prod, which pass env = "prod" to the same datadog-aws module (was "staging", apparently copy/pasted from ci-staging) 12 | } 13 | 
-------------------------------------------------------------------------------- /terragrunt/accounts/ci-prod/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-staging/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "ci-staging", 4 | "regions": [ 5 | { 6 | "region": "us-east-2" 7 | } 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-staging/ci-runners/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/ci-runners" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | code_connection_name = "staging-gh-connection" 12 | repository = "rust-lang/aws-runners-test" 13 | } 14 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-staging/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "staging" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/ci-staging/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = 
find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-prod/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "crates-io-prod", 4 | "regions": [ 5 | { 6 | "region": "us-west-1" 7 | }, 8 | { 9 | "region": "us-east-1", 10 | "alias": "us-east-1" 11 | } 12 | ] 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-prod/crates-io-logs/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "git::../../../..//terragrunt/modules/crates-io-logs?ref=${trimspace(file("../deployed-ref"))}" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | bucket_account = 890664054962 12 | bucket_arn = "arn:aws:s3:::rust-crates-io-logs" 13 | } 14 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-prod/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "prod" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-prod/deployed-ref: -------------------------------------------------------------------------------- 1 | ac659cdc022c00bae70dd77c84e8e08f3515d26d 2 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-prod/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = 
"../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-staging/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "crates-io-staging", 4 | "regions": [ 5 | { 6 | "region": "us-west-1" 7 | }, 8 | { 9 | "region": "us-east-1", 10 | "alias": "us-east-1" 11 | } 12 | ] 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-staging/crates-io-logs/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/crates-io-logs" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | bucket_account = 890664054962 12 | bucket_arn = "arn:aws:s3:::rust-staging-crates-io-logs" 13 | } 14 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-staging/datadog-aws/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../modules//datadog-aws" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | 10 | inputs = { 11 | env = "staging" 12 | } 13 | -------------------------------------------------------------------------------- /terragrunt/accounts/crates-io-staging/wiz/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | source = "../../../..//terragrunt/modules/wiz" 3 | } 4 | 5 | include { 6 | path = find_in_parent_folders() 7 | merge_strategy = "deep" 8 | } 9 | -------------------------------------------------------------------------------- 
/terragrunt/accounts/dev-desktops-prod/account.json: -------------------------------------------------------------------------------- 1 | { 2 | "aws": { 3 | "profile": "dev-desktops-prod", 4 | "regions": [ 5 | { 6 | "region": "us-east-1" 7 | } 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /terragrunt/accounts/dev-desktops-prod/azure-provider.hcl: -------------------------------------------------------------------------------- 1 | # Automatically include the Azure provider with the right subscription 2 | generate "azure" { 3 | path = "terragrunt-generated-azure-provider.tf" 4 | if_exists = "overwrite_terragrunt" 5 | contents = <