├── examples └── gcp │ ├── outputs.tf │ ├── locals.tf │ └── main.tf ├── .gitignore ├── .github └── CODEOWNERS ├── modules └── gcp │ ├── main.tf │ ├── iam.tf │ ├── storage.tf │ ├── migrate.tf │ ├── services.tf │ ├── variables.tf │ ├── vpc.tf │ ├── sql.tf │ ├── frontend.tf │ └── backend.tf ├── ecs ├── main.tf ├── kms.tf ├── rds.tf ├── route53.tf ├── s3.tf ├── backup.tf ├── appconfig.tf ├── README.md ├── maintenance.tf ├── lambda.tf ├── debug.tf ├── iam.tf ├── variables.tf ├── ecs.tf └── vpc.tf ├── eks ├── gen_secrets.sh ├── values.yaml ├── values.example.local.yaml ├── Chart.yaml ├── values.example.okta.yaml ├── templates │ ├── frontend │ │ ├── kube2iam.yaml │ │ ├── frontend_roles.yaml │ │ └── deployments-frontend.yaml │ └── public │ │ ├── deployments-public.yaml │ │ └── public_roles.yaml └── README.md ├── LICENSE └── README.md /examples/gcp/outputs.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */.terraform* 2 | */*.tfstate 3 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @jkennedyvz @jrozner @JoelAtDeluxe 2 | -------------------------------------------------------------------------------- /modules/gcp/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project 3 | region = var.region 4 | } 5 | -------------------------------------------------------------------------------- /ecs/main.tf: -------------------------------------------------------------------------------- 1 | # Data providers 2 | 3 | provider "aws" { 4 | region = var.region 5 | } 6 | data "aws_availability_zones" "az" {} 7 | data "aws_caller_identity" "current" {} 8 | 
-------------------------------------------------------------------------------- /modules/gcp/iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_project_iam_member" "backend-storage-user" { 2 | project = var.project 3 | role = "roles/storage.objectUser" 4 | member = google_service_account.backend.member 5 | } 6 | -------------------------------------------------------------------------------- /modules/gcp/storage.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "bucket_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | resource "google_storage_bucket" "ashirt_storage" { 8 | name = "ashirt-storage-${random_string.bucket_suffix.result}" 9 | location = var.region 10 | force_destroy = true 11 | } 12 | -------------------------------------------------------------------------------- /examples/gcp/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | project = "" 3 | region = "us-west1" 4 | environment = "prod" 5 | tag = "sha-26ab23f" 6 | backend_env = { 7 | "AUTH_SERVICES" = "ashirt" 8 | "AUTH_SERVICES_ALLOW_REGISTRATION" = "ashirt" 9 | } 10 | min_backend_instances = 0 11 | min_frontend_instances = 0 12 | } 13 | -------------------------------------------------------------------------------- /ecs/kms.tf: -------------------------------------------------------------------------------- 1 | resource "aws_kms_key" "ashirt" { 2 | count = var.kms ? 1 : 0 3 | description = "ashirt default key" 4 | #deletion_window_in_days = 10 5 | enable_key_rotation = true 6 | tags = { 7 | Name = "${var.app_name}-key" 8 | } 9 | } 10 | 11 | resource "aws_kms_alias" "ashirt" { 12 | count = var.kms ? 
1 : 0 13 | name = "alias/ashirt" 14 | target_key_id = aws_kms_key.ashirt[count.index].key_id 15 | } 16 | -------------------------------------------------------------------------------- /examples/gcp/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = local.project 3 | region = local.region 4 | } 5 | 6 | module "gcp" { 7 | source = "./ashirt-deployments/modules/gcp" 8 | 9 | project = local.project 10 | region = local.region 11 | environment = local.environment 12 | tag = local.tag 13 | backend_env = local.backend_env 14 | min_backend_instances = local.min_backend_instances 15 | min_frontend_instances = local.min_frontend_instances 16 | } 17 | -------------------------------------------------------------------------------- /eks/gen_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo 'Input DSN string' 3 | read dsn_string 4 | echo 'Input okta secret' 5 | read okta_secret 6 | kubectl create secret generic dsn-private --from-literal=dsn=$dsn_string --namespace=ashirt-frontend 7 | kubectl create secret generic dsn-public-api --from-literal=dsn-public=$dsn_string --namespace=ashirt-public 8 | kubectl create secret generic csrf-auth-key --from-literal=csrf-auth-key="`head -c 48 /dev/urandom`" --namespace=ashirt-frontend 9 | kubectl create secret generic session-store-key --from-literal=session-store-key="`head -c 48 /dev/urandom`" --namespace=ashirt-frontend 10 | kubectl create secret generic auth-okta-client-secret --from-literal=auth-okta-client-secret=$okta_secret --namespace=ashirt-frontend 11 | -------------------------------------------------------------------------------- /eks/values.yaml: -------------------------------------------------------------------------------- 1 | frontend: 2 | api: 3 | env: 4 | APP_PORT: "" 5 | APP_IMGSTORE_REGION: "" 6 | APP_IMGSTORE_BUCKET_NAME: "" 7 | AUTH_SERVICES: "" 8 | AUTH_OKTA_CLIENT_ID: "" 9 
| AUTH_OKTA_ISSUER: "" 10 | AUTH_OKTA_BACKEND_URL: "" 11 | AUTH_OKTA_SUCCESS_REDIRECT_URL: "" 12 | AUTH_OKTA_FAILURE_REDIRECT_URL_PREFIX: "" 13 | AUTH_OKTA_PROFILE_TO_SHORTNAME_FIELD: "" 14 | AUTH_SERVICES_ALLOW_REGISTRATION: "" 15 | #ADDITIONAL_FREEFORM_ENVIRONMENT_VARIABLE https://github.com/theparanoids/ashirt-server/blob/cf8cfb2f57511986c2cdc00605a0f6b64ae20dbb/backend/Readme.md 16 | s3IAMRole: "" 17 | lb: 18 | acmCert: "" 19 | extraSg: "" 20 | public: 21 | api: 22 | env: 23 | APP_PORT: "" 24 | APP_IMGSTORE_REGION: "" 25 | APP_IMGSTORE_BUCKET_NAME: "" 26 | #ADDITIONAL_FREEFORM_ENVIRONMENT_VARIABLE 27 | s3IAMRole: "" 28 | lb: 29 | acmCert: "" 30 | -------------------------------------------------------------------------------- /eks/values.example.local.yaml: -------------------------------------------------------------------------------- 1 | frontend: 2 | api: 3 | env: 4 | APP_PORT: "8000" 5 | APP_IMGSTORE_REGION: "us-west-2" 6 | APP_IMGSTORE_BUCKET_NAME: "" 7 | AUTH_SERVICES: "ashirt" 8 | AUTH_OKTA_CLIENT_ID: "" 9 | AUTH_OKTA_ISSUER: "" 10 | AUTH_OKTA_BACKEND_URL: "" 11 | AUTH_OKTA_SUCCESS_REDIRECT_URL: "" 12 | AUTH_OKTA_FAILURE_REDIRECT_URL_PREFIX: "" 13 | AUTH_OKTA_PROFILE_TO_SHORTNAME_FIELD: "" 14 | AUTH_SERVICES_ALLOW_REGISTRATION: "ashirt" 15 | s3IAMRole: "arn:aws:iam:::role/ashirt-k8s-s3" 16 | lb: 17 | acmCert: "arn:aws:acm:us-west-2::certificate/" 18 | public: 19 | api: 20 | env: 21 | APP_PORT: "8000" 22 | APP_IMGSTORE_REGION: "us-west-2" 23 | APP_IMGSTORE_BUCKET_NAME: "" 24 | s3IAMRole: "arn:aws:iam:::role/ashirt-k8s-s3-public" 25 | lb: 26 | acmCert: "arn:aws:acm:us-west-2::certificate/" 27 | -------------------------------------------------------------------------------- /modules/gcp/migrate.tf: -------------------------------------------------------------------------------- 1 | resource "google_cloud_run_v2_job" "init_api" { 2 | deletion_protection = false 3 | project = var.project 4 | name = "ashirt-migrate-${var.environment}" 5 | location = 
var.region 6 | template { 7 | template { 8 | service_account = google_service_account.backend.email 9 | containers { 10 | image = "docker.io/ashirt/init:${var.tag}" 11 | 12 | env { 13 | name = "DB_URI" 14 | value_source { 15 | secret_key_ref { 16 | secret = google_secret_manager_secret.ashirt_dsn.secret_id 17 | version = "latest" 18 | } 19 | } 20 | } 21 | } 22 | 23 | vpc_access { 24 | network_interfaces { 25 | network = google_compute_network.vpc_network.id 26 | subnetwork = google_compute_subnetwork.subnet[0].name 27 | tags = [] 28 | } 29 | 30 | egress = "ALL_TRAFFIC" 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /eks/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ashirt 3 | description: A Helm chart for ASHIRT 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | version: 0.1.0 18 | 19 | # This is the version number of the application being deployed. This version number should be 20 | # incremented each time you make changes to the application. 
21 | appVersion: 0.1.0 22 | 23 | icon: "https://github.com/theparanoids/ashirt/blob/master/icons/shirt-dark.svg" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /eks/values.example.okta.yaml: -------------------------------------------------------------------------------- 1 | frontend: 2 | api: 3 | env: 4 | APP_PORT: "8000" 5 | APP_IMGSTORE_REGION: "us-west-2" 6 | APP_IMGSTORE_BUCKET_NAME: "" 7 | AUTH_SERVICES: "okta" 8 | AUTH_OKTA_CLIENT_ID: "AUTH_OKTA_CLIENT_ID" 9 | AUTH_OKTA_ISSUER: "https://.okta.com/oauth2/AUTH_OKTA_CLIENT_ID" 10 | AUTH_OKTA_BACKEND_URL: "https:///web" 11 | AUTH_OKTA_SUCCESS_REDIRECT_URL: "https://" 12 | AUTH_OKTA_FAILURE_REDIRECT_URL_PREFIX: "https://" 13 | AUTH_OKTA_PROFILE_TO_SHORTNAME_FIELD: "short_id" 14 | AUTH_SERVICES_ALLOW_REGISTRATION: "" 15 | s3IAMRole: "arn:aws:iam:::role/ashirt-k8s-s3" 16 | lb: 17 | acmCert: "arn:aws:acm:us-west-2::certificate/" 18 | public: 19 | api: 20 | env: 21 | APP_PORT: "8000" 22 | APP_IMGSTORE_REGION: "us-west-2" 23 | APP_IMGSTORE_BUCKET_NAME: "" 24 | s3IAMRole: "arn:aws:iam:::role/ashirt-k8s-s3-public" 25 | lb: 26 | acmCert: "arn:aws:acm:us-west-2::certificate/" 27 | -------------------------------------------------------------------------------- /ecs/rds.tf: -------------------------------------------------------------------------------- 1 | # RDS Cluster and subnet group 2 | 3 | resource "random_password" "db_password" { 4 | length = 24 5 | special = false 6 | } 7 | 8 | resource "aws_rds_cluster" "ashirt" { 9 | cluster_identifier = "${var.app_name}-ecs" 10 | engine = "aurora-mysql" 11 | database_name = "ashirt" 12 | master_username = "ashirt" 13 | master_password = random_password.db_password.result 14 | preferred_backup_window = "07:00-09:00" 15 | engine_mode = "serverless" 16 | vpc_security_group_ids = [aws_security_group.rds.id] 17 | db_subnet_group_name = aws_db_subnet_group.default.name 18 | skip_final_snapshot = false 19 | final_snapshot_identifier = "${var.app_name}-${formatdate("YYYY-MM-DD-hh-mm", timestamp())}" 20 | kms_key_id = var.kms ? 
aws_kms_key.ashirt.0.arn : "" 21 | scaling_configuration { 22 | min_capacity = var.rds_min_capacity 23 | auto_pause = var.auto_pause_rds 24 | } 25 | } 26 | 27 | resource "aws_db_subnet_group" "default" { 28 | name = "${var.app_name}-main" 29 | subnet_ids = var.private_subnet ? aws_subnet.private.*.id : aws_subnet.public.*.id 30 | } 31 | -------------------------------------------------------------------------------- /modules/gcp/services.tf: -------------------------------------------------------------------------------- 1 | resource "google_project_service" "secretmanager" { 2 | project = var.project 3 | service = "secretmanager.googleapis.com" 4 | disable_on_destroy = false 5 | } 6 | 7 | resource "google_project_service" "vpcaccess-api" { 8 | project = var.project 9 | service = "vpcaccess.googleapis.com" 10 | disable_on_destroy = false 11 | } 12 | 13 | resource "google_project_service" "servicenetworking" { 14 | project = var.project 15 | service = "servicenetworking.googleapis.com" 16 | disable_on_destroy = false 17 | } 18 | 19 | resource "google_project_service" "run" { 20 | project = var.project 21 | service = "run.googleapis.com" 22 | disable_on_destroy = false 23 | } 24 | 25 | resource "google_project_service" "compute" { 26 | project = var.project 27 | service = "compute.googleapis.com" 28 | disable_on_destroy = false 29 | } 30 | 31 | #resource "google_project_service" "domains" { 32 | # project = var.project 33 | # service = "domains.googleapis.com" 34 | # disable_on_destroy = false 35 | #} 36 | 37 | #resource "google_project_service" "domains" { 38 | # project = var.project 39 | # service = "dns.googleapis.com" 40 | # disable_on_destroy = false 41 | #} 42 | -------------------------------------------------------------------------------- /ecs/route53.tf: -------------------------------------------------------------------------------- 1 | # Data provider uses existing primary zone for a route53 domain 2 | 3 | data "aws_route53_zone" "ashirt" { 4 | name = 
var.domain 5 | } 6 | 7 | # ACM cert and deps 8 | 9 | resource "aws_acm_certificate" "ashirt" { 10 | domain_name = var.domain 11 | subject_alternative_names = ["ashirt.${var.domain}", "api.${var.domain}"] 12 | validation_method = "DNS" 13 | lifecycle { 14 | create_before_destroy = true 15 | } 16 | } 17 | 18 | resource "aws_route53_record" "ashirt-cert" { 19 | for_each = { 20 | for dvo in aws_acm_certificate.ashirt.domain_validation_options : dvo.domain_name => { 21 | name = dvo.resource_record_name 22 | record = dvo.resource_record_value 23 | type = dvo.resource_record_type 24 | } 25 | } 26 | allow_overwrite = true 27 | name = each.value.name 28 | records = [each.value.record] 29 | ttl = 60 30 | type = each.value.type 31 | zone_id = data.aws_route53_zone.ashirt.zone_id 32 | } 33 | 34 | # Target for the browser and ashirt application 35 | 36 | resource "aws_route53_record" "frontend" { 37 | zone_id = data.aws_route53_zone.ashirt.zone_id 38 | name = var.domain 39 | type = "A" 40 | alias { 41 | name = aws_lb.frontend.dns_name 42 | zone_id = aws_lb.frontend.zone_id 43 | evaluate_target_health = true 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /ecs/s3.tf: -------------------------------------------------------------------------------- 1 | # Environment config and data buckets 2 | 3 | resource "aws_s3_bucket" "env" { 4 | bucket = var.appenv 5 | tags = { 6 | Name = "${var.appenv}" 7 | } 8 | } 9 | 10 | resource "aws_s3_bucket" "data" { 11 | bucket = var.appdata 12 | force_destroy = true 13 | tags = { 14 | Name = "${var.appdata}" 15 | } 16 | } 17 | 18 | resource "aws_s3_bucket_server_side_encryption_configuration" "env" { 19 | # If KMS enabled, use the key. Otherwise do not apply SSE 20 | count = var.kms ? 1 : 0 21 | bucket = aws_s3_bucket.data.id 22 | rule { 23 | apply_server_side_encryption_by_default { 24 | kms_master_key_id = var.kms ? 
aws_kms_key.ashirt.0.arn : "" 25 | sse_algorithm = "aws:kms" 26 | } 27 | } 28 | } 29 | 30 | resource "aws_s3_bucket_versioning" "env" { 31 | bucket = aws_s3_bucket.env.id 32 | versioning_configuration { 33 | status = "Enabled" 34 | } 35 | } 36 | 37 | resource "aws_s3_bucket_server_side_encryption_configuration" "data" { 38 | count = var.kms ? 1 : 0 39 | bucket = aws_s3_bucket.data.id 40 | rule { 41 | apply_server_side_encryption_by_default { 42 | kms_master_key_id = var.kms ? aws_kms_key.ashirt.0.arn : "" 43 | sse_algorithm = "aws:kms" 44 | } 45 | } 46 | } 47 | 48 | resource "aws_s3_bucket_versioning" "data" { 49 | bucket = aws_s3_bucket.data.id 50 | versioning_configuration { 51 | status = "Enabled" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /modules/gcp/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | description = "The GCP project name" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | description = "The region to deploy into" 9 | } 10 | 11 | variable "db_tier" { 12 | type = string 13 | description = "The tier for the database" 14 | default = "db-g1-small" 15 | } 16 | 17 | variable "db_edition" { 18 | type = string 19 | description = "The CloudSQL edition for the database" 20 | default = "ENTERPRISE" 21 | } 22 | 23 | variable "db_availability_type" { 24 | type = string 25 | description = "The availability type for the database" 26 | default = "REGIONAL" 27 | } 28 | 29 | variable "environment" { 30 | type = string 31 | description = "The name of the deployment environment" 32 | default = "prod" 33 | } 34 | 35 | variable "tag" { 36 | type = string 37 | description = "The image tag for the containers" 38 | } 39 | 40 | variable "backend_env" { 41 | type = map(any) 42 | description = "Environment variables for the backend service" 43 | } 44 | 45 | variable "min_backend_instances" { 46 | type = number 47 | 
description = "The minimum number of backend instances" 48 | default = 0 49 | } 50 | 51 | 52 | variable "min_frontend_instances" { 53 | type = number 54 | description = "The minimum number of frontend instances" 55 | default = 0 56 | } 57 | -------------------------------------------------------------------------------- /ecs/backup.tf: -------------------------------------------------------------------------------- 1 | # Daily s3 and RDS backups for 35 days 2 | resource "aws_backup_vault" "ashirt" { 3 | name = "ashirt" 4 | kms_key_arn = var.kms ? aws_kms_key.ashirt.0.arn : "" 5 | } 6 | 7 | resource "aws_backup_plan" "backup" { 8 | name = "ashirt" 9 | 10 | rule { 11 | rule_name = "ashirt_backups_rule" 12 | target_vault_name = aws_backup_vault.ashirt.name 13 | schedule = "cron(0 5 ? * * *)" 14 | lifecycle { 15 | delete_after = 35 16 | } 17 | } 18 | } 19 | 20 | resource "aws_iam_role" "backup" { 21 | name = "backup" 22 | assume_role_policy = </main.tf` and `examples//outputs.tf` to your repo 27 | 5. Copy `examples//locals.tf` into your repo 28 | 29 | All configuration options will be versioned and configured through the `locals.tf` file. You should periodically check for updates to the [ashirt-deployments](https://www.github.com/ashirt-ops/ashirt-deployments) repo and update the submodule as needed to ensure that you are deploying with templates that match the appropriate version of ASHIRT. 30 | 31 | ## Where is the state stored 32 | 33 | Terraform expects to store state somewhere. This is a decision you'll need to make. The most simple decision is to store it in your repo along side your `locals.tf`. You can version it within git. This is ideal if you're not using CI/CD to automatically deploy the environment from the repository. You can also set up the deployment to use remote backends. You can view the instructions [here](https://developer.hashicorp.com/terraform/language/backend) to configure a remote backend that fits your needs. 
34 | 35 | ## How to manage multiple environments 36 | 37 | If you want to create a staging and production environment for rolling out new versions, consider creating a staging branch and using the main branch to map to production. Each branch can have its own copy of the `locals.tf` file that adjusts the configuration appropriately. This can be used with or without CI/CD for automated deployments. 38 | 39 | -------------------------------------------------------------------------------- /ecs/maintenance.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "ubuntu" { 2 | most_recent = true 3 | filter { 4 | name = "name" 5 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 6 | } 7 | filter { 8 | name = "virtualization-type" 9 | values = ["hvm"] 10 | } 11 | owners = ["099720109477"] # Canonical 12 | } 13 | 14 | resource "aws_instance" "maintenance" { 15 | count = var.maintenance_mode ? 1 : 0 16 | ami = data.aws_ami.ubuntu.id 17 | instance_type = "t3.micro" 18 | subnet_id = aws_subnet.public.0.id 19 | vpc_security_group_ids = [aws_security_group.web-ecs.id, aws_security_group.maintenance[count.index].id] 20 | key_name = aws_key_pair.maintenance[count.index].key_name 21 | root_block_device { 22 | encrypted = true 23 | kms_key_id = var.kms ? aws_kms_key.ashirt[count.index].arn : null 24 | volume_size = 50 25 | } 26 | 27 | associate_public_ip_address = true 28 | tags = { 29 | Name = "${var.app_name}-maintenance" 30 | } 31 | } 32 | 33 | resource "tls_private_key" "maintenance" { 34 | count = var.maintenance_mode ? 1 : 0 35 | algorithm = "RSA" 36 | rsa_bits = 4096 37 | } 38 | 39 | resource "aws_key_pair" "maintenance" { 40 | count = var.maintenance_mode ? 1 : 0 41 | key_name = "${var.app_name}-maintenance" # Create "myKey" to AWS!!
42 | public_key = tls_private_key.maintenance[count.index].public_key_openssh 43 | provisioner "local-exec" { 44 | command = "echo '${tls_private_key.maintenance[count.index].private_key_pem}' > ./maintenance-${var.app_name}.pem; chmod 400 maintenance-${var.app_name}.pem" 45 | } 46 | } 47 | 48 | resource "aws_security_group" "maintenance" { 49 | count = var.maintenance_mode ? 1 : 0 50 | name = "maintenance-ssh" 51 | description = "Allow ssh inbound traffic" 52 | vpc_id = aws_vpc.ashirt.id 53 | tags = { 54 | Name = "ashirt-maintenance-ssh" 55 | } 56 | } 57 | 58 | resource "aws_security_group_rule" "allow-egress-maintenance" { 59 | count = var.maintenance_mode ? 1 : 0 60 | type = "egress" 61 | to_port = 0 62 | protocol = "-1" 63 | cidr_blocks = ["0.0.0.0/0"] 64 | from_port = 0 65 | security_group_id = aws_security_group.maintenance[count.index].id 66 | } 67 | 68 | resource "aws_security_group_rule" "allow-ingress-maintenance" { 69 | count = var.maintenance_mode ? 1 : 0 70 | type = "ingress" 71 | to_port = 22 72 | protocol = "TCP" 73 | cidr_blocks = var.allow_maintenance_cidrs 74 | from_port = 22 75 | security_group_id = aws_security_group.maintenance[count.index].id 76 | } 77 | 78 | output "maintenance_rds" { 79 | value = var.maintenance_mode ? 
"ssh -fN -i maintenance-${var.app_name}.pem -L 127.0.0.1:3306:${aws_rds_cluster.ashirt.endpoint}:3306 ubuntu@${aws_instance.maintenance.0.public_ip}" : null 80 | } 81 | -------------------------------------------------------------------------------- /modules/gcp/sql.tf: -------------------------------------------------------------------------------- 1 | resource "random_password" "db_root_password" { 2 | length = 24 3 | special = false 4 | } 5 | 6 | resource "random_password" "ashirt_db_password" { 7 | length = 24 8 | special = false 9 | } 10 | 11 | resource "google_compute_global_address" "private_ip_address" { 12 | project = var.project 13 | name = "ashirt-sql-ip-address-${var.environment}" 14 | purpose = "VPC_PEERING" 15 | address_type = "INTERNAL" 16 | prefix_length = 20 17 | network = google_compute_network.vpc_network.id 18 | } 19 | 20 | resource "google_service_networking_connection" "vpc_connection" { 21 | network = google_compute_network.vpc_network.id 22 | service = "servicenetworking.googleapis.com" 23 | reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] 24 | } 25 | 26 | resource "google_sql_database_instance" "ashirt" { 27 | project = var.project 28 | name = "ashirt-${var.environment}" 29 | database_version = "MYSQL_8_0" 30 | region = var.region 31 | root_password = random_password.db_root_password.result 32 | deletion_protection = false 33 | 34 | settings { 35 | tier = var.db_tier 36 | edition = var.db_edition 37 | availability_type = var.db_availability_type 38 | # TODO: do we want this enabled? 
39 | deletion_protection_enabled = false 40 | 41 | ip_configuration { 42 | ipv4_enabled = false 43 | private_network = google_compute_network.vpc_network.id 44 | 45 | authorized_networks { 46 | name = "all" 47 | value = "0.0.0.0/0" 48 | } 49 | } 50 | 51 | backup_configuration { 52 | enabled = true 53 | binary_log_enabled = true 54 | } 55 | } 56 | } 57 | 58 | resource "google_sql_database" "ashirt" { 59 | project = var.project 60 | name = "ashirt" 61 | instance = google_sql_database_instance.ashirt.name 62 | } 63 | 64 | resource "google_secret_manager_secret" "ashirt_dsn" { 65 | project = var.project 66 | secret_id = "ashirt-dsn-${var.environment}" 67 | labels = { 68 | label = "ashirt-dsn-${var.environment}" 69 | } 70 | 71 | replication { 72 | auto {} 73 | } 74 | } 75 | 76 | resource "google_secret_manager_secret_version" "ashirt" { 77 | secret = google_secret_manager_secret.ashirt_dsn.id 78 | secret_data = "ashirt:${random_password.ashirt_db_password.result}@tcp(${google_sql_database_instance.ashirt.ip_address.0.ip_address}:3306)/ashirt" 79 | } 80 | 81 | resource "google_secret_manager_secret_iam_binding" "backend_sql_secret" { 82 | project = var.project 83 | secret_id = google_secret_manager_secret.ashirt_dsn.secret_id 84 | role = "roles/secretmanager.secretAccessor" 85 | members = [ 86 | //"serviceAccount:${var.project_id}-compute@developer.gserviceaccount.com", 87 | "serviceAccount:${google_service_account.backend.email}", 88 | ] 89 | } 90 | 91 | resource "google_sql_user" "ashirt" { 92 | project = var.project 93 | name = "ashirt" 94 | host = "%" 95 | instance = google_sql_database_instance.ashirt.name 96 | password = random_password.ashirt_db_password.result 97 | depends_on = [google_sql_database_instance.ashirt] 98 | } 99 | -------------------------------------------------------------------------------- /ecs/lambda.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | prefix = "ashirt-workers" 3 | account_id = 
data.aws_caller_identity.current.account_id 4 | ecr_repository_name = "${local.prefix}-ocr" 5 | ecr_image_tag = var.worker_tag 6 | } 7 | 8 | resource "aws_ecr_repository" "repo" { 9 | name = local.ecr_repository_name 10 | force_delete = true 11 | image_scanning_configuration { 12 | scan_on_push = true 13 | } 14 | } 15 | 16 | # Pull the demo-ocr image and re-upload to ECR registry (required for container lambda) 17 | resource "null_resource" "ocr_image" { 18 | provisioner "local-exec" { 19 | command = <" 63 | }, 64 | "Action": "sts:AssumeRole" 65 | } 66 | ``` 67 | 68 | In your EKS node instance role for your ec2, attach a policy which allows the node to assume any policies starting with ashirt-k8s-*. 69 | ``` 70 | { 71 | "Version": "2012-10-17", 72 | "Statement": [ 73 | { 74 | "Effect": "Allow", 75 | "Resource": [ 76 | "arn:aws:iam::{{ACCOUNT_NUMBER}}:role/ashirt-k8s-*" 77 | ], 78 | "Action": "sts:AssumeRole" 79 | } 80 | ] 81 | } 82 | ``` 83 | 84 | ### Database Creation 85 | 86 | Now we have to setup the ashirt [database](https://us-west-2.console.aws.amazon.com/rds/home?region=us-west-2#launch-dbinstance:gdb=false;s3-import=false), this is done manually but requires minimal clicking. Ensure to change the default options for the following: 87 | - Choose Aurora, Mysql 5.7 (serverless or provisioned) 88 | - Db cluster identifier as ashirt 89 | - Autogenerate password, instance size t3.medium and choose the rds sg 90 | - In Additional options choose the encryption key you created. 91 | Make note of the autogen credentials...(db creation takes some time, continue with other setup - confirm before deploying the ashirt cluster) 92 | - Create a new Security group which only allows 3306 traffic from your VPC CIDR 93 | 94 | 95 | ### Setup DB Schema 96 | 97 | This is currently a manual step that requires setting up an SSH tunnel to connect to the RDS instance and load the schema into the database. 
The schema file referenced below can be found in the ASHIRT repository in `backend/schema.sql`. 98 | 99 | ```sh 100 | mysql -h {{RDS_HOST}}.us-west-2.compute.amazonaws.com -u -p -D ashirt < schema.sql 101 | ``` 102 | 103 | ### Setup Calico 104 | 105 | Install Calico, which is a network policy engine for Amazon's EKS. This is needed later for allowing/denying traffic to certain pods etc. 106 | `kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/master/config/v1.6/calico.yaml` 107 | 108 | ### Install Application 109 | 110 | ```sh 111 | helm install ashirt . -f --set "tag=" 112 | ``` 113 | 114 | ### Add Secrets 115 | 116 | Adding secrets must be done after installing the application so that the namespaces will exist. All of the non-secret configuration values will already be present from the installation. 117 | 118 | ```sh 119 | ./gen_secrets.sh 120 | ``` 121 | 122 | ### Setup ELB 123 | 124 | This is currently a manual step but should be automated in the future. The zone file in Route53 needs to be updated to point the DNS entries at the load balancers that have been created during the helm install process. You can use kubectl to get the values to use. This may take a few minutes for DNS changes to propagate. 125 | 126 | ```sh 127 | kubectl get svc --all-namespaces 128 | ``` 129 | 130 | Take the two entries with type LoadBalancer (frontend-lb and public-lb) and add the value for the external IP to the DNS entries for your domains as required. 131 | 132 | ## Maintenance 133 | 134 | ### Updating ASHIRT 135 | 136 | Updating ASHIRT generally will consist of simply changing the running image to a new one. For updates that require database changes you'll need to run the migrations. 137 | 138 | #### Running Migrations 139 | 140 | Migrations are performed using the sql-migrate tool that can migrate and revert db changes. Once installed this should be run from the root of the ashirt repo.
141 | 142 | ```sh 143 | sql-migrate up -config=backend/dbconfig.yml 144 | ``` 145 | 146 | #### Upgrade Containers/Deployment 147 | 148 | ```sh 149 | helm upgrade ashirt . -f --set "tag=" 150 | ``` 151 | 152 | ### Reverting a Bad Upgrade 153 | 154 | #### Migrate Down any Database Changes 155 | 156 | ```sh 157 | sql-migrate down -limit= -config=backend/dbconfig.yml 158 | ``` 159 | 160 | #### Revert Containers/Deployment 161 | 162 | ```sh 163 | helm upgrade ashirt . -f --set "tag=" 164 | ``` 165 | 166 | ### Scale your deployment to 6 pods 167 | The current pod count is 3; if, say, you start hacking the planet and pulling the stager from the builder becomes a bottleneck, run this: 168 | 169 | `kubectl scale --replicas=6 deployment/ashirt-frontend` 170 | -------------------------------------------------------------------------------- /ecs/ecs.tf: -------------------------------------------------------------------------------- 1 | # Cluster and logging 2 | 3 | resource "aws_ecs_cluster" "ashirt" { 4 | name = var.app_name 5 | setting { 6 | name = "containerInsights" 7 | value = "enabled" 8 | } 9 | } 10 | 11 | resource "aws_cloudwatch_log_group" "logs" { 12 | name = "/fargate/service/${var.app_name}" 13 | retention_in_days = 90 14 | } 15 | 16 | # Web service (backend service for nginx frontend) 17 | 18 | resource "aws_ecs_service" "ashirt-web" { 19 | name = "${var.app_name}-web" 20 | depends_on = [null_resource.ecs-run-task-init] 21 | cluster = aws_ecs_cluster.ashirt.id 22 | task_definition = aws_ecs_task_definition.web.arn 23 | desired_count = var.web_count 24 | launch_type = "FARGATE" 25 | 26 | load_balancer { 27 | target_group_arn = aws_lb_target_group.web.arn 28 | container_name = "${var.app_name}-web" 29 | container_port = var.app_port 30 | } 31 | 32 | dynamic "load_balancer" { 33 | for_each = var.debug_mode ?
["true"] : [] 34 | content { 35 | target_group_arn = aws_lb_target_group.debug[0].arn 36 | container_name = "${var.app_name}-web" 37 | container_port = var.debug_port 38 | } 39 | } 40 | 41 | network_configuration { 42 | security_groups = var.debug_mode ? [aws_security_group.debug.0.id, "${aws_security_group.web-ecs.id}"] : ["${aws_security_group.web-ecs.id}"] 43 | subnets = var.private_subnet ? aws_subnet.private.*.id : aws_subnet.public.*.id 44 | assign_public_ip = var.private_subnet ? false : true 45 | } 46 | } 47 | 48 | resource "aws_ecs_task_definition" "web" { 49 | family = "${var.app_name}-web" 50 | execution_role_arn = aws_iam_role.web.arn 51 | task_role_arn = aws_iam_role.web.arn 52 | container_definitions = jsonencode([ 53 | { 54 | name = "${var.app_name}-web" 55 | image = "ashirt/web:${var.tag}" 56 | cpu = var.cpu 57 | memory = var.mem 58 | essential = true 59 | portMappings = [ 60 | { 61 | containerPort = var.app_port 62 | hostPort = var.app_port 63 | }, 64 | { 65 | containerPort = var.debug_port 66 | hostPort = var.debug_port 67 | } 68 | ] 69 | logConfiguration = { 70 | logDriver = "awslogs" 71 | options = { 72 | awslogs-group = "/fargate/service/${var.app_name}" 73 | awslogs-region = var.region 74 | awslogs-stream-prefix = "ecs" 75 | } 76 | } 77 | environmentFiles = [ 78 | { 79 | value = "${aws_s3_bucket.env.arn}/web/.env" 80 | type = "s3" 81 | }, 82 | ] 83 | } 84 | ]) 85 | cpu = var.cpu 86 | memory = var.mem 87 | network_mode = "awsvpc" 88 | requires_compatibilities = [ 89 | "FARGATE" 90 | ] 91 | } 92 | 93 | # Frontend service. Nginx serves static content and proxies to web service. 
94 | 95 | resource "aws_ecs_service" "ashirt-frontend" { 96 | name = "${var.app_name}-frontend" 97 | cluster = aws_ecs_cluster.ashirt.id 98 | task_definition = aws_ecs_task_definition.frontend.arn 99 | desired_count = var.frontend_count 100 | launch_type = "FARGATE" 101 | 102 | load_balancer { 103 | target_group_arn = aws_lb_target_group.frontend.arn 104 | container_name = "${var.app_name}-frontend" 105 | container_port = var.nginx_port 106 | } 107 | 108 | network_configuration { 109 | security_groups = ["${aws_security_group.frontend-ecs.id}"] 110 | subnets = var.private_subnet ? aws_subnet.private.*.id : aws_subnet.public.*.id 111 | assign_public_ip = var.private_subnet ? false : true 112 | } 113 | } 114 | 115 | resource "aws_ecs_task_definition" "frontend" { 116 | family = "${var.app_name}-frontend" 117 | execution_role_arn = aws_iam_role.web.arn 118 | task_role_arn = aws_iam_role.web.arn 119 | container_definitions = jsonencode([ 120 | { 121 | name = "${var.app_name}-frontend" 122 | image = "ashirt/frontend:${var.tag}" 123 | cpu = var.cpu 124 | memory = var.mem 125 | essential = true 126 | portMappings = [ 127 | { 128 | containerPort = var.nginx_port 129 | hostPort = var.nginx_port 130 | } 131 | ] 132 | logConfiguration = { 133 | logDriver = "awslogs" 134 | options = { 135 | awslogs-group = "/fargate/service/${var.app_name}" 136 | awslogs-region = var.region 137 | awslogs-stream-prefix = "ecs" 138 | } 139 | } 140 | environment = [ 141 | { 142 | name = "WEB_URL" 143 | value = "http://${aws_lb.web.dns_name}:${var.app_port}" 144 | }, 145 | { 146 | name = "NGINX_PORT" 147 | value = tostring(var.nginx_port) 148 | } 149 | ] 150 | } 151 | ]) 152 | cpu = var.cpu 153 | memory = var.mem 154 | network_mode = "awsvpc" 155 | requires_compatibilities = [ 156 | "FARGATE" 157 | ] 158 | } 159 | 160 | # DB init task. Runs once, and anytime you change the global tag to apply sql migrations. 
161 | 162 | resource "aws_ecs_task_definition" "init" { 163 | family = "init" 164 | execution_role_arn = aws_iam_role.web.arn 165 | task_role_arn = aws_iam_role.web.arn 166 | container_definitions = jsonencode([ 167 | { 168 | name = "${var.app_name}-init" 169 | image = "ashirt/init:${var.tag}" 170 | cpu = var.cpu 171 | memory = var.mem 172 | essential = true 173 | portMappings = [ 174 | { 175 | containerPort = var.app_port 176 | hostPort = var.app_port 177 | } 178 | ] 179 | logConfiguration = { 180 | logDriver = "awslogs" 181 | options = { 182 | awslogs-group = "/fargate/service/${var.app_name}" 183 | awslogs-region = var.region 184 | awslogs-stream-prefix = "ecs" 185 | } 186 | } 187 | environmentFiles = [ 188 | { 189 | value = "${aws_s3_bucket.env.arn}/web/.env" 190 | type = "s3" 191 | } 192 | ] 193 | } 194 | ]) 195 | cpu = var.cpu 196 | memory = var.mem 197 | network_mode = "awsvpc" 198 | requires_compatibilities = [ 199 | "FARGATE" 200 | ] 201 | } 202 | 203 | resource "null_resource" "ecs-run-task-init" { 204 | provisioner "local-exec" { 205 | command = <