├── AWS
│   ├── README.md
│   ├── enforce_aws_resource.rego
│   ├── enforce_cidr.rego
│   ├── enforce_iam_instance_profiles.rego
│   ├── enforce_instance_subnet.rego
│   ├── enforce_kms_key_names.rego
│   ├── enforce_lb_subnets.rego
│   ├── enforce_rds_subnets.rego
│   ├── enforce_s3_buckets_encryption.rego
│   ├── enforce_s3_private.rego
│   └── enforce_sec_group.rego
├── Admin Policies
│   ├── README.md
│   ├── denied_provisioners.rego
│   ├── enforce_var_desc.rego
│   ├── resource_tags.rego
│   ├── workspace_name.rego
│   └── workspace_tags.rego
├── Azure
│   ├── README.md
│   ├── enforce_az_blob_private.rego
│   └── enforce_inbound_cidr.rego
├── GCP
│   ├── README.md
│   ├── enforce_fw_source.rego
│   └── enforce_gcs_private.rego
├── Generic
│   ├── README.md
│   ├── actions-blacklist.rego
│   ├── array-blacklist.rego
│   ├── array-whitelist.rego
│   ├── attribute_check.rego
│   ├── attribute_value_regex.rego
│   ├── instance_types.rego
│   ├── numeric-range.rego
│   ├── resource-type-blacklist.rego
│   ├── resource-type-whitelist.rego
│   ├── scalar-blacklist.rego
│   ├── scalar-whitelist.rego
│   ├── tags-required.rego
│   └── vcs-user-whitelist.rego
└── README.md

/AWS/README.md:
--------------------------------------------------------------------------------
# AWS Templates

Library of OPA templates to meet common AWS Terraform requirements.

**Work in Progress**

| Rego | Description |
| ------------------------ | -- |
| enforce_aws_resource.rego | Whitelist of allowed AWS resource types |
| enforce_cidr.rego | Blacklist of CIDRs that are not allowed in security group rules |
| enforce_iam_instance_profiles.rego | Whitelist of allowed IAM instance profiles |
| enforce_instance_subnet.rego | Whitelist of allowed subnets |
| enforce_kms_key_names.rego | Enforces the use of specified KMS keys in all applicable resource types |
| enforce_lb_subnets.rego | Enforces specified subnets on load balancers |
| enforce_rds_subnets.rego | Enforces specified subnets on RDS |
| enforce_s3_buckets_encryption.rego | Enforces encryption on S3 buckets |
| enforce_s3_private.rego | Ensures S3 buckets are private |
| enforce_sec_group.rego | Enforces the use of a specific security group |

--------------------------------------------------------------------------------
/AWS/enforce_aws_resource.rego:
--------------------------------------------------------------------------------
# Implements an allowed list of resource types.
#
# NOTE: This policy would also prevent the use of all providers except AWS.
# To allow other clouds with no restrictions on resource types, add the following line to the rule
# before "not array_contains...":
#
#     startswith(resource.type, "aws_")

package terraform

import input.tfplan as tfplan

# Allowed Terraform resources
allowed_resources = [
    "aws_security_group",
    "aws_instance",
    "aws_s3_bucket"
]

array_contains(arr, elem) {
    arr[_] = elem
}

deny[reason] {
    resource := tfplan.resource_changes[_]
    action := resource.change.actions[count(resource.change.actions) - 1]
    array_contains(["create", "update"], action) # allow destroy action

    not array_contains(allowed_resources, resource.type)

    reason := sprintf(
        "%s: resource type %q is not allowed",
        [resource.address, resource.type]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_cidr.rego:
--------------------------------------------------------------------------------
# Enforces the denial of CIDR 0.0.0.0/0 in security groups

package terraform

import input.tfplan as tfplan

# Add CIDRs that should be disallowed
invalid_cidrs = [
    "0.0.0.0/0"
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Checks security group embedded ingress rules
deny[reason] {
    r := tfplan.resource_changes[_]
    r.type == "aws_security_group"
    in := r.change.after.ingress[_]
    invalid := invalid_cidrs[_]
    array_contains(in.cidr_blocks, invalid)
    reason := sprintf(
        "%-40s :: security group invalid ingress CIDR %s",
        [r.address, invalid]
    )
}

# Checks security group embedded egress rules
deny[reason] {
    r := tfplan.resource_changes[_]
    r.type == "aws_security_group"
    eg := r.change.after.egress[_]
    invalid := invalid_cidrs[_]
    array_contains(eg.cidr_blocks, invalid)
    reason := sprintf(
        "%-40s :: security group invalid egress CIDR %s",
        [r.address, invalid]
    )
}

# Checks security group rules
deny[reason] {
    r := tfplan.resource_changes[_]
    r.type == "aws_security_group_rule"
    invalid := invalid_cidrs[_]
    array_contains(r.change.after.cidr_blocks, invalid)
    reason := sprintf(
        "%-40s :: security group rule invalid CIDR %s",
        [r.address, invalid]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_iam_instance_profiles.rego:
--------------------------------------------------------------------------------
# Validate that the iam_instance_profile is in the allowed list
#

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

allowed_iam_profiles = [
    "my_iam_profile",
    "my_iam_profile_2",
    "my_iam_profile_3"
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Extract the profile name whether the planned value is given directly
# or as a collection of objects with a name attribute.
eval_expression(expr) = name {
    name := expr[_].name
} else = iamp {
    iamp = expr
}

deny[reason] {
    resource := tfplan.resource_changes[_]
    iam := eval_expression(resource.change.after.iam_instance_profile)
    not array_contains(allowed_iam_profiles, iam)

    reason := sprintf(
        "%-40s :: iam_instance_profile '%s' is not allowed.",
        [resource.address, iam]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_instance_subnet.rego:
--------------------------------------------------------------------------------
# Enforces the use of specific subnets on EC2 instances
# This policy first checks that a subnet_id has been specified, i.e. it is not left to the AZ default

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

# Add only private subnets to this list.
# NOTE: OPA cannot validate that a subnet is private unless the terraform config is actually creating the subnet.
allowed_subnets = [
    "subnet-019c416174b079502",
    "subnet-04dbded374ed11690"
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Check that a subnet has been specified
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "aws_instance"
    true == r.change.after_unknown.subnet_id

    reason := sprintf(
        "%-40s :: subnet_id must be specified in the terraform configuration.",
        [r.address]
    )
}

# Check the subnet is in the allowed list for EC2 instances
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "aws_instance"
    not array_contains(allowed_subnets, r.change.after.subnet_id)

    reason := sprintf(
        "%-40s :: subnet_id '%s' is public and not allowed",
        [r.address, r.change.after.subnet_id]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_kms_key_names.rego:
--------------------------------------------------------------------------------
# Validate that the KMS key name (alias) is in the allowed list
#
# To enforce this the policy must check that all uses of KMS keys are referenced from a data source and that the data source itself uses an allowed key name.

# KMS is used in many AWS services. This policy will attempt to deal with all cases.

# AWS CloudTrail : DONE
# AWS CloudWatch : DONE
# Amazon DynamoDB : DONE
# Amazon Elastic Block Store (Amazon EBS) : DONE
# Amazon Elastic Transcoder : DONE
# Amazon EMR : DONE
# Amazon Redshift : DONE
# Amazon Relational Database Service (Amazon RDS) : DONE
# AWS Secrets Manager : DONE
# Amazon Simple Email Service (Amazon SES) : DONE
# Amazon Simple Storage Service (Amazon S3) : DONE
# AWS Systems Manager Parameter Store : DONE


package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

allowed_kms_keys = [
    "pg-kms-key",
    "alias-1"
]

contains(arr, elem) {
    arr[_] = elem
}

# Configuration may be specified in one of 3 ways:
# - Constant value, i.e. a quoted string "the_value"
# - A variable in the references[] specifying a value
# - An actual reference, e.g. data.foo.bar
#
# This extracts the relevant value, i.e. the constant value, the value of the variable or the reference
eval_expression(plan, expr) = constant_value {
    constant_value := expr.constant_value
} else = var_value {
    ref = expr.references[0]
    startswith(ref, "var.")
    var_name := replace(ref, "var.", "")
    var_value := plan.variables[var_name].value
} else = reference {
    reference := expr.references[_]
}

eval_key_name(plan, res) = constant_value {
    constant_value := res.expressions.key_id.constant_value
} else = var_value {
    ref = res.expressions.key_id.references[0]
    startswith(ref, "var.")
    var_name := replace(ref, "var.", "")
    var_value := plan.variables[var_name].value
} else = reference {
    ref := res.expressions.key_id.references[_]
    startswith(ref, "data.")
    ps := plan.prior_state.values.root_module.resources[_]
    ps.address == res.address
    reference := ps.values.key_id
}

#--------------------

# Tests that the key_id used in the data source is in the allowed list.
deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "data"
    r.type == "aws_kms_key"
    key_alias := eval_key_name(tfplan, r)
    key_name := trim_prefix(key_alias, "alias/")
    not contains(allowed_kms_keys, key_name)
    reason := sprintf("%-40s :: KMS key name '%s' not in permitted list", [concat(".", [r.type, r.name]), key_name])
}

#---------------
# S3 Buckets
# Tests for a replication configuration rule referencing a KMS key. This MUST be a data source.
deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "managed"
    r.type == "aws_s3_bucket"
    kms_key := eval_expression(tfplan, r.expressions.replication_configuration[_].rules[_].destination[_].replica_kms_key_id)
    not startswith(kms_key, "data.aws_kms_key.")
    reason := sprintf("%-40s :: replication KMS Master key ID '%s' not derived from data source!", [concat(".", [r.type, r.name]), kms_key])
}

# Tests for a server side encryption rule referencing a KMS key. This MUST be a data source.
deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "managed"
    r.type == "aws_s3_bucket"
    kms_key := eval_expression(tfplan, r.expressions.server_side_encryption_configuration[_].rule[_].apply_server_side_encryption_by_default[_].kms_master_key_id)
    not startswith(kms_key, "data.aws_kms_key.")
    reason := sprintf("%-40s :: server_side_encryption KMS Master key ID '%s' not derived from data source!", [concat(".", [r.type, r.name]), kms_key])
}

#---------------
# GENERAL
# Search for the attributes in the list and check that they reference data sources

attributes = {
    "aws_ebs_volume": ["kms_key_id"],
    "aws_ebs_default_kms_key": ["key_arn"],
    "aws_db_instance": ["kms_key_id", "performance_insights_kms_key_id"],
    "aws_rds_cluster": ["kms_key_id"],
    "aws_rds_cluster_instance": ["performance_insights_kms_key_id"],
    "aws_cloudtrail": ["kms_key_id"],
    "aws_cloudwatch_log_group": ["kms_key_id"],
    "aws_dynamodb_table": ["kms_key_arn"],
    "aws_elastictranscoder_pipeline": ["aws_kms_key_arn"],
    "aws_redshift_cluster": ["kms_key_id"],
    "aws_redshift_snapshot_copy_grant": ["kms_key_id"],
    "aws_secretsmanager_secret": ["kms_key_id"],
    "aws_ssm_parameter": ["key_id"]
}

deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    a := attributes[r.type][_]
    r.mode == "managed"
    kms_key := eval_expression(tfplan, r.expressions[a])
    not startswith(kms_key, "data.aws_kms_key.")
    reason := sprintf("%-40s :: KMS Key not derived from data source (%s=%s) :: ", [concat(".", [r.type, r.name]), a, kms_key])
}

#---------
# EMR

# Extract the ARN from the JSON config. This may be a UUID or an alias/name ARN.

deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "managed"
    r.type == "aws_emr_security_configuration"
    config := eval_expression(tfplan, r.expressions.configuration)
    arn := regex.find_n("arn:aws:kms:[a-z0-9-:/_]*", config, 1)
    arn_bits := split(arn[0], ":")
    id := arn_bits[count(arn_bits) - 1]
    key_name := trim_prefix(id, "alias/")
    not contains(allowed_kms_keys, key_name)
    reason := sprintf("%-40s :: configuration disk encryption key '%s' not from permitted list", [concat(".", [r.type, r.name]), key_name])
}

#-----
# SES

# Tests for an S3 action referencing a KMS key. This MUST be a data source.
deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "managed"
    r.type == "aws_ses_receipt_rule"
    kms_key := eval_expression(tfplan, r.expressions.s3_action.kms_key_arn)
    not startswith(kms_key, "data.aws_kms_key.")
    reason := sprintf("%-40s :: s3_action KMS Master key ID '%s' not derived from data source!", [concat(".", [r.type, r.name]), kms_key])
}

#------
# SSM

# Tests for an S3 destination referencing a KMS key. This MUST be a data source.
deny[reason] {
    tfrun.is_destroy == false
    r := tfplan.configuration.root_module.resources[_]
    r.mode == "managed"
    r.type == "aws_ssm_resource_data_sync"
    kms_key := eval_expression(tfplan, r.expressions.s3_destination.kms_key_arn)
    not startswith(kms_key, "data.aws_kms_key.")
    reason := sprintf("%-40s :: s3_destination KMS Master key ID '%s' not derived from data source!", [concat(".", [r.type, r.name]), kms_key])
}

--------------------------------------------------------------------------------
/AWS/enforce_lb_subnets.rego:
--------------------------------------------------------------------------------
# Enforces the use of specific subnets on AWS load balancer subnet groups

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

# Add only private subnets to this list.
# NOTE: OPA cannot validate that a subnet is private unless the terraform config is actually creating the subnet.
allowed_subnets = [
    "subnet-019c416174b079502",
    "subnet-04dbded374ed11690"
]

array_contains(arr, elem) {
    arr[_] = elem
}

lbs = [
    "aws_elb",
    "aws_lb"
]

# Check subnets are in the allowed list
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == lbs[_]
    sid := r.change.after.subnets[_]
    not array_contains(allowed_subnets, sid)

    reason := sprintf(
        "%-40s :: subnet_id '%s' is public and not allowed!",
        [r.address, sid]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_rds_subnets.rego:
--------------------------------------------------------------------------------
# Enforces the use of specific subnets on RDS subnet groups

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

# Add only private subnets to this list.
# NOTE: OPA cannot validate that a subnet is private unless the terraform config is actually creating the subnet.
allowed_subnets = [
    "subnet-019c416174b079502",
    "subnet-04dbded374ed11690"
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Check subnets are in the allowed list
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "aws_db_subnet_group"
    sid := r.change.after.subnet_ids[_]
    not array_contains(allowed_subnets, sid)

    reason := sprintf(
        "%-40s :: subnet_id '%s' is public and not allowed!",
        [r.address, sid]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_s3_buckets_encryption.rego:
--------------------------------------------------------------------------------
# Enforce encryption on S3 buckets

package terraform

import input.tfplan as tfplan

allowed_acls = ["private"]
allowed_sse_algorithms = ["aws:kms", "AES256"]

s3_buckets[r] {
    r := tfplan.resource_changes[_]
    r.type == "aws_s3_bucket"
}

array_contains(arr, elem) {
    arr[_] = elem
}

# Rule to restrict S3 bucket ACLs
deny[reason] {
    r := s3_buckets[_]
    not array_contains(allowed_acls, r.change.after.acl)
    reason := sprintf(
        "%s: ACL %q is not allowed",
        [r.address, r.change.after.acl]
    )
}

# Rule to require server-side encryption
deny[reason] {
    r := s3_buckets[_]
    count(r.change.after.server_side_encryption_configuration) == 0
    reason := sprintf(
        "%s: requires server-side encryption with expected sse_algorithm to be one of %v",
        [r.address, allowed_sse_algorithms]
    )
}

# Rule to enforce specific SSE algorithms
deny[reason] {
    r := s3_buckets[_]
    sse_configuration := r.change.after.server_side_encryption_configuration[_]
    apply_sse_by_default := sse_configuration.rule[_].apply_server_side_encryption_by_default[_]
    not array_contains(allowed_sse_algorithms, apply_sse_by_default.sse_algorithm)
    reason := sprintf(
        "%s: expected sse_algorithm to be one of %v",
        [r.address, allowed_sse_algorithms]
    )
}

--------------------------------------------------------------------------------
/AWS/enforce_s3_private.rego:
--------------------------------------------------------------------------------
# Check S3 bucket is not public

package terraform

import input.tfplan as tfplan

# AWS canned ACLs that make a bucket public
public_acls = [
    "public-read",
    "public-read-write"
]

deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "aws_s3_bucket"
    public_acls[_] == r.change.after.acl

    reason := sprintf("%-40s :: S3 buckets must not be PUBLIC",
        [r.address])
}

--------------------------------------------------------------------------------
/AWS/enforce_sec_group.rego:
--------------------------------------------------------------------------------
# Enforces the use of a specific security group

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun

required_sg := "sg-0434611e67ac24e27"

array_contains(arr, elem) {
    arr[_] = elem
}

# Checks that a list of security groups has been included in the config
deny[reason] {
    r := tfplan.resource_changes[_]
    r.change.after_unknown.vpc_security_group_ids == true

    reason := sprintf(
        "%-40s :: security group '%s' must be specified",
        [r.address, required_sg]
    )
}

# If a list of security groups has been given, check that the required one is in the list.
deny[reason] {
    r := tfplan.resource_changes[_]
    vsg := r.change.after.vpc_security_group_ids
    not array_contains(vsg, required_sg)

    reason := sprintf(
        "%-40s :: security group '%s' must be included in list",
        [r.address, required_sg]
    )
}

--------------------------------------------------------------------------------
/Admin Policies/README.md:
--------------------------------------------------------------------------------
# Admin Templates

Library of OPA templates to meet common Admin Terraform requirements.

**Work in Progress**

| Rego | Description |
| ------------------------ | -- |
| denied_provisioners.rego | Prevent the use of specified provisioners |
| enforce_var_desc.rego | Ensure all variables have descriptions |
| resource_tags.rego | Ensure resources have required tags |
| workspace_name.rego | Simple example of putting constraints on workspace names |
| workspace_tags.rego | Enforce tagging on workspaces |

--------------------------------------------------------------------------------
/Admin Policies/denied_provisioners.rego:
--------------------------------------------------------------------------------
# Prevent specified provisioners from being used.

package terraform

import input.tfplan as tfplan


# List of disallowed provisioner types
denied_provisioners = ["local-exec"]


array_contains(arr, elem) {
    arr[_] = elem
}

module_name(path) = name {
    name := sprintf("module.%s", [path[count(path) - 2]])
} else = root {
    root := "root-module"
}

# Walk the configuration, looking in the root module and any called modules for provisioners, and check them against the denied list.
deny[reason] {
    walk(tfplan.configuration.root_module, [path, value])
    resource = value.resources[_]
    provisioner = resource.provisioners[_]
    array_contains(denied_provisioners, provisioner.type)
    module := module_name(path)
    reason := sprintf(
        "%s.%s: provisioner of type %s is not allowed",
        [module, resource.address, provisioner.type]
    )
}

--------------------------------------------------------------------------------
/Admin Policies/enforce_var_desc.rego:
--------------------------------------------------------------------------------
# Check variables have descriptions

package terraform

import input.tfplan as tfplan

get_desc(avar) = desc {
    desc := avar.description
} else = no_desc {
    no_desc := ""
}

deny[reason] {
    var = tfplan.configuration.root_module.variables[key]

    get_desc(var) == ""

    reason := sprintf("%-40s :: Variable must have a description",
        [key])
}

deny[reason] {
    var = tfplan.configuration.root_module.module_calls[mod].module.variables[key]

    get_desc(var) == ""

    reason := sprintf("%-40s :: Variable in call to module '%s' must have a description",
        [key, mod])
}

--------------------------------------------------------------------------------
/Admin Policies/resource_tags.rego:
--------------------------------------------------------------------------------
# Enforces a set of required tag keys. Values are not checked.

package terraform

import input.tfplan as tfplan


required_tags = ["owner", "department"]


array_contains(arr, elem) {
    arr[_] = elem
}

get_basename(path) = basename {
    arr := split(path, "/")
    basename := arr[count(arr) - 1]
}

# Extract the tags, catering for Google where they are called "labels"
get_tags(resource) = labels {
    # registry.terraform.io/hashicorp/google -> google
    provider_name := get_basename(resource.provider_name)
    "google" == provider_name
    labels := resource.change.after.labels
} else = tags {
    tags := resource.change.after.tags
} else = empty {
    empty := {}
}

deny[reason] {
    resource := tfplan.resource_changes[_]
    action := resource.change.actions[count(resource.change.actions) - 1]
    array_contains(["create", "update"], action)
    tags := get_tags(resource)
    # creates an array of the existing tag keys
    existing_tags := [key | tags[key]]
    required_tag := required_tags[_]
    not array_contains(existing_tags, required_tag)

    reason := sprintf(
        "%s: missing required tag %q",
        [resource.address, required_tag]
    )
}

--------------------------------------------------------------------------------
/Admin Policies/workspace_name.rego:
--------------------------------------------------------------------------------
# Checks the workspace name for a specific suffix.

package terraform

import input.tfrun as tfrun


deny["Forbidden workspace name"] {
    not endswith(tfrun.workspace.name, "-dev")
}

--------------------------------------------------------------------------------
/Admin Policies/workspace_tags.rego:
--------------------------------------------------------------------------------
# Enforces that workspaces are tagged with the names of the providers they use.

package terraform

import input.tfplan as tfplan
import input.tfrun as tfrun


array_contains(arr, elem) {
    arr[_] = elem
}

get_basename(path) = basename {
    arr := split(path, "/")
    basename := arr[count(arr) - 1]
}

deny[reason] {
    resource := tfplan.resource_changes[_]
    action := resource.change.actions[count(resource.change.actions) - 1]
    array_contains(["create", "update"], action)

    # registry.terraform.io/hashicorp/aws -> aws
    cloud_tag := get_basename(resource.provider_name)

    not tfrun.workspace.tags[cloud_tag]

    reason := sprintf("Workspace must be marked with '%s' tag to create resources in %s cloud",
        [cloud_tag, cloud_tag])
}

--------------------------------------------------------------------------------
/Azure/README.md:
--------------------------------------------------------------------------------
# Azure Templates

Library of OPA templates to meet common Azure Terraform requirements.

**Work in Progress**

| Rego | Description |
| ------------------------ | -- |
| enforce_az_blob_private.rego | Ensures Azure blobs are private |
| enforce_inbound_cidr.rego | Ensures invalid CIDRs are not used in firewall rules |

--------------------------------------------------------------------------------
/Azure/enforce_az_blob_private.rego:
--------------------------------------------------------------------------------
# Check Azure blob storage is not public

package terraform

import input.tfplan as tfplan

deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "azurerm_storage_account"
    r.change.after.allow_blob_public_access == true

    reason := sprintf("%-40s :: Azure storage account blob access must not be PUBLIC",
        [r.address])
}

--------------------------------------------------------------------------------
/Azure/enforce_inbound_cidr.rego:
--------------------------------------------------------------------------------
# Enforces the denial of CIDR 0.0.0.0/0 in inbound rules

package terraform

import input.tfplan as tfplan

# Add CIDRs that should be disallowed
invalid_cidrs = [
    "0.0.0.0/0"
]

array_contains(arr, elem) {
    arr[_] = elem
}

types = [
    "azurerm_firewall_nat_rule_collection",
    "azurerm_firewall_network_rule_collection",
    "azurerm_firewall_application_rule_collection"
]

# Checks embedded rules
deny[reason] {
    r := tfplan.resource_changes[_]
    array_contains(types, r.type)
    in := r.change.after.rule[_]
    invalid := invalid_cidrs[_]
    array_contains(in.source_addresses, invalid)
    reason := sprintf(
        "%-40s :: invalid ingress CIDR %s",
        [r.address, invalid]
    )
}

--------------------------------------------------------------------------------
/GCP/README.md:
--------------------------------------------------------------------------------
# GCP Templates

Library of OPA templates to meet common GCP Terraform requirements.

**Work in Progress**

| Rego | Description |
| ------------------------ | -- |
| enforce_fw_source.rego | Blacklist of CIDRs not allowed in firewall source_ranges |
| enforce_gcs_private.rego | Ensures GCS buckets are not publicly exposed |

--------------------------------------------------------------------------------
/GCP/enforce_fw_source.rego:
--------------------------------------------------------------------------------
# Enforces the denial of CIDR 0.0.0.0/0 in firewalls

package terraform

import input.tfplan as tfplan

# Add CIDRs that should be disallowed
invalid_cidrs = [
    "0.0.0.0/0"
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Checks firewall rules
deny[reason] {
    r := tfplan.resource_changes[_]
    r.type == "google_compute_firewall"
    invalid := invalid_cidrs[_]
    array_contains(r.change.after.source_ranges, invalid)
    reason := sprintf(
        "%-40s :: Firewall source range invalid CIDR %s",
        [r.address, invalid]
    )
}

--------------------------------------------------------------------------------
/GCP/enforce_gcs_private.rego:
--------------------------------------------------------------------------------
# Ensure GCS buckets are not public
# Need to check the various ways this can happen

package terraform

import input.tfplan as tfplan

array_contains(arr, elem) {
    arr[_] = elem
}

# Check bucket access control
# The "allUsers" entity grants public access

deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "google_storage_bucket_access_control"
    r.change.after.entity == "allUsers"

    reason := sprintf("%-40s :: GCS buckets must not be PUBLIC",
        [r.address])
}

# Check google_storage_bucket_acl for predefined ACLs

deny[reason] {
    bad_acls := ["publicRead", "publicReadWrite"]
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == "google_storage_bucket_acl"
    bad_acls[_] == r.change.after.predefined_acl

    reason := sprintf("%-40s :: GCS buckets must not use predefined ACL '%s'",
        [r.address, r.change.after.predefined_acl])
}

--------------------------------------------------------------------------------
/Generic/README.md:
--------------------------------------------------------------------------------
# Generic Templates

Library of OPA templates to meet common Terraform requirements. These templates are cloud agnostic and provide a simple way to implement white or black lists on values for attributes.

| Rego | Description |
| ------------------------ | -- |
| actions-blacklist.rego | Blacklist of actions (create, update, delete) |
| array-blacklist.rego | Blacklist for values of an array type attribute |
| array-whitelist.rego | Whitelist for values of an array type attribute |
| attribute_check.rego | Check that an attribute has been specified with a non-null value |
| attribute_value_regex.rego | Check attribute value matches a regular expression |
| instance_types.rego | Enforces instance types on the 3 main cloud providers |
| numeric-range.rego | Check an attribute numeric value is within range (>=min, <=max or both) |
| resource-type-blacklist.rego | Blacklist of resource types |
| resource-type-whitelist.rego | Whitelist of resource types |
| scalar-blacklist.rego | Blacklist for values of a scalar type attribute |
| scalar-whitelist.rego | Whitelist for values of a scalar type attribute |
| tags-required.rego | Enforce required tag keys on specified resource types |
| vcs-user-whitelist.rego | Implements a whitelist of users allowed to auto-apply VCS runs |

In general these templates can be configured simply by setting the resource, attribute and white/black list variables, as in this example:

```rego
...
resource := "{resource_name}"

# The planned value for this scalar attribute
attribute := "{attribute_name}"

# Is checked against this blacklist. If the value IS present in the list the policy is violated.
# This can be a single value list, and can be numerics, booleans or strings
black_list := [
    "{value}",
    "{value}",
]
...
```

--------------------------------------------------------------------------------
/Generic/actions-blacklist.rego:
--------------------------------------------------------------------------------
# Implements a blacklist of actions, with an optional resource list.
#
# This can be tailored to deny specific actions on all resources or just those in the resource list
#

package terraform

import input.tfplan as tfplan

# Terraform resources this policy applies to.
# Comment this out to apply the black list to ALL resources.
# This is ignored if `resources[_] == type` is removed from the deny rule
resources = [
    "{resource_type}",
    "{resource_type}",
    "{resource_type}",
]

# Modify the array to only include disallowed actions
actions_black_list := [
    "delete",
    "create",
    "update",
]

deny[reason] {
    r := tfplan.resource_changes[_]
    type := r.type

    # Comment out this line to apply the policy to ALL resources
    resources[_] == type

    action := r.change.actions[_]

    actions_black_list[_] == action

    reason := sprintf("%-40s :: '%s' action is not allowed for resource type '%s'", [r.address, action, type])
}

--------------------------------------------------------------------------------
/Generic/array-blacklist.rego:
--------------------------------------------------------------------------------
# Implements a blacklist on an array attribute

package terraform

import input.tfplan as tfplan

resource := "{resource_name}"

# The planned value for this array attribute
attribute := "{attribute_name}"

# Is checked against this blacklist.
# If any of the values ARE present in the list the policy is violated.
# This can be a single value list, and can be numerics, booleans or strings
black_list := [
    "{value}",
    "{value}",
]

# Check if a value is in the black list for the attribute
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    list_item = r.change.after[attribute][_]
    black_list[_] == list_item

    reason := sprintf("%-40s :: %s value '%s' is not allowed", [r.address, attribute, list_item])
}

--------------------------------------------------------------------------------
/Generic/array-whitelist.rego:
--------------------------------------------------------------------------------
# Implements a whitelist on an array attribute

package terraform

import input.tfplan as tfplan

resource := "{resource_name}"

# The planned value for this array attribute
attribute := "{attribute_name}"

# Is checked against this whitelist.
# If any of the values ARE NOT present in the list the policy is violated.
# This can be a single value list, and can be numerics, booleans or strings
white_list := [
    "{value}",
    "{value}",
]

array_contains(arr, elem) {
    arr[_] = elem
}

# Check if a value is in the white list for the attribute
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    list_item = r.change.after[attribute][_]
    not array_contains(white_list, list_item)

    reason := sprintf("%-40s :: %s value '%s' is not allowed",
        [r.address, attribute, list_item])
}

--------------------------------------------------------------------------------
/Generic/attribute_check.rego:
--------------------------------------------------------------------------------
# Checks that required attributes have been specified

package terraform

import input.tfplan as tfplan

resource := "{resource}"

# Attributes in the list must have a planned value (not null).
attribute_list := [
    "{attribute}",
    "{attribute}",
]

# Check if the attribute has been specified
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    attribute = attribute_list[_]
    not r.change.after[attribute]

    reason := sprintf("%-40s :: '%s' must be specified in the configuration", [r.address, attribute])
}

# Check if the attribute value is null
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    attribute = attribute_list[_]
    is_null(r.change.after[attribute])

    reason := sprintf("%-40s :: '%s' must have a non-null value in the configuration", [r.address, attribute])
}

--------------------------------------------------------------------------------
/Generic/attribute_value_regex.rego:
--------------------------------------------------------------------------------
# Implements a REGEX check on an attribute value

package terraform

import input.tfplan as tfplan

resource := "{resource}"

# The planned value for this attribute
attribute := "{attribute}"

regex := "{regex}"

# Check if the value matches the regex
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    not re_match(regex, r.change.after[attribute])

    reason := sprintf("%-40s :: %s value '%s' does not match allowed regex '%s'.", [r.address, attribute, r.change.after[attribute], regex])
}

--------------------------------------------------------------------------------
/Generic/instance_types.rego:
--------------------------------------------------------------------------------
# Multi-provider rule to enforce instance type/size

package terraform

import input.tfplan as tfplan

# Allowed sizes by provider
allowed_types = {
    "aws": ["t2.nano", "t2.micro"],
    "azurerm": ["Standard_A0", "Standard_A1"],
    "google": ["n1-standard-1", "n1-standard-2"]
}

# Attribute name for instance type/size by provider
instance_type_key = {
    "aws": "instance_type",
    "azurerm": "vm_size",
    "google": "machine_type"
}

array_contains(arr, elem) {
    arr[_] = elem
}

get_basename(path) = basename {
    arr := split(path, "/")
    basename := arr[count(arr) - 1]
}

# Extracts the instance type/size
get_instance_type(resource) = instance_type {
    # registry.terraform.io/hashicorp/aws -> aws
    provider_name := get_basename(resource.provider_name)
    instance_type := resource.change.after[instance_type_key[provider_name]]
}

deny[reason] {
    resource := tfplan.resource_changes[_]
    instance_type := get_instance_type(resource)
    # registry.terraform.io/hashicorp/aws -> aws
    provider_name := get_basename(resource.provider_name)
    not array_contains(allowed_types[provider_name], instance_type)

    reason := sprintf(
        "%s: instance type %q is not allowed",
        [resource.address, instance_type]
    )
}

--------------------------------------------------------------------------------
/Generic/numeric-range.rego:
--------------------------------------------------------------------------------
# Implements min/max values for a numeric attribute

# Policy can implement a min value, a max value or both.
# Simply set max or min to null to remove these checks

package terraform

import input.tfplan as tfplan

resource := "{resource}"

# Check the planned value for this numeric attribute
attribute := "{attribute}"

# Minimum value allowed. Set to null if no minimum.
min := {N or null}

# Maximum value allowed. Set to null if no maximum.
max := {N or null}

# Check if value is below min
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource

    # OPA considers 'null' to be a type rather than an indicator of no value.
    # When comparing, a null value always sorts lower than a numeric.
    # Thus when min := null the attribute value is always > min.
    # This gives the desired result, albeit for the wrong reason.
    r.change.after[attribute] < min

    reason := sprintf("%-40s :: %s value '%d' is less than the allowed minimum '%d'.", [r.address, attribute, r.change.after[attribute], min])
}

# Check if value is above max
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource

    # This fudge is needed due to an odd use of 'null' in OPA.
    # OPA considers 'null' to be a type rather than an indicator of no value.
    # When comparing, a null value always sorts lower than a numeric.
    # Thus the comparison attribute > max is always true when max := null,
    # when in fact we want it to be undefined so the rule ends.
    # The expression below forces this.
    not is_null(max)

    r.change.after[attribute] > max

    reason := sprintf("%-40s :: %s value '%d' is greater than the allowed maximum '%d'.", [r.address, attribute, r.change.after[attribute], max])
}

--------------------------------------------------------------------------------
/Generic/resource-type-blacklist.rego:
--------------------------------------------------------------------------------
# Implements a blacklist on resource types

package terraform

import input.tfplan as tfplan

# The resource type is checked against this blacklist.
# If the value IS present in the list the policy is violated.
black_list := [
    "{value}",
    "{value}",
]

# Check if the resource type is in the black list
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    black_list[_] == r.type

    reason := sprintf("%-40s :: Resource type '%s' is not allowed", [r.address, r.type])
}

--------------------------------------------------------------------------------
/Generic/resource-type-whitelist.rego:
--------------------------------------------------------------------------------
# Implements a whitelist on resource types

package terraform

import input.tfplan as tfplan

# The resource type is checked against this whitelist.
# If the value IS NOT present in the list the policy is violated.
white_list := [
    "{value}",
    "{value}",
]

array_contains(arr, elem) {
    arr[_] == elem
}

# Check if the resource type is in the white list
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    not array_contains(white_list, r.type)

    reason := sprintf("%-40s :: Resource type '%s' is not allowed", [r.address, r.type])
}

--------------------------------------------------------------------------------
/Generic/scalar-blacklist.rego:
--------------------------------------------------------------------------------
# Implements a blacklist on a scalar attribute

package terraform

import input.tfplan as tfplan

resource := "{resource_name}"

# The planned value for this scalar attribute
attribute := "{attribute_name}"

# Is checked against this blacklist.
# If the value IS present in the list the policy is violated.
# This can be a single value list, and can be numerics, booleans or strings
black_list := [
    "{value}",
    "{value}",
]

# Check if the value is in the black list for the attribute
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    black_list[_] == r.change.after[attribute]

    reason := sprintf("%-40s :: %s value '%s' is not allowed",
        [r.address, attribute, r.change.after[attribute]])
}

--------------------------------------------------------------------------------
/Generic/scalar-whitelist.rego:
--------------------------------------------------------------------------------
# Implements a whitelist on a scalar attribute

package terraform

import input.tfplan as tfplan

resource := "{resource_name}"

# The planned value for this scalar attribute
attribute := "{attribute_name}"

# Is checked against this whitelist.
# If the value IS NOT present in the list the policy is violated.
# This can be a single value list, and can be numerics, booleans or strings
white_list := [
    "{value}",
    "{value}",
]

array_contains(arr, elem) {
    arr[_] == elem
}

# Check if the value is in the white list for the attribute
deny[reason] {
    r = tfplan.resource_changes[_]
    r.mode == "managed"
    r.type == resource
    not array_contains(white_list, r.change.after[attribute])

    reason := sprintf("%-40s :: %s value '%s' is not allowed.",
        [r.address, attribute, r.change.after[attribute]])
}

--------------------------------------------------------------------------------
/Generic/tags-required.rego:
--------------------------------------------------------------------------------
# Enforces a set of required tag keys for given resource types. Values are not checked.

package terraform

import input.tfplan as tfplan

tags_map := [
    {
        "type": "aws_instance",
        "tags_required": [
            "owner",
            "foo",
            "department",
        ],
    },
    {
        "type": "aws_subnet",
        "tags_required": [
            "secure",
            "muppets"
        ],
    }
]

array_contains(arr, elem) {
    arr[_] = elem
}

get_basename(path) = basename {
    arr := split(path, "/")
    basename := arr[count(arr) - 1]
}

# Extract the tags, catering for Google where they are called "labels"
get_tags(resource) = labels {
    # registry.terraform.io/hashicorp/google -> google
    provider_name := get_basename(resource.provider_name)
    "google" == provider_name
    labels := resource.change.after.labels
} else = tags {
    tags := resource.change.after.tags
} else = empty {
    empty := {}
}

deny[reason] {
    r := tfplan.resource_changes[_]
    action := r.change.actions[count(r.change.actions) - 1]
    array_contains(["create", "update"], action)
    tags := get_tags(r)
    # creates an array of the existing tag keys
    existing_tags := [key | tags[key]]
    # Traverse the map comparing tags by type
    tm := tags_map[_]
    r.type == tm.type
    required_tag := tm.tags_required[_]
    not array_contains(existing_tags, required_tag)

    reason := sprintf("%-40s :: tag '%s' is required.",
        [r.address, required_tag])
}

--------------------------------------------------------------------------------
/Generic/vcs-user-whitelist.rego:
--------------------------------------------------------------------------------
# Implements a whitelist of users allowed to auto-apply runs

package terraform

import input.tfrun as tfrun

allowed_emails := [
    "{value}",
    "{value}",
]

array_contains(arr, elem) {
    arr[_] == elem
}

# Check if the user is in the whitelist
deny[reason] {
    tfrun.source == "vcs"
    tfrun.workspace.auto_apply == true
    tfrun.is_dry == false
    not array_contains(allowed_emails, tfrun.created_by.email)

    reason := sprintf("%s.%s :: user %s is not allowed to auto-apply runs.",
        [tfrun.environment.name, tfrun.workspace.name, tfrun.created_by.email])
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Scalr OPA Templates

Library of OPA templates to meet common Terraform requirements.

Organized into folders for convenience.

DISCLAIMER:

These policies have been tested against limited sets of `terraform plan` outputs. No warranty is given that they will work as desired in all cases and anyone utilizing these policies must test them in their own environments before putting them into live use.

| Folder | Description |
| ------------------------ | -- |
| Generic | These templates are cloud agnostic and provide a simple way to implement white or black lists on values for attributes. |
| AWS | Policies specific to Amazon Web Services |
| Azure | Policies specific to Azure |
| GCP | Policies specific to Google Cloud Platform |
| Admin Policies | Policies related to general admin compliance, such as restricting provisioners, enforcing tags, etc. |

Please see https://docs.scalr.com/en/latest/opa.html for details of integrating OPA policies with Scalr.

--------------------------------------------------------------------------------
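
As a quick local sanity check before wiring any of these templates into Scalr, the deny rules can be exercised with OPA's built-in test runner by stubbing the wrapped plan document they expect under `input.tfplan` (and, where used, `input.tfrun`). The sketch below targets `AWS/enforce_s3_private.rego`; the test file name, the bucket address and the mocked ACL values are illustrative only, and the mock contains just the fields that policy reads. Assuming the test file sits next to the policy, it can be run with `opa test AWS/enforce_s3_private.rego AWS/enforce_s3_private_test.rego`.

```rego
# enforce_s3_private_test.rego -- minimal sketch; file name and mock values are illustrative only.
package terraform

# A bucket planned with a public canned ACL should produce a deny reason.
test_public_bucket_denied {
    deny[_] with input as {"tfplan": {"resource_changes": [{
        "address": "aws_s3_bucket.example",
        "mode": "managed",
        "type": "aws_s3_bucket",
        "change": {"actions": ["create"], "after": {"acl": "public-read"}}
    }]}}
}

# A private bucket should produce no deny reasons.
test_private_bucket_allowed {
    count(deny) == 0 with input as {"tfplan": {"resource_changes": [{
        "address": "aws_s3_bucket.example",
        "mode": "managed",
        "type": "aws_s3_bucket",
        "change": {"actions": ["create"], "after": {"acl": "private"}}
    }]}}
}
```

The same pattern applies to any other template here: copy the fields its deny rules reference into the mocked `tfplan`/`tfrun` documents and assert on `deny`.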