├── .github
│   ├── labeler.yml
│   └── workflows
│       ├── auto_approve.yml
│       ├── auto_merge.yml
│       ├── codeql-analysis.yml
│       ├── labeler.yml
│       ├── linter.yml
│       └── tf.yml
├── .gitignore
├── LICENSE
├── README.md
├── backend.tf
├── img
│   └── org_policy_flow.png
├── main.tf
├── outputs.tf
├── policies
│   └── org_policy.json
├── src
│   ├── main.py
│   └── requirements.txt
├── terraform-example.tfvars
└── variables.tf
/.github/labeler.yml: -------------------------------------------------------------------------------- 1 | automerge: policies/org_policy.json -------------------------------------------------------------------------------- /.github/workflows/auto_approve.yml: -------------------------------------------------------------------------------- 1 | #################################################################### 2 | ## Approve the PR based on author ## 3 | #################################################################### 4 | 5 | name: Auto-approve 6 | 7 | on: 8 | pull_request: 9 | types: 10 | - opened 11 | 12 | jobs: 13 | pr-automation: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Auto approve PRs based on author 17 | uses: hmarr/auto-approve-action@v4.0.0 18 | if: github.actor == 'scalesec-automation-bot' 19 | with: 20 | github-token: ${{ secrets.GH_TOKEN }} 21 | -------------------------------------------------------------------------------- /.github/workflows/auto_merge.yml: -------------------------------------------------------------------------------- 1 | #################################################################### 2 | ## Merge the PR if the label 'automerge' is assigned to the PR. ## 3 | #################################################################### 4 | 5 | name: Auto-merge 6 | 7 | on: 8 | pull_request: 9 | types: 10 | - labeled 11 | 12 | jobs: 13 | pr-automation: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Auto merge if 'automerge' label exists 17 | uses: pascalgn/automerge-action@main 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} 20 | --------------------------------------------------------------------------------
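The two workflows above close the automation loop: a PR opened by `scalesec-automation-bot` is auto-approved, and once the Labeler workflow attaches the `automerge` label, the PR merges unattended. A minimal sketch of how the Cloud Function might open that PR with PyGithub, assuming the bot account's token; the branch name, commit message, and helper function are illustrative assumptions, not the actual `src/main.py` code:

```
# Hypothetical sketch: opening the policy PR that the auto-approve and
# auto-merge workflows then handle. The PyGithub calls are real; the branch
# name, commit message, and helper are assumptions. The token must belong to
# 'scalesec-automation-bot' for the auto-approve 'if:' condition to match.
from github import Github  # pip install PyGithub

def open_policy_pr(token: str, new_policy_json: str) -> None:
    gh = Github(token)
    repo = gh.get_repo("ScaleSec/gcp_org_policy_notifier")

    # Branch off main, update the tracked policy file, then open the PR.
    main_ref = repo.get_git_ref("heads/main")
    branch = "org-policy-update"  # assumed branch name
    repo.create_git_ref(f"refs/heads/{branch}", main_ref.object.sha)

    contents = repo.get_contents("policies/org_policy.json", ref=branch)
    repo.update_file(
        path=contents.path,
        message="Update Organization Policy constraints",
        content=new_policy_json,
        sha=contents.sha,
        branch=branch,
    )
    repo.create_pull(
        title="New GCP Organization Policy constraints",
        body="Automated update from the org policy notifier function.",
        head=branch,
        base="main",
    )
```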
/.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | 2 | # This is auto-generated by GitHub. 3 | # Code located here: https://github.com/github/codeql 4 | 5 | name: "CodeQL" 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | paths: 11 | - "**.py" 12 | - "**.tf" 13 | pull_request: 14 | # The branches below must be a subset of the branches above 15 | branches: [ main ] 16 | paths: 17 | - "**.py" 18 | - "**.tf" 19 | schedule: 20 | - cron: '25 11 * * 4' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | runs-on: ubuntu-latest 26 | 27 | strategy: 28 | fail-fast: false 29 | matrix: 30 | language: [ 'python' ] 31 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 32 | # Learn more: 33 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 34 | 35 | steps: 36 | - name: Checkout repository 37 | uses: actions/checkout@v4 38 | 39 | # Initializes the CodeQL tools for scanning. 40 | - name: Initialize CodeQL 41 | uses: github/codeql-action/init@v3 42 | with: 43 | languages: ${{ matrix.language }} 44 | # If you wish to specify custom queries, you can do so here or in a config file. 45 | # By default, queries listed here will override any specified in a config file. 46 | # Prefix the list here with "+" to use these queries and those in the config file. 47 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 48 | 49 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 50 | # If this step fails, then you should remove it and run the build manually (see below) 51 | - name: Autobuild 52 | uses: github/codeql-action/autobuild@v3 53 | 54 | # ℹ️ Command-line programs to run using the OS shell. 55 | # 📚 https://git.io/JvXDl 56 | 57 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 58 | # and modify them (or add more) to build your code if your project 59 | # uses a compiled language 60 | 61 | #- run: | 62 | # make bootstrap 63 | # make release 64 | 65 | - name: Perform CodeQL Analysis 66 | uses: github/codeql-action/analyze@v3 67 | -------------------------------------------------------------------------------- /.github/workflows/labeler.yml: -------------------------------------------------------------------------------- 1 | #################################################################### 2 | ## Add an 'automerge' label to the PR based on file touched ## 3 | #################################################################### 4 | 5 | name: Labeler 6 | 7 | on: 8 | pull_request: 9 | types: 10 | - opened 11 | - synchronize 12 | paths: 13 | - "policies/org_policy.json" 14 | 15 | jobs: 16 | triage: 17 | permissions: 18 | contents: read 19 | pull-requests: write 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/labeler@v4 23 | -------------------------------------------------------------------------------- /.github/workflows/linter.yml: -------------------------------------------------------------------------------- 1 | ########################### 2 | ########################### 3 | ## Linter GitHub Actions ## 4 | ########################### 5 | ########################### 6 | name: Lint Code Base 7 | 8 | # 9 | # Documentation: 10 | # https://help.github.com/en/articles/workflow-syntax-for-github-actions 11 | # 12 | 13 | ############################# 14 | # Start the job on all push # 15 | ############################# 16 | on: 17 | push: 18 | branches-ignore: 19 | - 'main' 20 | paths: 21 | - "**.py" 22 | - "**.tf" 23 | 24 | ############### 25 | # Set the Job # 26 | ############### 27 | jobs: 28 | build: 29 | # Name the Job 30 | name: Lint Code Base 31 | # Set the agent to run on 32 | runs-on: ubuntu-latest 33 | 34 | ################## 35 | # Load all steps # 36 | ################## 37 | steps: 38 | ########################## 39 | # Checkout the code base # 40 | ########################## 41 | - name: Checkout Code 42 | uses: actions/checkout@v4 43 | with: 44 | # Full git history is needed to get a proper list of changed files within `super-linter` 45 | fetch-depth: 0 46 | 47 | ################################ 48 | # Run Linter against code base # 49 | # Upgraded linter v3 -> v4 50 | ################################ 51 | - name: Lint Code Base 52 | uses: docker://github/super-linter:v4 53 | env: 54 | DEFAULT_BRANCH: 'main' 55 | VALIDATE_ALL_CODEBASE: false 56 | VALIDATE_TERRAFORM: true 57 | VALIDATE_PYTHON_PYLINT: true 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | --------------------------------------------------------------------------------
/.github/workflows/tf.yml: -------------------------------------------------------------------------------- 1 | name: Terraform lint and apply 2 | 3 | ### 4 | # terraform action repo/docs 5 | # tf actions doc: https://github.com/marketplace/actions/hashicorp-setup-terraform 6 | # gcp actions doc: 7 | # - https://github.com/GoogleCloudPlatform/github-actions/tree/master/setup-gcloud 8 | # - https://github.com/actions-hub/gcloud 9 | # 10 | # defaults: 11 | # run: 12 | # working-directory: ${{ env.tf_actions_working_dir }} 13 | on: 14 | push: 15 | branches: 16 | - main 17 | paths: 18 | - "**.tf" 19 | pull_request: 20 | paths: 21 | - "**.tf" 22 | 23 | jobs: 24 | build: 25 | runs-on: ubuntu-latest 26 | 27 | name: Terraform Validation 28 | steps: 29 | 30 | - uses: actions/checkout@v4 31 | 32 | # pull the prod tfvars file if we are on main, otherwise dev 33 | # credentials are provided to google-github-actions/auth as a JSON key 34 | # and exported so that setup-gcloud and gsutil pick them up automatically 35 | - uses: google-github-actions/auth@v2 36 | if: github.ref == 'refs/heads/main' 37 | with: 38 | credentials_json: ${{ secrets.SA_VARS_PROD }} 39 | export_environment_variables: true 40 | 41 | - uses: google-github-actions/auth@v2 42 | if: github.ref != 'refs/heads/main' 43 | with: 44 | credentials_json: ${{ secrets.SA_VARS_DEV }} 45 | export_environment_variables: true 46 | 47 | - name: 'Set up Cloud SDK' 48 | uses: 'google-github-actions/setup-gcloud@v2' 49 | 50 | - name: Get tfvars file 51 | if: github.ref == 'refs/heads/main' 52 | run: gsutil cp gs://${{ secrets.GCP_TFVAR_BUCKET_PROD }}/terraform.tfvars ./terraform.tfvars 53 | 54 | - name: Get tfvars file 55 | if: github.ref != 'refs/heads/main' 56 | run: gsutil cp gs://${{ secrets.GCP_TFVAR_BUCKET_DEV }}/terraform.tfvars ./terraform.tfvars 57 | 58 | - uses: google-github-actions/auth@v2 59 | with: 60 | credentials_json: ${{ secrets.GCP_SA_KEY }} 61 | project_id: ${{ secrets.GCP_PROJECT_ID }} 62 | export_environment_variables: true 63 | 64 | - uses: hashicorp/setup-terraform@v3 65 | 66 | - run: gcloud info 67 | - name: Terraform fmt 68 | id: fmt 69 | run: terraform fmt 70 | continue-on-error: true 71 | 72 | - name: Terraform Init 73 | id: init 74 | run: terraform init -backend-config="bucket=${{ secrets.TF_STATE_BUCKET }}" -backend-config="prefix=org_policy/" 75 | 76 | - name: Terraform Validate 77 | id: validate 78 | run: terraform validate -no-color 79 | 80 | - name: Terraform Plan 81 | id: plan 82 | run: terraform plan -no-color 83 | continue-on-error: false 84 | 85 | - name: Terraform Apply 86 | id: apply 87 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 88 | run: terraform apply -auto-approve 89 | continue-on-error: false 90 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | **venv** 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | 132 | # Terraform 133 | .terraform 134 | *.tfstate 135 | *.tfstate.backup 136 | .terraform.tfstate.lock.info 137 | 138 | # Compressed file 139 | src.zip 140 | 141 | terraform.tfvars 142 | .DS_Store 143 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | Copyright 2020 ScaleSec 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 
18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GCP Organization Policy Notifier 2 | [![GitHub Super-Linter](https://github.com/ScaleSec/gcp_org_policy_notifier/workflows/Lint%20Code%20Base/badge.svg)](https://github.com/marketplace/actions/super-linter) 3 | 4 | 5 | ## Description 6 | 7 | This is a 100% serverless tool that analyzes [GCP Organization Policies](https://cloud.google.com/resource-manager/docs/organization-policy/overview) for updates and then posts them to a Slack channel and to Twitter via our [Twitter bot](https://twitter.com/gcporgpolicybot). 8 | 9 | ## Process Flow 10 | 11 | ![Organization Policy Notifier Flow](./img/org_policy_flow.png) 12 | 13 | 1. A Cloud Scheduler job kicks off the comparison every hour and sends a base64-encoded message to Cloud Pub/Sub. 14 | 2. Cloud Pub/Sub forwards the encoded message to a Cloud Function via a Pub/Sub subscription. 15 | 3. The Cloud Function receives the message, decodes it, and announces the beginning of the comparison. 16 | 4. The Cloud Function searches for a pre-existing Organization Policy Constraint baseline file in a GCS bucket. 17 | - If a baseline file exists: it copies the file locally for comparison. 18 | - If a baseline file does not exist in the GCS bucket: the function creates a baseline based on the currently available Organization Policy Constraints and then uploads it to GCS for future comparisons. 19 | 5. After the baseline is copied locally (if a baseline existed), the Cloud Function queries the available Organization Policy Constraints and performs a comparison (see the sketch below this list). 20 | - If there are updates, the new Organization Policy Constraint list that was generated becomes the new baseline and is uploaded to the GCS bucket for future comparisons. The Cloud Function then moves on to steps 6, 7, and 8. 21 | 6. The Cloud Function creates a GitHub Pull Request with the new Organization Policies, which can be found [here](https://github.com/ScaleSec/gcp_org_policy_notifier/blob/main/policies/org_policy.json). 22 | 7. The Cloud Function posts to Twitter via the handle [@gcporgpolicybot](https://twitter.com/gcporgpolicybot) and includes the new constraints plus a link to the GitHub commit. 23 | 8. In addition to a Twitter post, the Cloud Function will post in a Slack channel alerting the participants of the new constraints. 24 |
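Steps 4 and 5 boil down to a download-compare-upload loop against GCS. Below is a minimal sketch of that loop, assuming the `google-cloud-storage` and `google-api-python-client` libraries; the function names and baseline layout are illustrative assumptions, not the actual contents of `src/main.py`.

```
# Illustrative sketch of the baseline comparison (steps 4-5); names and
# structure are assumptions, not the actual src/main.py implementation.
import json
from google.cloud import storage          # google-cloud-storage
from googleapiclient import discovery     # google-api-python-client

def fetch_current_constraints(org_id: str) -> list:
    """List the Org Policy constraints currently available to the org."""
    crm = discovery.build("cloudresourcemanager", "v1")
    request = crm.organizations().listAvailableOrgPolicyConstraints(
        resource=f"organizations/{org_id}", body={}
    )
    constraints = []
    while request is not None:
        response = request.execute()
        constraints.extend(c["name"] for c in response.get("constraints", []))
        request = crm.organizations().listAvailableOrgPolicyConstraints_next(
            request, response
        )
    return constraints

def compare_to_baseline(bucket_name: str, policy_file: str, org_id: str) -> list:
    """Return newly added constraints, updating the GCS baseline if needed."""
    blob = storage.Client().bucket(bucket_name).blob(policy_file)
    current = fetch_current_constraints(org_id)
    if not blob.exists():
        # First run: seed the baseline and report nothing new (step 4).
        blob.upload_from_string(json.dumps(current))
        return []
    baseline = json.loads(blob.download_as_text())
    new_constraints = sorted(set(current) - set(baseline))
    if new_constraints:
        # New constraints become the baseline for the next run (step 5).
        blob.upload_from_string(json.dumps(current))
    return new_constraints
```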
25 | ## Configuration 26 | 27 | 1. Update (or comment out) the `backend.tf` file, which configures the Terraform state backend. 28 | ``` 29 | terraform { 30 | backend "gcs" { 31 | bucket = "" 32 | prefix = "" 33 | } 34 | } 35 | ``` 36 | 37 | 2. In order to keep secrets out of the Terraform state file, we recommend manually uploading the [Slack Webhook](https://api.slack.com/messaging/webhooks) into GCP Secret Manager and then referencing it in the Terraform variables below. 38 | 39 | 3. Fill in the required values for the `terraform.tfvars` file. We recommend an isolated project for this solution as well as a separate GCS bucket for your policy file and the function's code file `src.zip`. 40 | ``` 41 | project_id = "" 42 | org_id = "" 43 | secret_project = "" 44 | name_prefix = "" 45 | secret_slack_name = "" 46 | secret_token_name = "" 47 | secret_version = "" 48 | twitter_consumer_key_name = "" 49 | twitter_consumer_key_secret_name = "" 50 | twitter_access_token_name = "" 51 | twitter_access_token_secret_name = "" 52 | policy_bucket_location = "" 53 | ```
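At runtime, the function resolves these secrets from Secret Manager rather than from Terraform state. A minimal sketch of that lookup, assuming the `google-cloud-secret-manager` client; the helper name is an assumption, while the `S_*` environment variable names mirror those wired up in `main.tf`:

```
# Hedged sketch: reading the Slack webhook out of Secret Manager at runtime.
# The env var names mirror main.tf's function_environment_variables; the
# helper itself is an assumption, not the actual src/main.py code.
import os
from google.cloud import secretmanager  # google-cloud-secret-manager

def get_secret(secret_name: str) -> str:
    client = secretmanager.SecretManagerServiceClient()
    name = (
        f"projects/{os.environ['S_PROJECT']}"
        f"/secrets/{secret_name}/versions/{os.environ.get('S_VERSION', 'latest')}"
    )
    response = client.access_secret_version(request={"name": name})
    return response.payload.data.decode("UTF-8")

slack_webhook = get_secret(os.environ["S_SLACK_NAME"])
```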
54 | 55 | ## Deployment 56 | 57 | 1. Clone the repository locally: 58 | ``` 59 | git clone git@github.com:ScaleSec/gcp_org_policy_notifier.git 60 | ``` 61 | 62 | 2. Create your virtual environment: 63 | ``` 64 | python3 -m venv my_venv 65 | ``` 66 | 67 | 3. Activate the environment and install dependencies: 68 | ``` 69 | source my_venv/bin/activate 70 | pip install -r src/requirements.txt 71 | ``` 72 | 73 | 4. Deploy via Terraform: 74 | ``` 75 | terraform init 76 | terraform plan 77 | terraform apply 78 | ``` 79 | 80 | ## Inputs 81 | 82 | | Name | Description | Type | Default | Required | 83 | |------|-------------|------|---------|:--------:| 84 | | bucket\_force\_destroy | When deleting the GCS bucket containing the cloud function, delete all objects in the bucket first. | `bool` | `true` | no | 85 | | file\_location | Location to store the org policy file in the Cloud Function. Needs to be in /tmp/. | `string` | `"/tmp/policies.txt"` | no | 86 | | function\_available\_memory\_mb | The amount of memory in megabytes allotted for the function to use. | `number` | `2048` | no | 87 | | function\_description | The description of the function. | `string` | `"Compares Org Policies and alerts users."` | no | 88 | | function\_entry\_point | The name of a method in the function source which will be invoked when the function is executed. | `string` | `"announce_kickoff"` | no | 89 | | function\_event\_trigger\_failure\_policy\_retry | A toggle to determine if the function should be retried on failure. | `bool` | `false` | no | 90 | | function\_perms | The Cloud Function custom IAM role permissions. Must be a list. | `list` | `[ "secretmanager.secrets.get", "secretmanager.versions.get", "secretmanager.versions.access", "orgpolicy.policy.get", "resourcemanager.projects.get", "resourcemanager.projects.list", "storage.objects.create", "storage.objects.get", "storage.objects.update", "storage.objects.delete", "storage.objects.list" ]` | no | 91 | | function\_runtime | The runtime in which the function will be executed. | `string` | `"python37"` | no | 92 | | function\_source\_directory | The contents of this directory will be archived and used as the function source. | `string` | `"./src"` | no | 93 | | function\_timeout\_s | The amount of time in seconds allotted for the execution of the function. | `number` | `60` | no | 94 | | job\_description | The description of the Cloud Scheduler. | `string` | `"Starts Organization Policies check."` | no | 95 | | job\_schedule | The job frequency, in cron syntax. The default is every hour. | `string` | `"0 * * * *"` | no | 96 | | message\_data | The data to send in the topic message (base64-encoded; see the note below the table). | `string` | `"U3RhcnRpbmcgQ29tcGFyaXNvbg=="` | no | 97 | | name\_prefix | The prefix used to name resources. | `string` | n/a | yes | 98 | | org\_id | The GCP Org ID to assign permissions to. | `any` | n/a | yes | 99 | | policy\_file | The name of the Org policy file in the GCS bucket. | `string` | `"policies.txt"` | no | 100 | | project\_id | The ID of the project where the resources will be created. | `string` | n/a | yes | 101 | | region | The region in which resources will be applied. | `string` | `"us-central1"` | no | 102 | | scheduler\_job | An existing Cloud Scheduler job instance. | `object({ name = string })` | `null` | no | 103 | | secret\_project | The GCP project where the Slack Webhook is stored. | `any` | n/a | yes | 104 | | secret\_slack\_name | The name of the Slack Webhook secret in GCP. | `any` | n/a | yes | 105 | | secret\_token\_name | The name of the GitHub token secret in GCP. | `any` | n/a | yes | 106 | | secret\_version | The version of the Slack Webhook secret in GCP. Leave as an empty string to use 'latest'. | `string` | `"latest"` | no | 107 | | time\_zone | The timezone to use in scheduler. | `string` | `"America/Detroit"` | no | 108 | | twitter\_access\_token\_name | The name of the Twitter Access Token secret in GCP. | `any` | n/a | yes | 109 | | twitter\_access\_token\_secret\_name | The name of the Twitter Access Token Secret secret in GCP. | `any` | n/a | yes | 110 | | twitter\_consumer\_key\_name | The name of the Twitter Consumer Key secret in GCP. | `any` | n/a | yes | 111 | | twitter\_consumer\_key\_secret\_name | The name of the Twitter Consumer Key Secret secret in GCP. | `any` | n/a | yes | 112 |
| no | 91 | | function\_runtime | The runtime in which the function will be executed. | `string` | `"python37"` | no | 92 | | function\_source\_directory | The contents of this directory will be archived and used as the function source. | `string` | `"./src"` | no | 93 | | function\_timeout\_s | The amount of time in seconds allotted for the execution of the function. | `number` | `60` | no | 94 | | job\_description | The description of the Cloud Scheduler. | `string` | `"Starts Organization Policies check."` | no | 95 | | job\_schedule | The job frequency, in cron syntax. The default is every hour. | `string` | `"0 * * * *"` | no | 96 | | message\_data | The data to send in the topic message. | `string` | `"U3RhcnRpbmcgQ29tcGFyaXNvbg=="` | no | 97 | | name\_prefix | The prefixed used to name resources | `string` | n/a | yes | 98 | | org\_id | The GCP Org ID to assign permissions to. | `any` | n/a | yes | 99 | | policy\_file | The name of the Org policy file in the GCS bucket. | `string` | `"policies.txt"` | no | 100 | | project\_id | The ID of the project where the resources will be created. | `string` | n/a | yes | 101 | | region | The region in which resources will be applied. | `string` | `"us-central1"` | no | 102 | | scheduler\_job | An existing Cloud Scheduler job instance. | `object({ name = string })` | `null` | no | 103 | | secret\_project | The GCP project the Slack Webhook is stored. | `any` | n/a | yes | 104 | | secret\_slack\_name | The name of the Slack Webhook secret in GCP. | `any` | n/a | yes | 105 | | secret\_token\_name | The name of the GitHub token secret in GCP. | `any` | n/a | yes | 106 | | secret\_version | The version of the Slack Webhook secret in GCP. Leave as an empty string to use 'latest' | `string` | `"latest"` | no | 107 | | time\_zone | The timezone to use in scheduler. | `string` | `"America/Detroit"` | no | 108 | | twitter\_access\_token\_name | The name of the Twitter Access Token secret in GCP. | `any` | n/a | yes | 109 | | twitter\_access\_token\_secret\_name | The name of the Twitter Access Token Secret secret in GCP. | `any` | n/a | yes | 110 | | twitter\_consumer\_key\_name | The name of the Twitter Consumer Key secret in GCP. | `any` | n/a | yes | 111 | | twitter\_consumer\_key\_secret\_name | The name of the Twitter Consumer Key Secret secret in GCP. | `any` | n/a | yes | 112 | 113 | 114 | ## Feedback 115 | 116 | Feedback is welcome and encouraged via a GitHub issue. Please open an issue for any bugs, feature requests, or general improvements you would like to see. Thank you in advance! 
117 | -------------------------------------------------------------------------------- /backend.tf: -------------------------------------------------------------------------------- 1 | ###################### 2 | # GCS Backend Bucket # 3 | ###################### 4 | 5 | # The GCS bucket needs to be pre-existing 6 | terraform { 7 | backend "gcs" {} 8 | } 9 | -------------------------------------------------------------------------------- /img/org_policy_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ScaleSec/gcp_org_policy_notifier/00c90271ed652c65f96d31cf96e6b1c51a15977f/img/org_policy_flow.png -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | required_providers { google = ">= 3.38" } 4 | } 5 | 6 | provider "google" { 7 | project = var.project_id 8 | region = var.region 9 | } 10 | 11 | #------------------------# 12 | # Naming # 13 | #------------------------# 14 | locals { 15 | name_prefix = var.name_prefix 16 | } 17 | 18 | resource "random_id" "random" { 19 | byte_length = 4 20 | } 21 | 22 | #------------------------# 23 | # GCS Policy Bucket # 24 | #------------------------# 25 | resource "google_storage_bucket" "policy_bucket" { 26 | name = "${local.name_prefix}-policy-bucket-${random_id.random.hex}" 27 | location = var.policy_bucket_location 28 | force_destroy = true 29 | versioning { 30 | enabled = true 31 | } 32 | uniform_bucket_level_access = true 33 | } 34 | 35 | #-------------------------------------# 36 | # Cloud Function Service Account # 37 | #-------------------------------------# 38 | resource "google_service_account" "org_policy_compare_sa" { 39 | account_id = "org-policy-compare" 40 | display_name = "Organization Policy Compare" 41 | } 42 | 43 | #----------------------------------------------# 44 | # Cloud Function Service Account IAM Role # 45 | #----------------------------------------------# 46 | resource "google_organization_iam_custom_role" "org_policy_compare_custom_role" { 47 | role_id = "org_policy_compare_cfn" 48 | org_id = var.org_id 49 | title = "Organization Policy Function Role" 50 | description = "IAM role for Cloud Function to Compare Org Policies" 51 | permissions = var.function_perms 52 | } 53 | 54 | #------------------------------------------------# 55 | # Cloud Function Service Account IAM Member # 56 | #------------------------------------------------# 57 | resource "google_organization_iam_member" "org_policy_compare_member" { 58 | org_id = var.org_id 59 | role = "organizations/${var.org_id}/roles/${google_organization_iam_custom_role.org_policy_compare_custom_role.role_id}" 60 | member = "serviceAccount:${google_service_account.org_policy_compare_sa.email}" 61 | } 62 | 63 | #---------------------# 64 | # Architecture Module # 65 | #---------------------# 66 | module "pubsub_scheduled_example" { 67 | source = "terraform-google-modules/scheduled-function/google" 68 | 69 | project_id = var.project_id 70 | region = var.region 71 | 72 | function_entry_point = var.function_entry_point 73 | function_source_directory = var.function_source_directory 74 | function_name = "${local.name_prefix}-${random_id.random.hex}" 75 | function_available_memory_mb = var.function_available_memory_mb 76 | function_description = var.function_description 77 | function_event_trigger_failure_policy_retry = var.function_event_trigger_failure_policy_retry 78
| function_runtime = var.function_runtime 79 | function_timeout_s = var.function_timeout_s 80 | function_service_account_email = google_service_account.org_policy_compare_sa.email 81 | 82 | topic_name = "${local.name_prefix}-topic" 83 | job_description = var.job_description 84 | job_name = "${local.name_prefix}-job-${random_id.random.hex}" 85 | job_schedule = var.job_schedule 86 | scheduler_job = var.scheduler_job 87 | 88 | bucket_force_destroy = var.bucket_force_destroy 89 | bucket_name = "${local.name_prefix}-cfn-bucket-${random_id.random.hex}" 90 | message_data = var.message_data 91 | time_zone = var.time_zone 92 | function_environment_variables = { 93 | POLICY_BUCKET = google_storage_bucket.policy_bucket.name 94 | FILE_LOCATION = var.file_location 95 | POLICY_FILE = var.policy_file 96 | ORG_ID = var.org_id 97 | S_PROJECT = var.secret_project 98 | S_SLACK_NAME = var.secret_slack_name 99 | S_TOKEN_NAME = var.secret_token_name 100 | S_VERSION = var.secret_version == "" ? "latest" : var.secret_version 101 | CONSUMER_KEY_NAME = var.twitter_consumer_key_name 102 | CONSUMER_KEY_SECRET_NAME = var.twitter_consumer_key_secret_name 103 | ACCESS_TOKEN_NAME = var.twitter_access_token_name 104 | ACCESS_TOKEN_SECRET_NAME = var.twitter_access_token_secret_name 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ScaleSec/gcp_org_policy_notifier/00c90271ed652c65f96d31cf96e6b1c51a15977f/outputs.tf -------------------------------------------------------------------------------- /policies/org_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "constraints": [ 3 | { 4 | "name": "constraints/ainotebooks.accessMode", 5 | "displayName": "Define access mode for Vertex AI Workbench notebooks and instances", 6 | "description": "This list constraint defines the modes of access allowed to Vertex AI Workbench notebooks and instances where enforced. The allow or deny list can specify multiple users with the service-account mode or single-user access with the single-user mode. The access mode to be allowed or denied must be listed explicitly.", 7 | "constraintDefault": "ALLOW", 8 | "listConstraint": {} 9 | }, 10 | { 11 | "name": "constraints/ainotebooks.disableFileDownloads", 12 | "displayName": "Disable file downloads on new Vertex AI Workbench instances", 13 | "description": "This boolean constraint, when enforced, prevents the creation of Vertex AI Workbench instances with the file download option enabled. By default, the file download option can be enabled on any Vertex AI Workbench instance.", 14 | "constraintDefault": "ALLOW", 15 | "booleanConstraint": {} 16 | }, 17 | { 18 | "name": "constraints/ainotebooks.disableRootAccess", 19 | "displayName": "Disable root access on new Vertex AI Workbench user-managed notebooks and instances", 20 | "description": "This boolean constraint, when enforced, prevents newly created Vertex AI Workbench user-managed notebooks and instances from enabling root access. 
By default, Vertex AI Workbench user-managed notebooks and instances can have root access enabled.", 21 | "constraintDefault": "ALLOW", 22 | "booleanConstraint": {} 23 | }, 24 | { 25 | "name": "constraints/ainotebooks.disableTerminal", 26 | "displayName": "Disable terminal on new Vertex AI Workbench instances", 27 | "description": "This boolean constraint, when enforced, prevents the creation of Vertex AI Workbench instances with the terminal enabled. By default, the terminal can be enabled on Vertex AI Workbench instances.", 28 | "constraintDefault": "ALLOW", 29 | "booleanConstraint": {} 30 | }, 31 | { 32 | "name": "constraints/ainotebooks.environmentOptions", 33 | "displayName": "Restrict environment options on new Vertex AI Workbench notebooks and instances", 34 | "description": "This list constraint defines the VM and container image options a user can select when creating new Vertex AI Workbench notebooks and instances where this constraint is enforced. The options to be allowed or denied must be listed explicitly.The expected format for VM instances is ainotebooks-vm/PROJECT_ID/IMAGE_TYPE/CONSTRAINED_VALUE. Replace IMAGE_TYPE with image-family or image-name. Examples: ainotebooks-vm/deeplearning-platform-release/image-family/pytorch-1-4-cpu, ainotebooks-vm/deeplearning-platform-release/image-name/pytorch-latest-cpu-20200615.The expected format for container images will be ainotebooks-container/CONTAINER_REPOSITORY:TAG. Examples: ainotebooks-container/gcr.io/deeplearning-platform-release/tf-gpu.1-15:latest, ainotebooks-container/gcr.io/deeplearning-platform-release/tf-gpu.1-15:m48.", 35 | "constraintDefault": "ALLOW", 36 | "listConstraint": {} 37 | }, 38 | { 39 | "name": "constraints/ainotebooks.requireAutoUpgradeSchedule", 40 | "displayName": "Require automatic scheduled upgrades on new Vertex AI Workbench user-managed notebooks and instances", 41 | "description": "This boolean constraint, when enforced, requires that newly created Vertex AI Workbench user-managed notebooks and instances have an automatic upgrade schedule set. The automatic upgrade schedule can be defined by using the `notebook-upgrade-schedule` metadata flag to specify a cron schedule for the automatic upgrades. For example: `--metadata=notebook-upgrade-schedule=\"00 19 * * MON\"`.", 42 | "constraintDefault": "ALLOW", 43 | "booleanConstraint": {} 44 | }, 45 | { 46 | "name": "constraints/ainotebooks.restrictVpcNetworks", 47 | "displayName": "Restrict VPC networks on new Vertex AI Workbench instances", 48 | "description": "This list constraint defines the VPC networks a user can select when creating new Vertex AI Workbench instances where this constraint is enforced. By default, a Vertex AI Workbench instance can be created with any VPC networks. The allowed or denied list of networks must be identified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/global/networks/NETWORK_NAME.", 49 | "constraintDefault": "ALLOW", 50 | "listConstraint": { 51 | "supportsUnder": true 52 | } 53 | }, 54 | { 55 | "name": "constraints/ainotebooks.restrictPublicIp", 56 | "displayName": "Restrict public IP access on new Vertex AI Workbench notebooks and instances", 57 | "description": "This boolean constraint, when enforced, restricts public IP access to newly created Vertex AI Workbench notebooks and instances. 
By default, public IPs can access Vertex AI Workbench notebooks and instances.", 58 | "constraintDefault": "ALLOW", 59 | "booleanConstraint": {} 60 | }, 61 | { 62 | "name": "constraints/appengine.disableCodeDownload", 63 | "displayName": "Disable Source Code Download", 64 | "description": "Disables code downloads of source code previously uploaded to App Engine.", 65 | "constraintDefault": "ALLOW", 66 | "booleanConstraint": {} 67 | }, 68 | { 69 | "name": "constraints/appengine.runtimeDeploymentExemption", 70 | "displayName": "Runtime Deployment Exemption (App Engine)", 71 | "description": "This list constraint defines the set of App Engine Standard legacy runtimes (Python 2.7, PHP 5.5 and Java 8) allowed for deployments past End of Support. App Engine Standard legacy runtimes will reach End of Support on Jan 30, 2024. Generally, attempts to deploy applications using legacy runtimes after this date will be blocked. See App Engine Standard runtime support schedule. Setting this constraint to \u201cAllow\u201d unblocks App Engine Standard deployments for the legacy runtime(s) that you specify until the Runtime Deprecation Date. Setting this constraint to \u201cAllow All\u201d unblocks App Engine Standard deployments for all legacy runtime(s) until the Runtime Deprecation Date. Runtimes that have reached End of Support do not receive routine security and maintenance patches. We strongly encourage you to upgrade your applications to use a Generally Available runtime version.", 72 | "constraintDefault": "DENY", 73 | "listConstraint": {} 74 | }, 75 | { 76 | "name": "constraints/bigquery.disableBQOmniAWS", 77 | "displayName": "Disable BigQuery Omni for Cloud AWS", 78 | "description": "This boolean constraint, when set to True, will disable users from using BigQuery Omni to process data on Amazon Web Services where this constraint is enforced.", 79 | "constraintDefault": "ALLOW", 80 | "booleanConstraint": {} 81 | }, 82 | { 83 | "name": "constraints/bigquery.disableBQOmniAzure", 84 | "displayName": "Disable BigQuery Omni for Cloud Azure", 85 | "description": "This boolean constraint, when set to True, will disable users from using BigQuery Omni to process data on Microsoft Azure where this constraint is enforced.", 86 | "constraintDefault": "ALLOW", 87 | "booleanConstraint": {} 88 | }, 89 | { 90 | "name": "constraints/cloudbuild.allowedIntegrations", 91 | "displayName": "Allowed Integrations (Cloud Build)", 92 | "description": "This list constraint defines the allowed Cloud Build integrations for performing Builds through receiving webhooks from services outside Google Cloud. When this constraint is enforced, only webhooks for services whose host matches one of the allowed values will be processed. By default, Cloud Build processes all webhooks for projects that have at least one LIVE trigger.", 93 | "constraintDefault": "ALLOW", 94 | "listConstraint": {} 95 | }, 96 | { 97 | "name": "constraints/cloudbuild.allowedWorkerPools", 98 | "displayName": "Allowed Worker Pools (Cloud Build)", 99 | "description": "This list constraint defines the set of allowed Cloud Build Worker Pools for performing Builds using Cloud Build. When this constraint is enforced, builds will be required to build in a Worker Pool that matches one of the allowed values. By default, Cloud Build can use any Worker Pool. The allowed list of Worker Pools must be of the form: [under:organizations/ORGANIZATION_ID , under:folders/FOLDER_ID , under:projects/PROJECT_ID , projects/PROJECT_ID/locations/REGION/workerPools/WORKER_POOL_ID]. 
", 100 | "constraintDefault": "ALLOW", 101 | "listConstraint": { 102 | "supportsUnder": true 103 | } 104 | }, 105 | { 106 | "name": "constraints/cloudbuild.disableCreateDefaultServiceAccount", 107 | "displayName": "Disable Create Default Service Account (Cloud Build)", 108 | "description": "This boolean constraint, when enforced, prevents the legacy Cloud Build service account from being created.", 109 | "constraintDefault": "DENY", 110 | "booleanConstraint": {} 111 | }, 112 | { 113 | "name": "constraints/clouddeploy.disableServiceLabelGeneration", 114 | "displayName": "Disable Cloud Deploy service labels", 115 | "description": "This boolean constraint, when enforced, prevents Cloud Deploy from adding Cloud Deploy identifier labels to deployed objects.By default, labels identifying Cloud Deploy resources are added to deployed objects during release creation.", 116 | "constraintDefault": "ALLOW", 117 | "booleanConstraint": {} 118 | }, 119 | { 120 | "name": "constraints/cloudfunctions.restrictAllowedGenerations", 121 | "displayName": "Allowed Cloud Functions Generations", 122 | "description": "This list constraint defines the set of allowed Cloud Function Generations that can be used to create new Function resources. Valid values are: 1stGen, 2ndGen.", 123 | "constraintDefault": "ALLOW", 124 | "listConstraint": {} 125 | }, 126 | { 127 | "name": "constraints/cloudfunctions.allowedVpcConnectorEgressSettings", 128 | "displayName": "Allowed VPC Connector egress settings (Cloud Functions)", 129 | "description": "This list constraint defines the allowed VPC Connector egress settings for deployment of a Cloud Function (1st gen). When this constraint is enforced, functions will be required to have VPC Connector egress settings that match one of the allowed values. By default, Cloud Functions can use any VPC Connector egress settings. VPC Connector egress settings must be specified in the allowed list using the values of the VpcConnectorEgressSettings enum.For Cloud Functions (2nd gen) use the constraint constraints/run.allowedVPCEgress.", 130 | "constraintDefault": "ALLOW", 131 | "listConstraint": {} 132 | }, 133 | { 134 | "name": "constraints/cloudfunctions.allowedIngressSettings", 135 | "displayName": "Allowed ingress settings (Cloud Functions)", 136 | "description": "This list constraint defines the allowed ingress settings for deployment of a Cloud Function (1st gen). When this constraint is enforced, functions will be required to have ingress settings that match one of the allowed values. By default, Cloud Functions can use any ingress settings. Ingress settings must be specified in the allowed list using the values of the IngressSettings enum.For Cloud Functions (2nd gen) use the constraint constraints/run.allowedIngress.", 137 | "constraintDefault": "ALLOW", 138 | "listConstraint": {} 139 | }, 140 | { 141 | "name": "constraints/cloudfunctions.requireVPCConnector", 142 | "displayName": "Require VPC Connector (Cloud Functions)", 143 | "description": "This boolean constraint enforces setting a VPC Connector when deploying a Cloud Function (1st gen). When this constraint is enforced, functions will be required to specify a VPC Connector. 
By default, specifying a VPC Connector is not required to deploy a Cloud Function.", 144 | "constraintDefault": "ALLOW", 145 | "booleanConstraint": {} 146 | }, 147 | { 148 | "name": "constraints/cloudkms.allowedProtectionLevels", 149 | "displayName": "Restrict which KMS CryptoKey types may be created.", 150 | "description": "This list constraint defines the Cloud KMS key types which may be created under a given hierarchy node. When this constraint is enforced, only KMS key types specified within this org policy may be created within the associated hierarchy node. Configuring this org policy will also impact the protection level of import jobs and key versions. By default, all key types are allowed. Valid values are: SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC. Deny policies are disallowed.", 151 | "constraintDefault": "ALLOW", 152 | "listConstraint": {} 153 | }, 154 | { 155 | "name": "constraints/cloudkms.minimumDestroyScheduledDuration", 156 | "displayName": "Minimum destroy scheduled duration per key", 157 | "description": "This list constraint defines the minimum destroy scheduled duration in days that the user can specify when creating a new key. No keys with destroy scheduled duration lower than this value may be created after the constraint is enforced. By default, the minimum destroy scheduled duration for all keys is 1 day, except in the case of import-only keys for which it is 0 days. Only one allowed value can be specified in the format in:1d, in:7d, in:15d, in:30d, in:60d, in:90d, or in:120d. For example, if constraints/cloudkms.minimumDestroyScheduledDuration is set to in:15d, then users can create keys with destroy scheduled duration set to any value higher than 15 days, such as 16 days or 31 days. However, users cannot create keys with destroy scheduled duration lower than 15 days, such as 14 days. For each resource in the hierarchy, the minimum destroy scheduled duration may inherit, replace, or be merged with the parent's policy. When the resource's policy is merged with the parent's policy, the effective value of minimum destroy scheduled duration at the resource is the lowest between that value specified at the resource's policy and the parent's effective minimum destroy scheduled duration. For example, if an organization has minimum destroy scheduled duration of 7 days and in a child project the policy is set to 'Merge with parent' with a value of in:15d, then the effective minimum destroy scheduled duration at the project is 7 days. ", 158 | "constraintDefault": "ALLOW", 159 | "listConstraint": {} 160 | }, 161 | { 162 | "name": "constraints/cloudkms.disableBeforeDestroy", 163 | "displayName": "Restrict key destruction to disabled key versions", 164 | "description": "This boolean constraint, when enforced, only allows the destruction of key versions that are in the disabled state. By default, key versions that are in the enabled state and key versions that are in the disabled state can be destroyed. When this constraint is enforced, it applies to both new and existing key versions.", 165 | "constraintDefault": "ALLOW", 166 | "booleanConstraint": {} 167 | }, 168 | { 169 | "name": "constraints/compute.allowedVlanAttachmentEncryption", 170 | "displayName": "Allowed VLAN Attachment encryption settings", 171 | "description": "This list constraint defines the allowed encryption settings for new VLAN Attachments. By default, VLAN Attachments are allowed to use any encryption settings. 
Set IPSEC as the allowed value to enforce creating encrypted VLAN attachments only.", 172 | "constraintDefault": "ALLOW", 173 | "listConstraint": {} 174 | }, 175 | { 176 | "name": "constraints/compute.disableNestedVirtualization", 177 | "displayName": "Disable VM nested virtualization", 178 | "description": "This boolean constraint disables hardware-accelerated nested virtualization for all Compute Engine VMs belonging to the organization, project, or folder where this constraint is set to True. By default, hardware-accelerated nested virtualization is allowed for all Compute Engine VMs running on Intel Haswell or newer CPU platforms. ", 179 | "constraintDefault": "ALLOW", 180 | "booleanConstraint": {} 181 | }, 182 | { 183 | "name": "constraints/compute.disableSerialPortLogging", 184 | "displayName": "Disable VM serial port logging to Stackdriver", 185 | "description": "This boolean constraint disables serial port logging to Stackdriver from Compute Engine VMs belonging to the organization, project, or folder where this constraint is being enforced. By default, serial port logging for Compute Engine VMs is disabled, and can be selectively enabled on a per-VM or per-project basis using metadata attributes. When enforced, this constraint disables serial port logging for new Compute Engine VMs whenever a new VM is created, as well as preventing users from changing the metadata attribute of any VMs (old or new) to True. Disabling serial port logging can cause certain services that rely on it, such as Google Kubernetes Engine clusters, to not function correctly. Before you enforce this constraint, verify that the products in your project do not rely on serial port logging.", 186 | "constraintDefault": "ALLOW", 187 | "booleanConstraint": {} 188 | }, 189 | { 190 | "name": "constraints/compute.disableSerialPortAccess", 191 | "displayName": "Disable VM serial port access", 192 | "description": "This boolean constraint disables serial port access to Compute Engine VMs belonging to the organization, project, or folder where this constraint is set to True. By default, customers can enable serial port access for Compute Engine VMs on a per-VM or per-project basis using metadata attributes. Enforcing this constraint will disable serial port access for Compute Engine VMs, regardless of the metadata attributes.", 193 | "constraintDefault": "ALLOW", 194 | "booleanConstraint": {} 195 | }, 196 | { 197 | "name": "constraints/compute.disableGlobalSerialPortAccess", 198 | "displayName": "Disable Global Access to VM Serial Ports", 199 | "description": "This boolean constraint disables global serial port access to Compute Engine VMs belonging to the organization, project, or folder where the constraint is enforced. By default, customers can enable serial port access for Compute Engine VMs on a per-VM or per-project basis using metadata attributes. Enforcing this constraint will disable global serial port access for Compute Engine VMs, regardless of the metadata attributes. Regional serial port access is not affected by this constraint. To disable all serial port access, use the compute.disableSerialPortAccess constraint instead.", 200 | "constraintDefault": "ALLOW", 201 | "booleanConstraint": {} 202 | }, 203 | { 204 | "name": "constraints/compute.disableInstanceDataAccessApis", 205 | "displayName": "Disable Instance Data Access APIs", 206 | "description": "Do not configure or modify this policy. 
This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. This boolean constraint, when enforced, will disable the GetSerialPortOutput and GetScreenshot APIs that access VM's serial port output and capture screen shot from VM UIs.", 207 | "constraintDefault": "ALLOW", 208 | "booleanConstraint": {} 209 | }, 210 | { 211 | "name": "constraints/compute.disableGuestAttributesAccess", 212 | "displayName": "Disable Guest Attributes of Compute Engine metadata", 213 | "description": "This boolean constraint disables Compute Engine API access to the Guest Attributes of Compute Engine VMs belonging to the organization, project, or folder where this constraint is set to True. By default, the Compute Engine API can be used to access Compute Engine VM guest attributes.", 214 | "constraintDefault": "ALLOW", 215 | "booleanConstraint": {} 216 | }, 217 | { 218 | "name": "constraints/compute.restrictXpnProjectLienRemoval", 219 | "displayName": "Restrict shared VPC project lien removal", 220 | "description": "This boolean constraint restricts the set of users that can remove a Shared VPC host project lien without organization-level permission where this constraint is set to True. By default, any user with the permission to update liens can remove a Shared VPC host project lien. Enforcing this constraint requires that permission be granted at the organization level.", 221 | "constraintDefault": "ALLOW", 222 | "booleanConstraint": {} 223 | }, 224 | { 225 | "name": "constraints/compute.requireOsLogin", 226 | "displayName": "Require OS Login", 227 | "description": "This boolean constraint, when set to true, enables OS Login on all newly created Projects. All VM instances created in new projects will have OS Login enabled. On new and existing projects, this constraint prevents metadata updates that disable OS Login at the project or instance level. By default, the OS Login feature is disabled on Compute Engine projects.GKE instances in private clusters running node pool versions 1.20.5-gke.2000 and later support OS Login. GKE instances in public clusters do not currently support OS Login. If this constraint is applied to a Project running public clusters, GKE instances running in that Project may not function properly.", 228 | "constraintDefault": "ALLOW", 229 | "booleanConstraint": {} 230 | }, 231 | { 232 | "name": "constraints/compute.requireShieldedVm", 233 | "displayName": "Shielded VMs", 234 | "description": "This boolean constraint, when set to True, requires that all new Compute Engine VM instances use Shielded disk images with Secure Boot, vTPM, and Integrity Monitoring options enabled. Secure Boot can be disabled after creation, if desired. Existing running instances will continue to work as usual. By default, Shielded VM features do not need to be enabled in order to create Compute Engine VM instances. Shielded VM features add verifiable integrity and exfiltration resistance to your VMs.", 235 | "constraintDefault": "ALLOW", 236 | "booleanConstraint": {} 237 | }, 238 | { 239 | "name": "constraints/compute.restrictNonConfidentialComputing", 240 | "displayName": "Restrict Non-Confidential Computing", 241 | "description": "The deny list of this list constraint defines the set of services that require all new resources to be created with Confidential Computing enabled. By default, new resources are not required to use Confidential Computing. 
While this list constraint is enforced, Confidential Computing cannot be disabled throughout the lifecycle of the resource. Existing resources will continue to work as usual. The denied list of services must be identified as the string name of an API, and can only include explicitly denied values from the list below. Explicitly allowing APIs is not currently supported. Explicitly denying APIs not in this list will result in an error. List of supported APIs: [compute.googleapis.com, container.googleapis.com]", 242 | "constraintDefault": "ALLOW", 243 | "listConstraint": {} 244 | }, 245 | { 246 | "name": "constraints/compute.setNewProjectDefaultToZonalDNSOnly", 247 | "displayName": "Sets the internal DNS setting for new projects to Zonal DNS Only", 248 | "description": "When set to `True`, newly created projects will use Zonal DNS as default. By default, this constraint is set to `False` and newly created projects will be using the default DNS type.", 249 | "constraintDefault": "ALLOW", 250 | "booleanConstraint": {} 251 | }, 252 | { 253 | "name": "constraints/compute.storageResourceUseRestrictions", 254 | "displayName": "Compute Storage resource use restrictions (Compute Engine disks, images, and snapshots)", 255 | "description": "This list constraint defines a set of projects that are allowed to use Compute Engine's storage resources. By default, anyone with appropriate Cloud IAM permissions can access Compute Engine resources. When using this constraint, users must have Cloud IAM permissions, and they must not be restricted by the constraint to access the resource. Projects, folders, and organizations specified in allowed or denied lists must be in the form: under:projects/PROJECT_ID, under:folders/FOLDER_ID, under:organizations/ORGANIZATION_ID. ", 256 | "constraintDefault": "ALLOW", 257 | "listConstraint": { 258 | "supportsUnder": true 259 | } 260 | }, 261 | { 262 | "name": "constraints/compute.trustedImageProjects", 263 | "displayName": "Define trusted image projects", 264 | "description": "This list constraint defines the set of projects that can be used for image storage and disk instantiation for Compute Engine. By default, instances can be created from images in any project that shares images publicly or explicitly with the user. The allowed/denied list of publisher projects must be strings in the form: projects/PROJECT_ID. If this constraint is active, only images from trusted projects will be allowed as the source for boot disks for new instances.", 265 | "constraintDefault": "ALLOW", 266 | "listConstraint": {} 267 | }, 268 | { 269 | "name": "constraints/compute.vmExternalIpAccess", 270 | "displayName": "Define allowed external IPs for VM instances", 271 | "description": "This list constraint defines the set of Compute Engine VM instances that are allowed to use external IP addresses. By default, all VM instances are allowed to use external IP addresses. The allowed/denied list of VM instances must be identified by the VM instance name, in the form: projects/PROJECT_ID/zones/ZONE/instances/INSTANCE ", 272 | "constraintDefault": "ALLOW", 273 | "listConstraint": {} 274 | }, 275 | { 276 | "name": "constraints/container.restrictNoncompliantDiagnosticDataAccess", 277 | "displayName": "Disable diagnostic administrative access pathways in GKE.", 278 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. 
When this boolean constraint is enforced, all access paths for diagnostics and other customer support use cases that do not comply with Assured Workloads requirements will be disabled.", 279 | "constraintDefault": "ALLOW", 280 | "booleanConstraint": {} 281 | }, 282 | { 283 | "name": "constraints/essentialcontacts.allowedContactDomains", 284 | "displayName": "Domain restricted contacts", 285 | "description": "This list constraint defines the set of domains that email addresses added to Essential Contacts can have. By default, email addresses with any domain can be added to Essential Contacts. The allowed/denied list must specify one or more domains of the form @example.com. If this constraint is active and configured with allowed values, only email addresses with a suffix matching one of the entries from the list of allowed domains can be added in Essential Contacts.This constraint has no effect on updating or removing existing contacts.", 286 | "constraintDefault": "ALLOW", 287 | "listConstraint": {} 288 | }, 289 | { 290 | "name": "constraints/essentialcontacts.disableProjectSecurityContacts", 291 | "displayName": "Disable Project Security Contacts", 292 | "description": "This boolean constraint, when enforced, allows organization policy administrators to ensure that only contacts assigned at the organization or folder level can receive security notifications. Specifically, enforcing this constraint blocks project owners and contact administrators from creating or updating an Essential Contact with a notification_category_subscriptions field that contains either the SECURITY or ALL category, if the contact also has a project resource as a parent.", 293 | "constraintDefault": "ALLOW", 294 | "booleanConstraint": {} 295 | }, 296 | { 297 | "name": "constraints/gcp.resourceLocations", 298 | "displayName": "Google Cloud Platform - Resource Location Restriction", 299 | "description": "This list constraint defines the set of locations where location-based Google Cloud resources can be created. By default, resources can be created in any location. Policies for this constraint can specify multi-regions such as asia and europe, regions such as us-east1 or europe-west1 as allowed or denied locations. Allowing or denying a multi-region does not imply that all included sub-locations should also be allowed or denied. For example, if the policy denies the us multi-region (which refers to multi-region resources, like some storage services), resources can still be created in the regional location us-east1. On the other hand, the in:us-locations group contains all locations within the us region, and can be used to block every region. We recommend using value groups to define your policy. You can specify value groups, collections of locations that are curated by Google to provide a simple way to define your resource locations. To use value groups in your organization policy, prefix your entries with the string in:, followed by the value group. For example, to create resources that will only be physically located within the US, set in:us-locations in the list of allowed values.If the suggested_value field is used in a location policy, it should be a region. If the value specified is a region, a UI for a zonal resource may pre-populate any zone in that region. 
", 300 | "constraintDefault": "ALLOW", 301 | "listConstraint": {} 302 | }, 303 | { 304 | "name": "constraints/gcp.restrictCmekCryptoKeyProjects", 305 | "displayName": "Restrict which projects may supply KMS CryptoKeys for CMEK", 306 | "description": "This list constraint defines which projects may be used to supply Customer-Managed Encryption Keys (CMEK) when creating resources. Setting this constraint to Allow (i.e. only allow CMEK keys from these projects) ensures that CMEK keys from other projects cannot be used to protect newly created resources. Values for this constraint must be specified in the form of under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, or projects/PROJECT_ID. Supported services that enforce this constraint are: [aiplatform.googleapis.com, artifactregistry.googleapis.com, bigquery.googleapis.com, bigquerydatatransfer.googleapis.com, bigtable.googleapis.com, cloudfunctions.googleapis.com, composer.googleapis.com, compute.googleapis.com, container.googleapis.com, dataflow.googleapis.com, dataproc.googleapis.com, documentai.googleapis.com, firestore.googleapis.com, integrations.googleapis.com, logging.googleapis.com, notebooks.googleapis.com, pubsub.googleapis.com, run.googleapis.com, secretmanager.googleapis.com, spanner.googleapis.com, sqladmin.googleapis.com, storage.googleapis.com]. Enforcement of this constraint may grow over time to include additional services. Use caution when applying this constraint to projects, folders, or organizations where a mix of supported and unsupported services are used. Setting this constraint to Deny or Deny All is not permitted. Enforcement of this constraint is not retroactive. Existing CMEK Google Cloud resources with KMS CryptoKeys from disallowed projects must be reconfigured or recreated manually to ensure enforcement.", 307 | "constraintDefault": "ALLOW", 308 | "listConstraint": { 309 | "supportsUnder": true 310 | } 311 | }, 312 | { 313 | "name": "constraints/gcp.restrictNonCmekServices", 314 | "displayName": "Restrict which services may create resources without CMEK", 315 | "description": "This list constraint defines which services require Customer-Managed Encryption Keys (CMEK). Setting this constraint to Deny (i.e. deny resource creation without CMEK) requires that, for the specified services, newly created resources must be protected by a CMEK key. Supported services that can be set in this constraint are: [aiplatform.googleapis.com, artifactregistry.googleapis.com, bigquery.googleapis.com, bigquerydatatransfer.googleapis.com, bigtable.googleapis.com, cloudfunctions.googleapis.com, composer.googleapis.com, compute.googleapis.com, container.googleapis.com, dataflow.googleapis.com, dataproc.googleapis.com, documentai.googleapis.com, firestore.googleapis.com, integrations.googleapis.com, logging.googleapis.com, notebooks.googleapis.com, pubsub.googleapis.com, run.googleapis.com, secretmanager.googleapis.com, spanner.googleapis.com, sqladmin.googleapis.com, storage.googleapis.com, storagetransfer.googleapis.com]. Setting this constraint to Deny All is not permitted. Setting this constraint to Allow is not permitted. Enforcement of this constraint is not retroactive. 
Existing non-CMEK Google Cloud resources must be reconfigured or recreated manually to ensure enforcement.", 316 | "constraintDefault": "ALLOW", 317 | "listConstraint": {} 318 | }, 319 | { 320 | "name": "constraints/iam.allowedPolicyMemberDomains", 321 | "displayName": "Domain restricted sharing", 322 | "description": "This list constraint defines one or more Cloud Identity or Google Workspace customer IDs whose principals can be added to IAM policies. By default, all user identities are allowed to be added to IAM policies. Only allowed values can be defined in this constraint, denied values are not supported. If this constraint is active, only principals that belong to the allowed customer IDs can be added to IAM policies.You do not need to add the google.com customer ID to this list in order to interoperate with Google services. Adding google.com allows sharing with Google employees and non-production systems, and should only be used for sharing data with Google employees.", 323 | "constraintDefault": "ALLOW", 324 | "listConstraint": {} 325 | }, 326 | { 327 | "name": "constraints/iam.disableAuditLoggingExemption", 328 | "displayName": "Disable Audit Logging exemption", 329 | "description": "This boolean constraint, when enforced, prevents you from exempting additional principals from audit logging. This constraint does not affect any audit-logging exemptions that existed before you enforced the constraint.", 330 | "constraintDefault": "ALLOW", 331 | "booleanConstraint": {} 332 | }, 333 | { 334 | "name": "constraints/iam.disableServiceAccountKeyUpload", 335 | "displayName": "Disable Service Account Key Upload", 336 | "description": "This boolean constraint disables the feature that allows uploading public keys to service accounts where this constraint is set to `True`. By default, users can upload public keys to service accounts based on their Cloud IAM roles and permissions.", 337 | "constraintDefault": "ALLOW", 338 | "booleanConstraint": {} 339 | }, 340 | { 341 | "name": "constraints/iam.disableServiceAccountCreation", 342 | "displayName": "Disable service account creation", 343 | "description": "This boolean constraint disables the creation of service accounts where this constraint is set to `True`. By default, service accounts can be created by users based on their Cloud IAM roles and permissions.", 344 | "constraintDefault": "ALLOW", 345 | "booleanConstraint": {} 346 | }, 347 | { 348 | "name": "constraints/iam.disableServiceAccountKeyCreation", 349 | "displayName": "Disable service account key creation", 350 | "description": "This boolean constraint disables the creation of service account external keys where this constraint is set to `True`. By default, service account external keys can be created by users based on their Cloud IAM roles and permissions.", 351 | "constraintDefault": "ALLOW", 352 | "booleanConstraint": {} 353 | }, 354 | { 355 | "name": "constraints/iam.restrictCrossProjectServiceAccountLienRemoval", 356 | "displayName": "Restrict removal of Cross Project Service Account liens", 357 | "description": "This boolean constraint, when ENFORCED, prevents users from removing a Cross Project Service Account lien without organization-level permission. By default, any user with the permission to update liens can remove a Cross Project Service Account lien. 
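Boolean constraints like the IAM ones above carry no value lists; enforcing one is a single flag, sketched here under the same assumed v1 payload shape:

disable_sa_key_creation_policy = {
    "policy": {
        "constraint": "constraints/iam.disableServiceAccountKeyCreation",
        "booleanPolicy": {"enforced": True},  # set to False (or clear the policy) to lift the restriction
    }
}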
Enforcing this constraint requires that permission to be granted at the organization level.", 358 | "constraintDefault": "ALLOW", 359 | "booleanConstraint": {} 360 | }, 361 | { 362 | "name": "constraints/iam.automaticIamGrantsForDefaultServiceAccounts", 363 | "displayName": "Disable Automatic IAM Grants for Default Service Accounts", 364 | "description": "This boolean constraint, when enforced, prevents the default App Engine and Compute Engine service accounts that are created in your projects from being automatically granted any IAM role on the project when the accounts are created. By default, these service accounts automatically receive the Editor role when they are created.", 365 | "constraintDefault": "ALLOW", 366 | "booleanConstraint": {} 367 | }, 368 | { 369 | "name": "constraints/iam.allowServiceAccountCredentialLifetimeExtension", 370 | "displayName": "Allow extending lifetime of OAuth 2.0 access tokens to up to 12 hours", 371 | "description": "This list constraint defines the set of service accounts that can be granted OAuth 2.0 access tokens with a lifetime of up to 12 hours. By default, the maximum lifetime for these access tokens is 1 hour. The allowed/denied list of service accounts must specify one or more service account email addresses.", 372 | "constraintDefault": "DENY", 373 | "listConstraint": {} 374 | }, 375 | { 376 | "name": "constraints/resourcemanager.allowedExportDestinations", 377 | "displayName": "Allowed Destinations for Exporting Resources", 378 | "description": "This list constraint defines the set of external Organizations to which resources can be moved, and denies all moves to all other Organizations. By default, resources cannot be moved between Organizations. If this constraint is applied to a resource, the resource can be moved only to Organizations that are explicitly allowed by this constraint. Moves within an Organization are not governed by this constraint. The move operation will still require the same IAM permissions as normal resource moves. Values specified in allow/deny lists must be in the form: under:organizations/ORGANIZATION_ID.", 379 | "constraintDefault": "DENY", 380 | "listConstraint": { 381 | "supportsUnder": true 382 | } 383 | }, 384 | { 385 | "name": "constraints/resourcemanager.allowedImportSources", 386 | "displayName": "Allowed Sources for Importing Resources", 387 | "description": "This list constraint defines the set of external Organizations from which resources can be imported, and denies all moves from all other Organizations. By default, resources cannot be moved between Organizations. If this constraint is applied to a resource, imported resources directly under this resource must be explicitly allowed by this constraint. Moves within an Organization are not governed by this constraint. The move operation will still require the same IAM permissions as normal resource moves. Values specified in allow/deny lists must be in the form: under:organizations/ORGANIZATION_ID.", 388 | "constraintDefault": "DENY", 389 | "listConstraint": { 390 | "supportsUnder": true 391 | } 392 | }, 393 | { 394 | "name": "constraints/resourcemanager.allowEnabledServicesForExport", 395 | "displayName": "Require Enabled Services Allow List for Cross-Organization Move", 396 | "description": "This list constraint acts as a check to verify that a project with a service enabled is eligible for cross-organization move. 
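Because the export-destination constraint above defaults to DENY, a sketch that permits moves to one partner organization only (ORGANIZATION_ID is a placeholder) might read:

export_destinations_policy = {
    "policy": {
        "constraint": "constraints/resourcemanager.allowedExportDestinations",
        # moves to any organization not listed here stay denied
        "listPolicy": {"allowedValues": ["under:organizations/ORGANIZATION_ID"]},
    }
}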
A resource with a supported service enabled must have this constraint enforced and that supported service included in the allowed values to be eligible for a cross-organization move. The current list of allowed values for supported services that can be used is: [SHARED_VPC]. This constraint provides an additional control on top of constraints/resourcemanager.allowedExportDestinations. This list_constraint is empty by default and will not block cross organization moves unless a supported service is enabled on the resource to be exported. This constraint allows more fine-grained control over resources using features that require more caution when being moved to another organization. By default, a resource with a supported service enabled cannot be moved across organizations. ", 397 | "constraintDefault": "DENY", 398 | "listConstraint": {} 399 | }, 400 | { 401 | "name": "constraints/resourcemanager.accessBoundaries", 402 | "displayName": "Restrict resource query visibility", 403 | "description": "This list constraint, when enforced on an organization resource, defines the set of Google Cloud resources that are returned in list and search methods for users in the domain of the organization where this constraint is enforced. This can be used to limit what resources are visible in various parts of the Cloud Console, such as the Resource Picker, Search, and Manage Resources page. Note that this Constraint is only ever evaluated at the Organization level. Values specified in allow/deny lists must be in the form: under:organizations/ORGANIZATION_ID.", 404 | "constraintDefault": "ALLOW", 405 | "listConstraint": { 406 | "supportsUnder": true 407 | } 408 | }, 409 | { 410 | "name": "constraints/run.allowedIngress", 411 | "displayName": "Allowed ingress settings (Cloud Run)", 412 | "description": "This list constraint defines the allowed ingress settings for Cloud Run services. When this constraint is enforced, services will be required to have ingress settings that match one of the allowed values. Existing Cloud Run services with ingress settings that violate this constraint can continue to be updated until the service's ingress settings are changed to comply with this constraint. Once a service complies with this constraint the service can only use ingress settings allowed by this constraint. By default, Cloud Run services can use any ingress settings. The allowed list must contain supported ingress settings values, which are all, internal, and internal-and-cloud-load-balancing.", 413 | "constraintDefault": "ALLOW", 414 | "listConstraint": {} 415 | }, 416 | { 417 | "name": "constraints/run.allowedVPCEgress", 418 | "displayName": "Allowed VPC egress settings (Cloud Run)", 419 | "description": "This list constraint defines the allowed VPC egress settings to be specified on a Cloud Run resource. When this constraint is enforced, Cloud Run resources are required to be deployed with a Serverless VPC Access connector or with Direct VPC egress enabled, and VPC egress settings are required to match one of the allowed values. By default, Cloud Run resources can set VPC egress settings to any supported value. The allowed list must contain supported VPC egress settings values, which are private-ranges-only and all-traffic.For existing Cloud Run services, all new revisions must comply with this constraint. Existing services with revisions serving traffic that violate this constraint can continue to migrate traffic to revisions that violate this constraint. 
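A sketch pinning Cloud Run ingress to the two non-public settings named in the description above (the values are the supported strings it lists; the payload shape is assumed as before):

run_ingress_policy = {
    "policy": {
        "constraint": "constraints/run.allowedIngress",
        "listPolicy": {"allowedValues": ["internal", "internal-and-cloud-load-balancing"]},
    }
}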
Once all traffic for a service is served by revisions compliant with this constraint, all subsequent traffic migrations must only migrate traffic to revisions that comply with this constraint.", 420 | "constraintDefault": "ALLOW", 421 | "listConstraint": {} 422 | }, 423 | { 424 | "name": "constraints/run.allowedBinaryAuthorizationPolicies", 425 | "displayName": "Allowed Binary Authorization Policies (Cloud Run)", 426 | "description": "This list constraint defines the set of Binary Authorization policy names that are allowed to be specified on a Cloud Run resource. To allow/disallow a default policy, use the value `default`. To allow/disallow one or more custom platform policies, the resource ID of each such policy must be added separately.", 427 | "constraintDefault": "ALLOW", 428 | "listConstraint": {} 429 | }, 430 | { 431 | "name": "constraints/serviceuser.services", 432 | "displayName": "Restrict allowed Google Cloud APIs and services", 433 | "description": "This list constraint restricts the set of services and their APIs that can be enabled on this resource. By default, all services are allowed. The denied list of services must come from the list below. Explicitly enabling APIs via this constraint is not currently supported. Specifying an API not in this list will result in an error. [ compute.googleapis.com , deploymentmanager.googleapis.com , dns.googleapis.com ]. Enforcement of this constraint is not retroactive. If a service is already enabled on a resource when this constraint is enforced, it will remain enabled.", 434 | "constraintDefault": "ALLOW", 435 | "listConstraint": {} 436 | }, 437 | { 438 | "name": "constraints/sql.restrictAuthorizedNetworks", 439 | "displayName": "Restrict Authorized Networks on Cloud SQL instances", 440 | "description": "This boolean constraint restricts adding Authorized Networks for unproxied database access to Cloud SQL instances where this constraint is set to True. This constraint is not retroactive, Cloud SQL instances with existing Authorized Networks will still work even after this constraint is enforced. By default, Authorized Networks can be added to Cloud SQL instances.", 441 | "constraintDefault": "ALLOW", 442 | "booleanConstraint": {} 443 | }, 444 | { 445 | "name": "constraints/sql.restrictPublicIp", 446 | "displayName": "Restrict Public IP access on Cloud SQL instances", 447 | "description": "This boolean constraint restricts configuring Public IP on Cloud SQL instances where this constraint is set to True. This constraint is not retroactive, Cloud SQL instances with existing Public IP access will still work even after this constraint is enforced. By default, Public IP access is allowed to Cloud SQL instances.", 448 | "constraintDefault": "ALLOW", 449 | "booleanConstraint": {} 450 | }, 451 | { 452 | "name": "constraints/sql.restrictNoncompliantResourceCreation", 453 | "displayName": "Restrict non-compliant workloads for Cloud SQL instances.", 454 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. When this boolean constraint is enforced, certain aspects of supportability will be impaired and provisioned resources will strictly follow advanced sovereignty requirements for Assured Workloads. This policy is retroactive in that it will apply to existing projects, but it will not affect resources that have already been provisioned; ie. 
modifications to the policy will only be reflected in resources created after the policy is modified.", 455 | "constraintDefault": "ALLOW", 456 | "booleanConstraint": {} 457 | }, 458 | { 459 | "name": "constraints/sql.restrictNoncompliantDiagnosticDataAccess", 460 | "displayName": "Disable diagnostic and administrative access pathways in Cloud SQL to meet compliance requirements.", 461 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. When this boolean constraint is enforced, certain aspects of supportability will be impaired and all access paths for diagnostics and other customer support use cases that do not meet advanced sovereignty requirements for Assured Workloads will be disabled.", 462 | "constraintDefault": "ALLOW", 463 | "booleanConstraint": {} 464 | }, 465 | { 466 | "name": "constraints/storage.uniformBucketLevelAccess", 467 | "displayName": "Enforce uniform bucket-level access", 468 | "description": "This boolean constraint requires buckets to use uniform bucket-level access where this constraint is set to True. Any new bucket in the Organization resource must have uniform bucket-level access enabled, and no existing buckets in the organization resource can disable uniform bucket-level access. Enforcement of this constraint is not retroactive: existing buckets with uniform bucket-level access disabled continue to have it disabled. The default value for this constraint is False. Uniform bucket-level access disables the evaluation of ACLs assigned to Cloud Storage objects in the bucket. Consequently, only IAM policies grant access to objects in these buckets.", 469 | "constraintDefault": "ALLOW", 470 | "booleanConstraint": {} 471 | }, 472 | { 473 | "name": "constraints/storage.retentionPolicySeconds", 474 | "displayName": "Retention policy duration in seconds", 475 | "description": "This list constraint defines the set of durations for retention policies that can be set on Cloud Storage buckets. By default, if no organization policy is specified, a Cloud Storage bucket can have a retention policy of any duration. The list of allowed durations must be specified as a positive integer value greater than zero, representing the retention policy in seconds. Any insert, update, or patch operation on a bucket in the organization resource must have a retention policy duration that matches the constraint. Enforcement of this constraint is not retroactive. When a new organization policy is enforced, the retention policy of existing buckets remains unchanged and valid.", 476 | "constraintDefault": "ALLOW", 477 | "listConstraint": {} 478 | }, 479 | { 480 | "name": "constraints/storage.restrictAuthTypes", 481 | "displayName": "Cloud Storage - restrict authentication types", 482 | "description": "The constraint defines the set of authentication types that would be restricted from accessing any storage resources under the organization in Cloud Storage. Supported values are USER_ACCOUNT_HMAC_SIGNED_REQUESTS and SERVICE_ACCOUNT_HMAC_SIGNED_REQUESTS. 
Use in:ALL_HMAC_SIGNED_REQUESTS to include both.", 483 | "constraintDefault": "ALLOW", 484 | "listConstraint": {} 485 | }, 486 | { 487 | "name": "constraints/compute.skipDefaultNetworkCreation", 488 | "displayName": "Skip default network creation", 489 | "description": "This boolean constraint skips the creation of the default network and related resources during Google Cloud Platform Project resource creation where this constraint is set to True. By default, a default network and supporting resources are automatically created when creating a Project resource.", 490 | "constraintDefault": "ALLOW", 491 | "booleanConstraint": {} 492 | }, 493 | { 494 | "name": "constraints/compute.requireSslPolicy", 495 | "displayName": "Require SSL Policy", 496 | "description": "This list constraint defines the set of target SSL proxies and target HTTPS proxies that are allowed to use the default SSL policy. By default, all target SSL proxies and target HTTPS proxies are allowed to use the default SSL policy. When this constraint is enforced, new target SSL proxies and target HTTPS proxies will be required to specify an SSL policy. Enforcement of this constraint is not retroactive. Existing target proxies that use the default SSL policy are not affected. The allowed/denied list of target SSL proxies and target HTTPS proxies must be identified in the form:[under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, projects/PROJECT_ID/global/targetHttpsProxies/TARGET_PROXY_NAME, projects/PROJECT_ID/regions/REGION_NAME/targetHttpsProxies/TARGET_PROXY_NAME, projects/PROJECT_ID/global/targetSslProxies/TARGET_PROXY_NAME]. ", 497 | "constraintDefault": "ALLOW", 498 | "listConstraint": { 499 | "supportsUnder": true 500 | } 501 | }, 502 | { 503 | "name": "constraints/compute.restrictVpcPeering", 504 | "displayName": "Restrict VPC peering usage", 505 | "description": "This list constraint defines the set of VPC networks that are allowed to be peered with the VPC networks belonging to this project, folder, or organization. By default, a Network Admin for one network can peer with any other network. The allowed/denied list of networks must be identified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/global/networks/NETWORK_NAME.", 506 | "constraintDefault": "ALLOW", 507 | "listConstraint": { 508 | "supportsUnder": true 509 | } 510 | }, 511 | { 512 | "name": "constraints/compute.restrictSharedVpcHostProjects", 513 | "displayName": "Restrict Shared VPC Host Projects", 514 | "description": "This list constraint defines the set of Shared VPC host projects that projects at or below this resource can attach to. By default, a project can attach to any host project in the same organization, thereby becoming a service project. Projects, folders, and organizations in allowed/denied lists affect all objects underneath them in the resource hierarchy, and must be specified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, or projects/PROJECT_ID.", 515 | "constraintDefault": "ALLOW", 516 | "listConstraint": { 517 | "supportsUnder": true 518 | } 519 | }, 520 | { 521 | "name": "constraints/compute.restrictSharedVpcSubnetworks", 522 | "displayName": "Restrict Shared VPC Subnetworks", 523 | "description": "This list constraint defines the set of shared VPC subnetworks that eligible resources can use. This constraint does not apply to resources within the same project. 
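For the Cloud Storage authentication-type restriction above, a sketch that blocks both HMAC request types via the combined group value (this assumes restricted types are expressed as denied values):

restrict_auth_types_policy = {
    "policy": {
        "constraint": "constraints/storage.restrictAuthTypes",
        "listPolicy": {"deniedValues": ["in:ALL_HMAC_SIGNED_REQUESTS"]},
    }
}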
By default, eligible resources can use any shared VPC subnetwork. The allowed/denied list of subnetworks must be specified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK-NAME.", 524 | "constraintDefault": "ALLOW", 525 | "listConstraint": { 526 | "supportsUnder": true 527 | } 528 | }, 529 | { 530 | "name": "constraints/compute.restrictSharedVpcBackendServices", 531 | "displayName": "Restrict Shared VPC Backend Services", 532 | "description": "This list constraint defines the set of shared VPC Backend Services that eligible resources can use. This constraint does not apply to resources within the same project. By default, eligible resources can use any shared VPC Backend Services. The allowed/denied list of Backend Services must be specified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, projects/PROJECT_ID/regions/REGION/backendServices/BACKEND_SERVICE_NAME or projects/PROJECT_ID/global/backendServices/BACKEND_SERVICE_NAME. This constraint is not retroactive.", 533 | "constraintDefault": "ALLOW", 534 | "listConstraint": { 535 | "supportsUnder": true 536 | } 537 | }, 538 | { 539 | "name": "constraints/compute.restrictCrossProjectServices", 540 | "displayName": "Restrict cross-project backend buckets and backend services", 541 | "description": "This list constraint limits BackendBucket and BackendService resources that a urlMap resource can attach to. This constraint does not apply to BackendBuckets and BackendServices within the same project as the urlMap resource. By default, a urlMap resource in one project can reference compatible backendBuckets and BackendServices from other projects in the same organization as long as the user has compute.backendService.use, compute.regionBackendServices.use or compute.backendBuckets.use permission. We recommend not using this constraint with the compute.restrictSharedVpcBackendServices constraint to avoid conflicts. Projects, folders, and organization resources in allowed or denied lists affect all BackendBuckets and BackendServices underneath them in the resource hierarchy. Only projects, folders, and organization resources can be included in the allowed or denied list, and must be specified in the form: [under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, projects/PROJECT_ID/regions/REGION/backendbuckets/BACKEND_BUCKET_NAME, projects/PROJECT_ID/global/backendbuckets/BACKEND_BUCKET_NAME, projects/PROJECT_ID/regions/REGION/backendservices/BACKEND_SERVICE_NAME, projects/PROJECT_ID/global/backendservices/BACKEND_SERVICE_NAME", 542 | "constraintDefault": "ALLOW", 543 | "listConstraint": { 544 | "supportsUnder": true 545 | } 546 | }, 547 | { 548 | "name": "constraints/compute.restrictVpnPeerIPs", 549 | "displayName": "Restrict VPN Peer IPs", 550 | "description": "This list constraint defines the set of valid IP addresses that can be configured as VPN peer IPs. By default, any IP can be a VPN peer IP for a VPC network. 
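As an illustration of the resource-path value form used by the Shared VPC constraints above, a sketch limiting eligible resources to a single subnetwork (all path segments are placeholders):

shared_vpc_subnetworks_policy = {
    "policy": {
        "constraint": "constraints/compute.restrictSharedVpcSubnetworks",
        "listPolicy": {"allowedValues": ["projects/PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK-NAME"]},
    }
}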
The allowed/denied list of IP addresses must be specified as valid IP addresses in the form: IP_V4_ADDRESS or IP_V6_ADDRESS.", 551 | "constraintDefault": "ALLOW", 552 | "listConstraint": {} 553 | }, 554 | { 555 | "name": "constraints/compute.restrictLoadBalancerCreationForTypes", 556 | "displayName": "Restrict Load Balancer Creation Based on Load Balancer Types", 557 | "description": "This list constraint defines the set of load balancer types which can be created for an organization, folder, or project. Every load balancer type to be allowed or denied must be listed explicitly. By default, creation of all types of load balancers is allowed. The list of allowed or denied values must be identified as the string name of a load balancer, and can only include values from the list below: [INTERNAL_TCP_UDP, INTERNAL_HTTP_HTTPS, GLOBAL_INTERNAL_MANAGED_HTTP_HTTPS, GLOBAL_INTERNAL_MANAGED_TCP_PROXY, REGIONAL_INTERNAL_MANAGED_TCP_PROXY, EXTERNAL_NETWORK_TCP_UDP, EXTERNAL_TCP_PROXY, EXTERNAL_SSL_PROXY, EXTERNAL_HTTP_HTTPS, EXTERNAL_MANAGED_HTTP_HTTPS, GLOBAL_EXTERNAL_MANAGED_HTTP_HTTPS, GLOBAL_EXTERNAL_MANAGED_TCP_PROXY, GLOBAL_EXTERNAL_MANAGED_SSL_PROXY, REGIONAL_EXTERNAL_MANAGED_TCP_PROXY]. To include all internal or all external load balancer types, use the in: prefix followed by INTERNAL or EXTERNAL. For example, allowing in:INTERNAL will allow all load balancer types from the above list that include INTERNAL.", 558 | "constraintDefault": "ALLOW", 559 | "listConstraint": {} 560 | }, 561 | { 562 | "name": "constraints/compute.restrictProtocolForwardingCreationForTypes", 563 | "displayName": "Restrict Protocol Forwarding Based on type of IP Address", 564 | "description": "This list constraint defines the type of protocol forwarding rule objects with target instance that a user can create. When this constraint is enforced, new forwarding rule objects with target instance will be limited to internal and/or external IP addresses, based on the types specified. The types to be allowed or denied must be listed explicitly. By default, creation of both internal and external protocol forwarding rule objects with target instance are allowed. The list of allowed or denied values can only include values from the list below: [INTERNAL, EXTERNAL].", 565 | "constraintDefault": "ALLOW", 566 | "listConstraint": {} 567 | }, 568 | { 569 | "name": "constraints/compute.vmCanIpForward", 570 | "displayName": "Restrict VM IP Forwarding", 571 | "description": "This list constraint defines the set of VM instances that can enable IP forwarding. By default, any VM can enable IP forwarding in any virtual network. VM instances must be specified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/zones/ZONE/instances/INSTANCE-NAME. This constraint is not retroactive.", 572 | "constraintDefault": "ALLOW", 573 | "listConstraint": { 574 | "supportsUnder": true 575 | } 576 | }, 577 | { 578 | "name": "constraints/compute.restrictDedicatedInterconnectUsage", 579 | "displayName": "Restrict Dedicated Interconnect usage", 580 | "description": "This list constraint defines the set of Compute Engine networks that are allowed to use Dedicated Interconnect. By default, networks are allowed to use any type of Interconnect.
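Following the in: prefix behavior described above, a sketch that permits only internal load balancer types:

internal_lb_only_policy = {
    "policy": {
        "constraint": "constraints/compute.restrictLoadBalancerCreationForTypes",
        # expands to every type in the list above whose name includes INTERNAL
        "listPolicy": {"allowedValues": ["in:INTERNAL"]},
    }
}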
The allowed/denied list of networks must be identified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/global/networks/NETWORK_NAME.", 581 | "constraintDefault": "ALLOW", 582 | "listConstraint": { 583 | "supportsUnder": true 584 | } 585 | }, 586 | { 587 | "name": "constraints/compute.restrictPartnerInterconnectUsage", 588 | "displayName": "Restrict Partner Interconnect usage", 589 | "description": "This list constraint defines the set of Compute Engine networks that are allowed to use Partner Interconnect. By default, networks are allowed to use any type of Interconnect. The allowed/denied list of networks must be identified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/global/networks/NETWORK_NAME.", 590 | "constraintDefault": "ALLOW", 591 | "listConstraint": { 592 | "supportsUnder": true 593 | } 594 | }, 595 | { 596 | "name": "constraints/compute.restrictCloudNATUsage", 597 | "displayName": "Restrict Cloud NAT usage", 598 | "description": "This list constraint defines the set of subnetworks that are allowed to use Cloud NAT. By default, all subnetworks are allowed to use Cloud NAT. The allowed/denied list of subnetworks must be identified in the form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, under:projects/PROJECT_ID, or projects/PROJECT_ID/regions/REGION_NAME/subnetworks/SUBNETWORK_NAME.", 599 | "constraintDefault": "ALLOW", 600 | "listConstraint": { 601 | "supportsUnder": true 602 | } 603 | }, 604 | { 605 | "name": "constraints/compute.sharedReservationsOwnerProjects", 606 | "displayName": "Shared Reservations Owner Projects", 607 | "description": "This list constraint defines the set of projects that are allowed to create and own shared reservations in the org. A shared reservation is similar to a local reservation, except that instead of being consumable by only owner projects, they can be consumed by other Compute Engine projects in the resource hierarchy. The list of projects allowed to access the shared reservation must be of the form: projects/PROJECT_NUMBER or under:projects/PROJECT_NUMBER.", 608 | "constraintDefault": "DENY", 609 | "listConstraint": { 610 | "supportsUnder": true 611 | } 612 | }, 613 | { 614 | "name": "constraints/gcp.restrictServiceUsage", 615 | "displayName": "Restrict Resource Service Usage", 616 | "description": "This constraint defines the set of Google Cloud resource services that can be used within an organization, folder, or project, such as compute.googleapis.com and storage.googleapis.com. By default, all Google Cloud resource services are allowed. For more information, see https://cloud.google.com/resource-manager/help/organization-policy/restricting-resources. ", 617 | "constraintDefault": "ALLOW", 618 | "listConstraint": {} 619 | }, 620 | { 621 | "name": "constraints/gcp.restrictTLSVersion", 622 | "displayName": "Restrict TLS Versions", 623 | "description": "This constraint defines the set of TLS versions that cannot be used on the organization, folder, or project where this constraint is enforced, or any of that resource's children in the resource hierarchy. By default, all TLS versions are allowed. TLS versions can only be specified in the denied list, and must be identified in the form TLS_VERSION_1 or TLS_VERSION_1_1. This constraint is only applied to requests using TLS. It will not be used to restrict unencrypted requests.
For more information, see https://cloud.google.com/assured-workloads/docs/restrict-tls-versions.", 624 | "constraintDefault": "ALLOW", 625 | "listConstraint": {} 626 | }, 627 | { 628 | "name": "constraints/iam.disableWorkloadIdentityClusterCreation", 629 | "displayName": "Disable Workload Identity Cluster Creation", 630 | "description": "This boolean constraint, when set to `True`, requires that all new GKE clusters have Workload Identity disabled at creation time. Existing GKE clusters with Workload Identity already enabled will continue to work as usual. By default, Workload Identity can be enabled for any GKE cluster.", 631 | "constraintDefault": "ALLOW", 632 | "booleanConstraint": {} 633 | }, 634 | { 635 | "name": "constraints/compute.disableInternetNetworkEndpointGroup", 636 | "displayName": "Disable Internet Network Endpoint Groups", 637 | "description": "This boolean constraint restricts whether a user can create Internet Network Endpoint Groups (NEG) with a type of INTERNET_FQDN_PORT and INTERNET_IP_PORT.By default, any user with appropriate IAM permissions can create Internet NEGs in any project.", 638 | "constraintDefault": "ALLOW", 639 | "booleanConstraint": {} 640 | }, 641 | { 642 | "name": "constraints/gcp.detailedAuditLoggingMode", 643 | "displayName": "Google Cloud Platform - Detailed Audit Logging Mode", 644 | "description": "When Detailed Audit Logging Mode is enforced, both the request and response are included in Cloud Audit Logs. Changes to this feature may take up to 10 minutes to reflect. This Org Policy is highly encouraged in coordination with Bucket Lock when seeking compliances such as SEC Rule 17a-4(f), CFTC Rule 1.31(c)-(d), and FINRA Rule 4511(c). This policy is currently only supported in Cloud Storage.", 645 | "constraintDefault": "ALLOW", 646 | "booleanConstraint": {} 647 | }, 648 | { 649 | "name": "constraints/gcp.disableCloudLogging", 650 | "displayName": "Disable Cloud Logging for Cloud Healthcare API", 651 | "description": "This boolean constraint, when enforced, disables Cloud Logging for the Cloud Healthcare API. Audit logs aren't affected by this constraint. Cloud Logs generated for the Cloud Healthcare API before the constraint is enforced are not deleted and can still be accessed. ", 652 | "constraintDefault": "ALLOW", 653 | "booleanConstraint": {} 654 | }, 655 | { 656 | "name": "constraints/iam.workloadIdentityPoolProviders", 657 | "displayName": "Allowed external Identity Providers for workloads in Cloud IAM", 658 | "description": "Identity Providers that can be configured for workload authentication within Cloud IAM, specified by URI/URLs.", 659 | "constraintDefault": "ALLOW", 660 | "listConstraint": {} 661 | }, 662 | { 663 | "name": "constraints/iam.workloadIdentityPoolAwsAccounts", 664 | "displayName": "Allowed AWS accounts that can be configured for workload identity federation in Cloud IAM", 665 | "description": "List of AWS account IDs that can be configured for workload identity federation in Cloud IAM.", 666 | "constraintDefault": "ALLOW", 667 | "listConstraint": {} 668 | }, 669 | { 670 | "name": "constraints/dataform.restrictGitRemotes", 671 | "displayName": "Restrict git remotes for repositories in Dataform", 672 | "description": "This list constraint defines a set of remotes that repositories in the Dataform project can communicate with. To block communication with all remotes, set the value to Deny all. This constraint is retroactive, and blocks communication for existing repositories that violate it. 
Entries should be links to trusted remotes, in the same format as provided in Dataform.By default, repositories in Dataform projects can communicate with any remote.", 673 | "constraintDefault": "ALLOW", 674 | "listConstraint": {} 675 | }, 676 | { 677 | "name": "constraints/compute.disablePrivateServiceConnectCreationForConsumers", 678 | "displayName": "Disable Private Service Connect for Consumers", 679 | "description": "This list constraint defines the set of Private Service Connect endpoint types for which users cannot create forwarding rules. When this constraint is enforced, users will be blocked from creating forwarding rules for the Private Service Connect endpoint type. This constraint is not retroactively enforced. By default, forwarding rules can be created for any Private Service Connect endpoint type. The allowed/denied list of Private Service Connect endpoints must come from the list below: [GOOGLE_APIS, SERVICE_PRODUCERS]. Using GOOGLE_APIS in the allowed/denied list will restrict the creation of Private Service Connect forwarding rules for accessing Google APIs. Using SERVICE_PRODUCERS in the allowed/denied list will restrict the creation of Private Service Connect forwarding rules for accessing services in another VPC network.", 680 | "constraintDefault": "ALLOW", 681 | "listConstraint": {} 682 | }, 683 | { 684 | "name": "constraints/compute.restrictPrivateServiceConnectConsumer", 685 | "displayName": "Restrict allowed Private Service Connect Consumers", 686 | "description": "This list constraint defines the organizations, folders, and projects that can connect to service attachments within a producer's organization or project. The allowed or denied lists must be identified in the following form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, or under:projects/PROJECT_ID. By default, all connections are allowed.", 687 | "constraintDefault": "ALLOW", 688 | "listConstraint": { 689 | "supportsUnder": true 690 | } 691 | }, 692 | { 693 | "name": "constraints/compute.restrictPrivateServiceConnectProducer", 694 | "displayName": "Restrict allowed Private Service Connect Producers", 695 | "description": "This list constraint defines which service attachments Private Service Connect consumers can connect to. The constraint blocks the deployment of Private Service Connect endpoints or backends based on the organization, folder, or project resource of the service attachment that the endpoints or backends refer to. The allowed or denied lists must be identified in the following form: under:organizations/ORGANIZATION_ID, under:folders/FOLDER_ID, or under:projects/PROJECT_ID. By default, all connections are allowed.", 696 | "constraintDefault": "ALLOW", 697 | "listConstraint": { 698 | "supportsUnder": true 699 | } 700 | }, 701 | { 702 | "name": "constraints/storage.publicAccessPrevention", 703 | "displayName": "Enforce Public Access Prevention", 704 | "description": "Secure your Cloud Storage data from public exposure by enforcing public access prevention. This governance policy prevents existing and future resources from being accessed via the public internet by disabling and blocking ACLs and IAM permissions that grant access to allUsers and allAuthenticatedUsers. Enforce this policy on the entire organization (recommended), specific projects, or specific folders to ensure no data is publicly exposed.This policy overrides existing public permissions. 
Public access will be revoked for existing buckets and objects after this policy is enabled.", 705 | "constraintDefault": "ALLOW", 706 | "booleanConstraint": {} 707 | }, 708 | { 709 | "name": "constraints/storage.secureHttpTransport", 710 | "displayName": "Restrict unencrypted HTTP access", 711 | "description": "This boolean constraint, when enforced, explicitly denies HTTP (unencrypted) access to all storage resources. By default, the Cloud Storage XML API allows unencrypted HTTP access. Note that the Cloud Storage JSON API, gRPC, and Cloud console only allow encrypted HTTP access to Cloud Storage resources.", 712 | "constraintDefault": "ALLOW", 713 | "booleanConstraint": {} 714 | }, 715 | { 716 | "name": "constraints/compute.disableVpcInternalIpv6", 717 | "displayName": "Disable VPC Internal IPv6 usage", 718 | "description": "This boolean constraint, when set to True, disables the creation of or update to subnetworks with a stack_type of IPV4_IPV6 and ipv6_access_type of INTERNAL. By default, anyone with appropriate Cloud IAM permissions can create or update subnetworks with stack_type of IPV4_IPV6 in any projects, folders, and organizations.", 719 | "constraintDefault": "ALLOW", 720 | "booleanConstraint": {} 721 | }, 722 | { 723 | "name": "constraints/compute.disableHybridCloudIpv6", 724 | "displayName": "Disable Hybrid Cloud IPv6 usage", 725 | "description": "This boolean constraint, when set to True, disables the creation of or update to hybrid cloud resources including Cloud Router, Interconnect Attachments, and Cloud VPN with a stack_type of IPV4_IPV6. By default, anyone with appropriate Cloud IAM permissions can create or update hybrid cloud resources with stack_type of IPV4_IPV6 in any projects, folders and organizations.", 726 | "constraintDefault": "ALLOW", 727 | "booleanConstraint": {} 728 | }, 729 | { 730 | "name": "constraints/compute.disableVpcExternalIpv6", 731 | "displayName": "Disable VPC External IPv6 usage", 732 | "description": "This boolean constraint, when set to True, disables the creation of or update to subnetworks with a stack_type of IPV4_IPV6 and ipv6_access_type of EXTERNAL. By default, anyone with appropriate Cloud IAM permissions can create or update subnetworks with stack_type of IPV4_IPV6 in any projects, folders, and organizations.", 733 | "constraintDefault": "ALLOW", 734 | "booleanConstraint": {} 735 | }, 736 | { 737 | "name": "constraints/compute.disableAllIpv6", 738 | "displayName": "Disable All IPv6 usage", 739 | "description": "This boolean constraint, when set to True, disables the creation of or update to any Google Compute Engine resources involved in IPv6 usage. By default, anyone with appropriate Cloud IAM permissions can create or update Google Compute Engine resources with IPv6 usage in any projects, folders, and organizations. If set, this constraint will have higher priority than other IPv6 org constraints including disableVpcInternalIpv6, disableVpcExternalIpv6, and disableHybridCloudIpv6.", 740 | "constraintDefault": "ALLOW", 741 | "booleanConstraint": {} 742 | }, 743 | { 744 | "name": "constraints/datastream.disablePublicConnectivity", 745 | "displayName": "Datastream - Block Public Connectivity Methods", 746 | "description": "By default, Datastream connection profiles can be created with public or private connectivity methods. 
If the boolean constraint for this organization policy is enforced, then only private connectivity methods (for example, VPC peering) can be used to create connection profiles.", 747 | "constraintDefault": "ALLOW", 748 | "booleanConstraint": {} 749 | }, 750 | { 751 | "name": "constraints/cloudscheduler.allowedTargetTypes", 752 | "displayName": "Allowed target types for jobs", 753 | "description": "This list constraint defines the list of target types, such as App Engine HTTP, HTTP, or Pubsub, allowed for Cloud Scheduler jobs. By default, all job targets are allowed. Valid values are: APPENGINE, HTTP, PUBSUB.", 754 | "constraintDefault": "ALLOW", 755 | "listConstraint": {} 756 | }, 757 | { 758 | "name": "constraints/compute.requireVpcFlowLogs", 759 | "displayName": "Require predefined policies for VPC flow logs", 760 | "description": "This list constraint defines the set of predefined policies that can be enforced for VPC Flow logs.By default VPC Flow logs may be configured with any settings in each subnet.This constraint enforces enabling flow logs for all subnetworks in scope with a required minimum sampling rate.Specify one or more of the following valid values:[ESSENTIAL (allows values >= 0.1 and < 0.5), LIGHT (allows values >= 0.5 and < 1.0), COMPREHENSIVE (allows values == 1.0)]. ", 761 | "constraintDefault": "ALLOW", 762 | "listConstraint": {} 763 | }, 764 | { 765 | "name": "constraints/firestore.requireP4SAforImportExport", 766 | "displayName": "Require Firestore Service Agent for import/export", 767 | "description": "This boolean constraint, when enforced, requires Firestore imports and exports to use the Firestore Service Agent. By default, Firestore imports and exports may use the App Engine service account. Firestore will stop using the App Engine service account for imports and exports in the future and all accounts will need to migrate to the Firestore Service Agent, after which time this constraint will no longer be necessary.", 768 | "constraintDefault": "ALLOW", 769 | "booleanConstraint": {} 770 | }, 771 | { 772 | "name": "constraints/meshconfig.allowedVpcscModes", 773 | "displayName": "Allowed VPC Service Controls mode for Anthos Service Mesh Managed Control Planes", 774 | "description": "This constraint determines what VPC Service Controls modes can be set when provisioning a new Anthos Service Mesh Managed Control Plane. Valid values are \"NONE\" and \"COMPATIBLE\".", 775 | "constraintDefault": "ALLOW", 776 | "listConstraint": {} 777 | }, 778 | { 779 | "name": "constraints/compute.enableComplianceMemoryProtection", 780 | "displayName": "Enable settings required for compliance memory protection workloads", 781 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. This constraint controls settings required to eliminate potential access paths to VM core memory. When enforced it limits the ability to access VM core memory by disabling access pathways and restricts internal data collection when an error occurs.", 782 | "constraintDefault": "ALLOW", 783 | "booleanConstraint": {} 784 | }, 785 | { 786 | "name": "constraints/compute.disableGlobalCloudArmorPolicy", 787 | "displayName": "Disable Creation of Cloud Armor Security Policies", 788 | "description": "This boolean constraint, when enforced, disables creating Cloud Armor security policies. 
By default, you can create Cloud Armor security policies in any organization, folder, or project.", 789 | "constraintDefault": "ALLOW", 790 | "booleanConstraint": {} 791 | }, 792 | { 793 | "name": "constraints/compute.disableGlobalSelfManagedSslCertificate", 794 | "displayName": "Disable Creation of global self-managed SSL Certificates", 795 | "description": "This boolean constraint, when enforced, disables creation of global self-managed SSL Certificates. Creation of google-managed or regional self-managed certificates is not disabled by this constraint. By default, you can create global self-managed SSL Certificates in any organization, folder, or project.", 796 | "constraintDefault": "ALLOW", 797 | "booleanConstraint": {} 798 | }, 799 | { 800 | "name": "constraints/iap.requireGlobalIapWebDisabled", 801 | "displayName": "Disable Enabling Identity-Aware Proxy (IAP) on global resources", 802 | "description": "This boolean constraint, when enforced, disables turning on Identity-Aware Proxy on global resources. Enabling IAP on regional resources is not restricted by this constraint. By default, enabling IAP on global resources is allowed.", 803 | "constraintDefault": "ALLOW", 804 | "booleanConstraint": {} 805 | }, 806 | { 807 | "name": "constraints/iap.requireRegionalIapWebDisabled", 808 | "displayName": "Disable Enabling Identity-Aware Proxy (IAP) on regional resources", 809 | "description": "This boolean constraint, when enforced, disables turning on Identity-Aware Proxy on regional resources. Enabling IAP on global resources is not restricted by this constraint. By default, enabling IAP on regional resources is allowed.", 810 | "constraintDefault": "ALLOW", 811 | "booleanConstraint": {} 812 | }, 813 | { 814 | "name": "constraints/compute.disableGlobalLoadBalancing", 815 | "displayName": "Disable Global Load Balancing", 816 | "description": "This boolean constraint disables creation of global load balancing products. When enforced, only regional load balancing products without global dependencies can be created. By default, creation of global load balancing is allowed.", 817 | "constraintDefault": "ALLOW", 818 | "booleanConstraint": {} 819 | }, 820 | { 821 | "name": "constraints/compute.disableSshInBrowser", 822 | "displayName": "Disable SSH in browser", 823 | "description": "This boolean constraint disables the SSH-in-browser tool in the Cloud Console. When enforced, the SSH-in-browser button is disabled. By default, using the SSH-in-browser tool is allowed.", 824 | "constraintDefault": "ALLOW", 825 | "booleanConstraint": {} 826 | }, 827 | { 828 | "name": "constraints/iam.serviceAccountKeyExpiryHours", 829 | "displayName": "Service account key expiry duration in hours", 830 | "description": "This list constraint defines the maximum duration allowed for service account key expiry. By default, created keys never expire. The allowed duration is specified in hours, and must come from the list below. Only one allowed value can be specified, and denied values are not supported. Specifying a duration not in this list will result in an error. [1h, 8h, 24h, 168h, 336h, 720h, 1440h, 2160h]. To enforce this constraint, you must set it to replace the parent policy in the Cloud Console, or set inheritFromParent=false in the policy file if using the gcloud CLI. This constraint can't be merged with a parent policy. 
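A sketch of the non-merging key-expiry policy the description above calls for: exactly one allowed duration plus inheritFromParent=false (assuming the v1 listPolicy field of that name):

sa_key_expiry_policy = {
    "policy": {
        "constraint": "constraints/iam.serviceAccountKeyExpiryHours",
        "listPolicy": {
            "allowedValues": ["24h"],    # only one value may be specified
            "inheritFromParent": False,  # this constraint cannot be merged with a parent policy
        },
    }
}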
Enforcement of the constraint is not retroactive and will not change pre-existing keys.", 831 | "constraintDefault": "ALLOW", 832 | "listConstraint": {} 833 | }, 834 | { 835 | "name": "constraints/commerceorggovernance.marketplaceServices", 836 | "displayName": "Restrict access on marketplace services", 837 | "description": "This list constraint defines the set of services allowed for marketplace organizations, and can only include values from the list below: [PRIVATE_MARKETPLACE, IAAS_PROCUREMENT]. If PRIVATE_MARKETPLACE is in the allowed value list, the private marketplace is enabled. If the IAAS_PROCUREMENT is in the allowed value list, the IaaS procurement governance experience is enabled for all products. By default, the private marketplace is disabled and the IaaS procurement governance experience is disabled. Also, the IAAS_PROCUREMENT policy works independently from the Request Procurement governance capability, which is specifically for SaaS products listed on the marketplace.", 838 | "constraintDefault": "DENY", 839 | "listConstraint": {} 840 | }, 841 | { 842 | "name": "constraints/commerceorggovernance.disablePublicMarketplace", 843 | "displayName": "Disable Public Marketplace", 844 | "description": "This boolean constraint, when enforced, disables public marketplace for all users under the org. By default, public marketplace access is enabled for the org.", 845 | "constraintDefault": "ALLOW", 846 | "booleanConstraint": {} 847 | }, 848 | { 849 | "name": "constraints/compute.disableNonFIPSMachineTypes", 850 | "displayName": "Enforce FIPS-compliant machine types", 851 | "description": "This boolean constraint when enforced, disables creation of VM instance types that do not comply with FIPS requirements.", 852 | "constraintDefault": "ALLOW", 853 | "booleanConstraint": {} 854 | }, 855 | { 856 | "name": "constraints/spanner.assuredWorkloadsAdvancedServiceControls", 857 | "displayName": "Enable advanced service control for compliance workloads", 858 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. When this boolean constraint is enforced, certain aspects of supportability will be impaired and provisioned resources will strictly follow advanced sovereignty requirements for Assured Workloads. This policy will apply to existing projects, but it will not affect resources that have already been provisioned; ie. modifications to the policy will only be reflected in resources created after the policy is modified.", 859 | "constraintDefault": "ALLOW", 860 | "booleanConstraint": {} 861 | }, 862 | { 863 | "name": "constraints/spanner.disableMultiRegionInstanceIfNoLocationSelected", 864 | "displayName": "Disable Cloud Spanner multi-region if no location selected", 865 | "description": "Do not configure or modify this policy. This constraint is automatically configured during Assured Workloads onboarding and is only intended for advanced regulatory control for Assured Workloads. This boolean constraint, when enforced, prevents the creation of spanner instances using multi region instance config unless a location is selected. Cloud Spanner today does not yet support selecting location, so all multi regions will be disallowed. In the future, Spanner will provide the functionality for users to select a location for multi regions. Enforcement of this constraint is not retroactive. 
Spanner instances that have been already created will be unaffected.", 866 | "constraintDefault": "ALLOW", 867 | "booleanConstraint": {} 868 | }, 869 | { 870 | "name": "constraints/pubsub.enforceInTransitRegions", 871 | "displayName": "Enforce in-transit regions for Pub/Sub messages", 872 | "description": "This boolean constraint, when enforced, sets MessageStoragePolicy::enforce_in_transit to true for all new Pub/Sub topics at creation time. This ensures that Customer Data transits only within the allowed regions specified in the message storage policy for the topic.", 873 | "constraintDefault": "ALLOW", 874 | "booleanConstraint": {} 875 | }, 876 | { 877 | "name": "constraints/iam.serviceAccountKeyExposureResponse", 878 | "displayName": "Service account key exposure response", 879 | "description": "This list constraint defines the response taken if Google detects that a service account key is exposed publicly. By default, there is no response. The allowed values are DISABLE_KEY and WAIT_FOR_ABUSE. Values not explicitly part of this list cannot be used. Only one allowed value can be specified, and denied values are not supported. Allowing the DISABLE_KEY value automatically disables any publicly exposed service account key, and creates an entry in the audit log. Allowing the WAIT_FOR_ABUSE value opts out of this protection, and does not disable exposed service account keys automatically. However, Google Cloud may disable exposed service account keys if they are used in ways that adversely affect the platform, but makes no promise to do so. To enforce this constraint, set it to replace the parent policy in the Google Cloud Console, or set inheritFromParent=false in the policy file if using the gcloud CLI. This constraint can't be merged with a parent policy. ", 880 | "constraintDefault": "DENY", 881 | "listConstraint": {} 882 | } 883 | ] 884 | } -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | ''' 4 | This Cloud Function compares the old available Organization Policies 5 | to the current Organization Policies and determines if there are updates. 6 | ''' 7 | 8 | import base64 9 | import json 10 | import datetime # pylint: disable=import-error 11 | import requests # pylint: disable=import-error 12 | import googleapiclient.discovery # pylint: disable=import-error 13 | 14 | from os import getenv 15 | from google.cloud import storage # pylint: disable=import-error 16 | from google.cloud import secretmanager # pylint: disable=import-error 17 | from google.api_core import exceptions # pylint: disable=import-error 18 | from github import Github # pylint: disable=import-error 19 | from googleapiclient.discovery_cache.base import Cache # pylint: disable=import-error 20 | import tweepy # pylint: disable=import-error 21 | 22 | def announce_kickoff(event, context): 23 | """ 24 | Announces the start of the org policy comparison function. 25 | """ 26 | pubsub_message = base64.b64decode(event['data']).decode('utf-8') 27 | print(pubsub_message) 28 | # Starts Logic 29 | compare_policies() 30 | 31 | def compare_policies(): 32 | ''' 33 | Compares the old constraints vs the new ones. 
34 | ''' 35 | 36 | # Creates our two Org Policies lists for comparison 37 | old_policies = fetch_old_policies() 38 | current_policies = constraint_transform() 39 | 40 | # Sort Both Lists 41 | current_policies.sort() 42 | old_policies.sort() 43 | 44 | # Compare Sorted Lists 45 | if current_policies == old_policies: 46 | print("No new Org Policies Detected.") 47 | else: 48 | print("New Org Policies Detected!") 49 | # Subtract the last version from the current version to get new policies 50 | new_policies = list(set(current_policies) - set(old_policies)) 51 | # List comprehension to determine if any policies were removed 52 | removed_policies = [policy for policy in old_policies if policy not in current_policies] 53 | 54 | # Create lists of strings to post - constraints/ at the beginning of each string is redundant, so it is removed 55 | new_policies_to_post = [] 56 | removed_policies_to_post = [] 57 | if new_policies: 58 | new_policies_to_post = [f"New Organization Policy Detected: {policy.split('constraints/')[-1]}" for policy in new_policies] 59 | if removed_policies: 60 | removed_policies_to_post = [f"Removal of Organization Policy Detected: {policy.split('constraints/')[-1]}" for policy in removed_policies] 61 | # Add the two new lists together 62 | policies_to_post = new_policies_to_post + removed_policies_to_post 63 | 64 | # Create GitHub PR for new policies - save the commit to post the URL to Twitter 65 | github_commit = create_pr_file_content() 66 | # Post the new policies to the Slack channel 67 | post_to_slack(policies_to_post, github_commit) 68 | # Post to Twitter 69 | post_to_twitter(policies_to_post, github_commit) 70 | # Update the GCS bucket to create our new baseline 71 | upload_policy_file() 72 | 73 | def list_org_policies(): 74 | """ 75 | List the available Organization Policies 76 | """ 77 | 78 | # Grab the Organization ID from the Cloud Function environment variable 79 | org_id = getenv('ORG_ID') 80 | 81 | # Create Cloud Resource Manager API Service 82 | service = googleapiclient.discovery.build("cloudresourcemanager", "v1", cache=MemoryCache()) 83 | 84 | # Configure the API request 85 | request = service.organizations().listAvailableOrgPolicyConstraints(resource=f"organizations/{org_id}") 86 | 87 | # Execute the API request and surface any errors 88 | try: 89 | org_response = request.execute() 90 | except Exception as e: 91 | print(e) 92 | raise 93 | 94 | return org_response 95 | 96 | def constraint_transform(): 97 | """ 98 | Transforms our List Org policy response into a list of constraint names for comparison. 99 | """ 100 | # Grabs our response from the List Org Policy call 101 | org_response = list_org_policies() 102 | 103 | # Drill into the constraints portion of the response 104 | constraints = org_response['constraints'] 105 | 106 | # Create New Org Policies list 107 | # We create a list here to more easily sort and compare in compare_policies() 108 | current_org_policies = [] 109 | for constraint in constraints: 110 | current_org_policies.append(constraint['name']) 111 | 112 | return current_org_policies 113 | 114 | def fetch_old_policies(): 115 | """ 116 | Grabs the old Organization Policies from a GCS bucket.
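    The baseline object is plain text with one constraint name per line, mirroring
    what upload_policy_file() writes; hypothetical contents:

        constraints/compute.disableSshInBrowser
        constraints/gcp.resourceLocations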
117 |     """
118 |     # Set our GCS vars; these come from the terraform.tfvars file
119 |     bucket_name = getenv('POLICY_BUCKET')
120 |     source_blob_name = getenv('POLICY_FILE')
121 | 
122 |     # Create the GCS client
123 |     storage_client = storage.Client()
124 | 
125 |     # Create our bucket variable
126 |     bucket = storage_client.bucket(bucket_name)
127 | 
128 |     # List the objects in our GCS bucket
129 |     try:
130 |         files = storage_client.list_blobs(bucket)
131 |     except Exception as e:
132 |         print(e)
133 |         raise
134 | 
135 |     # Create a list of file names that we will scan for an old policy file
136 |     file_list = []
137 |     for gcs_file in files:
138 |         file_list.append(gcs_file.name)
139 | 
140 |     # Check for a pre-existing Org Policy file in GCS
141 |     if source_blob_name in file_list:
142 |         old_policies = download_policy_file()
143 |         return old_policies
144 |     # If no baseline exists yet, create one and return the current constraints
145 |     else:
146 |         upload_policy_file()
147 |         return constraint_transform()
148 | 
149 | def upload_policy_file():
150 |     """
151 |     Uploads the new Org Policy baseline to the GCS bucket.
152 |     """
153 |     # Grab our new baseline as a list
154 |     new_policies = constraint_transform()
155 | 
156 |     # Set our GCS vars; these come from the terraform.tfvars file
157 |     bucket_name = getenv('POLICY_BUCKET')
158 |     source_file_name = getenv('FILE_LOCATION')
159 |     destination_blob_name = getenv('POLICY_FILE')
160 | 
161 |     # Create the GCS client
162 |     storage_client = storage.Client()
163 | 
164 |     # Write the new policies to our local file by converting the list
165 |     # into a newline-delimited string
166 |     with open(source_file_name, 'w') as policy_file:
167 |         policy_file.write('\n'.join(new_policies))
168 | 
169 |     # Upload the new Organization Policy file to GCS
170 |     bucket = storage_client.bucket(bucket_name)
171 |     blob = bucket.blob(destination_blob_name)
172 |     try:
173 |         blob.upload_from_filename(source_file_name)
174 |     except Exception as e:
175 |         print(e)
176 |         raise
177 | 
178 |     print("New Policies Uploaded.")
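# Note on the baseline format: the GCS object is a plain text file with one
# constraint name per line (e.g. "constraints/compute.disableSerialPortAccess"),
# which is why download_policy_file() can rebuild the list with a per-line rstrip().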
Exiting.") 179 | 180 | def download_policy_file(): 181 | """ 182 | Downloads the Org Policy baseline from the GCS bucket 183 | """ 184 | # Set our GCS vars, these come from the terraform.tfvars file 185 | bucket_name = getenv('POLICY_BUCKET') 186 | source_blob_name = getenv('POLICY_FILE') 187 | destination_file_name = getenv('FILE_LOCATION') 188 | 189 | # Create the GCS client 190 | storage_client = storage.Client() 191 | 192 | # Create our bucket via the GCS client 193 | bucket = storage_client.bucket(bucket_name) 194 | 195 | # Creates our gcs -> prefix -> file variable 196 | blob = bucket.blob(source_blob_name) 197 | 198 | # Pulldown the baseline Org policy file 199 | try: 200 | blob.download_to_filename(destination_file_name) 201 | except Exception as e: 202 | print(e) 203 | raise 204 | 205 | # Read contents of old policy file and turn into a list for comparison 206 | # We turn into a list because thats how we write the contents of list_org_policies() 207 | with open(f"{destination_file_name}", 'r') as policy_file: 208 | old_policies = [line.rstrip() for line in policy_file] 209 | print("Org Policy File Downloaded from GCS Bucket") 210 | 211 | return old_policies 212 | 213 | def post_to_slack(policies, commit): 214 | """ 215 | Posts to a slack channel with the Organization Policy updates 216 | and the Github commit URL 217 | """ 218 | 219 | # Slack webhook URL 220 | url = get_latest_secret(getenv('S_SLACK_NAME')) 221 | 222 | # Set the headers for our slack HTTP POST 223 | headers = { 224 | 'Content-Type': 'application/json' 225 | } 226 | 227 | # This makes the policy into a dict. Slack requires the format {"text": "data"} 228 | dict_policy = {} 229 | 230 | # Join all of the policy strings with a new line so slack posts one blob 231 | policies_to_post = '\n'.join(policies) 232 | 233 | # Append the commit url to a new line 234 | dict_policy['text'] = policies_to_post + '\n' + commit['commit'].html_url 235 | 236 | # Converts to JSON for the HTTP POST payload 237 | payload = json.dumps(dict_policy) 238 | # Post to the slack channel 239 | try: 240 | requests.request("POST", url, headers=headers, data=payload) 241 | print("Posting to Slack") 242 | except Exception as e: 243 | print(e) 244 | raise 245 | 246 | def create_pr_file_content(): 247 | """ 248 | Creates the Organization Policy file content for the GitHub Pull Request. 249 | """ 250 | 251 | #Grabs our response from the List Org Policy call 252 | org_response = list_org_policies() 253 | 254 | # Create PR file content 255 | pr_file_content = json.dumps(org_response, indent=4) 256 | 257 | # Create GitHub Pull Request 258 | result = create_pr(pr_file_content) 259 | 260 | return result 261 | 262 | def create_pr(pr_file_content): 263 | """ 264 | Creates our GitHub pull request with the Organization Policy updates. 
265 |     """
266 |     # Fetch our GitHub token from GCP Secret Manager
267 |     github_token = get_latest_secret(getenv('S_TOKEN_NAME'))
268 | 
269 |     # Today's date is used in the branch name, commit message, and PR title
270 |     todays_date = datetime.date.today()
271 | 
272 |     # Create our authorized GitHub client
273 |     g = Github(github_token)
274 | 
275 |     # Set our target repo
276 |     try:
277 |         repo = g.get_repo("ScaleSec/gcp_org_policy_notifier")
278 |     except Exception as e:
279 |         print(e)
280 |         raise
281 | 
282 |     # Identify which file we want to update
283 |     repo_file_path = "policies/org_policy.json"
284 | 
285 |     # Set our branches
286 |     default_branch = "main"
287 |     target_branch = f"current_policies_{todays_date}"
288 | 
289 |     # Fetch our default branch
290 |     try:
291 |         source = repo.get_branch(default_branch)
292 |     except Exception as e:
293 |         print(e)
294 |         raise
295 |     # Create our new branch
296 |     try:
297 |         print("Creating a new branch.")
298 |         repo.create_git_ref(ref=f"refs/heads/{target_branch}", sha=source.commit.sha)
299 |     except Exception as e:
300 |         print(e)
301 |         raise
302 | 
303 |     # Retrieve the old file to get its SHA and path
304 |     try:
305 |         contents = repo.get_contents(repo_file_path, ref=default_branch)
306 |     except Exception as e:
307 |         print(e)
308 |         raise
309 | 
310 |     # Update the old file with the new content
311 |     try:
312 |         result = repo.update_file(contents.path, f"Org Policy Update Detected on {todays_date}", pr_file_content, contents.sha, branch=target_branch)
313 |     except Exception as e:
314 |         print(e)
315 |         print("There was an error updating the old policy file.")
316 |         raise
317 | 
318 |     # Create our Pull Request
319 |     try:
320 |         print("Creating GitHub Pull Request.")
321 |         repo.create_pull(title=f"Org Policy Update Detected on {todays_date}", head=target_branch, base=default_branch, body=f"Org Policy Update Detected on {todays_date}")
322 |     except Exception as e:
323 |         print(e)
324 |         raise
325 |     return result
326 | 
327 | def get_twitter_secrets():
328 |     """
329 |     Retrieves Twitter credentials from Secret Manager.
330 |     There are four secrets, so this builds a dictionary keyed by credential name.
331 |     """
332 | 
333 |     # Create a dictionary of secret names that we will replace with their values
334 |     secret_names = {"consumer_key": getenv('CONSUMER_KEY_NAME'), "consumer_key_secret": getenv('CONSUMER_KEY_SECRET_NAME'), "access_token": getenv('ACCESS_TOKEN_NAME'), "access_token_secret": getenv('ACCESS_TOKEN_SECRET_NAME')}
335 | 
336 |     # Look up each secret name in Secret Manager and swap in its value
337 |     secret_names = { k: get_latest_secret(v) for k, v in secret_names.items() }
338 | 
339 |     return secret_names
340 | 
341 | def create_twitter_connection():
342 |     """
343 |     Creates an API connection to Twitter for posting content.
344 |     """
345 |     # Retrieve the dictionary of four credentials needed to authenticate with Twitter
346 |     creds = get_twitter_secrets()
347 | 
348 |     # Authenticate with Twitter using Tweepy
349 |     try:
350 |         auth = tweepy.OAuthHandler(creds['consumer_key'], creds['consumer_key_secret'])
351 |         auth.set_access_token(creds['access_token'], creds['access_token_secret'])
352 |         api = tweepy.API(auth)
353 |         return api
354 |     except Exception as e:
355 |         print(e)
356 |         raise
357 | 
358 | def post_to_twitter(policies, commit):
359 |     """
360 |     Tweets the updated GCP Org Policies with the GitHub commit link.
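    Each change is tweeted individually, so a post looks roughly like
    "New Organization Policy Detected: gcp.resourceLocations <commit URL>"
    (illustrative constraint; the URL comes from the PR commit).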
361 |     """
362 | 
363 |     # Get the Twitter API client
364 |     api = create_twitter_connection()
365 | 
366 |     # Iterate through the policies and Tweet them out
367 |     for policy in policies:
368 |         # Build the tweet: the policy string with the commit URL at the end
369 |         content_to_post = f"{policy} {commit['commit'].html_url}"
370 | 
371 |         # Post to Twitter
372 |         try:
373 |             api.update_status(content_to_post)
374 |             print("Tweeting...")
375 |         except Exception as e:
376 |             print(e)
377 |             raise
378 | 
379 | def get_latest_secret(secret_name):
380 |     """
381 |     Retrieves the value of a secret from Secret Manager by name.
382 |     """
383 | 
384 |     # Set GCP Secret Manager vars
385 |     secret_project = getenv('S_PROJECT')
386 |     secret_version = getenv('S_VERSION', "latest")
387 | 
388 |     # Create the Secret Manager client
389 |     client = secretmanager.SecretManagerServiceClient()
390 | 
391 |     # Build the fully qualified secret version path
392 |     secret_location = client.secret_version_path(secret_project, secret_name, secret_version)
393 | 
394 |     # Get the secret to use
395 |     try:
396 |         print(f"Getting {secret_name} secret.")
397 |         response = client.access_secret_version(name=secret_location)
398 |         decoded_secret = response.payload.data.decode('UTF-8').rstrip()
399 |         return decoded_secret
400 |     except exceptions.FailedPrecondition as e:
401 |         print(e)
402 |         raise
403 | 
404 | class MemoryCache(Cache):
405 |     """
406 |     In-memory discovery cache that quiets noisy Cloud Function log entries.
407 |     """
408 |     _CACHE = {}
409 | 
410 |     def get(self, url):
411 |         return MemoryCache._CACHE.get(url)
412 | 
413 |     def set(self, url, content):
414 |         MemoryCache._CACHE[url] = content
415 | 
--------------------------------------------------------------------------------
/src/requirements.txt:
--------------------------------------------------------------------------------
1 | google-api-python-client==2.42.0
2 | google-cloud-secret-manager==2.9.2
3 | google-cloud-storage==2.2.1
4 | PyGithub==1.55
5 | tweepy==4.8.0
--------------------------------------------------------------------------------
/terraform-example.tfvars:
--------------------------------------------------------------------------------
1 | project_id = ""
2 | org_id = ""
3 | secret_project = ""
4 | name_prefix = ""
5 | secret_slack_name = ""
6 | secret_token_name = ""
7 | secret_version = ""
8 | twitter_consumer_key_name = ""
9 | twitter_consumer_key_secret_name = ""
10 | twitter_access_token_name = ""
11 | twitter_access_token_secret_name = ""
12 | policy_bucket_location = ""
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | // Required variables
2 | variable "project_id" {
3 |   type = string
4 |   description = "The ID of the project where the resources will be created."
5 | }
6 | 
7 | variable "name_prefix" {
8 |   type = string
9 |   description = "The prefix used to name resources."
10 | }
11 | 
12 | variable "org_id" {
13 |   description = "The GCP Org ID to assign permissions to."
14 | }
15 | 
16 | variable "secret_project" {
17 |   description = "The GCP project number where the Slack Webhook is stored."
18 | }
19 | 
20 | variable "secret_slack_name" {
21 |   description = "The name of the Slack Webhook secret in GCP."
22 | }
23 | 
24 | variable "secret_token_name" {
25 |   description = "The name of the GitHub token secret in GCP."
26 | }
27 | 
28 | variable "twitter_consumer_key_name" {
29 |   description = "The name of the Twitter Consumer Key secret in GCP."
30 | }
31 | 
32 | variable "twitter_consumer_key_secret_name" {
33 |   description = "The name of the Twitter Consumer Key Secret secret in GCP."
34 | }
35 | 
36 | variable "twitter_access_token_name" {
37 |   description = "The name of the Twitter Access Token secret in GCP."
38 | }
39 | 
40 | variable "twitter_access_token_secret_name" {
41 |   description = "The name of the Twitter Access Token Secret secret in GCP."
42 | }
43 | 
44 | // Optional variables
45 | variable "job_description" {
46 |   type = string
47 |   description = "The description of the Cloud Scheduler job."
48 |   default = "Starts Organization Policies check."
49 | }
50 | 
51 | variable "job_schedule" {
52 |   type = string
53 |   description = "The job frequency, in cron syntax. The default is every hour."
54 |   default = "0 * * * *"
55 | }
56 | 
57 | variable "function_available_memory_mb" {
58 |   type = number
59 |   default = 2048
60 |   description = "The amount of memory in megabytes allotted for the function to use."
61 | }
62 | 
63 | variable "function_description" {
64 |   type = string
65 |   default = "Compares Org Policies and alerts users."
66 |   description = "The description of the function."
67 | }
68 | 
69 | variable "function_entry_point" {
70 |   type = string
71 |   description = "The name of a method in the function source which will be invoked when the function is executed."
72 |   default = "announce_kickoff"
73 | }
74 | 
75 | variable "function_event_trigger_failure_policy_retry" {
76 |   type = bool
77 |   default = false
78 |   description = "A toggle to determine if the function should be retried on failure."
79 | }
80 | 
81 | variable "function_runtime" {
82 |   type = string
83 |   default = "python37"
84 |   description = "The runtime in which the function will be executed."
85 | }
86 | 
87 | variable "function_source_directory" {
88 |   type = string
89 |   description = "The contents of this directory will be archived and used as the function source."
90 |   default = "./src"
91 | }
92 | 
93 | variable "function_timeout_s" {
94 |   type = number
95 |   default = 60
96 |   description = "The amount of time in seconds allotted for the execution of the function."
97 | }
98 | 
99 | variable "bucket_force_destroy" {
100 |   type = bool
101 |   default = true
102 |   description = "When deleting the GCS bucket containing the cloud function, delete all objects in the bucket first."
103 | }
104 | 
105 | variable "region" {
106 |   type = string
107 |   description = "The region in which resources will be applied."
108 |   default = "us-central1"
109 | }
110 | 
111 | variable "message_data" {
112 |   type = string
113 |   description = "The base64-encoded data to send in the topic message."
114 |   default = "U3RhcnRpbmcgQ29tcGFyaXNvbg=="
115 | }
116 | 
117 | variable "time_zone" {
118 |   type = string
119 |   description = "The time zone to use in the scheduler."
120 |   default = "America/Detroit"
121 | }
122 | 
123 | variable "file_location" {
124 |   type = string
125 |   description = "Location to store the org policy file in the Cloud Function. Needs to be in /tmp/."
126 |   default = "/tmp/policies.txt"
127 | }
128 | 
129 | variable "policy_file" {
130 |   type = string
131 |   description = "The name of the Org policy file in the GCS bucket."
132 |   default = "policies.txt"
133 | }
134 | 
135 | variable "function_perms" {
136 |   description = "The Cloud Function custom IAM role permissions. Must be a list."
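  // These permissions mirror what src/main.py needs at runtime: reading
  // secret versions, listing available org policy constraints, and managing
  // the baseline object in GCS.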
137 |   default = ["secretmanager.secrets.get", "secretmanager.versions.get", "secretmanager.versions.access", "orgpolicy.policy.get", "resourcemanager.projects.get", "resourcemanager.projects.list", "storage.objects.create", "storage.objects.get", "storage.objects.update", "storage.objects.delete", "storage.objects.list"]
138 | }
139 | 
140 | variable "secret_version" {
141 |   description = "The version of the Slack Webhook secret in GCP. Leave as an empty string to use 'latest'."
142 |   default = "latest"
143 | }
144 | 
145 | variable "scheduler_job" {
146 |   type = object({ name = string })
147 |   description = "An existing Cloud Scheduler job instance."
148 |   default = null
149 | }
150 | 
151 | variable "policy_bucket_location" {
152 |   type = string
153 |   description = "The policy bucket's data location. Required by the GCS API. Defaults to 'US' for proximity to the function."
154 |   default = "US"
155 | }
156 | 
--------------------------------------------------------------------------------