├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_requests.md │ └── question.md ├── PULL_REQUEST_TEMPLATE.md ├── actions │ └── build-and-persist-plugin-binary │ │ └── action.yml ├── dependabot.yml ├── release.yml └── workflows │ ├── acceptance-test.yml │ ├── build_plugin_binaries.yml │ ├── go-test-darwin.yml │ ├── go-test-linux.yml │ ├── go-test-windows.yml │ ├── go-validate.yml │ ├── jira.yml │ ├── notify-integration-release-via-manual.yaml │ ├── notify-integration-release-via-tag.yaml │ └── release.yml ├── .gitignore ├── .go-version ├── .golangci.yml ├── .goreleaser.yml ├── .web-docs ├── README.md ├── components │ ├── builder │ │ └── googlecompute │ │ │ └── README.md │ ├── data-source │ │ ├── image │ │ │ └── README.md │ │ └── secretsmanager │ │ │ └── README.md │ └── post-processor │ │ ├── googlecompute-export │ │ └── README.md │ │ └── googlecompute-import │ │ └── README.md ├── metadata.hcl └── scripts │ └── compile-to-webdocs.sh ├── CHANGELOG.md ├── CODEOWNERS ├── GNUmakefile ├── LICENSE ├── README.md ├── builder └── googlecompute │ ├── artifact.go │ ├── artifact_test.go │ ├── builder.go │ ├── builder_acc_test.go │ ├── config.go │ ├── config.hcl2spec.go │ ├── config_test.go │ ├── startup.go │ ├── step_check_existing_image.go │ ├── step_check_existing_image_test.go │ ├── step_create_disks.go │ ├── step_create_image.go │ ├── step_create_image_test.go │ ├── step_create_instance.go │ ├── step_create_instance_test.go │ ├── step_create_windows_password.go │ ├── step_create_windows_password_test.go │ ├── step_import_os_login_ssh_key.go │ ├── step_import_os_login_ssh_key_test.go │ ├── step_instance_info.go │ ├── step_instance_info_test.go │ ├── step_start_tunnel.go │ ├── step_start_tunnel.hcl2spec.go │ ├── step_start_tunnel_test.go │ ├── step_teardown_instance.go │ ├── step_teardown_instance_test.go │ ├── step_test.go │ ├── step_wait_startup_script.go │ ├── step_wait_startup_script_test.go │ ├── template_funcs.go │ ├── 
template_funcs_test.go │ ├── test-fixtures │ └── fake-key │ ├── testdata │ ├── basic.pkr.hcl │ ├── extra_persistent_disk.pkr.hcl │ ├── extra_persistent_disk_and_regions.pkr.hcl │ ├── extra_scratch_disk.pkr.hcl │ ├── image_arch_builds.pkr.hcl │ ├── multiple_disks.pkr.hcl │ ├── network_ip.pkr.hcl │ ├── oslogin │ │ ├── default-token-and-pkey.pkr.hcl │ │ └── default-token.pkr.hcl │ └── wrapped-startup-scripts │ │ ├── errored.pkr.hcl │ │ ├── errored.sh │ │ ├── successful.pkr.hcl │ │ └── successful.sh │ ├── tunnel_driver.go │ ├── tunnel_driver_windows.go │ └── winrm.go ├── datasource ├── image │ ├── data.go │ ├── data.hcl2spec.go │ ├── data_acc_test.go │ ├── data_test.go │ └── test-fixtures │ │ └── template.pkr.hcl └── secretsmanager │ ├── data.go │ ├── data.hcl2spec.go │ ├── data_acc_test.go │ ├── data_test.go │ └── test-fixtures │ └── template.pkr.hcl ├── docs-partials ├── builder │ └── googlecompute │ │ ├── Config-not-required.mdx │ │ ├── Config-required.mdx │ │ ├── Config.mdx │ │ ├── IAPConfig-not-required.mdx │ │ └── IAPConfig.mdx ├── datasource │ ├── image │ │ ├── Config-not-required.mdx │ │ └── DatasourceOutput.mdx │ └── secretsmanager │ │ ├── Config-not-required.mdx │ │ ├── Config-required.mdx │ │ └── DatasourceOutput.mdx ├── lib │ └── common │ │ ├── Authentication-not-required.mdx │ │ ├── BlockDevice-not-required.mdx │ │ ├── BlockDevice-required.mdx │ │ ├── BlockDevice.mdx │ │ ├── CustomerEncryptionKey-not-required.mdx │ │ ├── NodeAffinity-not-required.mdx │ │ └── NodeAffinity.mdx └── post-processor │ ├── googlecompute-export │ ├── Config-not-required.mdx │ └── Config-required.mdx │ └── googlecompute-import │ ├── Config-not-required.mdx │ └── Config-required.mdx ├── docs ├── README.md ├── builders │ └── googlecompute.mdx ├── datasources │ ├── image.mdx │ └── secretsmanager.mdx └── post-processors │ ├── googlecompute-export.mdx │ └── googlecompute-import.mdx ├── example ├── README.md └── build.pkr.hcl ├── go.mod ├── go.sum ├── lib └── common │ ├── affinities.go │ 
├── affinities.hcl2spec.go │ ├── auth.go │ ├── auth.hcl2spec.go │ ├── block_device.go │ ├── block_device.hcl2spec.go │ ├── block_device_test.go │ ├── client_keys.go │ ├── client_keys.hcl2spec.go │ ├── driver.go │ ├── driver_gce.go │ ├── driver_mock.go │ ├── image.go │ ├── instance.go │ ├── networking.go │ └── networking_test.go ├── main.go ├── post-processor ├── googlecompute-export │ ├── artifact.go │ ├── artifact_test.go │ ├── post-processor.go │ ├── post-processor.hcl2spec.go │ └── startup.go └── googlecompute-import │ ├── artifact.go │ ├── artifact_test.go │ ├── post-processor.go │ └── post-processor.hcl2spec.go └── version └── version.go /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: You're experiencing an issue with this Packer plugin that is different than the documented behavior. 4 | labels: bug 5 | --- 6 | 7 | When filing a bug, please include the following headings if possible. Any 8 | example text in this template can be deleted. 9 | 10 | #### Overview of the Issue 11 | 12 | A paragraph or two about the issue you're experiencing. 13 | 14 | #### Reproduction Steps 15 | 16 | Steps to reproduce this issue 17 | 18 | ### Plugin and Packer version 19 | 20 | From `packer version` 21 | 22 | ### Simplified Packer Buildfile 23 | 24 | If the file is longer than a few dozen lines, please include the URL to the 25 | [gist](https://gist.github.com/) of the log or use the [Github detailed 26 | format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d) 27 | instead of posting it directly in the issue. 28 | 29 | ### Operating system and Environment details 30 | 31 | OS, Architecture, and any other information you can provide about the 32 | environment. 33 | 34 | ### Log Fragments and crash.log files 35 | 36 | Include appropriate log fragments. 
If the log is longer than a few dozen lines, 37 | please include the URL to the [gist](https://gist.github.com/) of the log or 38 | use the [Github detailed format](https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d) instead of posting it directly in the issue. 39 | 40 | Set the env var `PACKER_LOG=1` for maximum log detail. 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_requests.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: If you have something you think this Packer plugin could improve or add support for. 4 | labels: enhancement 5 | --- 6 | 7 | Please search the existing issues for relevant feature requests, and use the 8 | reaction feature 9 | (https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) 10 | to add upvotes to pre-existing requests. 11 | 12 | #### Community Note 13 | 14 | Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request. 15 | Please do not leave "+1" or "me too" comments, they generate extra noise for issue followers and do not help prioritize the request. 16 | If you are interested in working on this issue or have submitted a pull request, please leave a comment. 17 | 18 | #### Description 19 | 20 | A written overview of the feature. 21 | 22 | #### Use Case(s) 23 | 24 | Any relevant use-cases that you see. 
25 | 26 | #### Potential configuration 27 | 28 | ``` 29 | ``` 30 | 31 | #### Potential References 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: If you have a question, please check out our other community resources instead of opening an issue. 4 | labels: question 5 | --- 6 | 7 | Issues on GitHub are intended to be related to bugs or feature requests, so we 8 | recommend using our other community resources instead of asking here if you 9 | have a question. 10 | 11 | - Packer Guides: https://developer.hashicorp.com/packer/guides 12 | - Packer Community Tools: https://developer.hashicorp.com/packer/docs/community-tools enumerates 13 | vetted community resources like examples and useful tools 14 | - Any other questions can be sent to the Packer section of the HashiCorp 15 | forum: https://discuss.hashicorp.com/c/packer 16 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **DELETE THIS PART BEFORE SUBMITTING** 2 | 3 | In order to have a good experience with our community, we recommend that you 4 | read the contributing guidelines for making a PR, and understand the lifecycle 5 | of a Packer Plugin PR: 6 | 7 | https://github.com/hashicorp/packer-plugin-googlecompute/blob/main/.github/CONTRIBUTING.md#opening-an-pull-request 8 | 9 | ---- 10 | 11 | ### Description 12 | What code changed, and why? 13 | 14 | 15 | ### Resolved Issues 16 | If your PR resolves any open issue(s), please indicate them like this so they will be closed when your PR is merged: 17 | 18 | Closes #xxx 19 | Closes #xxx 20 | 21 | 22 | ### Rollback Plan 23 | 24 | If a change needs to be reverted, we will roll out an update to the code within 7 days. 
25 | 26 | ### Changes to Security Controls 27 | 28 | Are there any changes to security controls (access controls, encryption, logging) in this pull request? If so, explain. 29 | 30 | -------------------------------------------------------------------------------- /.github/actions/build-and-persist-plugin-binary/action.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | name: build-and-persist-plugin-binary 5 | inputs: 6 | GOOS: 7 | required: true 8 | GOARCH: 9 | required: true 10 | runs: 11 | using: composite 12 | steps: 13 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 14 | - run: "GOOS=${{ inputs.GOOS }} GOARCH=${{ inputs.GOARCH }} go build -o ./pkg/packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }} ." 15 | shell: bash 16 | - run: zip ./pkg/packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }}.zip ./pkg/packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }} 17 | shell: bash 18 | - run: rm ./pkg/packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }} 19 | shell: bash 20 | - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 21 | with: 22 | name: "packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }}.zip" 23 | path: "pkg/packer_plugin_googlecompute_${{ inputs.GOOS }}_${{ inputs.GOARCH }}.zip" 24 | retention-days: 30 25 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: 2 5 | updates: 6 | - package-ecosystem: "gomod" # See documentation for possible values 7 | directory: "/" # Location of package manifests 8 | schedule: 9 | interval: "daily" 10 | allow: 11 | - dependency-name: "github.com/hashicorp/packer-plugin-sdk" 12 | 13 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | changelog: 5 | exclude: 6 | labels: 7 | - ignore-for-release 8 | categories: 9 | - title: Breaking Changes 🛠 10 | labels: 11 | - breaking-change 12 | - title: Exciting New Features 🎉 13 | labels: 14 | - enhancement 15 | - title: Bug fixes🧑‍🔧 🐞 16 | labels: 17 | - bug 18 | - title: Doc improvements 📚 19 | labels: 20 | - docs 21 | - documentation 22 | - title: Other Changes 23 | labels: 24 | - "*" 25 | 26 | -------------------------------------------------------------------------------- /.github/workflows/acceptance-test.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This GitHub action runs acceptance tests every night. 3 | # 4 | name: "Acceptance Test" 5 | 6 | on: 7 | push: 8 | branches: 9 | - "main" 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | get-go-version: 16 | runs-on: ubuntu-latest 17 | outputs: 18 | go-version: ${{ steps.get-go-version.outputs.go-version }} 19 | steps: 20 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 21 | - name: 'Determine Go version' 22 | id: get-go-version 23 | # We use .go-version as our source of truth for current Go 24 | # version, because "goenv" can react to it automatically. 
25 | run: | 26 | echo "Building with Go $(cat .go-version)" 27 | echo "go-version=$(cat .go-version)" >>"$GITHUB_OUTPUT" 28 | acceptance-test: 29 | runs-on: ubuntu-latest 30 | name: Acceptance Test 31 | needs: get-go-version 32 | steps: 33 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 34 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 35 | with: 36 | go-version: ${{ needs.get-go-version.outputs.go-version }} 37 | - name: Install gotestsum 38 | run: go install gotest.tools/gotestsum@latest 39 | - name: Set up Cloud SDK 40 | uses: google-github-actions/setup-gcloud@98ddc00a17442e89a24bbf282954a3b65ce6d200 # v2.1.0 41 | - name: Install Packer 42 | uses: hashicorp/setup-packer@ae6b3ed3bec089bbfb576ab7d714df7cbc4b88a4 # v2.0.0 43 | with: 44 | version: latest 45 | - name: 'Authenticate to Google Cloud' 46 | uses: google-github-actions/auth@55bd3a7c6e2ae7cf1877fd1ccb9d54c0503c457c # v2.1.2 47 | id: 'gcp-auth' 48 | with: 49 | credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' 50 | access_token_lifetime: '7200s' 51 | export_environment_variables: false 52 | create_credentials_file: true 53 | cleanup_credentials: true 54 | - name: Run Acceptance Tests 55 | run: | 56 | mkdir -p /tmp/test-results 57 | make dev 58 | PACKER_ACC=1 gotestsum --format=short-verbose -- -count=1 -timeout=120m -p 2 ./... 
59 | env: 60 | GOOGLE_APPLICATION_CREDENTIALS: ${{ steps.gcp-auth.outputs.credentials_file_path }} 61 | GOOGLE_PROJECT_ID: ${{ secrets.GOOGLE_PROJECT_ID }} 62 | - run: find ./ -type f -name "*.txt" | zip acc_failure_logs.zip -@ 63 | if: ${{ failure() }} 64 | - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 65 | if: ${{ failure() }} 66 | with: 67 | name: "acc_failure_logs.zip" 68 | path: "acc_failure_logs.zip" 69 | retention-days: 5 70 | 71 | -------------------------------------------------------------------------------- /.github/workflows/build_plugin_binaries.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | name: hashicorp/packer-plugin-googlecompute/build_plugin_binaries 5 | permissions: 6 | contents: read 7 | on: 8 | push: 9 | branches: 10 | - main 11 | jobs: 12 | build_darwin: 13 | defaults: 14 | run: 15 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 16 | runs-on: ubuntu-latest 17 | container: 18 | image: docker.mirror.hashicorp.services/cimg/go:1.21 19 | steps: 20 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 21 | - uses: "./.github/actions/build-and-persist-plugin-binary" 22 | with: 23 | GOOS: darwin 24 | GOARCH: amd64 25 | - uses: "./.github/actions/build-and-persist-plugin-binary" 26 | with: 27 | GOOS: darwin 28 | GOARCH: arm64 29 | build_freebsd: 30 | defaults: 31 | run: 32 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 33 | runs-on: ubuntu-latest 34 | container: 35 | image: docker.mirror.hashicorp.services/cimg/go:1.21 36 | steps: 37 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 38 | - uses: "./.github/actions/build-and-persist-plugin-binary" 39 | with: 40 | GOOS: freebsd 41 | GOARCH: 386 42 | - uses: "./.github/actions/build-and-persist-plugin-binary" 43 | with: 44 | GOOS: 
freebsd 45 | GOARCH: amd64 46 | - uses: "./.github/actions/build-and-persist-plugin-binary" 47 | with: 48 | GOOS: freebsd 49 | GOARCH: arm 50 | build_linux: 51 | defaults: 52 | run: 53 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 54 | runs-on: ubuntu-latest 55 | container: 56 | image: docker.mirror.hashicorp.services/cimg/go:1.21 57 | steps: 58 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 59 | - uses: "./.github/actions/build-and-persist-plugin-binary" 60 | with: 61 | GOOS: linux 62 | GOARCH: 386 63 | - uses: "./.github/actions/build-and-persist-plugin-binary" 64 | with: 65 | GOOS: linux 66 | GOARCH: amd64 67 | - uses: "./.github/actions/build-and-persist-plugin-binary" 68 | with: 69 | GOOS: linux 70 | GOARCH: arm 71 | - uses: "./.github/actions/build-and-persist-plugin-binary" 72 | with: 73 | GOOS: linux 74 | GOARCH: arm64 75 | build_netbsd: 76 | defaults: 77 | run: 78 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 79 | runs-on: ubuntu-latest 80 | container: 81 | image: docker.mirror.hashicorp.services/cimg/go:1.21 82 | steps: 83 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 84 | - uses: "./.github/actions/build-and-persist-plugin-binary" 85 | with: 86 | GOOS: netbsd 87 | GOARCH: 386 88 | - uses: "./.github/actions/build-and-persist-plugin-binary" 89 | with: 90 | GOOS: netbsd 91 | GOARCH: amd64 92 | - uses: "./.github/actions/build-and-persist-plugin-binary" 93 | with: 94 | GOOS: netbsd 95 | GOARCH: arm 96 | build_openbsd: 97 | defaults: 98 | run: 99 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 100 | runs-on: ubuntu-latest 101 | container: 102 | image: docker.mirror.hashicorp.services/cimg/go:1.21 103 | steps: 104 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 105 | - uses: "./.github/actions/build-and-persist-plugin-binary" 106 | with: 107 | GOOS: openbsd 108 | GOARCH: 386 
109 | - uses: "./.github/actions/build-and-persist-plugin-binary" 110 | with: 111 | GOOS: openbsd 112 | GOARCH: amd64 113 | - uses: "./.github/actions/build-and-persist-plugin-binary" 114 | with: 115 | GOOS: openbsd 116 | GOARCH: arm 117 | build_solaris: 118 | defaults: 119 | run: 120 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 121 | runs-on: ubuntu-latest 122 | container: 123 | image: docker.mirror.hashicorp.services/cimg/go:1.21 124 | steps: 125 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 126 | - uses: "./.github/actions/build-and-persist-plugin-binary" 127 | with: 128 | GOOS: solaris 129 | GOARCH: amd64 130 | build_windows: 131 | defaults: 132 | run: 133 | working-directory: ~/go/src/github.com/hashicorp/packer-plugin-googlecompute 134 | runs-on: ubuntu-latest 135 | container: 136 | image: docker.mirror.hashicorp.services/cimg/go:1.21 137 | steps: 138 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 139 | - uses: "./.github/actions/build-and-persist-plugin-binary" 140 | with: 141 | GOOS: windows 142 | GOARCH: 386 143 | - uses: "./.github/actions/build-and-persist-plugin-binary" 144 | with: 145 | GOOS: windows 146 | GOARCH: amd64 147 | -------------------------------------------------------------------------------- /.github/workflows/go-test-darwin.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | # This GitHub action runs Packer go tests across 6 | # MacOS runners. 
7 | # 8 | 9 | name: "Go Test MacOS" 10 | 11 | on: 12 | push: 13 | branches: 14 | - 'main' 15 | pull_request: 16 | 17 | permissions: 18 | contents: read 19 | 20 | jobs: 21 | get-go-version: 22 | runs-on: ubuntu-latest 23 | outputs: 24 | go-version: ${{ steps.get-go-version.outputs.go-version }} 25 | steps: 26 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 27 | - name: 'Determine Go version' 28 | id: get-go-version 29 | run: | 30 | echo "Found Go $(cat .go-version)" 31 | echo "go-version=$(cat .go-version)" >> $GITHUB_OUTPUT 32 | darwin-go-tests: 33 | needs: 34 | - get-go-version 35 | runs-on: macos-latest 36 | name: Darwin Go tests 37 | steps: 38 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 39 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 40 | with: 41 | go-version: ${{ needs.get-go-version.outputs.go-version }} 42 | - run: | 43 | echo "Testing with Go ${{ needs.get-go-version.outputs.go-version }}" 44 | go test -race -count 1 ./... -timeout=3m 45 | 46 | 47 | -------------------------------------------------------------------------------- /.github/workflows/go-test-linux.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # 5 | # This GitHub action runs Packer go tests across 6 | # Linux runners. 
7 | # 8 | 9 | name: "Go Test Linux" 10 | 11 | on: 12 | push: 13 | branches: 14 | - 'main' 15 | pull_request: 16 | 17 | permissions: 18 | contents: read 19 | 20 | jobs: 21 | get-go-version: 22 | runs-on: ubuntu-latest 23 | outputs: 24 | go-version: ${{ steps.get-go-version.outputs.go-version }} 25 | steps: 26 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 27 | - name: 'Determine Go version' 28 | id: get-go-version 29 | run: | 30 | echo "Found Go $(cat .go-version)" 31 | echo "go-version=$(cat .go-version)" >> $GITHUB_OUTPUT 32 | linux-go-tests: 33 | needs: 34 | - get-go-version 35 | runs-on: ubuntu-latest 36 | name: Linux Go tests 37 | steps: 38 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 39 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 40 | with: 41 | go-version: ${{ needs.get-go-version.outputs.go-version }} 42 | - run: | 43 | echo "Testing with Go ${{ needs.get-go-version.outputs.go-version }}" 44 | go test -race -count 1 ./... -timeout=3m 45 | -------------------------------------------------------------------------------- /.github/workflows/go-test-windows.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # 5 | # This GitHub action runs Packer go tests across 6 | # Windows runners. 
7 | # 8 | 9 | name: "Go Test Windows" 10 | 11 | on: 12 | push: 13 | branches: 14 | - 'main' 15 | pull_request: 16 | 17 | permissions: 18 | contents: read 19 | 20 | jobs: 21 | get-go-version: 22 | runs-on: ubuntu-latest 23 | outputs: 24 | go-version: ${{ steps.get-go-version.outputs.go-version }} 25 | steps: 26 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 27 | - name: 'Determine Go version' 28 | id: get-go-version 29 | run: | 30 | echo "Found Go $(cat .go-version)" 31 | echo "go-version=$(cat .go-version)" >> $GITHUB_OUTPUT 32 | windows-go-tests: 33 | needs: 34 | - get-go-version 35 | runs-on: windows-latest 36 | name: Windows Go tests 37 | steps: 38 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 39 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 40 | with: 41 | go-version: ${{ needs.get-go-version.outputs.go-version }} 42 | - run: | 43 | echo "Testing with Go ${{ needs.get-go-version.outputs.go-version }}" 44 | go test -race -count 1 ./... -timeout=3m 45 | 46 | -------------------------------------------------------------------------------- /.github/workflows/go-validate.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # 5 | # This GitHub action runs basic linting checks for Packer. 
6 | # 7 | 8 | name: "Go Validate" 9 | 10 | on: 11 | push: 12 | branches: 13 | - 'main' 14 | pull_request: 15 | 16 | permissions: 17 | contents: read 18 | 19 | jobs: 20 | get-go-version: 21 | runs-on: ubuntu-latest 22 | outputs: 23 | go-version: ${{ steps.get-go-version.outputs.go-version }} 24 | steps: 25 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 26 | - name: 'Determine Go version' 27 | id: get-go-version 28 | run: | 29 | echo "Found Go $(cat .go-version)" 30 | echo "go-version=$(cat .go-version)" >> $GITHUB_OUTPUT 31 | check-mod-tidy: 32 | needs: 33 | - get-go-version 34 | runs-on: ubuntu-latest 35 | name: Go Mod Tidy 36 | steps: 37 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 38 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 39 | with: 40 | go-version: ${{ needs.get-go-version.outputs.go-version }} 41 | - run: go mod tidy 42 | check-lint: 43 | needs: 44 | - get-go-version 45 | runs-on: ubuntu-latest 46 | name: Lint check 47 | steps: 48 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 49 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 50 | with: 51 | go-version: ${{ needs.get-go-version.outputs.go-version }} 52 | - uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b # v5.0.0 53 | with: 54 | version: v1.60.1 55 | only-new-issues: true 56 | check-fmt: 57 | needs: 58 | - get-go-version 59 | runs-on: ubuntu-latest 60 | name: Gofmt check 61 | steps: 62 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 63 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 64 | with: 65 | go-version: ${{ needs.get-go-version.outputs.go-version }} 66 | - run: | 67 | go fmt ./... 68 | echo "==> Checking that code complies with go fmt requirements..." 69 | git diff --exit-code; if [ $$? 
-eq 1 ]; then \ 70 | echo "Found files that are not fmt'ed."; \ 71 | echo "You can use the command: \`go fmt ./...\` to reformat code."; \ 72 | exit 1; \ 73 | fi 74 | check-generate: 75 | needs: 76 | - get-go-version 77 | runs-on: ubuntu-latest 78 | name: Generate check 79 | steps: 80 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 81 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 82 | with: 83 | go-version: ${{ needs.get-go-version.outputs.go-version }} 84 | - run: | 85 | export PATH=$PATH:$(go env GOPATH)/bin 86 | make generate 87 | uncommitted="$(git status -s)" 88 | if [[ -z "$uncommitted" ]]; then 89 | echo "OK" 90 | else 91 | echo "Docs have been updated, but the compiled docs have not been committed." 92 | echo "Run 'make generate', and commit the result to resolve this error." 93 | echo "Generated but uncommitted files:" 94 | echo "$uncommitted" 95 | exit 1 96 | fi 97 | -------------------------------------------------------------------------------- /.github/workflows/jira.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | name: JIRA Sync 5 | on: 6 | issues: 7 | types: [labeled] 8 | permissions: 9 | contents: read 10 | jobs: 11 | sync: 12 | name: Sync to JIRA 13 | permissions: 14 | issues: write # for actions-ecosytem/action-create-comment 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Login 18 | uses: atlassian/gajira-login@45fd029b9f1d6d8926c6f04175aa80c0e42c9026 # v3.0.1 19 | env: 20 | JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} 21 | JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} 22 | JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} 23 | - name: Search 24 | if: github.event.action == 'labeled' 25 | id: search 26 | uses: tomhjp/gh-action-jira-search@04700b457f317c3e341ce90da5a3ff4ce058f2fa # v0.2.2 27 | with: 28 | # cf[10089] is Issue Link (use JIRA API to retrieve) 29 | jql: 'project = "HPR" AND cf[10089] = "${{ github.event.issue.html_url }}"' 30 | - name: Set type 31 | id: set-ticket-type 32 | run: | 33 | # Questions are not tracked in JIRA at this time. 34 | if [[ "${{ contains(github.event.issue.labels.*.name, 'question') }}" == "true" ]]; then 35 | echo "type=Invalid" >> "$GITHUB_OUTPUT" 36 | else 37 | # Properly labeled GH issues are assigned the standard "GH Issue" type upon creation. 38 | echo "type=GH Issue" >> "$GITHUB_OUTPUT" 39 | fi 40 | - name: Set labels 41 | id: set-ticket-labels 42 | run: | 43 | if [[ "${{ contains(github.event.issue.labels.*.name, 'bug') }}" == "true" ]]; then 44 | echo "labels=[\"bug\"]" >> "$GITHUB_OUTPUT" 45 | elif [[ "${{ contains(github.event.issue.labels.*.name, 'enhancement') }}" == "true" ]]; then 46 | echo "labels=[\"enhancement\"]" >> "$GITHUB_OUTPUT" 47 | else 48 | echo "labels=[]" >> "$GITHUB_OUTPUT" 49 | fi 50 | - name: Validate ticket 51 | if: steps.set-ticket-type.outputs.type == 'Invalid' 52 | run: | 53 | echo "Questions are not being synced to JIRA at this time." 
54 | echo "If the issue is a bug or an enhancement please remove the question label and reapply the 'sync to jira' label." 55 | - name: Create ticket 56 | id: create-ticket 57 | if: steps.search.outputs.issue == '' && github.event.label.name == 'sync to jira' && steps.set-ticket-type.outputs.type != 'Invalid' 58 | uses: atlassian/gajira-create@59e177c4f6451399df5b4911c2211104f171e669 # v3.0.1 59 | with: 60 | project: HPR 61 | issuetype: "${{ steps.set-ticket-type.outputs.type }}" 62 | summary: "${{ github.event.repository.name }}: ${{ github.event.issue.title }}" 63 | description: "${{ github.event.issue.body }}\n\n_Created from GitHub by ${{ github.actor }}._" 64 | # The field customfield_10089 refers to the Issue Link field in JIRA. 65 | fields: '{ "customfield_10089": "${{ github.event.issue.html_url }}", 66 | "components": [{ "name": "Core" }], 67 | "labels": ${{ steps.set-ticket-labels.outputs.labels }} }' 68 | - name: Add tracking comment 69 | if: steps.create-ticket.outputs.issue != '' && steps.set-ticket-type.outputs.type != 'Invalid' 70 | uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1 71 | with: 72 | script: | 73 | github.rest.issues.createComment({ 74 | issue_number: context.issue.number, 75 | owner: context.repo.owner, 76 | repo: context.repo.repo, 77 | body: ` 78 | This issue has been synced to JIRA for planning. 
79 | JIRA ID: [${{ steps.create-ticket.outputs.issue }}](https://hashicorp.atlassian.net/browse/${{steps.create-ticket.outputs.issue}})` 80 | }) 81 | -------------------------------------------------------------------------------- /.github/workflows/notify-integration-release-via-manual.yaml: -------------------------------------------------------------------------------- 1 | name: Notify Integration Release (Manual) 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | version: 6 | description: "The release version (semver)" 7 | default: 0.0.1 8 | required: false 9 | branch: 10 | description: "A branch or SHA" 11 | default: 'main' 12 | required: false 13 | jobs: 14 | strip-version: 15 | runs-on: ubuntu-latest 16 | outputs: 17 | packer-version: ${{ steps.strip.outputs.packer-version }} 18 | steps: 19 | - name: Strip leading v from version tag 20 | id: strip 21 | env: 22 | REF: ${{ github.event.inputs.version }} 23 | run: | 24 | echo "packer-version=$(echo "$REF" | sed -E 's/v?([0-9]+\.[0-9]+\.[0-9]+)/\1/')" >> "$GITHUB_OUTPUT" 25 | notify-release: 26 | needs: 27 | - strip-version 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: Checkout this repo 31 | uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 32 | with: 33 | ref: ${{ github.event.inputs.branch }} 34 | # Ensure that Docs are Compiled 35 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 36 | - shell: bash 37 | run: make generate 38 | - shell: bash 39 | run: | 40 | uncommitted="$(git status -s)" 41 | if [[ -z "$uncommitted" ]]; then 42 | echo "OK" 43 | else 44 | echo "Docs have been updated, but the compiled docs have not been committed." 45 | echo "Run 'make generate', and commit the result to resolve this error." 
46 | echo "Generated but uncommitted files:" 47 | echo "$uncommitted" 48 | exit 1 49 | fi 50 | # Perform the Release 51 | - name: Checkout integration-release-action 52 | uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 53 | with: 54 | repository: hashicorp/integration-release-action 55 | path: ./integration-release-action 56 | - name: Notify Release 57 | uses: ./integration-release-action 58 | with: 59 | integration_identifier: "packer/hashicorp/googlecompute" 60 | release_version: ${{ needs.strip-version.outputs.packer-version }} 61 | release_sha: ${{ github.event.inputs.branch }} 62 | github_token: ${{ secrets.GITHUB_TOKEN }} 63 | -------------------------------------------------------------------------------- /.github/workflows/notify-integration-release-via-tag.yaml: -------------------------------------------------------------------------------- 1 | name: Notify Integration Release (Tag) 2 | on: 3 | push: 4 | tags: 5 | - '*.*.*' # Proper releases 6 | jobs: 7 | strip-version: 8 | runs-on: ubuntu-latest 9 | outputs: 10 | packer-version: ${{ steps.strip.outputs.packer-version }} 11 | steps: 12 | - name: Strip leading v from version tag 13 | id: strip 14 | env: 15 | REF: ${{ github.ref_name }} 16 | run: | 17 | echo "packer-version=$(echo "$REF" | sed -E 's/v?([0-9]+\.[0-9]+\.[0-9]+)/\1/')" >> "$GITHUB_OUTPUT" 18 | notify-release: 19 | needs: 20 | - strip-version 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Checkout this repo 24 | uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 25 | with: 26 | ref: ${{ github.ref }} 27 | # Ensure that Docs are Compiled 28 | - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 29 | - shell: bash 30 | run: make generate 31 | - shell: bash 32 | run: | 33 | uncommitted="$(git status -s)" 34 | if [[ -z "$uncommitted" ]]; then 35 | echo "OK" 36 | else 37 | echo "Docs have been updated, but the compiled docs have not been committed." 
38 | echo "Run 'make generate', and commit the result to resolve this error." 39 | echo "Generated but uncommitted files:" 40 | echo "$uncommitted" 41 | exit 1 42 | fi 43 | # Perform the Release 44 | - name: Checkout integration-release-action 45 | uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 46 | with: 47 | repository: hashicorp/integration-release-action 48 | path: ./integration-release-action 49 | - name: Notify Release 50 | uses: ./integration-release-action 51 | with: 52 | integration_identifier: "packer/hashicorp/googlecompute" 53 | release_version: ${{ needs.strip-version.outputs.packer-version }} 54 | release_sha: ${{ github.ref }} 55 | github_token: ${{ secrets.GITHUB_TOKEN }} 56 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # This GitHub action can publish assets for release when a tag is created. 5 | # Currently its setup to run on any tag that matches the pattern "v*" (ie. v0.1.0). 6 | # 7 | # This uses an action (hashicorp/ghaction-import-gpg) that assumes you set your 8 | # private key in the `GPG_PRIVATE_KEY` secret and passphrase in the `GPG_PASSPHRASE` 9 | # secret. If you would rather own your own GPG handling, please fork this action 10 | # or use an alternative one for key handling. 11 | # 12 | # You will need to pass the `--batch` flag to `gpg` in your signing step 13 | # in `goreleaser` to indicate this is being used in a non-interactive mode. 
14 | # 15 | name: release 16 | on: 17 | push: 18 | tags: 19 | - 'v*' 20 | permissions: 21 | contents: write 22 | packages: read 23 | jobs: 24 | get-go-version: 25 | runs-on: ubuntu-latest 26 | outputs: 27 | go-version: ${{ steps.get-go-version.outputs.go-version }} 28 | steps: 29 | - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 30 | - name: 'Determine Go version' 31 | id: get-go-version 32 | run: | 33 | echo "Found Go $(cat .go-version)" 34 | echo "go-version=$(cat .go-version)" >> $GITHUB_OUTPUT 35 | goreleaser: 36 | needs: 37 | - get-go-version 38 | runs-on: ubuntu-latest 39 | steps: 40 | - name: Checkout 41 | uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 42 | - name: Unshallow 43 | run: git fetch --prune --unshallow 44 | - name: Set up Go 45 | uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 46 | with: 47 | go-version: ${{ needs.get-go-version.outputs.go-version }} 48 | - name: Describe plugin 49 | id: plugin_describe 50 | run: echo "api_version=$(go run . 
describe | jq -r '.api_version')" >> "$GITHUB_OUTPUT" 51 | - name: Install signore 52 | uses: hashicorp/setup-signore-package@v1 53 | - name: Run GoReleaser 54 | uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0 55 | with: 56 | version: latest 57 | args: release --clean --timeout 120m 58 | env: 59 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 60 | API_VERSION: ${{ steps.plugin_describe.outputs.api_version }} 61 | SIGNORE_CLIENT_ID: ${{ secrets.SIGNORE_CLIENT_ID }} 62 | SIGNORE_CLIENT_SECRET: ${{ secrets.SIGNORE_CLIENT_SECRET }} 63 | SIGNORE_SIGNER: ${{ secrets.SIGNORE_SIGNER }} 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | main 2 | .docs 3 | packer-plugin-googlecompute 4 | 5 | **/packer_log*.txt 6 | 7 | crash.log 8 | -------------------------------------------------------------------------------- /.go-version: -------------------------------------------------------------------------------- 1 | 1.21.13 2 | 3 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | issues: 5 | # List of regexps of issue texts to exclude, empty list by default. 6 | # But independently from this option we use default exclude patterns, 7 | # it can be disabled by `exclude-use-default: false`. 
To list all 8 | # excluded by default patterns execute `golangci-lint run --help` 9 | 10 | exclude-rules: 11 | # Exclude gosimple bool check 12 | - linters: 13 | - gosimple 14 | text: "S(1002|1008|1021)" 15 | # Exclude failing staticchecks for now 16 | - linters: 17 | - staticcheck 18 | text: "SA(1006|1019|4006|4010|4017|5007|6005|9004):" 19 | # Exclude lll issues for long lines with go:generate 20 | - linters: 21 | - lll 22 | source: "^//go:generate " 23 | - linters: 24 | - errcheck 25 | path: ".*_test.go" 26 | 27 | # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 28 | max-issues-per-linter: 0 29 | 30 | # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 31 | max-same-issues: 0 32 | 33 | linters: 34 | disable-all: true 35 | enable: 36 | - errcheck 37 | - goimports 38 | - gosimple 39 | - govet 40 | - ineffassign 41 | - staticcheck 42 | - unconvert 43 | - unused 44 | fast: true 45 | 46 | # options for analysis running 47 | run: 48 | # default concurrency is a available CPU number 49 | concurrency: 4 50 | 51 | # timeout for analysis, e.g. 30s, 5m, default is 1m 52 | timeout: 10m 53 | 54 | # exit code when at least one issue was found, default is 1 55 | issues-exit-code: 1 56 | 57 | # include test files or not, default is true 58 | tests: true 59 | 60 | # list of build tags, all linters use it. Default is empty list. 61 | #build-tags: 62 | # - mytag 63 | 64 | # which dirs to skip: issues from them won't be reported; 65 | # can use regexp here: generated.*, regexp is applied on full path; 66 | # default value is empty list, but default dirs are skipped independently 67 | # from this option's value (see skip-dirs-use-default). 68 | #skip-dirs: 69 | # - src/external_libs 70 | # - autogenerated_by_my_lib 71 | 72 | # default is true. 
Enables skipping of directories: 73 | # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ 74 | skip-dirs-use-default: true 75 | 76 | # which files to skip: they will be analyzed, but issues from them 77 | # won't be reported. Default value is empty list, but there is 78 | # no need to include all autogenerated files, we confidently recognize 79 | # autogenerated files. If it's not please let us know. 80 | exclude-files: 81 | - ".*\\.hcl2spec\\.go$" 82 | # - lib/bad.go 83 | 84 | # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": 85 | # If invoked with -mod=readonly, the go command is disallowed from the implicit 86 | # automatic updating of go.mod described above. Instead, it fails when any changes 87 | # to go.mod are needed. This setting is most useful to check that go.mod does 88 | # not need updates, such as in a continuous integration and testing system. 89 | # If invoked with -mod=vendor, the go command assumes that the vendor 90 | # directory holds the correct copies of dependencies and ignores 91 | # the dependency descriptions in go.mod. 92 | # modules-download-mode: vendor 93 | 94 | 95 | # output configuration options 96 | output: 97 | # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" 98 | formats: colored-line-number 99 | 100 | # print lines of code with issue, default is true 101 | print-issued-lines: true 102 | 103 | # print linter name in the end of issue text, default is true 104 | print-linter-name: true 105 | 106 | # make issues output unique by line, default is true 107 | uniq-by-line: true 108 | 109 | 110 | # all available settings of specific linters 111 | linters-settings: 112 | errcheck: 113 | # report about not checking of errors in type assetions: `a := b.(MyStruct)`; 114 | # default is false: such cases aren't reported by default. 
115 | check-type-assertions: false 116 | 117 | # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; 118 | # default is false: such cases aren't reported by default. 119 | check-blank: false 120 | 121 | # [deprecated] comma-separated list of pairs of the form pkg:regex 122 | # the regex is used to ignore names within pkg. (default "fmt:.*"). 123 | # see https://github.com/kisielk/errcheck#the-deprecated-method for details 124 | exclude-functions: fmt:.*,io/ioutil:^Read.*,io:Close 125 | 126 | # path to a file containing a list of functions to exclude from checking 127 | # see https://github.com/kisielk/errcheck#excluding-functions for details 128 | #exclude: /path/to/file.txt 129 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # This is an example goreleaser.yaml file with some defaults. 5 | # Make sure to check the documentation at http://goreleaser.com 6 | env: 7 | - CGO_ENABLED=0 8 | before: 9 | hooks: 10 | # We strongly recommend running tests to catch any regression before release. 11 | # Even though, this an optional step. 12 | - go test ./... 
13 | # Check plugin compatibility with required version of the Packer SDK 14 | - make plugin-check 15 | # Copy LICENSE file for inclusion in zip archive 16 | - cp LICENSE LICENSE.txt 17 | builds: 18 | # A separated build to run the packer-plugins-check only once for a linux_amd64 binary 19 | - 20 | id: plugin-check 21 | mod_timestamp: '{{ .CommitTimestamp }}' 22 | flags: 23 | - -trimpath #removes all file system paths from the compiled executable 24 | ldflags: 25 | - '-s -w -X {{ .ModulePath }}/version.Version={{.Version}} -X {{ .ModulePath }}/version.VersionPrerelease= ' 26 | goos: 27 | - linux 28 | goarch: 29 | - amd64 30 | binary: '{{ .ProjectName }}_v{{ .Version }}_{{ .Env.API_VERSION }}_{{ .Os }}_{{ .Arch }}' 31 | - 32 | id: linux-builds 33 | mod_timestamp: '{{ .CommitTimestamp }}' 34 | flags: 35 | - -trimpath #removes all file system paths from the compiled executable 36 | ldflags: 37 | - '-s -w -X {{ .ModulePath }}/version.Version={{.Version}} -X {{ .ModulePath }}/version.VersionPrerelease= ' 38 | goos: 39 | - linux 40 | goarch: 41 | - amd64 42 | - '386' 43 | - arm 44 | - arm64 45 | ignore: 46 | - goos: linux 47 | goarch: amd64 48 | binary: '{{ .ProjectName }}_v{{ .Version }}_{{ .Env.API_VERSION }}_{{ .Os }}_{{ .Arch }}' 49 | - 50 | id: darwin-builds 51 | mod_timestamp: '{{ .CommitTimestamp }}' 52 | flags: 53 | - -trimpath #removes all file system paths from the compiled executable 54 | ldflags: 55 | - '-s -w -X {{ .ModulePath }}/version.Version={{.Version}} -X {{ .ModulePath }}/version.VersionPrerelease= ' 56 | goos: 57 | - darwin 58 | goarch: 59 | - amd64 60 | - arm64 61 | binary: '{{ .ProjectName }}_v{{ .Version }}_{{ .Env.API_VERSION }}_{{ .Os }}_{{ .Arch }}' 62 | - 63 | id: other-builds 64 | mod_timestamp: '{{ .CommitTimestamp }}' 65 | flags: 66 | - -trimpath #removes all file system paths from the compiled executable 67 | ldflags: 68 | - '-s -w -X {{ .ModulePath }}/version.Version={{.Version}} -X {{ .ModulePath }}/version.VersionPrerelease= ' 69 | 
goos: 70 | - netbsd 71 | - openbsd 72 | - freebsd 73 | - windows 74 | - solaris 75 | goarch: 76 | - amd64 77 | - '386' 78 | - arm 79 | ignore: 80 | - goos: windows 81 | goarch: arm 82 | - goos: solaris 83 | goarch: arm 84 | - goos: solaris 85 | goarch: '386' 86 | binary: '{{ .ProjectName }}_v{{ .Version }}_{{ .Env.API_VERSION }}_{{ .Os }}_{{ .Arch }}' 87 | archives: 88 | - format: zip 89 | files: 90 | - "LICENSE.txt" 91 | 92 | name_template: '{{ .ProjectName }}_v{{ .Version }}_{{ .Env.API_VERSION }}_{{ .Os }}_{{ .Arch }}' 93 | checksum: 94 | name_template: '{{ .ProjectName }}_v{{ .Version }}_SHA256SUMS' 95 | algorithm: sha256 96 | signs: 97 | - cmd: signore 98 | args: ["sign", "--dearmor", "--file", "${artifact}", "--out", "${signature}"] 99 | artifacts: checksum 100 | signature: ${artifact}.sig 101 | 102 | changelog: 103 | use: github-native 104 | -------------------------------------------------------------------------------- /.web-docs/components/data-source/image/README.md: -------------------------------------------------------------------------------- 1 | Type: `googlecompute-image` 2 | 3 | The Google Compute Image data source filters and fetches a GCE image and outputs relevant image metadata for 4 | use with [Google Compute builders](/packer/integrations/hashicorp/googlecompute). 5 | 6 | -> **Note:** Data sources is a feature exclusively available to HCL2 templates. 7 | 8 | Basic example of usage: 9 | 10 | ```hcl 11 | data "googlecompute-image" "basic-example" { 12 | project_id = "debian-cloud" 13 | filters = "family=debian-12 AND labels.public-image=true" 14 | most_recent = true 15 | } 16 | ``` 17 | 18 | This configuration selects the most recent GCE image from the `debian-cloud` project that belongs to the`debian-12` family and has the `public-image` label set to `true`. 19 | The data source will fail unless exactly one image is matched. Setting `most_recent = true` ensures only the newest image is selected when multiple matches exist. 
20 | 21 | ## Configuration Reference 22 | 23 | 24 | 25 | - `project_id` (string) - The Google Cloud project ID to search for images. 26 | 27 | - `filters` (string) - The filter expression to narrow down the image search. 28 | For example: "name=ubuntu" or "family=ubuntu-2004". 29 | The expressions can be combined with AND/OR like this: 30 | "name=ubuntu AND family=ubuntu-2004". 31 | See https://cloud.google.com/sdk/gcloud/reference/topic/filters 32 | 33 | - `most_recent` (bool) - If true, the most recent image will be returned. 34 | If false, an error will be returned if more than one image matches the filters. 35 | 36 | 37 | 38 | 39 | ## Output Data 40 | 41 | 42 | 43 | - `id` (string) - ID 44 | 45 | - `name` (string) - Name 46 | 47 | - `creation_date` (string) - Creation Date 48 | 49 | - `labels` (map[string]string) - Labels 50 | 51 | 52 | 53 | 54 | ## Authentication 55 | 56 | To authenticate with GCE, this data-source supports everything the plugin does. 57 | To get more information on this, refer to the plugin's description page, under 58 | the [authentication](/packer/integrations/hashicorp/googlecompute#authentication) section. 59 | -------------------------------------------------------------------------------- /.web-docs/components/data-source/secretsmanager/README.md: -------------------------------------------------------------------------------- 1 | The Secrets Manager data source provides information about a Secrets Manager secret version, 2 | including its value and metadata. 3 | 4 | -> **Note:** Data sources are a feature exclusively available to HCL2 templates.
5 | 6 | Basic examples of usage: 7 | 8 | ```hcl 9 | data "googlecompute-secretsmanager" "basic-example" { 10 | project_id = "debian-cloud" 11 | name = "packer_test_secret" 12 | key = "packer_test_key" 13 | } 14 | 15 | # usage example of the data source output 16 | locals { 17 | value = data.googlecompute-secretsmanager.basic-example.value 18 | payload = data.googlecompute-secretsmanager.basic-example.payload 19 | } 20 | ``` 21 | 22 | Reading key-value pairs from JSON back into a native Packer map can be accomplished 23 | with the [jsondecode() function](/packer/docs/templates/hcl_templates/functions/encoding/jsondecode). 24 | 25 | ## Configuration Reference 26 | 27 | ### Required 28 | 29 | 30 | 31 | - `project_id` (string) - The Google Cloud project ID where the secret is stored. 32 | 33 | - `name` (string) - The name of the secret in the secret manager. 34 | 35 | 36 | 37 | 38 | ### Optional 39 | 40 | 41 | 42 | - `key` (string) - The key to extract from the secret payload. 43 | If not provided, the entire payload will be returned. 44 | 45 | - `version` (string) - The version of the secret to access. Defaults to "latest" if not specified. 46 | 47 | 48 | 49 | 50 | ## Output Data 51 | 52 | 53 | 54 | - `payload` (string) - The raw string payload of the secret version. 55 | 56 | - `value` (string) - The value extracted using the 'key', if provided. 57 | 58 | - `checksum` (int64) - The crc32c checksum for the payload. 59 | 60 | 61 | 62 | 63 | ## Authentication 64 | 65 | To authenticate with GCE, this data-source supports everything the plugin does. 66 | To get more information on this, refer to the plugin's description page, under 67 | the [authentication](/packer/integrations/hashicorp/googlecompute#authentication) section. 68 | -------------------------------------------------------------------------------- /.web-docs/metadata.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # For full specification on the configuration of this file visit: 5 | # https://github.com/hashicorp/integration-template#metadata-configuration 6 | integration { 7 | name = "Google Cloud Platform" 8 | description = "The googlecompute plugin can be used with HashiCorp Packer to create custom images on GCE." 9 | identifier = "packer/hashicorp/googlecompute" 10 | flags = ["hcp-ready"] 11 | component { 12 | type = "data-source" 13 | name = "Secrets Manager" 14 | slug = "secretsmanager" 15 | } 16 | component { 17 | type = "data-source" 18 | name = "GCE Image" 19 | slug = "image" 20 | } 21 | component { 22 | type = "builder" 23 | name = "Google Cloud Platform" 24 | slug = "googlecompute" 25 | } 26 | component { 27 | type = "post-processor" 28 | name = "Google Cloud Platform Image Import" 29 | slug = "googlecompute-import" 30 | } 31 | component { 32 | type = "post-processor" 33 | name = "Google Cloud Platform Image Exporter" 34 | slug = "googlecompute-export" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Latest Release 2 | 3 | Please refer to [releases](https://github.com/hashicorp/packer-plugin-googlecompute/releases) for the latest CHANGELOG information. 
4 | 5 | --- 6 | ## 1.0.8 (December 6, 2021) 7 | 8 | ### Exciting New Features 🎉 9 | * Customer-managed Encryption Key for Remote VM's Boot Disk #20 by @wilsonfv in [#21](https://github.com/hashicorp/packer-plugin-googlecompute/pull/21) 10 | ### Doc improvements 📚 11 | * update ansible example by @lmayorga1980 in [#65](https://github.com/hashicorp/packer-plugin-googlecompute/pull/65) 12 | 13 | ## 1.0.6 (October 18, 2021) 14 | 15 | ### NOTES: 16 | Support for the HCP Packer registry is currently in beta and requires 17 | Packer v1.7.7 [GH-47] [GH-52] 18 | 19 | ### Improvements: 20 | * Add `SourceImageName` as shared builder information variable. [GH-47] 21 | * Add `SourceImageName` to HCP Packer registry image metadata. [GH-47] [GH-52] 22 | * Update Packer plugin SDK to version v0.2.7 [GH-48] 23 | 24 | ### BUG FIXES: 25 | * Pass DiskName configuration argument when creating instance. [GH-51] 26 | 27 | ## 1.0.5 (September 13, 2021) 28 | 29 | ### NOTES: 30 | HCP Packer private beta support requires Packer version 1.7.5 or 1.7.6 [GH-32] 31 | 32 | ### FEATURES: 33 | * Add HCP Packer registry image metadata to builder artifacts. [GH-32] 34 | * Bump Packer plugin SDK to version v0.2.5 [GH-32] 35 | 36 | ### IMPROVEMENTS: 37 | * Update driver to use user-configured Service Account for public key import. 38 | [GH-33] 39 | 40 | ## 1.0.4 (September 2, 2021) 41 | 42 | * Remove Packer core as dependency to plugin. [GH-36] 43 | 44 | ## 1.0.3 (September 1, 2021) 45 | 46 | * Upgrade plugin to use Go 1.17. 47 | 48 | ## 1.0.2 (August 27, 2021) 49 | 50 | * Treat ERROR 4047 as retryable. [GH-34] 51 | 52 | ## 1.0.0 (June 14, 2021) 53 | The code base for this plugin has been stable since the Packer core split. 54 | We are marking this plugin as v1.0.0 to indicate that it is stable and ready for consumption via `packer init`. 55 | 56 | * Update packer-plugin-sdk to v0.2.3 57 | * Update IAP tunnel support to work with all builder authentication types. 
[GH-19] 58 | 59 | 60 | ## 0.0.2 (April 21, 2021) 61 | 62 | * Google Compute plugin break out from Packer core. Changes prior to break out can be found in [Packer's CHANGELOG](https://github.com/hashicorp/packer/blob/master/CHANGELOG.md) 63 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @hashicorp/packer 2 | -------------------------------------------------------------------------------- /GNUmakefile: -------------------------------------------------------------------------------- 1 | NAME=googlecompute 2 | BINARY=packer-plugin-${NAME} 3 | PLUGIN_FQN="$(shell grep -E '^module' 0 { 86 | labels["source_image_project_ids"] = strings.Join(a.config.SourceImageProjectId, ",") 87 | } 88 | 89 | for k, v := range a.image.Labels { 90 | labels["tags"] = labels["tags"] + fmt.Sprintf("%s:%s", k, v) 91 | } 92 | 93 | img.Labels = labels 94 | return img 95 | } 96 | 97 | if name == registryimage.ArtifactStateURI { 98 | img, _ := registryimage.FromArtifact(a, 99 | registryimage.WithID(a.Id()), 100 | registryimage.WithProvider("gce"), 101 | registryimage.WithRegion(a.config.Zone), 102 | ) 103 | 104 | labels := map[string]string{ 105 | "self_link": a.image.SelfLink, 106 | "project_id": a.image.ProjectId, 107 | "disk_size_gb": strconv.FormatInt(a.image.SizeGb, 10), 108 | "machine_type": a.config.MachineType, 109 | "licenses": strings.Join(a.image.Licenses, ","), 110 | } 111 | 112 | // Set source image and/or family as labels 113 | if a.config.SourceImage != "" { 114 | labels["source_image"] = a.config.SourceImage 115 | } 116 | if a.config.SourceImageFamily != "" { 117 | labels["source_image_family"] = a.config.SourceImageFamily 118 | } 119 | 120 | // Set PARtifact's source image name from state; this is set regardless 121 | // of whether image or image family were used: 122 | data, ok := a.StateData["generated_data"].(map[string]interface{}) 123 | if ok 
{ 124 | img.SourceImageID = data["SourceImageName"].(string) 125 | } 126 | 127 | if len(a.config.SourceImageProjectId) > 0 { 128 | labels["source_image_project_ids"] = strings.Join(a.config.SourceImageProjectId, ",") 129 | } 130 | 131 | for k, v := range a.image.Labels { 132 | labels["tags"] = labels["tags"] + fmt.Sprintf("%s:%s", k, v) 133 | } 134 | 135 | img.Labels = labels 136 | return img 137 | } 138 | 139 | switch name { 140 | case "ImageName": 141 | return a.image.Name 142 | case "ImageSizeGb": 143 | return a.image.SizeGb 144 | case "ProjectId": 145 | return a.config.ProjectId 146 | case "BuildZone": 147 | return a.config.Zone 148 | } 149 | 150 | if _, ok := a.StateData[name]; ok { 151 | return a.StateData[name] 152 | } 153 | 154 | return nil 155 | 156 | } 157 | -------------------------------------------------------------------------------- /builder/googlecompute/artifact_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "testing" 8 | 9 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 10 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 11 | registryimage "github.com/hashicorp/packer-plugin-sdk/packer/registry/image" 12 | "github.com/mitchellh/mapstructure" 13 | ) 14 | 15 | func TestArtifact_impl(t *testing.T) { 16 | var _ packersdk.Artifact = new(Artifact) 17 | } 18 | 19 | func TestArtifactState_StateData(t *testing.T) { 20 | expectedData := "this is the data" 21 | artifact := &Artifact{ 22 | StateData: map[string]interface{}{"state_data": expectedData}, 23 | } 24 | 25 | // Valid state 26 | result := artifact.State("state_data") 27 | if result != expectedData { 28 | t.Fatalf("Bad: State data was %s instead of %s", result, expectedData) 29 | } 30 | 31 | // Invalid state 32 | result = artifact.State("invalid_key") 33 | if result != nil { 34 | t.Fatalf("Bad: State should be nil for invalid state data name") 35 | } 36 | 37 | // Nil StateData should not fail and should return nil 38 | artifact = &Artifact{} 39 | result = artifact.State("key") 40 | if result != nil { 41 | t.Fatalf("Bad: State should be nil for nil StateData") 42 | } 43 | } 44 | 45 | func TestArtifactState_RegistryImageMetadata(t *testing.T) { 46 | artifact := &Artifact{ 47 | config: &Config{Zone: "us1"}, 48 | image: &common.Image{Name: "test-image", ProjectId: "5678"}, 49 | } 50 | 51 | // Valid state 52 | result := artifact.State(registryimage.ArtifactStateURI) 53 | if result == nil { 54 | t.Fatalf("Bad: HCP Packer registry image data was nil") 55 | } 56 | 57 | var image registryimage.Image 58 | err := mapstructure.Decode(result, &image) 59 | if err != nil { 60 | t.Errorf("Bad: unexpected error when trying to decode state into registryimage.Image %v", err) 61 | } 62 | 63 | if image.ImageID != artifact.image.Name { 64 | t.Errorf("Bad: unexpected value for ImageID %q, expected %q", image.ImageID, 
artifact.image.Name) 65 | } 66 | 67 | if image.ProviderRegion != artifact.State("BuildZone").(string) { 68 | t.Errorf("Bad: unexpected value for ImageID %q, expected %q", image.ProviderRegion, artifact.State("BuildZone").(string)) 69 | } 70 | 71 | if image.Labels["project_id"] != artifact.image.ProjectId { 72 | t.Errorf("Bad: unexpected value for Labels %q, expected %q", image.Labels["project_id"], artifact.image.ProjectId) 73 | } 74 | 75 | } 76 | -------------------------------------------------------------------------------- /builder/googlecompute/builder.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | // The googlecompute package contains a packersdk.Builder implementation that 5 | // builds images for Google Compute Engine. 6 | package googlecompute 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "log" 12 | 13 | "github.com/hashicorp/hcl/v2/hcldec" 14 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 15 | "github.com/hashicorp/packer-plugin-sdk/communicator" 16 | "github.com/hashicorp/packer-plugin-sdk/multistep" 17 | "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps" 18 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 19 | "github.com/hashicorp/packer-plugin-sdk/packerbuilderdata" 20 | ) 21 | 22 | // The unique ID for this builder. 23 | const BuilderId = "packer.googlecompute" 24 | 25 | // Builder represents a Packer Builder. 26 | type Builder struct { 27 | config Config 28 | runner multistep.Runner 29 | } 30 | 31 | func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() } 32 | 33 | func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) { 34 | warnings, errs := b.config.Prepare(raws...) 
35 | if errs != nil { 36 | return nil, warnings, errs 37 | } 38 | generatedDataKeys := []string{ 39 | // This will be set with the source image name even if the config 40 | // uses source image family instead of source image id. 41 | "SourceImageName", 42 | } 43 | 44 | return generatedDataKeys, warnings, nil 45 | } 46 | 47 | // Run executes a googlecompute Packer build and returns a packersdk.Artifact 48 | // representing a GCE machine image. 49 | func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) { 50 | cfg := &common.GCEDriverConfig{ 51 | Ui: ui, 52 | ProjectId: b.config.ProjectId, 53 | Scopes: b.config.Scopes, 54 | } 55 | b.config.Authentication.ApplyDriverConfig(cfg) 56 | 57 | driver, err := common.NewDriverGCE(*cfg) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | // Set up the state. 63 | state := new(multistep.BasicStateBag) 64 | state.Put("config", &b.config) 65 | state.Put("driver", driver) 66 | state.Put("hook", hook) 67 | state.Put("ui", ui) 68 | generatedData := &packerbuilderdata.GeneratedData{State: state} 69 | 70 | // Build the steps. 
71 | steps := []multistep.Step{ 72 | new(StepCheckExistingImage), 73 | &communicator.StepSSHKeyGen{ 74 | CommConf: &b.config.Comm, 75 | SSHTemporaryKeyPair: b.config.Comm.SSH.SSHTemporaryKeyPair, 76 | }, 77 | multistep.If(b.config.PackerDebug && b.config.Comm.SSHPrivateKeyFile == "", 78 | &communicator.StepDumpSSHKey{ 79 | Path: fmt.Sprintf("gce_%s.pem", b.config.PackerBuildName), 80 | SSH: &b.config.Comm.SSH, 81 | }, 82 | ), 83 | &StepCreateDisks{ 84 | DiskConfiguration: b.config.ExtraBlockDevices, 85 | }, 86 | &StepImportOSLoginSSHKey{ 87 | Debug: b.config.PackerDebug, 88 | }, 89 | &StepCreateInstance{ 90 | Debug: b.config.PackerDebug, 91 | GeneratedData: generatedData, 92 | }, 93 | &StepCreateWindowsPassword{ 94 | Debug: b.config.PackerDebug, 95 | DebugKeyPath: fmt.Sprintf("gce_windows_%s.pem", b.config.PackerBuildName), 96 | }, 97 | &StepInstanceInfo{ 98 | Debug: b.config.PackerDebug, 99 | }, 100 | &StepStartTunnel{ 101 | IAPConf: &b.config.IAPConfig, 102 | CommConf: &b.config.Comm, 103 | AccountFile: b.config.CredentialsFile, 104 | ImpersonateAccount: b.config.ImpersonateServiceAccount, 105 | ProjectId: b.config.ProjectId, 106 | }, 107 | &communicator.StepConnect{ 108 | Config: &b.config.Comm, 109 | Host: communicator.CommHost(b.config.Comm.Host(), "instance_ip"), 110 | SSHConfig: b.config.Comm.SSHConfigFunc(), 111 | WinRMConfig: winrmConfig, 112 | }, 113 | new(commonsteps.StepProvision), 114 | &commonsteps.StepCleanupTempKeys{ 115 | Comm: &b.config.Comm, 116 | }, 117 | } 118 | if _, exists := b.config.Metadata[StartupScriptKey]; exists || b.config.StartupScriptFile != "" { 119 | steps = append(steps, new(StepWaitStartupScript)) 120 | } 121 | steps = append(steps, new(StepTeardownInstance), new(StepCreateImage)) 122 | 123 | // Run the steps. 124 | b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui) 125 | b.runner.Run(ctx, state) 126 | 127 | // Report any errors. 
128 | if rawErr, ok := state.GetOk("error"); ok { 129 | return nil, rawErr.(error) 130 | } 131 | if _, ok := state.GetOk("image"); !ok { 132 | log.Println("Failed to find image in state. Bug?") 133 | return nil, nil 134 | } 135 | 136 | artifact := &Artifact{ 137 | image: state.Get("image").(*common.Image), 138 | driver: driver, 139 | config: &b.config, 140 | StateData: map[string]interface{}{"generated_data": state.Get("generated_data")}, 141 | } 142 | return artifact, nil 143 | } 144 | -------------------------------------------------------------------------------- /builder/googlecompute/startup.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "fmt" 8 | ) 9 | 10 | const StartupScriptKey string = "startup-script" 11 | const StartupScriptStatusKey string = "startup-script-status" 12 | const StartupWrappedScriptKey string = "packer-wrapped-startup-script" 13 | const EnableOSLoginKey string = "enable-oslogin" 14 | 15 | const StartupScriptStatusDone string = "done" 16 | const StartupScriptStatusError string = "error" 17 | const StartupScriptStatusNotDone string = "notdone" 18 | 19 | var StartupScriptLinux string = fmt.Sprintf(`#!/usr/bin/env bash 20 | echo "Packer startup script starting." 
21 | RETVAL=0 22 | BASEMETADATAURL=http://metadata.google.internal/computeMetadata/v1/instance/ 23 | 24 | GetMetadata () { 25 | echo "$(curl -f -H "Metadata-Flavor: Google" ${BASEMETADATAURL}/${1} 2> /dev/null)" 26 | } 27 | 28 | ZONE=$(basename $(GetMetadata zone)) 29 | 30 | SetMetadata () { 31 | gcloud compute instances add-metadata ${HOSTNAME} --metadata ${1}=${2} --zone ${ZONE} 32 | } 33 | 34 | STARTUPSCRIPT=$(GetMetadata attributes/%[1]s) 35 | STARTUPSCRIPTPATH=/packer-wrapped-startup-script 36 | if [ -f "/var/log/startupscript.log" ]; then 37 | STARTUPSCRIPTLOGPATH=/var/log/startupscript.log 38 | else 39 | STARTUPSCRIPTLOGPATH=/var/log/daemon.log 40 | fi 41 | STARTUPSCRIPTLOGDEST=$(GetMetadata attributes/startup-script-log-dest) 42 | 43 | if [[ ! -z $STARTUPSCRIPT ]]; then 44 | echo "Executing user-provided startup script..." 45 | echo "${STARTUPSCRIPT}" > ${STARTUPSCRIPTPATH} 46 | chmod +x ${STARTUPSCRIPTPATH} 47 | ${STARTUPSCRIPTPATH} 48 | RETVAL=$? 49 | 50 | if [[ ! -z $STARTUPSCRIPTLOGDEST ]]; then 51 | echo "Uploading user-provided startup script log to ${STARTUPSCRIPTLOGDEST}..." 52 | gsutil -h "Content-Type:text/plain" cp ${STARTUPSCRIPTLOGPATH} ${STARTUPSCRIPTLOGDEST} 53 | fi 54 | 55 | rm ${STARTUPSCRIPTPATH} 56 | fi 57 | 58 | if [ $RETVAL -ne 0 ]; then 59 | echo "Packer startup script exited with exit code: ${RETVAL}" 60 | SetMetadata %[2]s %[4]s 61 | else 62 | echo "Packer startup script done." 63 | SetMetadata %[2]s %[3]s 64 | fi 65 | 66 | exit $RETVAL 67 | `, StartupWrappedScriptKey, StartupScriptStatusKey, StartupScriptStatusDone, StartupScriptStatusError) 68 | 69 | var StartupScriptWindows string = "" 70 | -------------------------------------------------------------------------------- /builder/googlecompute/step_check_existing_image.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 11 | "github.com/hashicorp/packer-plugin-sdk/multistep" 12 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 13 | ) 14 | 15 | // StepCheckExistingImage represents a Packer build step that checks if the 16 | // target image already exists, and aborts immediately if so. 17 | type StepCheckExistingImage int 18 | 19 | // Run executes the Packer build step that checks if the image already exists. 20 | func (s *StepCheckExistingImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 21 | c := state.Get("config").(*Config) 22 | d := state.Get("driver").(common.Driver) 23 | ui := state.Get("ui").(packersdk.Ui) 24 | 25 | ui.Say("Checking image does not exist...") 26 | c.imageAlreadyExists = d.ImageExists(c.ImageProjectId, c.ImageName) 27 | if !c.PackerForce && c.imageAlreadyExists { 28 | err := fmt.Errorf("Image %s already exists in project %s.\n"+ 29 | "Use the force flag to delete it prior to building.", c.ImageName, c.ImageProjectId) 30 | state.Put("error", err) 31 | ui.Error(err.Error()) 32 | return multistep.ActionHalt 33 | } 34 | return multistep.ActionContinue 35 | } 36 | 37 | // Cleanup. 38 | func (s *StepCheckExistingImage) Cleanup(state multistep.StateBag) {} 39 | -------------------------------------------------------------------------------- /builder/googlecompute/step_check_existing_image_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "testing" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 11 | "github.com/hashicorp/packer-plugin-sdk/multistep" 12 | ) 13 | 14 | func TestStepCheckExistingImage_impl(t *testing.T) { 15 | var _ multistep.Step = new(StepCheckExistingImage) 16 | } 17 | 18 | func TestStepCheckExistingImage(t *testing.T) { 19 | state := testState(t) 20 | step := new(StepCheckExistingImage) 21 | defer step.Cleanup(state) 22 | 23 | state.Put("instance_name", "foo") 24 | 25 | config := state.Get("config").(*Config) 26 | driver := state.Get("driver").(*common.DriverMock) 27 | driver.ImageExistsResult = true 28 | 29 | // run the step 30 | if action := step.Run(context.Background(), state); action != multistep.ActionHalt { 31 | t.Fatalf("bad action: %#v", action) 32 | } 33 | 34 | // Verify state 35 | if driver.ImageExistsName != config.ImageName { 36 | t.Fatalf("bad: %#v", driver.ImageExistsName) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /builder/googlecompute/step_create_disks.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "fmt" 10 | "strings" 11 | "time" 12 | 13 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 14 | "github.com/hashicorp/packer-plugin-sdk/multistep" 15 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 16 | ) 17 | 18 | type StepCreateDisks struct { 19 | DiskConfiguration []common.BlockDevice 20 | } 21 | 22 | func (s *StepCreateDisks) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 23 | ui := state.Get("ui").(packersdk.Ui) 24 | 25 | if !s.needToCreateDisks() { 26 | ui.Say("no persistent disk to create") 27 | return multistep.ActionContinue 28 | } 29 | 30 | driver := state.Get("driver").(common.Driver) 31 | config := state.Get("config").(*Config) 32 | 33 | for i, disk := range s.DiskConfiguration { 34 | if disk.VolumeType == common.LocalScratch { 35 | continue 36 | } 37 | 38 | if disk.SourceVolume != "" { 39 | continue 40 | } 41 | 42 | ui.Say(fmt.Sprintf("Creating persistent disk %s", disk.DiskName)) 43 | 44 | _, errCh := driver.CreateDisk(disk) 45 | 46 | var err error 47 | select { 48 | case err = <-errCh: 49 | case <-time.After(config.StateTimeout): 50 | err = errors.New("time out while waiting for disk to create") 51 | } 52 | if err != nil { 53 | err := fmt.Errorf("failed to create disk: %s", err) 54 | ui.Say(err.Error()) 55 | state.Put("error", err) 56 | return multistep.ActionHalt 57 | } 58 | 59 | if len(disk.ReplicaZones) != 0 { 60 | region, _ := common.GetRegionFromZone(config.Zone) 61 | // Generate the source URI for attachment later 62 | s.DiskConfiguration[i].SourceVolume = fmt.Sprintf("projects/%s/regions/%s/disks/%s", 63 | config.ProjectId, 64 | region, 65 | disk.DiskName) 66 | } else { 67 | // Generate the source URI for attachment later 68 | s.DiskConfiguration[i].SourceVolume = fmt.Sprintf("projects/%s/zones/%s/disks/%s", 69 | config.ProjectId, 70 | config.Zone, 71 | disk.DiskName) 72 | } 
73 | } 74 | 75 | return multistep.ActionContinue 76 | } 77 | 78 | func (s *StepCreateDisks) needToCreateDisks() bool { 79 | for _, cfg := range s.DiskConfiguration { 80 | if cfg.VolumeType == common.LocalScratch { 81 | continue 82 | } 83 | 84 | if cfg.SourceVolume != "" { 85 | continue 86 | } 87 | 88 | return true 89 | } 90 | 91 | return false 92 | } 93 | 94 | func (s *StepCreateDisks) Cleanup(state multistep.StateBag) { 95 | ui := state.Get("ui").(packersdk.Ui) 96 | config := state.Get("config").(*Config) 97 | driver := state.Get("driver").(common.Driver) 98 | 99 | for _, gceDisk := range s.DiskConfiguration { 100 | if gceDisk.KeepDevice { 101 | ui.Say(fmt.Sprintf("Keeping disk %q", gceDisk.DiskName)) 102 | continue 103 | } 104 | 105 | // Scratch volumes are not to be deleted since they are 106 | // linked to the instance and are always automatically deleted. 107 | if gceDisk.VolumeType == common.LocalScratch { 108 | continue 109 | } 110 | 111 | zone := config.Zone 112 | if len(gceDisk.ReplicaZones) != 0 { 113 | zone, _ = common.GetRegionFromZone(zone) 114 | } 115 | 116 | _, err := driver.GetDisk(zone, gceDisk.DiskName) 117 | if err != nil { 118 | // If the disk isn't found, it's likely because it was auto-deleted 119 | // when the instance was torn-down. 120 | // 121 | // In this case, we don't say anything to the user since the disk is already 122 | // gone, and there's nothing they have to do in order to clean it up. 
123 | if strings.Contains(err.Error(), "googleapi: Error 404") { 124 | continue 125 | } 126 | 127 | ui.Say(fmt.Sprintf("Failed to get disk: %s, will attempt deletion regardless, may fail", err)) 128 | } 129 | 130 | ui.Say(fmt.Sprintf("Deleting persistent disk %q", gceDisk.DiskName)) 131 | 132 | errCh := driver.DeleteDisk(zone, gceDisk.DiskName) 133 | select { 134 | case err = <-errCh: 135 | case <-time.After(config.StateTimeout): 136 | err = errors.New("time out while waiting for disk to delete") 137 | } 138 | 139 | if err != nil { 140 | ui.Error(fmt.Sprintf( 141 | "Error deleting disk. Please delete it manually.\n\n"+ 142 | "Name: %s\n"+ 143 | "Error: %s", gceDisk.DiskName, err)) 144 | } else { 145 | ui.Say(fmt.Sprintf("Persistent disk %q successfully deleted", gceDisk.DiskName)) 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /builder/googlecompute/step_create_image_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "testing" 10 | 11 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 12 | "github.com/hashicorp/packer-plugin-sdk/multistep" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestStepCreateImage_impl(t *testing.T) { 17 | var _ multistep.Step = new(StepCreateImage) 18 | } 19 | 20 | func TestStepCreateImage(t *testing.T) { 21 | state := testState(t) 22 | step := new(StepCreateImage) 23 | defer step.Cleanup(state) 24 | 25 | c := state.Get("config").(*Config) 26 | d := state.Get("driver").(*common.DriverMock) 27 | 28 | d.CreateImageReturnSelfLink = "https://selflink/compute/v1/test" 29 | d.CreateImageReturnDiskSize = 420 30 | 31 | // run the step 32 | action := step.Run(context.Background(), state) 33 | assert.Equal(t, action, multistep.ActionContinue, "Step did not pass.") 34 | 35 | uncastImage, ok := state.GetOk("image") 36 | assert.True(t, ok, "State does not have resulting image.") 37 | image, ok := uncastImage.(*common.Image) 38 | assert.True(t, ok, "Image in state is not an Image.") 39 | 40 | // Verify created Image results. 41 | assert.Equal(t, c.ImageName, image.Name, "Created image does not match config name.") 42 | assert.Equal(t, len(c.ImageGuestOsFeatures), len(image.GuestOsFeatures), "Created image features does not match config.") 43 | assert.Equal(t, c.ImageLabels, image.Labels, "Created image labels does not match config.") 44 | assert.Equal(t, c.ImageLicenses, image.Licenses, "Created image licenses does not match config.") 45 | assert.Equal(t, c.ProjectId, image.ProjectId, "Created image project ID does not match config.") 46 | assert.Equal(t, d.CreateImageReturnSelfLink, image.SelfLink, "Created image selflink does not match config") 47 | assert.Equal(t, d.CreateImageReturnDiskSize, image.SizeGb, "Created image disk size does not match config") 48 | 49 | // Verify proper args passed to driver.CreateImage. 
50 | assert.Equal(t, c.ProjectId, d.CreateImageProjectId, "Incorrect project ID passed to driver.") 51 | } 52 | 53 | func TestStepCreateImage_errorOnChannel(t *testing.T) { 54 | state := testState(t) 55 | step := new(StepCreateImage) 56 | defer step.Cleanup(state) 57 | 58 | errCh := make(chan error, 1) 59 | errCh <- errors.New("error") 60 | 61 | driver := state.Get("driver").(*common.DriverMock) 62 | driver.CreateImageErrCh = errCh 63 | 64 | // run the step 65 | action := step.Run(context.Background(), state) 66 | assert.Equal(t, action, multistep.ActionHalt, "Step should not have passed.") 67 | _, ok := state.GetOk("error") 68 | assert.True(t, ok, "State should have an error.") 69 | _, ok = state.GetOk("image_name") 70 | assert.False(t, ok, "State should not have a resulting image.") 71 | } 72 | 73 | func TestStepCreateImage_setsDeprecationFields(t *testing.T) { 74 | state := testState(t) 75 | step := new(StepCreateImage) 76 | defer step.Cleanup(state) 77 | 78 | c := state.Get("config").(*Config) 79 | d := state.Get("driver").(*common.DriverMock) 80 | 81 | // Set deprecation timestamps in config 82 | c.DeprecateAt = "2125-06-01T00:00:00Z" 83 | c.ObsoleteAt = "2125-07-01T00:00:00Z" 84 | c.DeleteAt = "2125-08-01T00:00:00Z" 85 | 86 | // Run step 87 | action := step.Run(context.Background(), state) 88 | assert.Equal(t, multistep.ActionContinue, action, "Step did not pass.") 89 | 90 | assert.Equal(t, c.DeprecateAt, d.DeprecatedImageStatus.Deprecated, "DeprecateAt mismatch") 91 | assert.Equal(t, c.ObsoleteAt, d.DeprecatedImageStatus.Obsolete, "ObsoleteAt mismatch") 92 | assert.Equal(t, c.DeleteAt, d.DeprecatedImageStatus.Deleted, "DeleteAt mismatch") 93 | assert.Equal(t, "DEPRECATED", d.DeprecatedImageStatus.State, "State should be DEPRECATED") 94 | } 95 | -------------------------------------------------------------------------------- /builder/googlecompute/step_create_windows_password.go: -------------------------------------------------------------------------------- 
1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "crypto/rand" 9 | "crypto/rsa" 10 | "crypto/x509" 11 | "encoding/base64" 12 | "encoding/binary" 13 | "encoding/pem" 14 | "errors" 15 | "fmt" 16 | "os" 17 | "time" 18 | 19 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 20 | "github.com/hashicorp/packer-plugin-sdk/multistep" 21 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 22 | ) 23 | 24 | // StepCreateWindowsPassword represents a Packer build step that sets the windows password on a Windows GCE instance. 25 | type StepCreateWindowsPassword struct { 26 | Debug bool 27 | DebugKeyPath string 28 | } 29 | 30 | // Run executes the Packer build step that sets the windows password on a Windows GCE instance. 31 | func (s *StepCreateWindowsPassword) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 32 | ui := state.Get("ui").(packersdk.Ui) 33 | d := state.Get("driver").(common.Driver) 34 | c := state.Get("config").(*Config) 35 | name := state.Get("instance_name").(string) 36 | 37 | if c.Comm.WinRMPassword != "" { 38 | state.Put("winrm_password", c.Comm.WinRMPassword) 39 | packersdk.LogSecretFilter.Set(c.Comm.WinRMPassword) 40 | return multistep.ActionContinue 41 | } 42 | 43 | create, ok := state.GetOk("create_windows_password") 44 | 45 | if !ok || !create.(bool) { 46 | return multistep.ActionContinue 47 | 48 | } 49 | ui.Say("Creating windows user for instance...") 50 | priv, err := rsa.GenerateKey(rand.Reader, 2048) 51 | if err != nil { 52 | err := fmt.Errorf("Error creating temporary key: %s", err) 53 | state.Put("error", err) 54 | ui.Error(err.Error()) 55 | return multistep.ActionHalt 56 | } 57 | 58 | buf := make([]byte, 4) 59 | binary.BigEndian.PutUint32(buf, uint32(priv.E)) 60 | 61 | data := common.WindowsPasswordConfig{ 62 | Key: priv, 63 | UserName: c.Comm.WinRMUser, 64 | Modulus: 
base64.StdEncoding.EncodeToString(priv.N.Bytes()), 65 | Exponent: base64.StdEncoding.EncodeToString(buf[1:]), 66 | ExpireOn: time.Now().Add(time.Minute * 5), 67 | WindowsPasswordTimeout: c.WindowsPasswordTimeout, 68 | } 69 | 70 | if s.Debug { 71 | 72 | priv_blk := pem.Block{ 73 | Type: "RSA PRIVATE KEY", 74 | Headers: nil, 75 | Bytes: x509.MarshalPKCS1PrivateKey(priv), 76 | } 77 | 78 | ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath)) 79 | f, err := os.Create(s.DebugKeyPath) 80 | if err != nil { 81 | state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) 82 | return multistep.ActionHalt 83 | } 84 | 85 | // Write out the key 86 | err = pem.Encode(f, &priv_blk) 87 | f.Close() 88 | if err != nil { 89 | state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) 90 | return multistep.ActionHalt 91 | } 92 | } 93 | 94 | errCh, err := d.CreateOrResetWindowsPassword(name, c.Zone, &data) 95 | 96 | if err == nil { 97 | ui.Message("Waiting for windows password to complete...") 98 | select { 99 | case err = <-errCh: 100 | case <-time.After(c.WindowsPasswordTimeout): 101 | err = errors.New("time out while waiting for the password to be created") 102 | } 103 | } 104 | 105 | if err != nil { 106 | err := fmt.Errorf("Error creating windows password: %s", err) 107 | state.Put("error", err) 108 | ui.Error(err.Error()) 109 | return multistep.ActionHalt 110 | } 111 | 112 | ui.Message("Created password.") 113 | 114 | if s.Debug { 115 | ui.Message(fmt.Sprintf( 116 | "Password (since debug is enabled): %s", data.Password)) 117 | } 118 | 119 | state.Put("winrm_password", data.Password) 120 | packersdk.LogSecretFilter.Set(data.Password) 121 | 122 | return multistep.ActionContinue 123 | } 124 | 125 | // Nothing to clean up. The windows password is only created on the single instance. 
126 | func (s *StepCreateWindowsPassword) Cleanup(state multistep.StateBag) {} 127 | -------------------------------------------------------------------------------- /builder/googlecompute/step_instance_info.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "fmt" 10 | "time" 11 | 12 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 13 | "github.com/hashicorp/packer-plugin-sdk/multistep" 14 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 15 | ) 16 | 17 | // stepInstanceInfo represents a Packer build step that gathers GCE instance info. 18 | type StepInstanceInfo struct { 19 | Debug bool 20 | } 21 | 22 | // Run executes the Packer build step that gathers GCE instance info. 23 | // This adds "instance_ip" to the multistep state. 24 | func (s *StepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 25 | config := state.Get("config").(*Config) 26 | driver := state.Get("driver").(common.Driver) 27 | ui := state.Get("ui").(packersdk.Ui) 28 | 29 | instanceName := state.Get("instance_name").(string) 30 | 31 | ui.Say("Waiting for the instance to become running...") 32 | errCh := driver.WaitForInstance("RUNNING", config.Zone, instanceName) 33 | var err error 34 | select { 35 | case err = <-errCh: 36 | case <-time.After(config.StateTimeout): 37 | err = errors.New("time out while waiting for instance to become running") 38 | } 39 | 40 | if err != nil { 41 | err := fmt.Errorf("Error waiting for instance: %s", err) 42 | state.Put("error", err) 43 | ui.Error(err.Error()) 44 | return multistep.ActionHalt 45 | } 46 | 47 | if config.UseInternalIP { 48 | ip, err := driver.GetInternalIP(config.Zone, instanceName) 49 | if err != nil { 50 | err := fmt.Errorf("Error retrieving instance internal ip address: %s", err) 51 | state.Put("error", 
err) 52 | ui.Error(err.Error()) 53 | return multistep.ActionHalt 54 | } 55 | 56 | if s.Debug { 57 | if ip != "" { 58 | ui.Message(fmt.Sprintf("Internal IP: %s", ip)) 59 | } 60 | } 61 | ui.Message(fmt.Sprintf("IP: %s", ip)) 62 | state.Put("instance_ip", ip) 63 | return multistep.ActionContinue 64 | } else { 65 | ip, err := driver.GetNatIP(config.Zone, instanceName) 66 | if err != nil { 67 | err := fmt.Errorf("Error retrieving instance nat ip address: %s", err) 68 | state.Put("error", err) 69 | ui.Error(err.Error()) 70 | return multistep.ActionHalt 71 | } 72 | 73 | if s.Debug { 74 | if ip != "" { 75 | ui.Message(fmt.Sprintf("Public IP: %s", ip)) 76 | } 77 | } 78 | ui.Message(fmt.Sprintf("IP: %s", ip)) 79 | state.Put("instance_ip", ip) 80 | return multistep.ActionContinue 81 | } 82 | } 83 | 84 | // Cleanup. 85 | func (s *StepInstanceInfo) Cleanup(state multistep.StateBag) {} 86 | -------------------------------------------------------------------------------- /builder/googlecompute/step_instance_info_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "testing" 10 | "time" 11 | 12 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 13 | "github.com/hashicorp/packer-plugin-sdk/multistep" 14 | ) 15 | 16 | func TestStepInstanceInfo_impl(t *testing.T) { 17 | var _ multistep.Step = new(StepInstanceInfo) 18 | } 19 | 20 | func TestStepInstanceInfo(t *testing.T) { 21 | state := testState(t) 22 | step := new(StepInstanceInfo) 23 | defer step.Cleanup(state) 24 | 25 | state.Put("instance_name", "foo") 26 | 27 | config := state.Get("config").(*Config) 28 | driver := state.Get("driver").(*common.DriverMock) 29 | driver.GetNatIPResult = "1.2.3.4" 30 | 31 | // run the step 32 | if action := step.Run(context.Background(), state); action != multistep.ActionContinue { 33 | t.Fatalf("bad action: %#v", action) 34 | } 35 | 36 | // Verify state 37 | if driver.WaitForInstanceState != "RUNNING" { 38 | t.Fatalf("bad: %#v", driver.WaitForInstanceState) 39 | } 40 | if driver.WaitForInstanceZone != config.Zone { 41 | t.Fatalf("bad: %#v", driver.WaitForInstanceZone) 42 | } 43 | if driver.WaitForInstanceName != "foo" { 44 | t.Fatalf("bad: %#v", driver.WaitForInstanceName) 45 | } 46 | 47 | ipRaw, ok := state.GetOk("instance_ip") 48 | if !ok { 49 | t.Fatal("should have ip") 50 | } 51 | if ip, ok := ipRaw.(string); !ok { 52 | t.Fatal("ip is not a string") 53 | } else if ip != "1.2.3.4" { 54 | t.Fatalf("bad ip: %s", ip) 55 | } 56 | } 57 | 58 | func TestStepInstanceInfo_InternalIP(t *testing.T) { 59 | state := testState(t) 60 | step := new(StepInstanceInfo) 61 | defer step.Cleanup(state) 62 | 63 | state.Put("instance_name", "foo") 64 | 65 | config := state.Get("config").(*Config) 66 | config.UseInternalIP = true 67 | driver := state.Get("driver").(*common.DriverMock) 68 | driver.GetNatIPResult = "1.2.3.4" 69 | driver.GetInternalIPResult = "5.6.7.8" 70 | 71 | // run the step 72 | if action := 
step.Run(context.Background(), state); action != multistep.ActionContinue { 73 | t.Fatalf("bad action: %#v", action) 74 | } 75 | 76 | // Verify state 77 | if driver.WaitForInstanceState != "RUNNING" { 78 | t.Fatalf("bad: %#v", driver.WaitForInstanceState) 79 | } 80 | if driver.WaitForInstanceZone != config.Zone { 81 | t.Fatalf("bad: %#v", driver.WaitForInstanceZone) 82 | } 83 | if driver.WaitForInstanceName != "foo" { 84 | t.Fatalf("bad: %#v", driver.WaitForInstanceName) 85 | } 86 | 87 | ipRaw, ok := state.GetOk("instance_ip") 88 | if !ok { 89 | t.Fatal("should have ip") 90 | } 91 | if ip, ok := ipRaw.(string); !ok { 92 | t.Fatal("ip is not a string") 93 | } else if ip != "5.6.7.8" { 94 | t.Fatalf("bad ip: %s", ip) 95 | } 96 | } 97 | 98 | func TestStepInstanceInfo_getNatIPError(t *testing.T) { 99 | state := testState(t) 100 | step := new(StepInstanceInfo) 101 | defer step.Cleanup(state) 102 | 103 | state.Put("instance_name", "foo") 104 | 105 | driver := state.Get("driver").(*common.DriverMock) 106 | driver.GetNatIPErr = errors.New("error") 107 | 108 | // run the step 109 | if action := step.Run(context.Background(), state); action != multistep.ActionHalt { 110 | t.Fatalf("bad action: %#v", action) 111 | } 112 | 113 | // Verify state 114 | if _, ok := state.GetOk("error"); !ok { 115 | t.Fatal("should have error") 116 | } 117 | if _, ok := state.GetOk("instance_ip"); ok { 118 | t.Fatal("should NOT have instance IP") 119 | } 120 | } 121 | 122 | func TestStepInstanceInfo_waitError(t *testing.T) { 123 | state := testState(t) 124 | step := new(StepInstanceInfo) 125 | defer step.Cleanup(state) 126 | 127 | state.Put("instance_name", "foo") 128 | 129 | errCh := make(chan error, 1) 130 | errCh <- errors.New("error") 131 | 132 | driver := state.Get("driver").(*common.DriverMock) 133 | driver.WaitForInstanceErrCh = errCh 134 | 135 | // run the step 136 | if action := step.Run(context.Background(), state); action != multistep.ActionHalt { 137 | t.Fatalf("bad action: %#v", 
action) 138 | } 139 | 140 | // Verify state 141 | if _, ok := state.GetOk("error"); !ok { 142 | t.Fatal("should have error") 143 | } 144 | if _, ok := state.GetOk("instance_ip"); ok { 145 | t.Fatal("should NOT have instance IP") 146 | } 147 | } 148 | 149 | func TestStepInstanceInfo_errorTimeout(t *testing.T) { 150 | state := testState(t) 151 | step := new(StepInstanceInfo) 152 | defer step.Cleanup(state) 153 | 154 | errCh := make(chan error, 1) 155 | go func() { 156 | <-time.After(50 * time.Millisecond) 157 | errCh <- nil 158 | }() 159 | 160 | state.Put("instance_name", "foo") 161 | 162 | config := state.Get("config").(*Config) 163 | config.StateTimeout = 1 * time.Millisecond 164 | 165 | driver := state.Get("driver").(*common.DriverMock) 166 | driver.WaitForInstanceErrCh = errCh 167 | 168 | // run the step 169 | if action := step.Run(context.Background(), state); action != multistep.ActionHalt { 170 | t.Fatalf("bad action: %#v", action) 171 | } 172 | 173 | // Verify state 174 | if _, ok := state.GetOk("error"); !ok { 175 | t.Fatal("should have error") 176 | } 177 | if _, ok := state.GetOk("instance_ip"); ok { 178 | t.Fatal("should NOT have instance IP") 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /builder/googlecompute/step_start_tunnel.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package googlecompute 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatIAPConfig is an auto-generated flat version of IAPConfig. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
12 | type FlatIAPConfig struct { 13 | IAP *bool `mapstructure:"use_iap" required:"false" cty:"use_iap" hcl:"use_iap"` 14 | IAPLocalhostPort *int `mapstructure:"iap_localhost_port" cty:"iap_localhost_port" hcl:"iap_localhost_port"` 15 | IAPHashBang *string `mapstructure:"iap_hashbang" required:"false" cty:"iap_hashbang" hcl:"iap_hashbang"` 16 | IAPExt *string `mapstructure:"iap_ext" required:"false" cty:"iap_ext" hcl:"iap_ext"` 17 | IAPTunnelLaunchWait *int `mapstructure:"iap_tunnel_launch_wait" required:"false" cty:"iap_tunnel_launch_wait" hcl:"iap_tunnel_launch_wait"` 18 | } 19 | 20 | // FlatMapstructure returns a new FlatIAPConfig. 21 | // FlatIAPConfig is an auto-generated flat version of IAPConfig. 22 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 23 | func (*IAPConfig) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 24 | return new(FlatIAPConfig) 25 | } 26 | 27 | // HCL2Spec returns the hcl spec of a IAPConfig. 28 | // This spec is used by HCL to read the fields of IAPConfig. 29 | // The decoded values from this spec will then be applied to a FlatIAPConfig. 
30 | func (*FlatIAPConfig) HCL2Spec() map[string]hcldec.Spec { 31 | s := map[string]hcldec.Spec{ 32 | "use_iap": &hcldec.AttrSpec{Name: "use_iap", Type: cty.Bool, Required: false}, 33 | "iap_localhost_port": &hcldec.AttrSpec{Name: "iap_localhost_port", Type: cty.Number, Required: false}, 34 | "iap_hashbang": &hcldec.AttrSpec{Name: "iap_hashbang", Type: cty.String, Required: false}, 35 | "iap_ext": &hcldec.AttrSpec{Name: "iap_ext", Type: cty.String, Required: false}, 36 | "iap_tunnel_launch_wait": &hcldec.AttrSpec{Name: "iap_tunnel_launch_wait", Type: cty.Number, Required: false}, 37 | } 38 | return s 39 | } 40 | -------------------------------------------------------------------------------- /builder/googlecompute/step_start_tunnel_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "os" 9 | "runtime" 10 | "testing" 11 | 12 | "github.com/hashicorp/packer-plugin-sdk/communicator" 13 | ) 14 | 15 | type MockTunnelDriver struct { 16 | StopTunnelCalled bool 17 | StartTunnelCalled bool 18 | StartTunnelTimeout int 19 | } 20 | 21 | func (m *MockTunnelDriver) StopTunnel() { 22 | m.StopTunnelCalled = true 23 | } 24 | 25 | func (m *MockTunnelDriver) StartTunnel(_ context.Context, _ string, timeout int) error { 26 | m.StartTunnelCalled = true 27 | m.StartTunnelTimeout = timeout 28 | return nil 29 | } 30 | 31 | func getTestStepStartTunnel() *StepStartTunnel { 32 | return &StepStartTunnel{ 33 | IAPConf: &IAPConfig{ 34 | IAP: true, 35 | IAPLocalhostPort: 0, 36 | IAPHashBang: "/bin/bash", 37 | IAPExt: "", 38 | }, 39 | CommConf: &communicator.Config{ 40 | SSH: communicator.SSH{ 41 | SSHPort: 1234, 42 | }, 43 | }, 44 | AccountFile: "/path/to/account_file.json", 45 | ProjectId: "fake-project-123", 46 | } 47 | } 48 | 49 | func TestStepStartTunnel_CreateTempScript(t *testing.T) { 50 | s := 
getTestStepStartTunnel() 51 | 52 | args := []string{"compute", "start-iap-tunnel", "fakeinstance-12345", 53 | "1234", "--local-host-port=localhost:8774", "--zone", "us-central-b", 54 | "--project", "fake-project-123"} 55 | 56 | scriptPath, err := s.createTempGcloudScript(args) 57 | if err != nil { 58 | t.Fatalf("Shouldn't have error building script file.") 59 | } 60 | defer os.Remove(scriptPath) 61 | 62 | f, err := os.ReadFile(scriptPath) 63 | if err != nil { 64 | t.Fatalf("couldn't read created inventoryfile: %s", err) 65 | } 66 | 67 | expected := `#!/bin/bash 68 | CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE='/path/to/account_file.json' 69 | export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE 70 | gcloud compute start-iap-tunnel fakeinstance-12345 1234 --local-host-port=localhost:8774 --zone us-central-b --project fake-project-123 71 | ` 72 | if runtime.GOOS == "windows" { 73 | // in real life you'd not be passing a HashBang here, but GIGO. 74 | expected = `#!/bin/bash 75 | set "CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=/path/to/account_file.json" 76 | call gcloud compute start-iap-tunnel fakeinstance-12345 1234 --local-host-port=localhost:8774 --zone us-central-b --project fake-project-123 77 | ` 78 | } 79 | if string(f) != expected { 80 | t.Fatalf("script didn't match expected:\n\n expected: \n%s\n; recieved: \n%s\n", expected, f) 81 | } 82 | } 83 | 84 | func TestStepStartTunnel_Cleanup(t *testing.T) { 85 | // Check IAP true 86 | s := getTestStepStartTunnel() 87 | td := &MockTunnelDriver{} 88 | s.tunnelDriver = td 89 | 90 | state := testState(t) 91 | s.Cleanup(state) 92 | 93 | if !td.StopTunnelCalled { 94 | t.Fatalf("Should have called StopTunnel, since IAP is true") 95 | } 96 | 97 | // Check IAP false 98 | s = getTestStepStartTunnel() 99 | td = &MockTunnelDriver{} 100 | s.tunnelDriver = td 101 | 102 | s.IAPConf.IAP = false 103 | 104 | s.Cleanup(state) 105 | 106 | if td.StopTunnelCalled { 107 | t.Fatalf("Should not have called StopTunnel, since IAP is false") 108 | } 109 | } 
110 | 111 | func TestStepStartTunnel_ConfigurePort_port_set_by_user(t *testing.T) { 112 | s := getTestStepStartTunnel() 113 | s.IAPConf.IAPLocalhostPort = 8447 114 | 115 | ctx := context.TODO() 116 | err := s.ConfigureLocalHostPort(ctx) 117 | if err != nil { 118 | t.Fatalf("Shouldn't have error detecting port") 119 | } 120 | if s.IAPConf.IAPLocalhostPort != 8447 { 121 | t.Fatalf("Shouldn't have found new port; one was configured.") 122 | } 123 | } 124 | 125 | func TestStepStartTunnel_ConfigurePort_port_not_set_by_user(t *testing.T) { 126 | s := getTestStepStartTunnel() 127 | s.IAPConf.IAPLocalhostPort = 0 128 | 129 | ctx := context.TODO() 130 | err := s.ConfigureLocalHostPort(ctx) 131 | if err != nil { 132 | t.Fatalf("Shouldn't have error detecting port") 133 | } 134 | if s.IAPConf.IAPLocalhostPort == 0 { 135 | t.Fatalf("Should have found new port; none was configured.") 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /builder/googlecompute/step_teardown_instance.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "fmt" 10 | "time" 11 | 12 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 13 | "github.com/hashicorp/packer-plugin-sdk/multistep" 14 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 15 | ) 16 | 17 | // StepTeardownInstance represents a Packer build step that tears down GCE 18 | // instances. 19 | type StepTeardownInstance struct { 20 | Debug bool 21 | } 22 | 23 | // Run executes the Packer build step that tears down a GCE instance. 
24 | func (s *StepTeardownInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 25 | config := state.Get("config").(*Config) 26 | driver := state.Get("driver").(common.Driver) 27 | ui := state.Get("ui").(packersdk.Ui) 28 | 29 | name := config.InstanceName 30 | if name == "" { 31 | return multistep.ActionHalt 32 | } 33 | 34 | ui.Say("Deleting instance...") 35 | instanceLog, _ := driver.GetSerialPortOutput(config.Zone, name) 36 | state.Put("instance_log", instanceLog) 37 | errCh, err := driver.DeleteInstance(config.Zone, name) 38 | if err == nil { 39 | select { 40 | case err = <-errCh: 41 | case <-time.After(config.StateTimeout): 42 | err = errors.New("time out while waiting for instance to delete") 43 | } 44 | } 45 | 46 | if err != nil { 47 | ui.Error(fmt.Sprintf( 48 | "Error deleting instance. Please delete it manually.\n\n"+ 49 | "Name: %s\n"+ 50 | "Error: %s", name, err)) 51 | return multistep.ActionHalt 52 | } 53 | ui.Message("Instance has been deleted!") 54 | state.Put("instance_name", "") 55 | 56 | return multistep.ActionContinue 57 | } 58 | 59 | // Deleting the instance does not remove the boot disk. This cleanup removes 60 | // the disk. 61 | func (s *StepTeardownInstance) Cleanup(state multistep.StateBag) { 62 | config := state.Get("config").(*Config) 63 | driver := state.Get("driver").(common.Driver) 64 | ui := state.Get("ui").(packersdk.Ui) 65 | 66 | var err error 67 | 68 | ui.Say("Deleting disk...") 69 | errCh := driver.DeleteDisk(config.Zone, config.DiskName) 70 | select { 71 | case err = <-errCh: 72 | case <-time.After(config.StateTimeout): 73 | err = errors.New("time out while waiting for disk to delete") 74 | } 75 | 76 | if err != nil { 77 | ui.Error(fmt.Sprintf( 78 | "Error deleting disk. 
Please delete it manually.\n\n"+ 79 | "DiskName: %s\n"+ 80 | "Zone: %s\n"+ 81 | "Error: %s", config.DiskName, config.Zone, err)) 82 | } 83 | 84 | ui.Message("Disk has been deleted!") 85 | } 86 | -------------------------------------------------------------------------------- /builder/googlecompute/step_teardown_instance_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "testing" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 11 | "github.com/hashicorp/packer-plugin-sdk/multistep" 12 | ) 13 | 14 | func TestStepTeardownInstance_impl(t *testing.T) { 15 | var _ multistep.Step = new(StepTeardownInstance) 16 | } 17 | 18 | func TestStepTeardownInstance(t *testing.T) { 19 | state := testState(t) 20 | step := new(StepTeardownInstance) 21 | defer step.Cleanup(state) 22 | 23 | config := state.Get("config").(*Config) 24 | driver := state.Get("driver").(*common.DriverMock) 25 | 26 | // run the step 27 | if action := step.Run(context.Background(), state); action != multistep.ActionContinue { 28 | t.Fatalf("bad action: %#v", action) 29 | } 30 | 31 | if driver.DeleteInstanceName != config.InstanceName { 32 | t.Fatal("should've deleted instance") 33 | } 34 | if driver.DeleteInstanceZone != config.Zone { 35 | t.Fatalf("bad zone: %#v", driver.DeleteInstanceZone) 36 | } 37 | 38 | // cleanup 39 | step.Cleanup(state) 40 | 41 | if driver.DeleteDiskName != config.InstanceName { 42 | t.Fatal("should've deleted disk") 43 | } 44 | if driver.DeleteDiskZone != config.Zone { 45 | t.Fatalf("bad zone: %#v", driver.DeleteDiskZone) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /builder/googlecompute/step_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "bytes" 8 | "testing" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 11 | "github.com/hashicorp/packer-plugin-sdk/multistep" 12 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 13 | ) 14 | 15 | func testState(t *testing.T) multistep.StateBag { 16 | state := new(multistep.BasicStateBag) 17 | state.Put("config", testConfigStruct(t)) 18 | state.Put("driver", &common.DriverMock{}) 19 | state.Put("hook", &packersdk.MockHook{}) 20 | state.Put("ui", &packersdk.BasicUi{ 21 | Reader: new(bytes.Buffer), 22 | Writer: new(bytes.Buffer), 23 | }) 24 | return state 25 | } 26 | -------------------------------------------------------------------------------- /builder/googlecompute/step_wait_startup_script.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | "fmt" 10 | "time" 11 | 12 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 13 | "github.com/hashicorp/packer-plugin-sdk/multistep" 14 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 15 | "github.com/hashicorp/packer-plugin-sdk/retry" 16 | ) 17 | 18 | // ErrStartupScriptMetadata means that the user provided startup script resulted in 19 | // setting the set-startup-script metadata status to error. 20 | var ErrStartupScriptMetadata = errors.New("Startup script exited with error.") 21 | 22 | // StepWaitStartupScript is a trivial implementation of a Packer multistep 23 | // It can be used for tracking the set-startup-script metadata status. 24 | type StepWaitStartupScript int 25 | 26 | // Run reads the instance metadata and looks for the log entry 27 | // indicating the startup script finished. 
28 | func (s *StepWaitStartupScript) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { 29 | config := state.Get("config").(*Config) 30 | driver := state.Get("driver").(common.Driver) 31 | ui := state.Get("ui").(packersdk.Ui) 32 | instanceName := state.Get("instance_name").(string) 33 | 34 | if config.WrapStartupScriptFile.False() { 35 | return multistep.ActionContinue 36 | } 37 | 38 | ui.Say("Waiting for any running startup script to finish...") 39 | // Keep checking the serial port output to see if the startup script is done. 40 | err := retry.Config{ 41 | ShouldRetry: func(err error) bool { 42 | if errors.Is(err, ErrStartupScriptMetadata) { 43 | return false 44 | } 45 | return true 46 | }, 47 | RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear, 48 | }.Run(ctx, func(ctx context.Context) error { 49 | status, err := driver.GetInstanceMetadata(config.Zone, 50 | instanceName, StartupScriptStatusKey) 51 | 52 | if err != nil { 53 | ui.Message(fmt.Sprintf("Metadata %s on instance %s not available. Waiting...", StartupScriptStatusKey, instanceName)) 54 | err := fmt.Errorf("Error getting startup script status: %s", err) 55 | return err 56 | } 57 | 58 | switch status { 59 | case StartupScriptStatusError: 60 | ui.Message("Startup script in error. Exiting...") 61 | return ErrStartupScriptMetadata 62 | 63 | case StartupScriptStatusDone: 64 | ui.Message("Startup script successfully finished.") 65 | return nil 66 | 67 | default: 68 | ui.Message("Startup script not finished yet. Waiting...") 69 | return errors.New("Startup script not done.") 70 | } 71 | }) 72 | 73 | if err != nil { 74 | err := fmt.Errorf("Error waiting for startup script to finish: %s", err) 75 | state.Put("error", err) 76 | ui.Error(err.Error()) 77 | return multistep.ActionHalt 78 | } 79 | ui.Say("Startup script, if any, has finished running.") 80 | return multistep.ActionContinue 81 | } 82 | 83 | // Cleanup. 
84 | func (s *StepWaitStartupScript) Cleanup(state multistep.StateBag) {} 85 | -------------------------------------------------------------------------------- /builder/googlecompute/step_wait_startup_script_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "context" 8 | "testing" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/lib/common" 11 | "github.com/hashicorp/packer-plugin-sdk/multistep" 12 | "github.com/hashicorp/packer-plugin-sdk/template/config" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestStepWaitStartupScript(t *testing.T) { 17 | state := testState(t) 18 | step := new(StepWaitStartupScript) 19 | c := state.Get("config").(*Config) 20 | d := state.Get("driver").(*common.DriverMock) 21 | 22 | testZone := "test-zone" 23 | testInstanceName := "test-instance-name" 24 | 25 | c.Zone = testZone 26 | state.Put("instance_name", testInstanceName) 27 | 28 | // This step stops when it gets Done back from the metadata. 29 | d.GetInstanceMetadataResult = StartupScriptStatusDone 30 | 31 | // Run the step. 32 | assert.Equal(t, step.Run(context.Background(), state), multistep.ActionContinue, "Step should have passed and continued.") 33 | 34 | // Check that GetInstanceMetadata was called properly. 35 | assert.Equal(t, d.GetInstanceMetadataZone, testZone, "Incorrect zone passed to GetInstanceMetadata.") 36 | assert.Equal(t, d.GetInstanceMetadataName, testInstanceName, "Incorrect instance name passed to GetInstanceMetadata.") 37 | } 38 | 39 | func TestStepWaitStartupScript_withWrapStartupScript(t *testing.T) { 40 | tt := []struct { 41 | Name string 42 | WrapStartup config.Trilean 43 | MetadataResult, Zone, MetadataName string 44 | StepResult multistep.StepAction //Zero value for StepAction is StepContinue; this is expected for all passing test cases. 
45 | }{ 46 | {Name: "no- wrapped startup script", WrapStartup: config.TriFalse}, 47 | {Name: "good - wrapped startup script", WrapStartup: config.TriTrue, MetadataResult: StartupScriptStatusDone, Zone: "test-zone", MetadataName: "test-instance-name"}, 48 | { 49 | Name: "failed - wrapped startup script", 50 | WrapStartup: config.TriTrue, 51 | MetadataResult: StartupScriptStatusError, 52 | Zone: "test-zone", 53 | MetadataName: "failed-instance-name", 54 | StepResult: multistep.ActionHalt, 55 | }, 56 | } 57 | 58 | for _, tc := range tt { 59 | tc := tc 60 | t.Run(tc.Name, func(t *testing.T) { 61 | state := testState(t) 62 | step := new(StepWaitStartupScript) 63 | c := state.Get("config").(*Config) 64 | d := state.Get("driver").(*common.DriverMock) 65 | 66 | c.StartupScriptFile = "startup.sh" 67 | c.WrapStartupScriptFile = tc.WrapStartup 68 | c.Zone = tc.Zone 69 | state.Put("instance_name", tc.MetadataName) 70 | 71 | // This step stops when it gets Done back from the metadata. 72 | d.GetInstanceMetadataResult = tc.MetadataResult 73 | 74 | // Run the step. 75 | assert.Equal(t, step.Run(context.Background(), state), tc.StepResult, "Step should have continued.") 76 | 77 | assert.Equal(t, d.GetInstanceMetadataResult, tc.MetadataResult, "MetadataResult was not the expected value.") 78 | assert.Equal(t, d.GetInstanceMetadataZone, tc.Zone, "Zone was not the expected value.") 79 | assert.Equal(t, d.GetInstanceMetadataName, tc.MetadataName, "Instance name was not the expected value.") 80 | }) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /builder/googlecompute/template_funcs.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "strings" 8 | "text/template" 9 | ) 10 | 11 | func isalphanumeric(b byte) bool { 12 | if '0' <= b && b <= '9' { 13 | return true 14 | } 15 | if 'a' <= b && b <= 'z' { 16 | return true 17 | } 18 | return false 19 | } 20 | 21 | // Clean up image name by replacing invalid characters with "-" 22 | // and converting upper cases to lower cases 23 | func templateCleanImageName(s string) string { 24 | if validImageName.MatchString(s) { 25 | return s 26 | } 27 | b := []byte(strings.ToLower(s)) 28 | newb := make([]byte, len(b)) 29 | for i := range newb { 30 | if isalphanumeric(b[i]) { 31 | newb[i] = b[i] 32 | } else { 33 | newb[i] = '-' 34 | } 35 | } 36 | return string(newb) 37 | } 38 | 39 | var TemplateFuncs = template.FuncMap{ 40 | "clean_resource_name": templateCleanImageName, 41 | } 42 | -------------------------------------------------------------------------------- /builder/googlecompute/template_funcs_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import "testing" 7 | 8 | func Test_templateCleanImageName(t *testing.T) { 9 | vals := []struct { 10 | origName string 11 | expected string 12 | }{ 13 | // test that valid name is unchanged 14 | { 15 | origName: "abcde-012345xyz", 16 | expected: "abcde-012345xyz", 17 | }, 18 | 19 | //test that capital letters are converted to lowercase 20 | { 21 | origName: "ABCDE-012345xyz", 22 | expected: "abcde-012345xyz", 23 | }, 24 | // test that periods and colons are converted to hyphens 25 | { 26 | origName: "abcde-012345v1.0:0", 27 | expected: "abcde-012345v1-0-0", 28 | }, 29 | // Name starting with number is not valid, but not in scope of this 30 | // function to correct 31 | { 32 | origName: "012345v1.0:0", 33 | expected: "012345v1-0-0", 34 | }, 35 | // Name over 64 chars is not valid, but not corrected by this function. 36 | { 37 | origName: "loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", 38 | expected: "loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", 39 | }, 40 | } 41 | 42 | for _, v := range vals { 43 | name := templateCleanImageName(v.origName) 44 | if name != v.expected { 45 | t.Fatalf("template names do not match: expected %s got %s\n", v.expected, name) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /builder/googlecompute/test-fixtures/fake-key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 3 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 4 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 5 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 6 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 7 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 8 | 
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 9 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 10 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 11 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 12 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 13 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 14 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 15 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 16 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 17 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 18 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 19 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 20 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 21 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 22 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 23 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 24 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 25 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 26 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/basic.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | 5 | variable "project" { 6 | type = string 7 | default = "${env("GOOGLE_PROJECT_ID")}" 8 | } 9 | 10 | variable "service_account_file" { 11 | type = string 12 | default = "${env("GOOGLE_APPLICATION_CREDENTIALS")}" 13 | } 14 | 15 | variable "ssh_private_key" { 16 | type = string 17 | default = "" 18 | } 19 | 20 | variable "ssh_username" { 21 | type = string 22 | default = "packer" 23 | } 24 | 25 | variable "zone" { 26 | type = string 27 | default = "us-central1-a" 28 | } 29 | 30 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 31 | 32 | source "googlecompute" "autogenerated_1" { 33 | account_file = var.service_account_file 34 | image_name = "packer-basic-tester-${local.timestamp}" 35 | project_id = "${var.project}" 36 | source_image_family = "centos-stream-9" 37 | ssh_username = "${var.ssh_username}" 38 | skip_create_image = true 39 | zone = "${var.zone}" 40 | } 41 | 42 | build { 43 | sources = ["source.googlecompute.autogenerated_1"] 44 | 45 | provisioner "shell" { 46 | execute_command = "sudo -E -S sh '{{ .Path }}'" 47 | inline = ["ls /var/log"] 48 | } 49 | 50 | provisioner "shell" { 51 | inline = ["echo hello from the other side"] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/extra_persistent_disk.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = env("GOOGLE_PROJECT_ID") 7 | } 8 | 9 | variable "service_account_file" { 10 | type = string 11 | default = env("GOOGLE_APPLICATION_CREDENTIALS") 12 | } 13 | 14 | variable "ssh_username" { 15 | type = string 16 | default = "packer" 17 | } 18 | 19 | variable "zone" { 20 | type = string 21 | default = "us-central1-a" 22 | } 23 | 24 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 25 | 26 | source "googlecompute" "test" { 27 | account_file = var.service_account_file 28 | image_name = "packer-persistent-disks-test-${local.timestamp}" 29 | project_id = var.project 30 | source_image_family = "centos-stream-9" 31 | ssh_username = var.ssh_username 32 | skip_create_image = true 33 | zone = var.zone 34 | disk_attachment { 35 | attachment_mode = "READ_WRITE" 36 | volume_type = "pd-standard" 37 | volume_size = 25 38 | interface_type = "SCSI" 39 | } 40 | } 41 | 42 | build { 43 | sources = ["source.googlecompute.test"] 44 | 45 | provisioner "shell" { 46 | # persistent-disk-0 is already reserved for the boot disk, the ones we add will start at 1 47 | inline = ["ls -la /dev/disk/by-id/google-persistent-disk-1"] 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/extra_persistent_disk_and_regions.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = env("GOOGLE_PROJECT_ID") 7 | } 8 | 9 | variable "service_account_file" { 10 | type = string 11 | default = env("GOOGLE_APPLICATION_CREDENTIALS") 12 | } 13 | 14 | variable "ssh_username" { 15 | type = string 16 | default = "packer" 17 | } 18 | 19 | variable "zone" { 20 | type = string 21 | default = "us-central1-a" 22 | } 23 | 24 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 25 | 26 | source "googlecompute" "test" { 27 | account_file = var.service_account_file 28 | image_name = "packer-persistent-disks-region-test-${local.timestamp}" 29 | project_id = var.project 30 | source_image_family = "centos-stream-9" 31 | ssh_username = var.ssh_username 32 | skip_create_image = true 33 | zone = var.zone 34 | disk_attachment { 35 | attachment_mode = "READ_WRITE" 36 | volume_type = "pd-standard" 37 | volume_size = 200 38 | interface_type = "SCSI" 39 | replica_zones = ["us-central1-b"] 40 | } 41 | } 42 | 43 | build { 44 | sources = ["source.googlecompute.test"] 45 | 46 | provisioner "shell" { 47 | # persistent-disk-0 is already reserved for the boot disk, the ones we add will start at 1 48 | inline = ["ls -la /dev/disk/by-id/google-persistent-disk-1"] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/extra_scratch_disk.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = env("GOOGLE_PROJECT_ID") 7 | } 8 | 9 | variable "service_account_file" { 10 | type = string 11 | default = env("GOOGLE_APPLICATION_CREDENTIALS") 12 | } 13 | 14 | variable "ssh_username" { 15 | type = string 16 | default = "packer" 17 | } 18 | 19 | variable "zone" { 20 | type = string 21 | default = "us-central1-a" 22 | } 23 | 24 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 25 | 26 | source "googlecompute" "test" { 27 | account_file = var.service_account_file 28 | image_name = "packer-scratch-disk-test-${local.timestamp}" 29 | project_id = var.project 30 | source_image_family = "centos-stream-9" 31 | ssh_username = var.ssh_username 32 | skip_create_image = true 33 | machine_type = "n2-standard-2" 34 | zone = var.zone 35 | disk_attachment { 36 | attachment_mode = "READ_WRITE" 37 | volume_type = "scratch" 38 | volume_size = 375 39 | interface_type = "NVME" 40 | } 41 | } 42 | 43 | build { 44 | sources = ["source.googlecompute.test"] 45 | 46 | provisioner "shell" { 47 | inline = ["test -b /dev/disk/by-id/google-local-nvme-ssd-0"] 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/image_arch_builds.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = "${env("GOOGLE_PROJECT_ID")}" 7 | } 8 | 9 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 10 | 11 | source "googlecompute" "autogenerated_1" { 12 | image_name = "%s" 13 | project_id = "${var.project}" 14 | source_image_family = "%s" 15 | ssh_username = "packer" 16 | zone = "us-central1-a" 17 | image_architecture = "%s" 18 | machine_type = "%s" 19 | } 20 | 21 | build { 22 | sources = ["source.googlecompute.autogenerated_1"] 23 | } 24 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/multiple_disks.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = env("GOOGLE_PROJECT_ID") 7 | } 8 | 9 | variable "service_account_file" { 10 | type = string 11 | default = env("GOOGLE_APPLICATION_CREDENTIALS") 12 | } 13 | 14 | variable "ssh_username" { 15 | type = string 16 | default = "packer" 17 | } 18 | 19 | variable "zone" { 20 | type = string 21 | default = "us-central1-a" 22 | } 23 | 24 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 25 | 26 | source "googlecompute" "test" { 27 | account_file = var.service_account_file 28 | image_name = "packer-multiple-disks-test-${local.timestamp}" 29 | project_id = var.project 30 | source_image_family = "centos-stream-9" 31 | ssh_username = var.ssh_username 32 | skip_create_image = true 33 | machine_type = "n2-standard-2" 34 | zone = var.zone 35 | 36 | disk_attachment { 37 | attachment_mode = "READ_WRITE" 38 | volume_type = "pd-standard" 39 | volume_size = 25 40 | interface_type = "SCSI" 41 | } 42 | 43 | disk_attachment { 44 | volume_type = "scratch" 45 | volume_size = 375 46 | } 47 | } 48 | 49 | build { 50 | sources = ["source.googlecompute.test"] 51 | 52 | provisioner 
"shell" { 53 | inline = [ 54 | "set -ex", 55 | "ls -la /dev/disk/by-id/google-local-ssd-0", 56 | "ls -la /dev/disk/by-id/google-persistent-disk-2" 57 | ] 58 | } 59 | } 60 | 61 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/network_ip.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | 5 | variable "project" { 6 | type = string 7 | default = "${env("GOOGLE_PROJECT_ID")}" 8 | } 9 | 10 | variable "service_account_file" { 11 | type = string 12 | default = "${env("GOOGLE_APPLICATION_CREDENTIALS")}" 13 | } 14 | 15 | variable "ssh_private_key" { 16 | type = string 17 | default = "" 18 | } 19 | 20 | variable "ssh_username" { 21 | type = string 22 | default = "packer" 23 | } 24 | 25 | variable "zone" { 26 | type = string 27 | default = "us-central1-a" 28 | } 29 | 30 | variable "network_ip" { 31 | type = string 32 | default = "" 33 | } 34 | 35 | variable "network" { 36 | type = string 37 | default = "" 38 | } 39 | 40 | variable "image_name" { 41 | type = string 42 | default = "packer-ip-tester" 43 | } 44 | 45 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 46 | 47 | source "googlecompute" "autogenerated_1" { 48 | image_name = "${var.image_name}" 49 | project_id = "${var.project}" 50 | source_image_family = "centos-stream-9" 51 | ssh_username = "${var.ssh_username}" 52 | zone = "${var.zone}" 53 | network_ip = "${var.network_ip}" 54 | } 55 | 56 | build { 57 | sources = ["source.googlecompute.autogenerated_1"] 58 | 59 | provisioner "shell" { 60 | execute_command = "sudo -E -S sh '{{ .Path }}'" 61 | inline = ["ls /var/log"] 62 | } 63 | 64 | provisioner "shell" { 65 | inline = ["hostname -I"] 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/oslogin/default-token-and-pkey.pkr.hcl: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project" { 5 | type = string 6 | default = env("GOOGLE_PROJECT_ID") 7 | } 8 | 9 | variable "ssh_private_key" { 10 | type = string 11 | default = "" 12 | } 13 | 14 | variable "ssh_username" { 15 | type = string 16 | default = "root" 17 | } 18 | 19 | variable "zone" { 20 | type = string 21 | default = "us-central1-a" 22 | } 23 | 24 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 25 | 26 | # No provided access_token or account_file should read contents of env GOOGLE_APPLICATION_CREDENTIALS 27 | source "googlecompute" "autogenerated_1" { 28 | image_name = "packer-oslogin-tester-pkey-${local.timestamp}" 29 | project_id = var.project 30 | source_image_family = "centos-stream-9" 31 | ssh_username = var.ssh_username 32 | ssh_private_key_file = "%s" 33 | ssh_timeout = "30s" 34 | use_os_login = true 35 | skip_create_image = true 36 | zone = var.zone 37 | } 38 | 39 | build { 40 | sources = ["source.googlecompute.autogenerated_1"] 41 | 42 | provisioner "shell" { 43 | inline = ["echo hello from the other side, username is $(whoami)"] 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/oslogin/default-token.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | 5 | variable "project" { 6 | type = string 7 | default = "${env("GOOGLE_PROJECT_ID")}" 8 | } 9 | 10 | variable "ssh_private_key" { 11 | type = string 12 | default = "" 13 | } 14 | 15 | variable "ssh_username" { 16 | type = string 17 | default = "root" 18 | } 19 | 20 | variable "zone" { 21 | type = string 22 | default = "us-central1-a" 23 | } 24 | 25 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 26 | 27 | # No provided access_token or account_file should read contents of env GOOGLE_APPLICATION_CREDENTIALS 28 | source "googlecompute" "autogenerated_1" { 29 | image_name = "packer-oslogin-tester-${local.timestamp}" 30 | project_id = "${var.project}" 31 | source_image_family = "centos-stream-9" 32 | ssh_username = "${var.ssh_username}" 33 | use_os_login = true 34 | skip_create_image = true 35 | zone = "${var.zone}" 36 | } 37 | 38 | build { 39 | sources = ["source.googlecompute.autogenerated_1"] 40 | 41 | provisioner "shell" { 42 | execute_command = "sudo -E -S sh '{{ .Path }}'" 43 | inline = ["ls /var/log"] 44 | } 45 | 46 | provisioner "shell" { 47 | inline = ["echo hello from the other side"] 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/wrapped-startup-scripts/errored.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | 5 | variable "project" { 6 | type = string 7 | default = "${env("GOOGLE_PROJECT_ID")}" 8 | } 9 | 10 | variable "service_account_file" { 11 | type = string 12 | default = "${env("GOOGLE_APPLICATION_CREDENTIALS")}" 13 | } 14 | 15 | variable "ssh_private_key" { 16 | type = string 17 | default = "" 18 | } 19 | 20 | variable "ssh_username" { 21 | type = string 22 | default = "packer" 23 | } 24 | 25 | variable "zone" { 26 | type = string 27 | default = "us-central1-a" 28 | } 29 | 30 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 31 | 32 | source "googlecompute" "autogenerated_1" { 33 | account_file = var.service_account_file 34 | image_name = "packer-wrapped-err-test-${local.timestamp}" 35 | project_id = "${var.project}" 36 | source_image_family = "centos-stream-9" 37 | ssh_username = "${var.ssh_username}" 38 | startup_script_file = "./testdata/wrapped-startup-scripts/errored.sh" 39 | skip_create_image = true 40 | zone = "${var.zone}" 41 | } 42 | 43 | build { 44 | sources = ["source.googlecompute.autogenerated_1"] 45 | 46 | provisioner "shell" { 47 | execute_command = "sudo -E -S sh '{{ .Path }}'" 48 | inline = ["ls /var/log"] 49 | } 50 | 51 | provisioner "shell" { 52 | inline = ["echo hello from the other side"] 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/wrapped-startup-scripts/errored.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | echo "Executing some startup script" 5 | echo ".....pretend we did a lot of work here" 6 | echo "Uh oh we are starting to fail!!!!!" 
7 | exit 1 8 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/wrapped-startup-scripts/successful.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | 5 | variable "project" { 6 | type = string 7 | default = "${env("GOOGLE_PROJECT_ID")}" 8 | } 9 | 10 | variable "service_account_file" { 11 | type = string 12 | default = "${env("GOOGLE_APPLICATION_CREDENTIALS")}" 13 | } 14 | 15 | variable "ssh_private_key" { 16 | type = string 17 | default = "" 18 | } 19 | 20 | variable "ssh_username" { 21 | type = string 22 | default = "packer" 23 | } 24 | 25 | variable "zone" { 26 | type = string 27 | default = "us-central1-a" 28 | } 29 | 30 | locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") } 31 | 32 | source "googlecompute" "autogenerated_1" { 33 | account_file = var.service_account_file 34 | image_name = "packer-wrapped-succ-test-${local.timestamp}" 35 | project_id = "${var.project}" 36 | source_image_family = "centos-stream-9" 37 | ssh_username = "${var.ssh_username}" 38 | skip_create_image = true 39 | zone = "${var.zone}" 40 | } 41 | 42 | build { 43 | sources = ["source.googlecompute.autogenerated_1"] 44 | 45 | provisioner "shell" { 46 | execute_command = "sudo -E -S sh '{{ .Path }}'" 47 | inline = ["ls /var/log"] 48 | } 49 | 50 | provisioner "shell" { 51 | inline = ["echo hello from the other side"] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /builder/googlecompute/testdata/wrapped-startup-scripts/successful.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | echo "Executing some startup script" 5 | echo ".....pretend we did a lot of work here" 6 | echo "we are all done" 7 | exit 0 8 | -------------------------------------------------------------------------------- /builder/googlecompute/tunnel_driver.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build !windows 5 | // +build !windows 6 | 7 | package googlecompute 8 | 9 | import ( 10 | "context" 11 | "log" 12 | "os/exec" 13 | "syscall" 14 | ) 15 | 16 | func NewTunnelDriver() TunnelDriver { 17 | return &TunnelDriverLinux{} 18 | } 19 | 20 | type TunnelDriverLinux struct { 21 | cmd *exec.Cmd 22 | } 23 | 24 | func (t *TunnelDriverLinux) StartTunnel(cancelCtx context.Context, tempScriptFileName string, timeout int) error { 25 | cmd := exec.CommandContext(cancelCtx, tempScriptFileName) 26 | cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 27 | 28 | err := RunTunnelCommand(cmd, timeout) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | // Store successful command on step so we can access it to cancel it 34 | // later. 35 | t.cmd = cmd 36 | return nil 37 | } 38 | 39 | func (t *TunnelDriverLinux) StopTunnel() { 40 | if t.cmd != nil && t.cmd.Process != nil { 41 | log.Printf("Cleaning up the IAP tunnel...") 42 | // Why not just cmd.Process.Kill()? I'm glad you asked. The gcloud 43 | // call spawns a python subprocess that listens on the port, and you 44 | // need to use the process _group_ id to halt this process and its 45 | // daemon child. We create the group ID with the syscall.SysProcAttr 46 | // call inside the retry loop above, and then store that ID on the 47 | // command so we can halt it here. 
48 | err := syscall.Kill(-t.cmd.Process.Pid, syscall.SIGINT) 49 | if err != nil { 50 | log.Printf("Issue stopping IAP tunnel: %s", err) 51 | } 52 | } else { 53 | log.Printf("Couldn't find IAP tunnel process to kill. Continuing.") 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /builder/googlecompute/tunnel_driver_windows.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build windows 5 | // +build windows 6 | 7 | package googlecompute 8 | 9 | import ( 10 | "context" 11 | "log" 12 | "os/exec" 13 | ) 14 | 15 | func NewTunnelDriver() TunnelDriver { 16 | return &TunnelDriverWindows{} 17 | } 18 | 19 | type TunnelDriverWindows struct { 20 | cmd *exec.Cmd 21 | } 22 | 23 | func (t *TunnelDriverWindows) StartTunnel(cancelCtx context.Context, tempScriptFileName string, timeout int) error { 24 | args := []string{"/C", "call", tempScriptFileName} 25 | cmd := exec.CommandContext(cancelCtx, "cmd", args...) 26 | err := RunTunnelCommand(cmd, timeout) 27 | if err != nil { 28 | return err 29 | } 30 | // Store successful command on step so we can access it to cancel it 31 | // later. 32 | t.cmd = cmd 33 | return nil 34 | } 35 | 36 | func (t *TunnelDriverWindows) StopTunnel() { 37 | if t.cmd != nil && t.cmd.Process != nil { 38 | err := t.cmd.Process.Kill() 39 | if err != nil { 40 | log.Printf("Issue stopping IAP tunnel: %s", err) 41 | } 42 | } else { 43 | log.Printf("Couldn't find IAP tunnel process to kill. Continuing.") 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /builder/googlecompute/winrm.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecompute 5 | 6 | import ( 7 | "github.com/hashicorp/packer-plugin-sdk/communicator" 8 | "github.com/hashicorp/packer-plugin-sdk/multistep" 9 | ) 10 | 11 | // winrmConfig returns the WinRM configuration. 12 | func winrmConfig(state multistep.StateBag) (*communicator.WinRMConfig, error) { 13 | config := state.Get("config").(*Config) 14 | password := state.Get("winrm_password").(string) 15 | 16 | return &communicator.WinRMConfig{ 17 | Username: config.Comm.WinRMUser, 18 | Password: password, 19 | }, nil 20 | } 21 | -------------------------------------------------------------------------------- /datasource/image/data.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:generate packer-sdc struct-markdown 5 | //go:generate packer-sdc mapstructure-to-hcl2 -type Config,DatasourceOutput 6 | 7 | package image 8 | 9 | import ( 10 | "context" 11 | "fmt" 12 | "log" 13 | "sort" 14 | 15 | "github.com/hashicorp/hcl/v2/hcldec" 16 | "github.com/hashicorp/packer-plugin-sdk/hcl2helper" 17 | "github.com/hashicorp/packer-plugin-sdk/template/config" 18 | "github.com/zclconf/go-cty/cty" 19 | "google.golang.org/api/compute/v1" 20 | ) 21 | 22 | type Datasource struct { 23 | config Config 24 | } 25 | 26 | type Config struct { 27 | // The Google Cloud project ID to search for images. 28 | ProjectID string `mapstructure:"project_id"` 29 | // The filter expression to narrow down the image search. 30 | // For example: "name=ubuntu" or "family=ubuntu-2004". 31 | // The exrpressions can be combined with AND/OR like this: 32 | // "name=ubuntu AND family=ubuntu-2004". 33 | // See https://cloud.google.com/sdk/gcloud/reference/topic/filters 34 | Filters string `mapstructure:"filters"` 35 | // If true, the most recent image will be returned. 
36 | // If false, an error will be returned if more than one image matches the filters. 37 | MostRecent bool `mapstructure:"most_recent"` 38 | } 39 | 40 | type DatasourceOutput struct { 41 | ID string `mapstructure:"id"` 42 | Name string `mapstructure:"name"` 43 | CreationDate string `mapstructure:"creation_date"` 44 | Labels map[string]string `mapstructure:"labels"` 45 | } 46 | 47 | func (d *Datasource) ConfigSpec() hcldec.ObjectSpec { 48 | return d.config.FlatMapstructure().HCL2Spec() 49 | } 50 | 51 | func (d *Datasource) OutputSpec() hcldec.ObjectSpec { 52 | return (&DatasourceOutput{}).FlatMapstructure().HCL2Spec() 53 | } 54 | 55 | func (d *Datasource) Configure(raws ...interface{}) error { 56 | if err := config.Decode(&d.config, nil, raws...); err != nil { 57 | return err 58 | } 59 | if d.config.ProjectID == "" { 60 | return fmt.Errorf("project_id must be specified") 61 | } 62 | if d.config.Filters == "" { 63 | return fmt.Errorf("filters must be specified to narrow down image search") 64 | } 65 | return nil 66 | } 67 | 68 | func (d *Datasource) Execute() (cty.Value, error) { 69 | ctx := context.Background() 70 | 71 | service, err := compute.NewService(ctx) 72 | if err != nil { 73 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("failed to create compute client: %w", err) 74 | } 75 | 76 | images, err := service.Images.List(d.config.ProjectID).Filter(d.config.Filters).Do() 77 | if err != nil { 78 | return cty.NullVal(cty.EmptyObject), err 79 | } 80 | 81 | if len(images.Items) == 0 { 82 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("no images found with filter expression: %q", d.config.Filters) 83 | } 84 | 85 | if len(images.Items) > 1 && !d.config.MostRecent { 86 | return cty.NullVal(cty.EmptyObject), fmt.Errorf( 87 | "Your query returned more than one result. 
Please try a more specific search, or set most_recent = true", 88 | ) 89 | } 90 | // Sort by most recent first 91 | sort.Slice(images.Items, func(i, j int) bool { 92 | return images.Items[i].CreationTimestamp > images.Items[j].CreationTimestamp 93 | }) 94 | 95 | matched := images.Items[0] 96 | out := DatasourceOutput{ 97 | ID: fmt.Sprintf("%d", matched.Id), 98 | Name: matched.Name, 99 | CreationDate: matched.CreationTimestamp, 100 | Labels: func() map[string]string { 101 | if matched.Labels == nil { 102 | return map[string]string{} 103 | } 104 | return matched.Labels 105 | }(), 106 | } 107 | 108 | log.Printf("[DEBUG] - datasource: found image %q with ID %s", matched.Name, out.ID) 109 | 110 | return hcl2helper.HCL2ValueFromConfig(out, d.OutputSpec()), nil 111 | } 112 | -------------------------------------------------------------------------------- /datasource/image/data.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package image 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatConfig is an auto-generated flat version of Config. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 12 | type FlatConfig struct { 13 | ProjectID *string `mapstructure:"project_id" cty:"project_id" hcl:"project_id"` 14 | Filters *string `mapstructure:"filters" cty:"filters" hcl:"filters"` 15 | MostRecent *bool `mapstructure:"most_recent" cty:"most_recent" hcl:"most_recent"` 16 | } 17 | 18 | // FlatMapstructure returns a new FlatConfig. 19 | // FlatConfig is an auto-generated flat version of Config. 20 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 21 | func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 22 | return new(FlatConfig) 23 | } 24 | 25 | // HCL2Spec returns the hcl spec of a Config. 
26 | // This spec is used by HCL to read the fields of Config. 27 | // The decoded values from this spec will then be applied to a FlatConfig. 28 | func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { 29 | s := map[string]hcldec.Spec{ 30 | "project_id": &hcldec.AttrSpec{Name: "project_id", Type: cty.String, Required: false}, 31 | "filters": &hcldec.AttrSpec{Name: "filters", Type: cty.String, Required: false}, 32 | "most_recent": &hcldec.AttrSpec{Name: "most_recent", Type: cty.Bool, Required: false}, 33 | } 34 | return s 35 | } 36 | 37 | // FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. 38 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 39 | type FlatDatasourceOutput struct { 40 | ID *string `mapstructure:"id" cty:"id" hcl:"id"` 41 | Name *string `mapstructure:"name" cty:"name" hcl:"name"` 42 | CreationDate *string `mapstructure:"creation_date" cty:"creation_date" hcl:"creation_date"` 43 | Labels map[string]string `mapstructure:"labels" cty:"labels" hcl:"labels"` 44 | } 45 | 46 | // FlatMapstructure returns a new FlatDatasourceOutput. 47 | // FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. 48 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 49 | func (*DatasourceOutput) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 50 | return new(FlatDatasourceOutput) 51 | } 52 | 53 | // HCL2Spec returns the hcl spec of a DatasourceOutput. 54 | // This spec is used by HCL to read the fields of DatasourceOutput. 55 | // The decoded values from this spec will then be applied to a FlatDatasourceOutput. 
56 | func (*FlatDatasourceOutput) HCL2Spec() map[string]hcldec.Spec { 57 | s := map[string]hcldec.Spec{ 58 | "id": &hcldec.AttrSpec{Name: "id", Type: cty.String, Required: false}, 59 | "name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false}, 60 | "creation_date": &hcldec.AttrSpec{Name: "creation_date", Type: cty.String, Required: false}, 61 | "labels": &hcldec.AttrSpec{Name: "labels", Type: cty.Map(cty.String), Required: false}, 62 | } 63 | return s 64 | } 65 | -------------------------------------------------------------------------------- /datasource/image/data_acc_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package image 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "os/exec" 10 | "regexp" 11 | "testing" 12 | 13 | "github.com/hashicorp/packer-plugin-sdk/acctest" 14 | ) 15 | 16 | var projectID = os.Getenv("GOOGLE_PROJECT_ID") 17 | 18 | func TestAccGCPImageDatasource(t *testing.T) { 19 | if projectID == "" { 20 | t.Skip("GOOGLE_PROJECT_ID must be set") 21 | } 22 | 23 | imageName := "debian-12-bookworm-v202" 24 | 25 | tmpl := loadTemplate(t) 26 | 27 | family := "debian-12" 28 | 29 | projectID = "debian-cloud" 30 | 31 | tc := &acctest.PluginTestCase{ 32 | Name: "gcp_image_datasource", 33 | Template: tmpl, 34 | BuildExtraArgs: []string{ 35 | "-var", fmt.Sprintf("project_id=%s", projectID), 36 | "-var", fmt.Sprintf("family=%s", family), 37 | }, 38 | Check: func(cmd *exec.Cmd, logfile string) error { 39 | out, err := os.ReadFile(logfile) 40 | if err != nil { 41 | return err 42 | } 43 | output := string(out) 44 | if !regexp.MustCompile(regexp.QuoteMeta(imageName)).MatchString(output) { 45 | t.Errorf("expected image name %q in logs:\n%s", imageName, output) 46 | } 47 | return nil 48 | }, 49 | } 50 | 51 | acctest.TestPlugin(t, tc) 52 | } 53 | 54 | func loadTemplate(t *testing.T) string { 55 | content, err := 
os.ReadFile("test-fixtures/template.pkr.hcl") 56 | if err != nil { 57 | t.Fatalf("failed to read test template: %v", err) 58 | } 59 | return string(content) 60 | } 61 | -------------------------------------------------------------------------------- /datasource/image/data_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package image 5 | 6 | import ( 7 | "testing" 8 | ) 9 | 10 | func TestDatasourceConfigure_EmptyProjectID(t *testing.T) { 11 | d := &Datasource{ 12 | config: Config{ 13 | Filters: "name=ubuntu", 14 | }, 15 | } 16 | err := d.Configure() 17 | if err == nil { 18 | t.Fatal("expected error when project_id is missing") 19 | } 20 | } 21 | 22 | func TestDatasourceConfigure_EmptyFilters(t *testing.T) { 23 | d := &Datasource{ 24 | config: Config{ 25 | ProjectID: "test-project", 26 | }, 27 | } 28 | err := d.Configure() 29 | if err == nil { 30 | t.Fatal("expected error when filters are missing") 31 | } 32 | } 33 | 34 | func TestDatasourceConfigure_ValidConfig(t *testing.T) { 35 | d := &Datasource{ 36 | config: Config{ 37 | ProjectID: "test-project", 38 | Filters: "name=ubuntu", 39 | }, 40 | } 41 | err := d.Configure() 42 | if err != nil { 43 | t.Fatalf("unexpected error: %s", err) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /datasource/image/test-fixtures/template.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | packer { 5 | required_plugins { 6 | googlecompute = { 7 | version = "~> v1.0" 8 | source = "github.com/hashicorp/googlecompute" 9 | } 10 | } 11 | } 12 | 13 | variable "project_id" { 14 | type = string 15 | } 16 | 17 | variable "family" { 18 | type = string 19 | } 20 | 21 | data "googlecompute-image" "example" { 22 | project_id = var.project_id 23 | filters = "family=debian-12 AND labels.public-image=true" 24 | most_recent = true 25 | } 26 | 27 | source "null" "ex" { 28 | communicator = "none" 29 | } 30 | 31 | build { 32 | sources = ["source.null.ex"] 33 | provisioner "shell-local" { 34 | inline = [ 35 | "echo ${data.googlecompute-image.example.id}", 36 | "echo ${data.googlecompute-image.example.name}", 37 | ] 38 | } 39 | } -------------------------------------------------------------------------------- /datasource/secretsmanager/data.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:generate packer-sdc struct-markdown 5 | //go:generate packer-sdc mapstructure-to-hcl2 -type Config,DatasourceOutput 6 | 7 | package secretsmanager 8 | 9 | import ( 10 | "context" 11 | "encoding/json" 12 | "errors" 13 | "fmt" 14 | "hash/crc32" 15 | 16 | "github.com/hashicorp/hcl/v2/hcldec" 17 | "github.com/hashicorp/packer-plugin-sdk/hcl2helper" 18 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 19 | "github.com/hashicorp/packer-plugin-sdk/template/config" 20 | 21 | "github.com/zclconf/go-cty/cty" 22 | 23 | secretmanager "cloud.google.com/go/secretmanager/apiv1" 24 | secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" 25 | "google.golang.org/grpc/codes" 26 | "google.golang.org/grpc/status" 27 | ) 28 | 29 | type Config struct { 30 | 31 | // The Google Cloud project ID where the secret is stored. 
32 | ProjectId string `mapstructure:"project_id" required:"true"` 33 | 34 | // The name of the secret in the secret manager. 35 | Name string `mapstructure:"name" required:"true"` 36 | 37 | // The key to extract from the secret payload. 38 | // If not provided, the entire payload will be returned. 39 | Key string `mapstructure:"key"` 40 | 41 | // The version of the secret to access. Defaults to "latest" if not specified. 42 | Version string `mapstructure:"version"` 43 | } 44 | 45 | type Datasource struct { 46 | config Config 47 | } 48 | 49 | type DatasourceOutput struct { 50 | // The raw string payload of the secret version. 51 | Payload string `mapstructure:"payload"` 52 | 53 | // The value extracted using the 'key', if provided. 54 | Value string `mapstructure:"value"` 55 | 56 | // The crc32c checksum for the payload. 57 | Checksum int64 `mapstructure:"checksum"` 58 | } 59 | 60 | func (d *Datasource) ConfigSpec() hcldec.ObjectSpec { 61 | return d.config.FlatMapstructure().HCL2Spec() 62 | } 63 | 64 | func (d *Datasource) OutputSpec() hcldec.ObjectSpec { 65 | return (&DatasourceOutput{}).FlatMapstructure().HCL2Spec() 66 | } 67 | 68 | func (d *Datasource) Configure(raws ...interface{}) error { 69 | err := config.Decode(&d.config, nil, raws...) 
70 | if err != nil { 71 | return err 72 | } 73 | 74 | var errs *packersdk.MultiError 75 | if d.config.Version == "" { 76 | d.config.Version = "latest" 77 | } 78 | if d.config.ProjectId == "" { 79 | errs = packersdk.MultiErrorAppend(errs, errors.New("a 'project_id' must be specified")) 80 | } 81 | if d.config.Name == "" { 82 | errs = packersdk.MultiErrorAppend(errs, errors.New("a 'name' must be specified")) 83 | } 84 | 85 | if errs != nil && len(errs.Errors) > 0 { 86 | return errs 87 | } 88 | return nil 89 | } 90 | 91 | func (d *Datasource) Execute() (cty.Value, error) { 92 | ctx := context.Background() 93 | 94 | client, err := secretmanager.NewClient(ctx) 95 | if err != nil { 96 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("failed to create secret manager client: %w", err) 97 | } 98 | defer client.Close() 99 | 100 | secretName := fmt.Sprintf("projects/%s/secrets/%s/versions/%s", d.config.ProjectId, d.config.Name, d.config.Version) 101 | 102 | secret, err := client.AccessSecretVersion(ctx, &secretmanagerpb.AccessSecretVersionRequest{ 103 | Name: secretName, 104 | }) 105 | if err != nil { 106 | st := status.Convert(err) 107 | if st.Code() == codes.NotFound { 108 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("secret %q not found", secretName) 109 | } 110 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("error accessing secret: %w", err) 111 | } 112 | 113 | payload := secret.GetPayload() 114 | checksum := int64(0) 115 | if secret.Payload.DataCrc32C != nil { 116 | checksum = *payload.DataCrc32C 117 | } 118 | computedChecksum := crc32.Checksum(payload.Data, crc32.MakeTable(crc32.Castagnoli)) 119 | 120 | if payload.DataCrc32C != nil && int64(computedChecksum) != checksum { 121 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("data integrity check failed: expected crc32c %d but got %d", *payload.DataCrc32C, computedChecksum) 122 | } 123 | 124 | var value string 125 | if d.config.Key != "" { 126 | var payloadMap map[string]interface{} 127 | if err := 
json.Unmarshal(payload.GetData(), &payloadMap); err != nil { 128 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("failed to parse JSON payload for key extraction: %w", err) 129 | } 130 | 131 | val, ok := payloadMap[d.config.Key] 132 | if !ok { 133 | return cty.NullVal(cty.EmptyObject), fmt.Errorf("key %q not found in secret payload", d.config.Key) 134 | } 135 | 136 | value = fmt.Sprintf("%v", val) 137 | } 138 | 139 | output := DatasourceOutput{ 140 | Payload: string(payload.GetData()), 141 | Value: value, 142 | Checksum: checksum, 143 | } 144 | return hcl2helper.HCL2ValueFromConfig(output, d.OutputSpec()), nil 145 | } 146 | -------------------------------------------------------------------------------- /datasource/secretsmanager/data.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package secretsmanager 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatConfig is an auto-generated flat version of Config. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 12 | type FlatConfig struct { 13 | ProjectId *string `mapstructure:"project_id" required:"true" cty:"project_id" hcl:"project_id"` 14 | Name *string `mapstructure:"name" required:"true" cty:"name" hcl:"name"` 15 | Key *string `mapstructure:"key" cty:"key" hcl:"key"` 16 | Version *string `mapstructure:"version" cty:"version" hcl:"version"` 17 | } 18 | 19 | // FlatMapstructure returns a new FlatConfig. 20 | // FlatConfig is an auto-generated flat version of Config. 21 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 22 | func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 23 | return new(FlatConfig) 24 | } 25 | 26 | // HCL2Spec returns the hcl spec of a Config. 27 | // This spec is used by HCL to read the fields of Config. 
28 | // The decoded values from this spec will then be applied to a FlatConfig. 29 | func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { 30 | s := map[string]hcldec.Spec{ 31 | "project_id": &hcldec.AttrSpec{Name: "project_id", Type: cty.String, Required: false}, 32 | "name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false}, 33 | "key": &hcldec.AttrSpec{Name: "key", Type: cty.String, Required: false}, 34 | "version": &hcldec.AttrSpec{Name: "version", Type: cty.String, Required: false}, 35 | } 36 | return s 37 | } 38 | 39 | // FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. 40 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 41 | type FlatDatasourceOutput struct { 42 | Payload *string `mapstructure:"payload" cty:"payload" hcl:"payload"` 43 | Value *string `mapstructure:"value" cty:"value" hcl:"value"` 44 | Checksum *int64 `mapstructure:"checksum" cty:"checksum" hcl:"checksum"` 45 | } 46 | 47 | // FlatMapstructure returns a new FlatDatasourceOutput. 48 | // FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. 49 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 50 | func (*DatasourceOutput) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 51 | return new(FlatDatasourceOutput) 52 | } 53 | 54 | // HCL2Spec returns the hcl spec of a DatasourceOutput. 55 | // This spec is used by HCL to read the fields of DatasourceOutput. 56 | // The decoded values from this spec will then be applied to a FlatDatasourceOutput. 
57 | func (*FlatDatasourceOutput) HCL2Spec() map[string]hcldec.Spec { 58 | s := map[string]hcldec.Spec{ 59 | "payload": &hcldec.AttrSpec{Name: "payload", Type: cty.String, Required: false}, 60 | "value": &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false}, 61 | "checksum": &hcldec.AttrSpec{Name: "checksum", Type: cty.Number, Required: false}, 62 | } 63 | return s 64 | } 65 | -------------------------------------------------------------------------------- /datasource/secretsmanager/data_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package secretsmanager 5 | 6 | import ( 7 | "testing" 8 | ) 9 | 10 | func TestDatasourceConfigure_EmptyProjectId(t *testing.T) { 11 | d := &Datasource{ 12 | config: Config{ 13 | Name: "test-secret", 14 | }, 15 | } 16 | err := d.Configure() 17 | if err == nil { 18 | t.Fatal("expected error when project_id is missing") 19 | } 20 | } 21 | 22 | func TestDatasourceConfigure_EmptyName(t *testing.T) { 23 | d := &Datasource{ 24 | config: Config{ 25 | ProjectId: "test-project", 26 | }, 27 | } 28 | err := d.Configure() 29 | if err == nil { 30 | t.Fatal("expected error when name is missing") 31 | } 32 | } 33 | 34 | func TestDatasourceConfigure_Defaults(t *testing.T) { 35 | d := &Datasource{ 36 | config: Config{ 37 | Name: "test-secret", 38 | ProjectId: "test-project", 39 | }, 40 | } 41 | err := d.Configure() 42 | if err != nil { 43 | t.Fatalf("unexpected error: %s", err) 44 | } 45 | if d.config.Version != "latest" { 46 | t.Fatalf("expected version to default to 'latest', got %s", d.config.Version) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /datasource/secretsmanager/test-fixtures/template.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | variable "project_id" { 5 | type = string 6 | } 7 | 8 | variable "secret_name" { 9 | type = string 10 | default = "packer-test-secret" 11 | } 12 | 13 | variable "key" { 14 | type = string 15 | default = "foo" 16 | } 17 | 18 | data "googlecompute-secretsmanager" "test" { 19 | project_id = var.project_id 20 | name = var.secret_name 21 | key = var.key 22 | } 23 | 24 | locals { 25 | value = data.googlecompute-secretsmanager.test.value 26 | payload = data.googlecompute-secretsmanager.test.payload 27 | } 28 | 29 | source "null" "basic-example" { 30 | communicator = "none" 31 | } 32 | 33 | build { 34 | sources = [ 35 | "source.null.basic-example" 36 | ] 37 | 38 | provisioner "shell-local" { 39 | inline = [ 40 | "echo secret value: ${local.value}", 41 | "echo secret payload: ${local.payload}", 42 | ] 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /docs-partials/builder/googlecompute/Config-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `project_id` (string) - The project ID that will be used to launch instances and store images. 4 | 5 | - `source_image` (string) - The source image to use to create the new image from. You can also 6 | specify source_image_family instead. If both source_image and 7 | source_image_family are specified, source_image takes precedence. 8 | Example: `"debian-8-jessie-v20161027"` 9 | 10 | - `source_image_family` (string) - The source image family to use to create the new image from. The image 11 | family always returns its latest image that is not deprecated. Example: 12 | `"debian-8"`. 13 | 14 | - `zone` (string) - The zone in which to launch the instance used to create the image. 
15 | Example: `"us-central1-a"` 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs-partials/builder/googlecompute/Config.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | Config is the configuration structure for the GCE builder. It stores 4 | both the publicly settable state as well as the privately generated 5 | state of the config object. 6 | 7 | 8 | -------------------------------------------------------------------------------- /docs-partials/builder/googlecompute/IAPConfig-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `use_iap` (bool) - Whether to use an IAP proxy. 4 | Prerequisites and limitations for using IAP: 5 | - You must manually enable the IAP API in the Google Cloud console. 6 | - You must have the gcloud sdk installed on the computer running Packer. 7 | - If you use a service account, you must add it to project level IAP permissions 8 | in https://console.cloud.google.com/security/iap. To do so, click 9 | "project" > "SSH and TCP resources" > "All Tunnel Resources" > 10 | "Add Member". Then add your service account and choose the role 11 | "IAP-secured Tunnel User" and add any conditions you may care about. 12 | 13 | - `iap_localhost_port` (int) - Which port to connect the local end of the IAM localhost proxy to. If 14 | left blank, Packer will choose a port for you from available ports. 15 | 16 | - `iap_hashbang` (string) - What "hashbang" to use to invoke script that sets up gcloud. 17 | Default: "/bin/sh" 18 | 19 | - `iap_ext` (string) - What file extension to use for script that sets up gcloud. 20 | Default: ".sh" 21 | 22 | - `iap_tunnel_launch_wait` (int) - How long to wait, in seconds, before assuming a tunnel launch was 23 | successful. Defaults to 30 seconds for SSH or 40 seconds for WinRM. 
24 | 25 | 26 | -------------------------------------------------------------------------------- /docs-partials/builder/googlecompute/IAPConfig.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | StepStartTunnel represents a Packer build step that launches an IAP tunnel 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs-partials/datasource/image/Config-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `project_id` (string) - The Google Cloud project ID to search for images. 4 | 5 | - `filters` (string) - The filter expression to narrow down the image search. 6 | For example: "name=ubuntu" or "family=ubuntu-2004". 7 | The exrpressions can be combined with AND/OR like this: 8 | "name=ubuntu AND family=ubuntu-2004". 9 | See https://cloud.google.com/sdk/gcloud/reference/topic/filters 10 | 11 | - `most_recent` (bool) - If true, the most recent image will be returned. 12 | If false, an error will be returned if more than one image matches the filters. 13 | 14 | 15 | -------------------------------------------------------------------------------- /docs-partials/datasource/image/DatasourceOutput.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `id` (string) - ID 4 | 5 | - `name` (string) - Name 6 | 7 | - `creation_date` (string) - Creation Date 8 | 9 | - `labels` (map[string]string) - Labels 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs-partials/datasource/secretsmanager/Config-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `key` (string) - The key to extract from the secret payload. 4 | If not provided, the entire payload will be returned. 5 | 6 | - `version` (string) - The version of the secret to access. Defaults to "latest" if not specified. 
7 | 8 | 9 | -------------------------------------------------------------------------------- /docs-partials/datasource/secretsmanager/Config-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `project_id` (string) - The Google Cloud project ID where the secret is stored. 4 | 5 | - `name` (string) - The name of the secret in the secret manager. 6 | 7 | 8 | -------------------------------------------------------------------------------- /docs-partials/datasource/secretsmanager/DatasourceOutput.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `payload` (string) - The raw string payload of the secret version. 4 | 5 | - `value` (string) - The value extracted using the 'key', if provided. 6 | 7 | - `checksum` (int64) - The crc32c checksum for the payload. 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs-partials/lib/common/Authentication-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `access_token` (string) - A temporary [OAuth 2.0 access token](https://developers.google.com/identity/protocols/oauth2) 4 | obtained from the Google Authorization server, i.e. the `Authorization: Bearer` token used to 5 | authenticate HTTP requests to GCP APIs. 6 | This is an alternative to `account_file`, and ignores the `scopes` field. 7 | If both are specified, `access_token` will be used over the `account_file` field. 8 | 9 | These access tokens cannot be renewed by Packer and thus will only work until they expire. 10 | If you anticipate Packer needing access for longer than a token's lifetime (default `1 hour`), 11 | please use a service account key with `account_file` instead. 12 | 13 | - `account_file` (string) - The JSON file containing your account credentials. Not required if you 14 | run Packer on a GCE instance with a service account. 
Instructions for 15 | creating the file or using service accounts are above. 16 | 17 | - `credentials_file` (string) - The JSON file containing your account credentials. 18 | 19 | The file's contents may be anything supported by the Google Go client, i.e.: 20 | 21 | * Service account JSON 22 | * OIDC-provided token for federation 23 | * Gcloud user credentials file (refresh-token JSON) 24 | * A Google Developers Console client_credentials.json 25 | 26 | - `credentials_json` (string) - The raw JSON payload for credentials. 27 | 28 | The accepted data formats are same as those described under 29 | [credentials_file](#credentials_file). 30 | 31 | - `impersonate_service_account` (string) - This allows service account impersonation as per the [docs](https://cloud.google.com/iam/docs/impersonating-service-accounts). 32 | 33 | - `vault_gcp_oauth_engine` (string) - Can be set instead of account_file. If set, this builder will use 34 | HashiCorp Vault to generate an Oauth token for authenticating against 35 | Google Cloud. The value should be the path of the token generator 36 | within vault. 37 | For information on how to configure your Vault + GCP engine to produce 38 | Oauth tokens, see https://www.vaultproject.io/docs/auth/gcp 39 | You must have the environment variables VAULT_ADDR and VAULT_TOKEN set, 40 | along with any other relevant variables for accessing your vault 41 | instance. For more information, see the Vault docs: 42 | https://www.vaultproject.io/docs/commands/#environment-variables 43 | Example:`"vault_gcp_oauth_engine": "gcp/token/my-project-editor",` 44 | 45 | 46 | -------------------------------------------------------------------------------- /docs-partials/lib/common/BlockDevice-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `attachment_mode` (string) - How to attach the volume to the instance 4 | 5 | Can be either READ_ONLY or READ_WRITE (default). 
6 | 7 | - `create_image` (bool) - If true, an image will be created for this disk, instead of the boot disk. 8 | 9 | This only applies to non-scratch disks, and can only be specified on one disk at a 10 | time. 11 | 12 | - `device_name` (string) - The device name as exposed to the OS in the /dev/disk/by-id/google-* directory 13 | 14 | If unspecified, the disk will have a default name in the form 15 | persistent-disk-x with 'x' being a number assigned by GCE 16 | 17 | This field only applies to persistent disks, local SSDs will always 18 | be exposed as /dev/disk/by-id/google-local-nvme-ssd-x. 19 | 20 | - `disk_encryption_key` (CustomerEncryptionKey) - Disk encryption key to apply to the requested disk. 21 | 22 | Possible values: 23 | * kmsKeyName - The name of the encryption key that is stored in Google Cloud KMS. 24 | * RawKey: - A 256-bit customer-supplied encryption key, encodes in RFC 4648 base64. 25 | 26 | Refer to the [Customer Encryption Key](#customer-encryption-key) section for more information on the contents of this block. 27 | 28 | - `disk_name` (string) - Name of the disk to create. 29 | This only applies to non-scratch disks. If the disk is persistent, and 30 | not specified, Packer will generate a unique name for the disk. 31 | 32 | The name must be 1-63 characters long and comply to the regexp 33 | '[a-z]([-a-z0-9]*[a-z0-9])?' 34 | 35 | - `interface_type` (string) - The interface to use for attaching the disk. 36 | Can be either NVME or SCSI. Defaults to SCSI. 37 | 38 | The available options depend on the type of disk, SEE: https://cloud.google.com/compute/docs/disks/persistent-disks#choose_an_interface 39 | 40 | - `iops` (int) - The requested IOPS for the disk. 41 | 42 | This is only available for pd_extreme disks. 43 | 44 | - `keep_device` (bool) - Keep the device in the created disks after the instance is terminated. 45 | By default, the builder will remove the disks at the end of the build. 46 | 47 | This cannot be used with 'scratch' volumes. 
48 | 49 | - `replica_zones` ([]string) - The list of extra zones to replicate the disk into 50 | 51 | The zone in which the instance is created will automatically be 52 | added to the zones in which the disk is replicated. 53 | 54 | - `source_volume` (string) - The URI of the volume to attach 55 | 56 | If this is specified, it won't be deleted after the instance is shut-down. 57 | 58 | - `source_image` (string) - The URI of the image to load 59 | 60 | This cannot be used with SourceVolume. 61 | 62 | - `_` (string) - Zone is the zone in which to create the disk in. 63 | 64 | It is not exposed since the parent config already specifies it 65 | and it will be set for the block device when preparing it. 66 | 67 | 68 | -------------------------------------------------------------------------------- /docs-partials/lib/common/BlockDevice-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `volume_size` (int) - Size of the volume to request, in gigabytes. 4 | 5 | The size specified must be in range of the sizes for the chosen volume type. 6 | 7 | - `volume_type` (BlockDeviceType) - The volume type is the type of storage to reserve and attach to the instance being provisioned. 
8 | 9 | The following values are supported by this builder: 10 | * scratch: local SSD data, always 375 GiB (default) 11 | * pd_standard: persistent, HDD-backed disk 12 | * pd_balanced: persistent, SSD-backed disk 13 | * pd_ssd: persistent, SSD-backed disk, with extra performance guarantees 14 | * pd_extreme: persistent, fastest SSD-backed disk, with custom IOPS 15 | * hyperdisk-balanced: persistent hyperdisk volume, bootable 16 | * hyperdisk-extreme: persistent hyperdisk volume, optimised for performance 17 | * hyperdisk-ml: persistent, shareable, hyperdisk volume, highest throughput 18 | * hyperdisk-throughput: persistent hyperdisk volume with flexible throughput 19 | 20 | For details on the different types, refer to: https://cloud.google.com/compute/docs/disks#disk-types 21 | For more information on hyperdisk volumes, refer to: https://cloud.google.com/compute/docs/disks/hyperdisks#throughput 22 | 23 | 24 | -------------------------------------------------------------------------------- /docs-partials/lib/common/BlockDevice.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | BlockDevice is a block device attachment/creation to an instance when building an image. 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs-partials/lib/common/CustomerEncryptionKey-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `kmsKeyName` (string) - KmsKeyName: The name of the encryption key that is stored in Google 4 | Cloud KMS. 5 | 6 | - `rawKey` (string) - RawKey: Specifies a 256-bit customer-supplied encryption key, encoded 7 | in RFC 4648 base64 to either encrypt or decrypt this resource. 
8 | 9 | 10 | -------------------------------------------------------------------------------- /docs-partials/lib/common/NodeAffinity-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `key` (string) - Key: Corresponds to the label key of Node resource. 4 | 5 | - `operator` (string) - Operator: Defines the operation of node selection. Valid operators are IN for affinity and 6 | NOT_IN for anti-affinity. 7 | 8 | - `values` ([]string) - Values: Corresponds to the label values of Node resource. 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs-partials/lib/common/NodeAffinity.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | Node affinity label configuration 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs-partials/post-processor/googlecompute-export/Config-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `scopes` ([]string) - The service account scopes for launched exporter post-processor instance. 4 | Defaults to: 5 | 6 | ```json 7 | [ 8 | "https://www.googleapis.com/auth/cloud-platform" 9 | ] 10 | ``` 11 | 12 | - `disk_size` (int64) - The size of the export instances disk. 13 | The disk is unused for the export but a larger size will increase `pd-ssd` read speed. 14 | This defaults to `200`, which is 200GB. 15 | 16 | - `disk_type` (string) - Type of disk used to back the export instance, like 17 | `pd-ssd` or `pd-standard`. Defaults to `pd-ssd`. 18 | 19 | - `machine_type` (string) - The export instance machine type. Defaults to `"n1-highcpu-4"`. 20 | 21 | - `source_image_family` (string) - Image used to launch a temp VM for export. Defaults to `"debian-12-worker"` 22 | 23 | - `network` (string) - The Google Compute network id or URL to use for the export instance. 24 | Defaults to `"default"`. 
If the value is not a URL, it 25 | will be interpolated to `projects/((builder_project_id))/global/networks/((network))`. 26 | This value is not required if a `subnet` is specified. 27 | 28 | - `subnetwork` (string) - The Google Compute subnetwork id or URL to use for 29 | the export instance. Only required if the `network` has been created with 30 | custom subnetting. Note, the region of the subnetwork must match the 31 | `zone` in which the VM is launched. If the value is not a URL, 32 | it will be interpolated to 33 | `projects/((builder_project_id))/regions/((region))/subnetworks/((subnetwork))` 34 | 35 | - `zone` (string) - The zone in which to launch the export instance. Defaults 36 | to `googlecompute` builder zone. Example: `"us-central1-a"` 37 | 38 | - `service_account_email` (string) - Service Account Email 39 | 40 | - `omit_external_ip` (bool) - If true, the exporter instance will not have an external IP. 41 | 42 | 43 | -------------------------------------------------------------------------------- /docs-partials/post-processor/googlecompute-export/Config-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `paths` ([]string) - A list of GCS paths where the image will be exported. 4 | For example `'gs://mybucket/path/to/file.tar.gz'` 5 | 6 | 7 | -------------------------------------------------------------------------------- /docs-partials/post-processor/googlecompute-import/Config-not-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `scopes` ([]string) - The service account scopes for launched importer post-processor instance. 4 | Defaults to: 5 | 6 | ```json 7 | [ 8 | "https://www.googleapis.com/auth/cloud-platform" 9 | ] 10 | ``` 11 | 12 | - `gcs_object_name` (string) - The name of the GCS object in `bucket` where 13 | the RAW disk image will be copied for import. 
This is treated as a 14 | [template engine](/packer/docs/templates/legacy_json_templates/engine). Therefore, you 15 | may use user variables and template functions in this field. Defaults to 16 | `packer-import-{{timestamp}}.tar.gz`. 17 | 18 | - `image_architecture` (string) - Specifies the architecture or processor type that this image can support. Must be one of: `arm64` or `x86_64`. Defaults to `ARCHITECTURE_UNSPECIFIED`. 19 | 20 | - `image_description` (string) - The description of the resulting image. 21 | 22 | - `image_family` (string) - The name of the image family to which the resulting image belongs. 23 | 24 | - `image_guest_os_features` ([]string) - A list of features to enable on the guest operating system. Applicable only for bootable images. Valid 25 | values are `MULTI_IP_SUBNET`, `UEFI_COMPATIBLE`, 26 | `VIRTIO_SCSI_MULTIQUEUE`, `GVNIC`, `WINDOWS`, `IDPF`, `SEV_CAPABLE`, `SEV_SNP_CAPABLE`, `SEV_LIVE_MIGRATABLE_V2`, `TDX_CAPABLE` and `SUSPEND_RESUME_COMPATIBLE` currently. 27 | 28 | - `image_labels` (map[string]string) - Key/value pair labels to apply to the created image. 29 | 30 | - `image_storage_locations` ([]string) - Specifies a Cloud Storage location, either regional or multi-regional, where image content is to be stored. If not specified, the multi-region location closest to the source is chosen automatically. 31 | 32 | - `skip_clean` (bool) - Skip removing the TAR file uploaded to the GCS 33 | bucket after the import process has completed. "true" means that we should 34 | leave it in the GCS bucket, "false" means to clean it out. Defaults to 35 | `false`. 36 | 37 | - `image_platform_key` (string) - A key used to establish the trust relationship between the platform owner and the firmware. You may only specify one platform key, and it must be a valid X.509 certificate. 38 | 39 | - `image_key_exchange_key` ([]string) - A key used to establish a trust relationship between the firmware and the OS. 
You may specify multiple comma-separated keys for this value. 40 | 41 | - `image_signatures_db` ([]string) - A database of certificates that are trusted and can be used to sign boot files. You may specify single or multiple comma-separated values for this value. 42 | 43 | - `image_forbidden_signatures_db` ([]string) - A database of certificates that have been revoked and will cause the system to stop booting if a boot file is signed with one of them. You may specify single or multiple comma-separated values for this value. 44 | 45 | 46 | -------------------------------------------------------------------------------- /docs-partials/post-processor/googlecompute-import/Config-required.mdx: -------------------------------------------------------------------------------- 1 | 2 | 3 | - `project_id` (string) - The project ID where the GCS bucket exists and where the GCE image is stored. 4 | 5 | - `bucket` (string) - The name of the GCS bucket where the raw disk image will be uploaded. 6 | 7 | - `image_name` (string) - The unique name of the resulting image. 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs/datasources/image.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | description: | 3 | The Google Compute Image data source filters and fetches a GCE image and outputs relevant image metadata. 4 | 5 | page_title: Google Compute Image - Data Source 6 | nav_title: Google Compute Image 7 | --- 8 | 9 | # Google Compute Image Data Source 10 | 11 | Type: `googlecompute-image` 12 | 13 | The Google Compute Image data source filters and fetches a GCE image and outputs relevant image metadata for 14 | use with [Google Compute builders](/packer/integrations/hashicorp/googlecompute). 15 | 16 | -> **Note:** Data sources is a feature exclusively available to HCL2 templates. 
17 | 18 | Basic example of usage: 19 | 20 | ```hcl 21 | data "googlecompute-image" "basic-example" { 22 | project_id = "debian-cloud" 23 | filters = "family=debian-12 AND labels.public-image=true" 24 | most_recent = true 25 | } 26 | ``` 27 | 28 | This configuration selects the most recent GCE image from the `debian-cloud` project that belongs to the `debian-12` family and has the `public-image` label set to `true`. 29 | The data source will fail unless exactly one image is matched. Setting `most_recent = true` ensures only the newest image is selected when multiple matches exist. 30 | 31 | ## Configuration Reference 32 | 33 | @include 'datasource/image/Config-not-required.mdx' 34 | 35 | ## Output Data 36 | 37 | @include 'datasource/image/DatasourceOutput.mdx' 38 | 39 | ## Authentication 40 | 41 | To authenticate with GCE, this data-source supports everything the plugin does. 42 | To get more information on this, refer to the plugin's description page, under 43 | the [authentication](/packer/integrations/hashicorp/googlecompute#authentication) section. 44 | -------------------------------------------------------------------------------- /docs/datasources/secretsmanager.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | description: | 3 | The Secrets Manager data source provides information about a Secrets Manager secret version, 4 | including its value and metadata. 5 | 6 | page_title: Secrets Manager - Data Source 7 | nav_title: Secrets Manager 8 | --- 9 | 10 | # Google Compute Secrets Manager Data Source 11 | 12 | The Secrets Manager data source provides information about a Secrets Manager secret version, 13 | including its value and metadata. 14 | 15 | -> **Note:** Data sources is a feature exclusively available to HCL2 templates. 
16 | 17 | Basic examples of usage: 18 | 19 | ```hcl 20 | data "googlecompute-secretsmanager" "basic-example" { 21 | project_id = "debian-cloud" 22 | name = "packer_test_secret" 23 | key = "packer_test_key" 24 | } 25 | 26 | # usage example of the data source output 27 | locals { 28 | value = data.googlecompute-secretsmanager.basic-example.value 29 | payload = data.googlecompute-secretsmanager.basic-example.payload 30 | } 31 | ``` 32 | 33 | Reading key-value pairs from JSON back into a native Packer map can be accomplished 34 | with the [jsondecode() function](/packer/docs/templates/hcl_templates/functions/encoding/jsondecode). 35 | 36 | ## Configuration Reference 37 | 38 | ### Required 39 | 40 | @include 'datasource/secretsmanager/Config-required.mdx' 41 | 42 | ### Optional 43 | 44 | @include 'datasource/secretsmanager/Config-not-required.mdx' 45 | 46 | ## Output Data 47 | 48 | @include 'datasource/secretsmanager/DatasourceOutput.mdx' 49 | 50 | ## Authentication 51 | 52 | To authenticate with GCE, this data-source supports everything the plugin does. 53 | To get more information on this, refer to the plugin's description page, under 54 | the [authentication](/packer/integrations/hashicorp/googlecompute#authentication) section. 55 | -------------------------------------------------------------------------------- /docs/post-processors/googlecompute-export.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | description: > 3 | The Google Compute Image Exporter post-processor exports an image from a 4 | Packer googlecompute builder run and uploads it to Google Cloud Storage. The 5 | exported images can be easily shared and uploaded to other Google Cloud 6 | Projects. 
7 | page_title: Google Cloud Platform Image Exporter - Post-Processors 8 | sidebar_title: googlecompute-export 9 | --- 10 | 11 | # Google Compute Image Exporter Post-Processor 12 | 13 | Type: `googlecompute-export` 14 | Artifact BuilderId: `packer.post-processor.googlecompute-export` 15 | 16 | The Google Compute Image Exporter post-processor exports the resultant image 17 | from a googlecompute build as a gzipped tarball to Google Cloud Storage (GCS). 18 | 19 | The exporter uses the same Google Cloud Platform (GCP) project and 20 | authentication credentials as the googlecompute build that produced the image. 21 | A temporary VM is started in the GCP project using these credentials. The VM 22 | mounts the built image as a disk then dumps, compresses, and tars the image. 23 | The VM then uploads the tarball to the provided GCS `paths` using the same 24 | credentials. 25 | 26 | As such, the authentication credentials that built the image must have write 27 | permissions to the GCS `paths`. 28 | 29 | ~> **Note**: By default the GCE image being exported will be deleted once the image has been exported. 30 | To prevent Packer from deleting the image set the `keep_input_artifact` configuration option to `true`. See [Post-Processor Input Artifacts](/packer/docs/templates/legacy_json_templates/post-processors#input-artifacts) for more details. 31 | 32 | ## Authentication 33 | 34 | To authenticate with GCE, this builder supports everything the plugin does. 35 | To get more information on this, refer to the plugin's description page, under 36 | the [authentication](/packer/integrations/hashicorp/googlecompute#authentication) section. 
37 | 38 | ## Configuration 39 | 40 | ### Required 41 | 42 | @include 'post-processor/googlecompute-export/Config-required.mdx' 43 | 44 | ### Optional 45 | 46 | @include 'post-processor/googlecompute-export/Config-not-required.mdx' 47 | 48 | ## Basic Example 49 | 50 | The following example builds a GCE image in the project, `my-project`, with an 51 | account whose keyfile is `account.json`. After the image build, a temporary VM 52 | will be created to export the image as a gzipped tarball to 53 | `gs://mybucket1/path/to/file1.tar.gz` and 54 | `gs://mybucket2/path/to/file2.tar.gz`. `keep_input_artifact` is true, so the 55 | GCE image won't be deleted after the export. 56 | 57 | In order for this example to work, the account associated with `account.json` 58 | must have write access to both `gs://mybucket1/path/to/file1.tar.gz` and 59 | `gs://mybucket2/path/to/file2.tar.gz`. 60 | 61 | **JSON** 62 | 63 | ```json 64 | { 65 | "builders": [ 66 | { 67 | "type": "googlecompute", 68 | "account_file": "account.json", 69 | "project_id": "my-project", 70 | "source_image": "debian-7-wheezy-v20150127", 71 | "zone": "us-central1-a" 72 | } 73 | ], 74 | "post-processors": [ 75 | { 76 | "type": "googlecompute-export", 77 | "paths": [ 78 | "gs://mybucket1/path/to/file1.tar.gz", 79 | "gs://mybucket2/path/to/file2.tar.gz" 80 | ], 81 | "keep_input_artifact": true 82 | } 83 | ] 84 | } 85 | ``` 86 | 87 | 88 | **HCL2** 89 | 90 | ```hcl 91 | 92 | source "googlecompute" "example" { 93 | account_file = "account.json" 94 | project_id = "my-project" 95 | source_image = "debian-7-wheezy-v20150127" 96 | zone = "us-central1-a" 97 | } 98 | 99 | build { 100 | sources = ["source.googlecompute.example"] 101 | 102 | post-processor "googlecompute-export" { 103 | paths = [ 104 | "gs://mybucket1/path/to/file1.tar.gz", 105 | "gs://mybucket2/path/to/file2.tar.gz" 106 | ] 107 | keep_input_artifact = true 108 | } 109 | } 110 | ``` 111 | 112 | 
-------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | ## The Example Folder 2 | 3 | This folder must contain a fully working example of the plugin usage. The 4 | example must define the `required_plugins` block. A pre-defined GitHub Action 5 | will run `packer init`, `packer validate`, and `packer build` to test your 6 | plugin with the latest version available of Packer. 7 | 8 | The folder can contain multiple HCL2 compatible files. The action will execute 9 | Packer at this folder level running `packer init -upgrade .` and `packer build 10 | .`. 11 | 12 | If the plugin requires authentication, the configuration should be provided via 13 | GitHub Secrets and set as environment variables in the 14 | [test-plugin-example.yml](/.github/workflows/test-plugin-example.yml) file. 15 | Example: 16 | 17 | ```yml 18 | - name: Build 19 | working-directory: ${{ github.event.inputs.folder }} 20 | run: PACKER_LOG=${{ github.event.inputs.logs }} packer build . 21 | env: 22 | AUTH_KEY: ${{ secrets.AUTH_KEY }} 23 | AUTH_PASSWORD: ${{ secrets.AUTH_PASSWORD }} 24 | ``` 25 | -------------------------------------------------------------------------------- /example/build.pkr.hcl: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | packer { 5 | required_plugins { 6 | googlecompute = { 7 | version = "~> v1.0" 8 | source = "github.com/hashicorp/googlecompute" 9 | } 10 | } 11 | } 12 | 13 | variable "zone" { 14 | default = "europe-west4-a" 15 | } 16 | 17 | variable "project_id" { 18 | type = string 19 | } 20 | 21 | source "googlecompute" "ex" { 22 | image_name = "test-packer-example" 23 | machine_type = "e2-small" 24 | source_image = "debian-10-buster-v20210316" 25 | ssh_username = "packer" 26 | temporary_key_pair_type = "rsa" 27 | temporary_key_pair_bits = 2048 28 | zone = var.zone 29 | project_id = var.project_id 30 | } 31 | 32 | build { 33 | sources = ["source.googlecompute.ex"] 34 | provisioner "shell" { 35 | inline = [ 36 | "echo Hello From ${source.type} ${source.name}" 37 | ] 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /lib/common/affinities.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:generate packer-sdc struct-markdown 5 | //go:generate packer-sdc mapstructure-to-hcl2 -type NodeAffinity 6 | 7 | package common 8 | 9 | import compute "google.golang.org/api/compute/v1" 10 | 11 | // Node affinity label configuration 12 | type NodeAffinity struct { 13 | // Key: Corresponds to the label key of Node resource. 14 | Key string `mapstructure:"key" json:"key"` 15 | 16 | // Operator: Defines the operation of node selection. Valid operators are IN for affinity and 17 | // NOT_IN for anti-affinity. 18 | Operator string `mapstructure:"operator" json:"operator"` 19 | 20 | // Values: Corresponds to the label values of Node resource. 
21 | Values []string `mapstructure:"values" json:"values"` 22 | } 23 | 24 | func (a *NodeAffinity) ComputeType() *compute.SchedulingNodeAffinity { 25 | if a == nil { 26 | return nil 27 | } 28 | return &compute.SchedulingNodeAffinity{ 29 | Key: a.Key, 30 | Operator: a.Operator, 31 | Values: a.Values, 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /lib/common/affinities.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package common 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatNodeAffinity is an auto-generated flat version of NodeAffinity. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 12 | type FlatNodeAffinity struct { 13 | Key *string `mapstructure:"key" json:"key" cty:"key" hcl:"key"` 14 | Operator *string `mapstructure:"operator" json:"operator" cty:"operator" hcl:"operator"` 15 | Values []string `mapstructure:"values" json:"values" cty:"values" hcl:"values"` 16 | } 17 | 18 | // FlatMapstructure returns a new FlatNodeAffinity. 19 | // FlatNodeAffinity is an auto-generated flat version of NodeAffinity. 20 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 21 | func (*NodeAffinity) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 22 | return new(FlatNodeAffinity) 23 | } 24 | 25 | // HCL2Spec returns the hcl spec of a NodeAffinity. 26 | // This spec is used by HCL to read the fields of NodeAffinity. 27 | // The decoded values from this spec will then be applied to a FlatNodeAffinity. 
28 | func (*FlatNodeAffinity) HCL2Spec() map[string]hcldec.Spec { 29 | s := map[string]hcldec.Spec{ 30 | "key": &hcldec.AttrSpec{Name: "key", Type: cty.String, Required: false}, 31 | "operator": &hcldec.AttrSpec{Name: "operator", Type: cty.String, Required: false}, 32 | "values": &hcldec.AttrSpec{Name: "values", Type: cty.List(cty.String), Required: false}, 33 | } 34 | return s 35 | } 36 | -------------------------------------------------------------------------------- /lib/common/auth.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package common 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatAuthentication is an auto-generated flat version of Authentication. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 12 | type FlatAuthentication struct { 13 | AccessToken *string `mapstructure:"access_token" required:"false" cty:"access_token" hcl:"access_token"` 14 | AccountFile *string `mapstructure:"account_file" required:"false" cty:"account_file" hcl:"account_file"` 15 | CredentialsFile *string `mapstructure:"credentials_file" required:"false" cty:"credentials_file" hcl:"credentials_file"` 16 | CredentialsJSON *string `mapstructure:"credentials_json" required:"false" cty:"credentials_json" hcl:"credentials_json"` 17 | ImpersonateServiceAccount *string `mapstructure:"impersonate_service_account" required:"false" cty:"impersonate_service_account" hcl:"impersonate_service_account"` 18 | VaultGCPOauthEngine *string `mapstructure:"vault_gcp_oauth_engine" cty:"vault_gcp_oauth_engine" hcl:"vault_gcp_oauth_engine"` 19 | } 20 | 21 | // FlatMapstructure returns a new FlatAuthentication. 22 | // FlatAuthentication is an auto-generated flat version of Authentication. 23 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 
24 | func (*Authentication) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 25 | return new(FlatAuthentication) 26 | } 27 | 28 | // HCL2Spec returns the hcl spec of a Authentication. 29 | // This spec is used by HCL to read the fields of Authentication. 30 | // The decoded values from this spec will then be applied to a FlatAuthentication. 31 | func (*FlatAuthentication) HCL2Spec() map[string]hcldec.Spec { 32 | s := map[string]hcldec.Spec{ 33 | "access_token": &hcldec.AttrSpec{Name: "access_token", Type: cty.String, Required: false}, 34 | "account_file": &hcldec.AttrSpec{Name: "account_file", Type: cty.String, Required: false}, 35 | "credentials_file": &hcldec.AttrSpec{Name: "credentials_file", Type: cty.String, Required: false}, 36 | "credentials_json": &hcldec.AttrSpec{Name: "credentials_json", Type: cty.String, Required: false}, 37 | "impersonate_service_account": &hcldec.AttrSpec{Name: "impersonate_service_account", Type: cty.String, Required: false}, 38 | "vault_gcp_oauth_engine": &hcldec.AttrSpec{Name: "vault_gcp_oauth_engine", Type: cty.String, Required: false}, 39 | } 40 | return s 41 | } 42 | -------------------------------------------------------------------------------- /lib/common/block_device.hcl2spec.go: -------------------------------------------------------------------------------- 1 | // Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT. 2 | 3 | package common 4 | 5 | import ( 6 | "github.com/hashicorp/hcl/v2/hcldec" 7 | "github.com/zclconf/go-cty/cty" 8 | ) 9 | 10 | // FlatBlockDevice is an auto-generated flat version of BlockDevice. 11 | // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
12 | type FlatBlockDevice struct { 13 | AttachmentMode *string `mapstructure:"attachment_mode" cty:"attachment_mode" hcl:"attachment_mode"` 14 | CreateImage *bool `mapstructure:"create_image" cty:"create_image" hcl:"create_image"` 15 | DeviceName *string `mapstructure:"device_name" cty:"device_name" hcl:"device_name"` 16 | DiskEncryptionKey *FlatCustomerEncryptionKey `mapstructure:"disk_encryption_key" cty:"disk_encryption_key" hcl:"disk_encryption_key"` 17 | DiskName *string `mapstructure:"disk_name" cty:"disk_name" hcl:"disk_name"` 18 | InterfaceType *string `mapstructure:"interface_type" cty:"interface_type" hcl:"interface_type"` 19 | IOPS *int `mapstructure:"iops" cty:"iops" hcl:"iops"` 20 | KeepDevice *bool `mapstructure:"keep_device" cty:"keep_device" hcl:"keep_device"` 21 | ReplicaZones []string `mapstructure:"replica_zones" required:"false" cty:"replica_zones" hcl:"replica_zones"` 22 | SourceVolume *string `mapstructure:"source_volume" cty:"source_volume" hcl:"source_volume"` 23 | VolumeSize *int `mapstructure:"volume_size" required:"true" cty:"volume_size" hcl:"volume_size"` 24 | VolumeType *BlockDeviceType `mapstructure:"volume_type" required:"true" cty:"volume_type" hcl:"volume_type"` 25 | SourceImage *string `mapstructure:"source_image" required:"false" cty:"source_image" hcl:"source_image"` 26 | Zone *string `mapstructure:"_" cty:"_" hcl:"_"` 27 | } 28 | 29 | // FlatMapstructure returns a new FlatBlockDevice. 30 | // FlatBlockDevice is an auto-generated flat version of BlockDevice. 31 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 32 | func (*BlockDevice) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 33 | return new(FlatBlockDevice) 34 | } 35 | 36 | // HCL2Spec returns the hcl spec of a BlockDevice. 37 | // This spec is used by HCL to read the fields of BlockDevice. 38 | // The decoded values from this spec will then be applied to a FlatBlockDevice. 
39 | func (*FlatBlockDevice) HCL2Spec() map[string]hcldec.Spec { 40 | s := map[string]hcldec.Spec{ 41 | "attachment_mode": &hcldec.AttrSpec{Name: "attachment_mode", Type: cty.String, Required: false}, 42 | "create_image": &hcldec.AttrSpec{Name: "create_image", Type: cty.Bool, Required: false}, 43 | "device_name": &hcldec.AttrSpec{Name: "device_name", Type: cty.String, Required: false}, 44 | "disk_encryption_key": &hcldec.BlockSpec{TypeName: "disk_encryption_key", Nested: hcldec.ObjectSpec((*FlatCustomerEncryptionKey)(nil).HCL2Spec())}, 45 | "disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false}, 46 | "interface_type": &hcldec.AttrSpec{Name: "interface_type", Type: cty.String, Required: false}, 47 | "iops": &hcldec.AttrSpec{Name: "iops", Type: cty.Number, Required: false}, 48 | "keep_device": &hcldec.AttrSpec{Name: "keep_device", Type: cty.Bool, Required: false}, 49 | "replica_zones": &hcldec.AttrSpec{Name: "replica_zones", Type: cty.List(cty.String), Required: false}, 50 | "source_volume": &hcldec.AttrSpec{Name: "source_volume", Type: cty.String, Required: false}, 51 | "volume_size": &hcldec.AttrSpec{Name: "volume_size", Type: cty.Number, Required: false}, 52 | "volume_type": &hcldec.AttrSpec{Name: "volume_type", Type: cty.String, Required: false}, 53 | "source_image": &hcldec.AttrSpec{Name: "source_image", Type: cty.String, Required: false}, 54 | "_": &hcldec.AttrSpec{Name: "_", Type: cty.String, Required: false}, 55 | } 56 | return s 57 | } 58 | -------------------------------------------------------------------------------- /lib/common/client_keys.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

//go:generate packer-sdc struct-markdown
//go:generate packer-sdc mapstructure-to-hcl2 -type CustomerEncryptionKey

package common

import compute "google.golang.org/api/compute/v1"

// CustomerEncryptionKey describes a customer-managed disk encryption key,
// either as a Cloud KMS key reference or as a raw customer-supplied key.
type CustomerEncryptionKey struct {
	// KmsKeyName: The name of the encryption key that is stored in Google
	// Cloud KMS.
	KmsKeyName string `mapstructure:"kmsKeyName" json:"kmsKeyName,omitempty"`

	// RawKey: Specifies a 256-bit customer-supplied encryption key, encoded
	// in RFC 4648 base64 to either encrypt or decrypt this resource.
	RawKey string `mapstructure:"rawKey" json:"rawKey,omitempty"`
}

// ComputeType converts the key into the equivalent GCE API type.
// A nil receiver is valid and yields nil, so callers can pass an optional
// (possibly unset) key straight through to the compute API structs.
func (k *CustomerEncryptionKey) ComputeType() *compute.CustomerEncryptionKey {
	if k == nil {
		return nil
	}
	return &compute.CustomerEncryptionKey{
		KmsKeyName: k.KmsKeyName,
		RawKey:     k.RawKey,
	}
}
--------------------------------------------------------------------------------
/lib/common/client_keys.hcl2spec.go:
--------------------------------------------------------------------------------
// Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT.

package common

import (
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/zclconf/go-cty/cty"
)

// FlatCustomerEncryptionKey is an auto-generated flat version of CustomerEncryptionKey.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatCustomerEncryptionKey struct {
	KmsKeyName *string `mapstructure:"kmsKeyName" json:"kmsKeyName,omitempty" cty:"kmsKeyName" hcl:"kmsKeyName"`
	RawKey     *string `mapstructure:"rawKey" json:"rawKey,omitempty" cty:"rawKey" hcl:"rawKey"`
}

// FlatMapstructure returns a new FlatCustomerEncryptionKey.
// FlatCustomerEncryptionKey is an auto-generated flat version of CustomerEncryptionKey.
19 | // Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. 20 | func (*CustomerEncryptionKey) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { 21 | return new(FlatCustomerEncryptionKey) 22 | } 23 | 24 | // HCL2Spec returns the hcl spec of a CustomerEncryptionKey. 25 | // This spec is used by HCL to read the fields of CustomerEncryptionKey. 26 | // The decoded values from this spec will then be applied to a FlatCustomerEncryptionKey. 27 | func (*FlatCustomerEncryptionKey) HCL2Spec() map[string]hcldec.Spec { 28 | s := map[string]hcldec.Spec{ 29 | "kmsKeyName": &hcldec.AttrSpec{Name: "kmsKeyName", Type: cty.String, Required: false}, 30 | "rawKey": &hcldec.AttrSpec{Name: "rawKey", Type: cty.String, Required: false}, 31 | } 32 | return s 33 | } 34 | -------------------------------------------------------------------------------- /lib/common/image.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package common 5 | 6 | import ( 7 | "strings" 8 | 9 | compute "google.golang.org/api/compute/v1" 10 | ) 11 | 12 | type Image struct { 13 | Architecture string 14 | GuestOsFeatures []*compute.GuestOsFeature 15 | Labels map[string]string 16 | Licenses []string 17 | Name string 18 | ProjectId string 19 | SelfLink string 20 | SizeGb int64 21 | } 22 | 23 | func (i *Image) IsWindows() bool { 24 | for _, license := range i.Licenses { 25 | if strings.Contains(license, "windows") { 26 | return true 27 | } 28 | } 29 | return false 30 | } 31 | 32 | func (i *Image) IsSecureBootCompatible() bool { 33 | for _, osFeature := range i.GuestOsFeatures { 34 | if osFeature.Type == "UEFI_COMPATIBLE" { 35 | return true 36 | } 37 | } 38 | return false 39 | } 40 | -------------------------------------------------------------------------------- /lib/common/instance.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package common 5 | 6 | type InstanceConfig struct { 7 | AcceleratorType string 8 | AcceleratorCount int64 9 | Address string 10 | Description string 11 | DisableDefaultServiceAccount bool 12 | DiskName string 13 | DiskSizeGb int64 14 | DiskType string 15 | DiskEncryptionKey *CustomerEncryptionKey 16 | EnableNestedVirtualization bool 17 | EnableSecureBoot bool 18 | EnableVtpm bool 19 | EnableIntegrityMonitoring bool 20 | ExtraBlockDevices []BlockDevice 21 | Image *Image 22 | Labels map[string]string 23 | MachineType string 24 | Metadata map[string]string 25 | MinCpuPlatform string 26 | Name string 27 | Network string 28 | NetworkProjectId string 29 | OmitExternalIP bool 30 | OnHostMaintenance string 31 | Preemptible bool 32 | NodeAffinities []NodeAffinity 33 | Region string 34 | ServiceAccountEmail string 35 | Scopes []string 36 | Subnetwork string 37 | Tags []string 38 | Zone string 39 | NetworkIP string 40 | } 41 | -------------------------------------------------------------------------------- /lib/common/networking.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package common 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | ) 10 | 11 | // This method will build a network and subnetwork ID from the provided 12 | // instance config, and return them in that order. 13 | func GetNetworking(c *InstanceConfig) (string, string, error) { 14 | networkId := c.Network 15 | subnetworkId := c.Subnetwork 16 | 17 | // Apply network naming requirements per 18 | // https://cloud.google.com/compute/docs/reference/latest/instances#resource 19 | switch c.Network { 20 | // It is possible to omit the network property as long as a subnet is 21 | // specified. That will be validated later. 22 | case "": 23 | break 24 | // This special short name should be expanded. 
25 | case "default": 26 | networkId = "global/networks/default" 27 | // A value other than "default" was provided for the network name. 28 | default: 29 | // If the value doesn't contain a slash, we assume it's not a full or 30 | // partial URL. We will expand it into a partial URL here and avoid 31 | // making an API call to discover the network as it's common for the 32 | // caller to not have permission against network discovery APIs. 33 | if !strings.Contains(c.Network, "/") { 34 | networkId = "projects/" + c.NetworkProjectId + "/global/networks/" + c.Network 35 | } 36 | } 37 | 38 | // Apply subnetwork naming requirements per 39 | // https://cloud.google.com/compute/docs/reference/latest/instances#resource 40 | switch c.Subnetwork { 41 | case "": 42 | // You can't omit both subnetwork and network 43 | if networkId == "" { 44 | return networkId, subnetworkId, fmt.Errorf("both network and subnetwork were empty.") 45 | } 46 | // An empty subnetwork is only valid for networks in legacy mode or 47 | // auto-subnet mode. We could make an API call to get that information 48 | // about the network, but it's common for the caller to not have 49 | // permission to that API. We'll proceed assuming they're correct in 50 | // omitting the subnetwork and let the compute.insert API surface an 51 | // error about an invalid network configuration if it exists. 52 | default: 53 | // If the value doesn't contain a slash, we assume it's not a full or 54 | // partial URL. We will expand it into a partial URL here and avoid 55 | // making a call to discover the subnetwork. 
56 | if !strings.Contains(c.Subnetwork, "/") { 57 | subnetworkId = "projects/" + c.NetworkProjectId + "/regions/" + c.Region + "/subnetworks/" + c.Subnetwork 58 | } 59 | } 60 | return networkId, subnetworkId, nil 61 | } 62 | -------------------------------------------------------------------------------- /lib/common/networking_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package common 5 | 6 | import ( 7 | "testing" 8 | ) 9 | 10 | func TestGetNetworking(t *testing.T) { 11 | cases := []struct { 12 | c *InstanceConfig 13 | expectedNetwork string 14 | expectedSubnetwork string 15 | error bool 16 | }{ 17 | { 18 | c: &InstanceConfig{ 19 | Network: "default", 20 | Subnetwork: "", 21 | NetworkProjectId: "project-id", 22 | Region: "region-id", 23 | }, 24 | expectedNetwork: "global/networks/default", 25 | expectedSubnetwork: "", 26 | error: false, 27 | }, 28 | { 29 | c: &InstanceConfig{ 30 | Network: "", 31 | Subnetwork: "", 32 | NetworkProjectId: "project-id", 33 | Region: "region-id", 34 | }, 35 | expectedNetwork: "", 36 | expectedSubnetwork: "", 37 | error: true, 38 | }, 39 | { 40 | c: &InstanceConfig{ 41 | Network: "some/network/path", 42 | Subnetwork: "some/subnetwork/path", 43 | NetworkProjectId: "project-id", 44 | Region: "region-id", 45 | }, 46 | expectedNetwork: "some/network/path", 47 | expectedSubnetwork: "some/subnetwork/path", 48 | error: false, 49 | }, 50 | { 51 | c: &InstanceConfig{ 52 | Network: "network-value", 53 | Subnetwork: "subnetwork-value", 54 | NetworkProjectId: "project-id", 55 | Region: "region-id", 56 | }, 57 | expectedNetwork: "projects/project-id/global/networks/network-value", 58 | expectedSubnetwork: "projects/project-id/regions/region-id/subnetworks/subnetwork-value", 59 | error: false, 60 | }, 61 | } 62 | 63 | for _, tc := range cases { 64 | n, sn, err := GetNetworking(tc.c) 65 | if n != tc.expectedNetwork { 66 | 
t.Errorf("Expected network %q but got network %q", tc.expectedNetwork, n) 67 | } 68 | if sn != tc.expectedSubnetwork { 69 | t.Errorf("Expected subnetwork %q but got subnetwork %q", tc.expectedSubnetwork, sn) 70 | } 71 | if !tc.error && err != nil { 72 | t.Errorf("Did not expect an error but got: %v", err) 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | "github.com/hashicorp/packer-plugin-googlecompute/version" 11 | "github.com/hashicorp/packer-plugin-sdk/plugin" 12 | 13 | googlecompute "github.com/hashicorp/packer-plugin-googlecompute/builder/googlecompute" 14 | "github.com/hashicorp/packer-plugin-googlecompute/datasource/image" 15 | "github.com/hashicorp/packer-plugin-googlecompute/datasource/secretsmanager" 16 | googlecomputeexport "github.com/hashicorp/packer-plugin-googlecompute/post-processor/googlecompute-export" 17 | googlecomputeimport "github.com/hashicorp/packer-plugin-googlecompute/post-processor/googlecompute-import" 18 | ) 19 | 20 | func main() { 21 | pps := plugin.NewSet() 22 | pps.RegisterBuilder(plugin.DEFAULT_NAME, new(googlecompute.Builder)) 23 | pps.RegisterPostProcessor("import", new(googlecomputeimport.PostProcessor)) 24 | pps.RegisterPostProcessor("export", new(googlecomputeexport.PostProcessor)) 25 | pps.RegisterDatasource("secretsmanager", new(secretsmanager.Datasource)) 26 | pps.RegisterDatasource("image", new(image.Datasource)) 27 | pps.SetVersion(version.PluginVersion) 28 | err := pps.Run() 29 | if err != nil { 30 | fmt.Fprintln(os.Stderr, err.Error()) 31 | os.Exit(1) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /post-processor/googlecompute-export/artifact.go: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecomputeexport 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | 10 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 11 | registryimage "github.com/hashicorp/packer-plugin-sdk/packer/registry/image" 12 | ) 13 | 14 | const BuilderId = "packer.post-processor.googlecompute-export" 15 | 16 | type Artifact struct { 17 | paths []string 18 | // StateData should store data such as GeneratedData 19 | // to be shared with post-processors 20 | StateData map[string]interface{} 21 | } 22 | 23 | var _ packersdk.Artifact = new(Artifact) 24 | 25 | func (*Artifact) BuilderId() string { 26 | return BuilderId 27 | } 28 | 29 | func (*Artifact) Id() string { 30 | return "" 31 | } 32 | 33 | func (a *Artifact) Files() []string { 34 | pathsCopy := make([]string, len(a.paths)) 35 | copy(pathsCopy, a.paths) 36 | return pathsCopy 37 | } 38 | 39 | func (a *Artifact) String() string { 40 | return fmt.Sprintf("Exported artifacts in: %s", a.paths) 41 | } 42 | 43 | func (a *Artifact) State(name string) interface{} { 44 | if name == registryimage.ArtifactStateURI { 45 | return a.hcpPackerRegistryMetadata() 46 | } 47 | return nil 48 | } 49 | 50 | func (a *Artifact) Destroy() error { 51 | return nil 52 | } 53 | 54 | func (a *Artifact) hcpPackerRegistryMetadata() []*registryimage.Image { 55 | 56 | var images []*registryimage.Image 57 | for _, exportedPath := range a.Files() { 58 | ep := exportedPath 59 | pathParts := strings.SplitN(exportedPath, "/", 4) 60 | img, _ := registryimage.FromArtifact(a, 61 | registryimage.WithID(ep), 62 | registryimage.WithProvider("gce"), 63 | registryimage.WithRegion(pathParts[2])) 64 | 65 | images = append(images, img) 66 | } 67 | 68 | return images 69 | } 70 | -------------------------------------------------------------------------------- 
/post-processor/googlecompute-export/artifact_test.go:
--------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package googlecomputeexport

import (
	"strings"
	"testing"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	registryimage "github.com/hashicorp/packer-plugin-sdk/packer/registry/image"
	"github.com/mitchellh/mapstructure"
)

// TestArtifact_impl is a compile-time assertion that Artifact satisfies the
// packersdk.Artifact interface.
func TestArtifact_impl(t *testing.T) {
	var _ packersdk.Artifact = new(Artifact)
}

// TestArtifactState_RegistryImageMetadata checks that State returns one HCP
// Packer registry image per exported path, with the path as the image ID and
// the GCS bucket (third "/"-separated segment) as the provider region.
func TestArtifactState_RegistryImageMetadata(t *testing.T) {
	artifact := &Artifact{
		paths: []string{"gs://testbucket/packer/file.gz"},
	}

	// Valid state
	result := artifact.State(registryimage.ArtifactStateURI)
	if result == nil {
		t.Fatalf("Bad: HCP Packer registry image data was nil")
	}

	var images []registryimage.Image
	err := mapstructure.Decode(result, &images)
	if err != nil {
		t.Errorf("Bad: unexpected error when trying to decode state into registryimage.Image %v", err)
	}

	if len(images) != 1 {
		t.Errorf("Bad: we should have one image for this test Artifact but we got %d", len(images))
	}

	image := images[0]
	for _, p := range artifact.Files() {
		pathParts := strings.SplitN(p, "/", 4)
		if image.ImageID != p {
			t.Errorf("Bad: unexpected value for ImageID %q, expected %q", image.ImageID, p)
		}

		if image.ProviderRegion != pathParts[2] {
			t.Errorf("Bad: unexpected value for Region %q, expected %q", image.ProviderRegion, pathParts[2])
		}
	}

}
--------------------------------------------------------------------------------
/post-processor/googlecompute-export/startup.go:
--------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package googlecomputeexport

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-googlecompute/builder/googlecompute"
)

// StartupScript is the bash startup script run on the temporary exporter VM.
// It creates a disk from the built image, attaches it, streams it to the
// first GCS path with gce_export, copies the archive to any additional
// paths, then cleans up and reports completion by setting the
// StartupScriptStatusKey metadata value via SetMetadata (the final
// fmt.Sprintf substitutions below).
//
// NOTE(review): GetMetadata already prefixes "attributes/", yet it is called
// with "attributes/%s" and "attributes/startup-script-log-dest" (yielding
// "attributes/attributes/...") and with "zone"/"image_name"/"name"/"paths"
// without that prefix — confirm which of these URLs are actually intended.
var StartupScript string = fmt.Sprintf(`#!/bin/bash

GetMetadata () {
  echo "$(curl -f -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/attributes/$1 2> /dev/null)"
}

ZONE=$(basename $(GetMetadata zone))

SetMetadata () {
  gcloud compute instances add-metadata ${HOSTNAME} --metadata ${1}=${2} --zone ${ZONE}
}

STARTUPSCRIPT=$(GetMetadata attributes/%s)
STARTUPSCRIPTPATH=/packer-wrapped-startup-script
if [ -f "/var/log/startupscript.log" ]; then
  STARTUPSCRIPTLOGPATH=/var/log/startupscript.log
else
  STARTUPSCRIPTLOGPATH=/var/log/daemon.log
fi
STARTUPSCRIPTLOGDEST=$(GetMetadata attributes/startup-script-log-dest)

IMAGENAME=$(GetMetadata image_name)
NAME=$(GetMetadata name)
DISKNAME=${NAME}-toexport
PATHS=($(GetMetadata paths))

Exit () {
  for i in ${PATHS[@]}; do
    LOGDEST="${i}.exporter.log"
    echo "Uploading exporter log to ${LOGDEST}..."
    gsutil -h "Content-Type:text/plain" cp /var/log/daemon.log ${LOGDEST}
  done
  exit $1
}

echo "####### Export configuration #######"
echo "Image name - ${IMAGENAME}"
echo "Instance name - ${NAME}"
echo "Instance zone - ${ZONE}"
echo "Disk name - ${DISKNAME}"
echo "Export paths - ${PATHS}"
echo "####################################"

echo "Creating disk from image to be exported..."
if ! gcloud compute disks create ${DISKNAME} --image ${IMAGENAME} --zone ${ZONE}; then
  echo "Failed to create disk."
  Exit 1
fi

echo "Attaching disk..."
if ! gcloud compute instances attach-disk ${NAME} --disk ${DISKNAME} --device-name toexport --zone ${ZONE}; then
  echo "Failed to attach disk."
  Exit 1
fi

echo "GCEExport: Running export tool."
gce_export -gcs_path "${PATHS[0]}" -disk /dev/disk/by-id/google-toexport -y
if [ $? -ne 0 ]; then
  echo "ExportFailed: Failed to export disk source to ${PATHS[0]}."
  Exit 1
fi

echo "ExportSuccess"
sync

echo "Detaching disk..."
if ! gcloud compute instances detach-disk ${NAME} --disk ${DISKNAME} --zone ${ZONE}; then
  echo "Failed to detach disk."
fi

FAIL=0
echo "Deleting disk..."
if ! gcloud compute disks delete ${DISKNAME} --zone ${ZONE}; then
  echo "Failed to delete disk."
  FAIL=1
fi

for i in ${PATHS[@]:1}; do
  echo "Copying archive image to ${i}..."
  if ! gsutil -o GSUtil:parallel_composite_upload_threshold=100M cp ${PATHS[0]} ${i}; then
    echo "Failed to copy image to ${i}."
    FAIL=1
  fi
done

SetMetadata %s %s

Exit ${FAIL}
`, googlecompute.StartupWrappedScriptKey, googlecompute.StartupScriptStatusKey, googlecompute.StartupScriptStatusDone)
--------------------------------------------------------------------------------
/post-processor/googlecompute-import/artifact.go:
--------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package googlecomputeimport 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | 10 | packersdk "github.com/hashicorp/packer-plugin-sdk/packer" 11 | registryimage "github.com/hashicorp/packer-plugin-sdk/packer/registry/image" 12 | ) 13 | 14 | const BuilderId = "packer.post-processor.googlecompute-import" 15 | 16 | type Artifact struct { 17 | paths []string 18 | } 19 | 20 | var _ packersdk.Artifact = new(Artifact) 21 | 22 | func (*Artifact) BuilderId() string { 23 | return BuilderId 24 | } 25 | 26 | func (*Artifact) Id() string { 27 | return "" 28 | } 29 | 30 | func (a *Artifact) Files() []string { 31 | pathsCopy := make([]string, len(a.paths)) 32 | copy(pathsCopy, a.paths) 33 | return pathsCopy 34 | } 35 | 36 | func (a *Artifact) String() string { 37 | return fmt.Sprintf("Exported artifacts in: %s", a.paths) 38 | } 39 | 40 | func (a *Artifact) State(name string) interface{} { 41 | if name == registryimage.ArtifactStateURI { 42 | return a.hcpPackerRegistryMetadata() 43 | } 44 | return nil 45 | } 46 | 47 | func (a *Artifact) Destroy() error { 48 | return nil 49 | } 50 | 51 | func (a *Artifact) hcpPackerRegistryMetadata() []*registryimage.Image { 52 | 53 | var images []*registryimage.Image 54 | for _, exportedPath := range a.Files() { 55 | ep := exportedPath 56 | pathParts := strings.SplitN(exportedPath, "/", 4) 57 | img, _ := registryimage.FromArtifact(a, 58 | registryimage.WithID(ep), 59 | registryimage.WithRegion(pathParts[2])) 60 | 61 | images = append(images, img) 62 | } 63 | 64 | return images 65 | } 66 | -------------------------------------------------------------------------------- /post-processor/googlecompute-import/artifact_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package googlecomputeimport

import (
	"strings"
	"testing"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	registryimage "github.com/hashicorp/packer-plugin-sdk/packer/registry/image"
	"github.com/mitchellh/mapstructure"
)

// TestArtifact_impl is a compile-time assertion that Artifact satisfies the
// packersdk.Artifact interface.
func TestArtifact_impl(t *testing.T) {
	var _ packersdk.Artifact = new(Artifact)
}

// TestArtifactState_RegistryImageMetadata checks that State returns one HCP
// Packer registry image per artifact path, with the path as the image ID and
// the GCS bucket (third "/"-separated segment) as the provider region.
func TestArtifactState_RegistryImageMetadata(t *testing.T) {
	artifact := &Artifact{
		paths: []string{"gs://testimportbucket/packer/file.gz"},
	}

	// Valid state
	result := artifact.State(registryimage.ArtifactStateURI)
	if result == nil {
		t.Fatalf("Bad: HCP Packer registry image data was nil")
	}

	var images []registryimage.Image
	err := mapstructure.Decode(result, &images)
	if err != nil {
		t.Errorf("Bad: unexpected error when trying to decode state into registryimage.Image %v", err)
	}

	if len(images) != 1 {
		t.Errorf("Bad: we should have one image for this test Artifact but we got %d", len(images))
	}

	image := images[0]
	for _, p := range artifact.Files() {
		pathParts := strings.SplitN(p, "/", 4)
		if image.ImageID != p {
			t.Errorf("Bad: unexpected value for ImageID %q, expected %q", image.ImageID, p)
		}

		if image.ProviderRegion != pathParts[2] {
			t.Errorf("Bad: unexpected value for Region %q, expected %q", image.ProviderRegion, pathParts[2])
		}
	}

}
--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package version

import "github.com/hashicorp/packer-plugin-sdk/version"

var (
	// Version is the main version number that is being run at the moment.
	Version = "1.2.1"

	// VersionPrerelease is A pre-release marker for the Version. If this is ""
	// (empty string) then it means that it is a final release. Otherwise, this
	// is a pre-release such as "dev" (in development), "beta", "rc1", etc.
	VersionPrerelease = "dev"

	// VersionMetadata is extra information to add to the version string.
	//
	// It is ignored for comparison, and aims to add extra metadata for
	// documentation purposes.
	VersionMetadata = ""

	// PluginVersion is used by the plugin set to allow Packer to recognize
	// what version this plugin is.
	PluginVersion = version.NewPluginVersion(Version, VersionPrerelease, VersionMetadata)
)
--------------------------------------------------------------------------------