├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── codeql-analysis.yml │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── go.mod ├── go.sum ├── internal ├── escapingfs │ ├── escaping.go │ └── escaping_test.go ├── ignorefiles │ ├── ignorerules.go │ ├── terraformignore.go │ ├── terraformignore_test.go │ └── testdata │ │ ├── archive-dir │ │ ├── .terraform │ │ │ ├── file.txt │ │ │ ├── modules │ │ │ │ └── README │ │ │ └── plugins │ │ │ │ └── README │ │ ├── .terraformignore │ │ ├── .terraformrc │ │ ├── bar.txt │ │ ├── baz.txt │ │ ├── exe │ │ ├── foo.terraform │ │ │ └── bar.txt │ │ ├── foo.txt │ │ └── sub │ │ │ ├── bar.txt │ │ │ └── zip.txt │ │ ├── external-dir │ │ └── foo.txt │ │ └── with-exclusion │ │ ├── .terraformignore │ │ ├── logs │ │ └── foo.txt │ │ ├── src │ │ ├── baz │ │ │ └── ignored.txt │ │ └── foo │ │ │ └── bar.txt │ │ └── tmp │ │ └── tmp.txt └── unpackinfo │ ├── lchtimes_darwin.go │ ├── lchtimes_linux32.go │ ├── lchtimes_linux64.go │ ├── lchtimes_others.go │ ├── unpackinfo.go │ └── unpackinfo_test.go ├── slug.go ├── slug_test.go ├── sourceaddrs ├── doc.go ├── package_remote.go ├── source.go ├── source_final.go ├── source_final_test.go ├── source_local.go ├── source_registry.go ├── source_registry_final.go ├── source_remote.go ├── source_remote_types.go ├── source_test.go └── subpath.go ├── sourcebundle ├── builder.go ├── builder_test.go ├── bundle.go ├── dependency_finder.go ├── diagnostics.go ├── doc.go ├── manifest_json.go ├── package_fetcher.go ├── package_meta.go ├── registry_client.go ├── testdata │ └── pkgs │ │ ├── hello │ │ └── hello │ │ ├── subdirs │ │ └── a │ │ │ └── b │ │ │ └── beepbeep │ │ ├── terraformignore │ │ ├── .terraformignore │ │ ├── excluded │ │ ├── excluded-dir │ │ │ └── excluded │ │ └── included │ │ └── with-remote-deps │ │ ├── dependencies │ │ └── self_dependency └── trace.go ├── terraformignore.go └── testdata ├── archive-dir-absolute ├── _common │ ├── extra-files │ │ ├── bar.sh │ │ └── foo.sh │ 
├── locals.tf │ ├── output.tf │ └── versions.tf └── dev │ ├── backend.tf │ ├── extra-files │ ├── locals.tf │ ├── output.tf │ ├── variables.tf │ └── versions.tf ├── archive-dir-defaults-only ├── .terraform │ ├── modules │ │ ├── README │ │ └── subdir │ │ │ └── README │ └── plugins │ │ └── foo.txt └── bar.txt ├── archive-dir-no-external ├── .terraform │ ├── file.txt │ ├── modules │ │ └── README │ └── plugins │ │ └── README ├── .terraformignore ├── .terraformrc ├── bar.txt ├── baz.txt ├── exe ├── foo.terraform │ └── bar.txt ├── sub │ ├── bar.txt │ └── zip.txt └── sub2 │ ├── bar.txt │ └── zip.txt ├── archive-dir ├── .terraform │ ├── file.txt │ ├── modules │ │ └── README │ └── plugins │ │ └── README ├── .terraformignore ├── .terraformrc ├── bar.txt ├── baz.txt ├── example.tf ├── exe ├── foo.terraform │ └── bar.txt ├── sub │ ├── bar.txt │ └── zip.txt └── sub2 │ ├── bar.txt │ └── zip.txt ├── example.tf ├── subdir-appears-first.tar.gz └── subdir-ordering ├── README.md ├── main.go └── super └── duper └── trooper └── foo.txt /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Each line is a file pattern followed by one or more owners. 2 | # More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 3 | 4 | # Default owner 5 | * @hashicorp/team-ip-compliance 6 | 7 | # Add override rules below. Each line is a file/folder pattern followed by one or more owners. 8 | # Being an owner means those groups or individuals will be added as reviewers to PRs affecting 9 | # those areas of the code. 
10 | # Examples: 11 | # /docs/ @docs-team 12 | # *.js @js-team 13 | # *.go @go-team 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: monthly 7 | labels: 8 | - dependencies 9 | - automated 10 | groups: 11 | github-actions-breaking: 12 | update-types: 13 | - major 14 | github-actions-backward-compatible: 15 | update-types: 16 | - minor 17 | - patch 18 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "Code scanning - scheduled (weekly) or on-demand" 2 | 3 | on: 4 | schedule: 5 | - cron: '0 15 * * 0' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | CodeQL-Build: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout repository 15 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 16 | with: 17 | # We must fetch at least the immediate parents so that if this is 18 | # a pull request then we can checkout the head. 19 | fetch-depth: 2 20 | 21 | # If this run was triggered by a pull request event, then checkout 22 | # the head of the pull request instead of the merge commit. 23 | - run: git checkout HEAD^2 24 | if: ${{ github.event_name == 'pull_request' }} 25 | 26 | # Initializes the CodeQL tools for scanning. 27 | - name: Initialize CodeQL 28 | uses: github/codeql-action/init@70df9def86d22bf0ea4e7f8b956e7b92e7c1ea22 # codeql-bundle-v2.20.7 29 | # Override language selection by uncommenting this and choosing your languages 30 | with: 31 | languages: go 32 | 33 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
34 | # If this step fails, then you should remove it and run the build manually (see below) 35 | # - name: Autobuild 36 | # uses: github/codeql-action/autobuild@v1 37 | 38 | # ℹ️ Command-line programs to run using the OS shell. 39 | # 📚 https://git.io/JvXDl 40 | 41 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 42 | # and modify them (or add more) to build your code if your project 43 | # uses a compiled language 44 | 45 | #- run: | 46 | # make bootstrap 47 | # make release 48 | 49 | - name: Perform CodeQL Analysis 50 | uses: github/codeql-action/analyze@70df9def86d22bf0ea4e7f8b956e7b92e7c1ea22 # codeql-bundle-v2.20.7 51 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | on: 4 | - push 5 | - pull_request 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | goos: [linux, windows, darwin] 14 | goarch: ["386", amd64, arm64] 15 | exclude: 16 | - goarch: "386" 17 | goos: darwin 18 | - goarch: arm64 19 | goos: windows 20 | steps: 21 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 22 | 23 | - name: setup go 24 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 25 | with: 26 | go-version-file: go.mod 27 | 28 | - name: build 29 | run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} go build 30 | 31 | unit-test: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 35 | 36 | - name: setup go 37 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 38 | with: 39 | go-version-file: go.mod 40 | 41 | - name: test 42 | run: go test -race -v -coverprofile=coverage.out ./... 
43 | 44 | - name: Upload coverage report 45 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 46 | with: 47 | path: coverage.out 48 | name: coverage-report 49 | 50 | - name: Display coverage report 51 | run: go tool cover -func=coverage.out 52 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Mac OS metadata 15 | ._.DS_Store 16 | .DS_Store 17 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## Unreleased 2 | 3 | ### Improvements 4 | 5 | ### Changes 6 | 7 | ### Fixed 8 | 9 | ### Security 10 | 11 | ## v0.16.6 12 | 13 | ### Improvements 14 | IND-2704 Coverage test by @KaushikiAnand in #85 15 | Remove Mac OS meta-data file and prevent others being added in the future by @jsnfwlr in #87 16 | Add Changelog file by @mohanmanikanta2299 in #92 17 | 18 | ### Changes 19 | [COMPLIANCE] Add Copyright and License Headers by @hashicorp-copywrite in #84 20 | Pin action refs to latest trusted by TSCCR by @hashicorp-tsccr in #89 21 | 22 | ### Fixed 23 | irregular mode file checks for Windows symlinks by @notchairmk in #79 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 HashiCorp, Inc. 2 | 3 | Mozilla Public License Version 2.0 4 | ================================== 5 | 6 | 1. Definitions 7 | -------------- 8 | 9 | 1.1. 
"Contributor" 10 | means each individual or legal entity that creates, contributes to 11 | the creation of, or owns Covered Software. 12 | 13 | 1.2. "Contributor Version" 14 | means the combination of the Contributions of others (if any) used 15 | by a Contributor and that particular Contributor's Contribution. 16 | 17 | 1.3. "Contribution" 18 | means Covered Software of a particular Contributor. 19 | 20 | 1.4. "Covered Software" 21 | means Source Code Form to which the initial Contributor has attached 22 | the notice in Exhibit A, the Executable Form of such Source Code 23 | Form, and Modifications of such Source Code Form, in each case 24 | including portions thereof. 25 | 26 | 1.5. "Incompatible With Secondary Licenses" 27 | means 28 | 29 | (a) that the initial Contributor has attached the notice described 30 | in Exhibit B to the Covered Software; or 31 | 32 | (b) that the Covered Software was made available under the terms of 33 | version 1.1 or earlier of the License, but not also under the 34 | terms of a Secondary License. 35 | 36 | 1.6. "Executable Form" 37 | means any form of the work other than Source Code Form. 38 | 39 | 1.7. "Larger Work" 40 | means a work that combines Covered Software with other material, in 41 | a separate file or files, that is not Covered Software. 42 | 43 | 1.8. "License" 44 | means this document. 45 | 46 | 1.9. "Licensable" 47 | means having the right to grant, to the maximum extent possible, 48 | whether at the time of the initial grant or subsequently, any and 49 | all of the rights conveyed by this License. 50 | 51 | 1.10. "Modifications" 52 | means any of the following: 53 | 54 | (a) any file in Source Code Form that results from an addition to, 55 | deletion from, or modification of the contents of Covered 56 | Software; or 57 | 58 | (b) any new file in Source Code Form that contains any Covered 59 | Software. 60 | 61 | 1.11. 
"Patent Claims" of a Contributor 62 | means any patent claim(s), including without limitation, method, 63 | process, and apparatus claims, in any patent Licensable by such 64 | Contributor that would be infringed, but for the grant of the 65 | License, by the making, using, selling, offering for sale, having 66 | made, import, or transfer of either its Contributions or its 67 | Contributor Version. 68 | 69 | 1.12. "Secondary License" 70 | means either the GNU General Public License, Version 2.0, the GNU 71 | Lesser General Public License, Version 2.1, the GNU Affero General 72 | Public License, Version 3.0, or any later versions of those 73 | licenses. 74 | 75 | 1.13. "Source Code Form" 76 | means the form of the work preferred for making modifications. 77 | 78 | 1.14. "You" (or "Your") 79 | means an individual or a legal entity exercising rights under this 80 | License. For legal entities, "You" includes any entity that 81 | controls, is controlled by, or is under common control with You. For 82 | purposes of this definition, "control" means (a) the power, direct 83 | or indirect, to cause the direction or management of such entity, 84 | whether by contract or otherwise, or (b) ownership of more than 85 | fifty percent (50%) of the outstanding shares or beneficial 86 | ownership of such entity. 87 | 88 | 2. License Grants and Conditions 89 | -------------------------------- 90 | 91 | 2.1. 
Grants 92 | 93 | Each Contributor hereby grants You a world-wide, royalty-free, 94 | non-exclusive license: 95 | 96 | (a) under intellectual property rights (other than patent or trademark) 97 | Licensable by such Contributor to use, reproduce, make available, 98 | modify, display, perform, distribute, and otherwise exploit its 99 | Contributions, either on an unmodified basis, with Modifications, or 100 | as part of a Larger Work; and 101 | 102 | (b) under Patent Claims of such Contributor to make, use, sell, offer 103 | for sale, have made, import, and otherwise transfer either its 104 | Contributions or its Contributor Version. 105 | 106 | 2.2. Effective Date 107 | 108 | The licenses granted in Section 2.1 with respect to any Contribution 109 | become effective for each Contribution on the date the Contributor first 110 | distributes such Contribution. 111 | 112 | 2.3. Limitations on Grant Scope 113 | 114 | The licenses granted in this Section 2 are the only rights granted under 115 | this License. No additional rights or licenses will be implied from the 116 | distribution or licensing of Covered Software under this License. 117 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 118 | Contributor: 119 | 120 | (a) for any code that a Contributor has removed from Covered Software; 121 | or 122 | 123 | (b) for infringements caused by: (i) Your and any other third party's 124 | modifications of Covered Software, or (ii) the combination of its 125 | Contributions with other software (except as part of its Contributor 126 | Version); or 127 | 128 | (c) under Patent Claims infringed by Covered Software in the absence of 129 | its Contributions. 130 | 131 | This License does not grant any rights in the trademarks, service marks, 132 | or logos of any Contributor (except as may be necessary to comply with 133 | the notice requirements in Section 3.4). 134 | 135 | 2.4. 
Subsequent Licenses 136 | 137 | No Contributor makes additional grants as a result of Your choice to 138 | distribute the Covered Software under a subsequent version of this 139 | License (see Section 10.2) or under the terms of a Secondary License (if 140 | permitted under the terms of Section 3.3). 141 | 142 | 2.5. Representation 143 | 144 | Each Contributor represents that the Contributor believes its 145 | Contributions are its original creation(s) or it has sufficient rights 146 | to grant the rights to its Contributions conveyed by this License. 147 | 148 | 2.6. Fair Use 149 | 150 | This License is not intended to limit any rights You have under 151 | applicable copyright doctrines of fair use, fair dealing, or other 152 | equivalents. 153 | 154 | 2.7. Conditions 155 | 156 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 157 | in Section 2.1. 158 | 159 | 3. Responsibilities 160 | ------------------- 161 | 162 | 3.1. Distribution of Source Form 163 | 164 | All distribution of Covered Software in Source Code Form, including any 165 | Modifications that You create or to which You contribute, must be under 166 | the terms of this License. You must inform recipients that the Source 167 | Code Form of the Covered Software is governed by the terms of this 168 | License, and how they can obtain a copy of this License. You may not 169 | attempt to alter or restrict the recipients' rights in the Source Code 170 | Form. 171 | 172 | 3.2. 
Distribution of Executable Form 173 | 174 | If You distribute Covered Software in Executable Form then: 175 | 176 | (a) such Covered Software must also be made available in Source Code 177 | Form, as described in Section 3.1, and You must inform recipients of 178 | the Executable Form how they can obtain a copy of such Source Code 179 | Form by reasonable means in a timely manner, at a charge no more 180 | than the cost of distribution to the recipient; and 181 | 182 | (b) You may distribute such Executable Form under the terms of this 183 | License, or sublicense it under different terms, provided that the 184 | license for the Executable Form does not attempt to limit or alter 185 | the recipients' rights in the Source Code Form under this License. 186 | 187 | 3.3. Distribution of a Larger Work 188 | 189 | You may create and distribute a Larger Work under terms of Your choice, 190 | provided that You also comply with the requirements of this License for 191 | the Covered Software. If the Larger Work is a combination of Covered 192 | Software with a work governed by one or more Secondary Licenses, and the 193 | Covered Software is not Incompatible With Secondary Licenses, this 194 | License permits You to additionally distribute such Covered Software 195 | under the terms of such Secondary License(s), so that the recipient of 196 | the Larger Work may, at their option, further distribute the Covered 197 | Software under the terms of either this License or such Secondary 198 | License(s). 199 | 200 | 3.4. Notices 201 | 202 | You may not remove or alter the substance of any license notices 203 | (including copyright notices, patent notices, disclaimers of warranty, 204 | or limitations of liability) contained within the Source Code Form of 205 | the Covered Software, except that You may alter any license notices to 206 | the extent required to remedy known factual inaccuracies. 207 | 208 | 3.5. 
Application of Additional Terms 209 | 210 | You may choose to offer, and to charge a fee for, warranty, support, 211 | indemnity or liability obligations to one or more recipients of Covered 212 | Software. However, You may do so only on Your own behalf, and not on 213 | behalf of any Contributor. You must make it absolutely clear that any 214 | such warranty, support, indemnity, or liability obligation is offered by 215 | You alone, and You hereby agree to indemnify every Contributor for any 216 | liability incurred by such Contributor as a result of warranty, support, 217 | indemnity or liability terms You offer. You may include additional 218 | disclaimers of warranty and limitations of liability specific to any 219 | jurisdiction. 220 | 221 | 4. Inability to Comply Due to Statute or Regulation 222 | --------------------------------------------------- 223 | 224 | If it is impossible for You to comply with any of the terms of this 225 | License with respect to some or all of the Covered Software due to 226 | statute, judicial order, or regulation then You must: (a) comply with 227 | the terms of this License to the maximum extent possible; and (b) 228 | describe the limitations and the code they affect. Such description must 229 | be placed in a text file included with all distributions of the Covered 230 | Software under this License. Except to the extent prohibited by statute 231 | or regulation, such description must be sufficiently detailed for a 232 | recipient of ordinary skill to be able to understand it. 233 | 234 | 5. Termination 235 | -------------- 236 | 237 | 5.1. The rights granted under this License will terminate automatically 238 | if You fail to comply with any of its terms. 
However, if You become 239 | compliant, then the rights granted under this License from a particular 240 | Contributor are reinstated (a) provisionally, unless and until such 241 | Contributor explicitly and finally terminates Your grants, and (b) on an 242 | ongoing basis, if such Contributor fails to notify You of the 243 | non-compliance by some reasonable means prior to 60 days after You have 244 | come back into compliance. Moreover, Your grants from a particular 245 | Contributor are reinstated on an ongoing basis if such Contributor 246 | notifies You of the non-compliance by some reasonable means, this is the 247 | first time You have received notice of non-compliance with this License 248 | from such Contributor, and You become compliant prior to 30 days after 249 | Your receipt of the notice. 250 | 251 | 5.2. If You initiate litigation against any entity by asserting a patent 252 | infringement claim (excluding declaratory judgment actions, 253 | counter-claims, and cross-claims) alleging that a Contributor Version 254 | directly or indirectly infringes any patent, then the rights granted to 255 | You by any and all Contributors for the Covered Software under Section 256 | 2.1 of this License shall terminate. 257 | 258 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 259 | end user license agreements (excluding distributors and resellers) which 260 | have been validly granted by You or Your distributors under this License 261 | prior to termination shall survive termination. 262 | 263 | ************************************************************************ 264 | * * 265 | * 6. 
Disclaimer of Warranty * 266 | * ------------------------- * 267 | * * 268 | * Covered Software is provided under this License on an "as is" * 269 | * basis, without warranty of any kind, either expressed, implied, or * 270 | * statutory, including, without limitation, warranties that the * 271 | * Covered Software is free of defects, merchantable, fit for a * 272 | * particular purpose or non-infringing. The entire risk as to the * 273 | * quality and performance of the Covered Software is with You. * 274 | * Should any Covered Software prove defective in any respect, You * 275 | * (not any Contributor) assume the cost of any necessary servicing, * 276 | * repair, or correction. This disclaimer of warranty constitutes an * 277 | * essential part of this License. No use of any Covered Software is * 278 | * authorized under this License except under this disclaimer. * 279 | * * 280 | ************************************************************************ 281 | 282 | ************************************************************************ 283 | * * 284 | * 7. Limitation of Liability * 285 | * -------------------------- * 286 | * * 287 | * Under no circumstances and under no legal theory, whether tort * 288 | * (including negligence), contract, or otherwise, shall any * 289 | * Contributor, or anyone who distributes Covered Software as * 290 | * permitted above, be liable to You for any direct, indirect, * 291 | * special, incidental, or consequential damages of any character * 292 | * including, without limitation, damages for lost profits, loss of * 293 | * goodwill, work stoppage, computer failure or malfunction, or any * 294 | * and all other commercial damages or losses, even if such party * 295 | * shall have been informed of the possibility of such damages. 
This * 296 | * limitation of liability shall not apply to liability for death or * 297 | * personal injury resulting from such party's negligence to the * 298 | * extent applicable law prohibits such limitation. Some * 299 | * jurisdictions do not allow the exclusion or limitation of * 300 | * incidental or consequential damages, so this exclusion and * 301 | * limitation may not apply to You. * 302 | * * 303 | ************************************************************************ 304 | 305 | 8. Litigation 306 | ------------- 307 | 308 | Any litigation relating to this License may be brought only in the 309 | courts of a jurisdiction where the defendant maintains its principal 310 | place of business and such litigation shall be governed by laws of that 311 | jurisdiction, without reference to its conflict-of-law provisions. 312 | Nothing in this Section shall prevent a party's ability to bring 313 | cross-claims or counter-claims. 314 | 315 | 9. Miscellaneous 316 | ---------------- 317 | 318 | This License represents the complete agreement concerning the subject 319 | matter hereof. If any provision of this License is held to be 320 | unenforceable, such provision shall be reformed only to the extent 321 | necessary to make it enforceable. Any law or regulation which provides 322 | that the language of a contract shall be construed against the drafter 323 | shall not be used to construe this License against a Contributor. 324 | 325 | 10. Versions of the License 326 | --------------------------- 327 | 328 | 10.1. New Versions 329 | 330 | Mozilla Foundation is the license steward. Except as provided in Section 331 | 10.3, no one other than the license steward has the right to modify or 332 | publish new versions of this License. Each version will be given a 333 | distinguishing version number. 334 | 335 | 10.2. 
Effect of New Versions 336 | 337 | You may distribute the Covered Software under the terms of the version 338 | of the License under which You originally received the Covered Software, 339 | or under the terms of any subsequent version published by the license 340 | steward. 341 | 342 | 10.3. Modified Versions 343 | 344 | If you create software not governed by this License, and you want to 345 | create a new license for such software, you may create and use a 346 | modified version of this License if you rename the license and remove 347 | any references to the name of the license steward (except to note that 348 | such modified license differs from this License). 349 | 350 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 351 | Licenses 352 | 353 | If You choose to distribute Source Code Form that is Incompatible With 354 | Secondary Licenses under the terms of this version of the License, the 355 | notice described in Exhibit B of this License must be attached. 356 | 357 | Exhibit A - Source Code Form License Notice 358 | ------------------------------------------- 359 | 360 | This Source Code Form is subject to the terms of the Mozilla Public 361 | License, v. 2.0. If a copy of the MPL was not distributed with this 362 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 363 | 364 | If it is not possible or desirable to put the notice in a particular 365 | file, then You may include the notice in a location (such as a LICENSE 366 | file in a relevant directory) where a recipient would be likely to look 367 | for such a notice. 368 | 369 | You may add additional accurate notices of copyright ownership. 370 | 371 | Exhibit B - "Incompatible With Secondary Licenses" Notice 372 | --------------------------------------------------------- 373 | 374 | This Source Code Form is "Incompatible With Secondary Licenses", as 375 | defined by the Mozilla Public License, v. 2.0. 
376 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-slug 2 | 3 | [![Build Status](https://github.com/hashicorp/go-slug/actions/workflows/test.yml/badge.svg)](https://github.com/hashicorp/go-slug/actions/workflows/test.yml) 4 | [![GitHub license](https://img.shields.io/github/license/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/blob/main/LICENSE) 5 | [![GoDoc](https://godoc.org/github.com/hashicorp/go-slug?status.svg)](https://godoc.org/github.com/hashicorp/go-slug) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-slug)](https://goreportcard.com/report/github.com/hashicorp/go-slug) 7 | [![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/issues) 8 | 9 | Package `go-slug` offers functions for packing and unpacking Terraform Enterprise 10 | compatible slugs. Slugs are gzip compressed tar files containing Terraform configuration files. 11 | 12 | ## Installation 13 | 14 | Installation can be done with a normal `go get`: 15 | 16 | ``` 17 | go get -u github.com/hashicorp/go-slug 18 | ``` 19 | 20 | ## Documentation 21 | 22 | For the complete usage of `go-slug`, see the full [package docs](https://godoc.org/github.com/hashicorp/go-slug). 23 | 24 | ## Example 25 | 26 | Packing or unpacking a slug is pretty straight forward as shown in the 27 | following example: 28 | 29 | ```go 30 | package main 31 | 32 | import ( 33 | "bytes" 34 | "io/ioutil" 35 | "log" 36 | "os" 37 | 38 | slug "github.com/hashicorp/go-slug" 39 | ) 40 | 41 | func main() { 42 | // First create a buffer for storing the slug. 43 | buf := bytes.NewBuffer(nil) 44 | 45 | // Then call the Pack function with a directory path containing the 46 | // configuration files and an io.Writer to write the slug to. 
47 | if _, err := slug.Pack("testdata/archive-dir", buf, false); err != nil { 48 | log.Fatal(err) 49 | } 50 | 51 | // Create a directory to unpack the slug contents into. 52 | dst, err := ioutil.TempDir("", "slug") 53 | if err != nil { 54 | log.Fatal(err) 55 | } 56 | defer os.RemoveAll(dst) 57 | 58 | // Unpacking a slug is done by calling the Unpack function with an 59 | // io.Reader to read the slug from and a directory path of an existing 60 | // directory to store the unpacked configuration files. 61 | if err := slug.Unpack(buf, dst); err != nil { 62 | log.Fatal(err) 63 | } 64 | } 65 | ``` 66 | 67 | ## Issues and Contributing 68 | 69 | If you find an issue with this package, please report an issue. If you'd like, 70 | we welcome any contributions. Fork this repository and submit a pull request. 71 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hashicorp/go-slug 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/apparentlymart/go-versions v1.0.1 7 | github.com/google/go-cmp v0.5.9 8 | github.com/hashicorp/terraform-registry-address v0.2.0 9 | github.com/hashicorp/terraform-svchost v0.0.1 10 | golang.org/x/mod v0.24.0 11 | golang.org/x/sys v0.31.0 12 | ) 13 | 14 | require ( 15 | github.com/go-test/deep v1.0.3 // indirect 16 | golang.org/x/net v0.37.0 // indirect 17 | golang.org/x/text v0.23.0 // indirect 18 | ) 19 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/apparentlymart/go-versions v1.0.1 h1:ECIpSn0adcYNsBfSRwdDdz9fWlL+S/6EUd9+irwkBgU= 2 | github.com/apparentlymart/go-versions v1.0.1/go.mod h1:YF5j7IQtrOAOnsGkniupEA5bfCjzd7i14yu0shZavyM= 3 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= 4 | github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= 6 | github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= 7 | github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= 8 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 9 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 10 | github.com/hashicorp/terraform-registry-address v0.2.0 h1:92LUg03NhfgZv44zpNTLBGIbiyTokQCDcdH5BhVHT3s= 11 | github.com/hashicorp/terraform-registry-address v0.2.0/go.mod h1:478wuzJPzdmqT6OGbB/iH82EDcI8VFM4yujknh/1nIs= 12 | github.com/hashicorp/terraform-svchost v0.0.1 h1:Zj6fR5wnpOHnJUmLyWozjMeDaVuE+cstMPj41/eKmSQ= 13 | github.com/hashicorp/terraform-svchost v0.0.1/go.mod h1:ut8JaH0vumgdCfJaihdcZULqkAwHdQNwNH7taIDdsZM= 14 | github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= 15 | github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= 16 | golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= 17 | golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= 18 | golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= 19 | golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 20 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 21 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 22 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 23 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 24 | -------------------------------------------------------------------------------- /internal/escapingfs/escaping.go: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package escapingfs 5 | 6 | import ( 7 | "fmt" 8 | "path/filepath" 9 | "strings" 10 | ) 11 | 12 | func TargetWithinRoot(root string, target string) (bool, error) { 13 | rel, err := filepath.Rel(root, target) 14 | if err != nil { 15 | return false, fmt.Errorf("couldn't find relative path : %w", err) 16 | } 17 | if strings.HasPrefix(rel, "..") { 18 | return false, nil 19 | } 20 | return true, nil 21 | } 22 | -------------------------------------------------------------------------------- /internal/escapingfs/escaping_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package escapingfs 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | ) 10 | 11 | func TestTargetWithinRoot(t *testing.T) { 12 | tempDir := "/tmp/root" // Example root directory 13 | tests := []struct { 14 | name string 15 | root string 16 | target string 17 | expected bool 18 | }{ 19 | { 20 | name: "Target inside root", 21 | root: tempDir, 22 | target: filepath.Join(tempDir, "subdir", "file.txt"), 23 | expected: true, 24 | }, 25 | { 26 | name: "Target is root itself", 27 | root: tempDir, 28 | target: tempDir, 29 | expected: true, 30 | }, 31 | { 32 | name: "Target outside root", 33 | root: tempDir, 34 | target: "/tmp/otherdir/file.txt", 35 | expected: false, 36 | }, 37 | { 38 | name: "Target is parent of root", 39 | root: filepath.Join(tempDir, "subdir"), 40 | target: tempDir, 41 | expected: false, 42 | }, 43 | } 44 | 45 | for _, tt := range tests { 46 | t.Run(tt.name, func(t *testing.T) { 47 | result, err := TargetWithinRoot(tt.root, tt.target) 48 | if err != nil { 49 | t.Fatalf("unexpected error: %v", err) 50 | } 51 | if result != tt.expected { 52 | t.Errorf("expected %v, got %v", tt.expected, result) 53 | } 54 | 
}) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /internal/ignorefiles/ignorerules.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | // Package ignorefiles deals with the ".terraformignore" file format, which 5 | // is a convention similar to ".gitignore" that specifies path patterns that 6 | // match files Terraform should discard or ignore when interpreting a package 7 | // fetched from a remote location. 8 | package ignorefiles 9 | 10 | import ( 11 | "fmt" 12 | "io" 13 | "os" 14 | "path/filepath" 15 | ) 16 | 17 | // A Ruleset is the result of reading, parsing, and compiling a 18 | // ".terraformignore" file. 19 | type Ruleset struct { 20 | rules []rule 21 | } 22 | 23 | // ExcludesResult is the result of matching a path against a Ruleset. A result 24 | // is Excluded if it matches a set of paths that are excluded by the rules in a 25 | // Ruleset. A matching result is Dominating if none of the rules that follow it 26 | // contain a negation, implying that if the rule excludes a directory, 27 | // everything below that directory may be ignored. 28 | type ExcludesResult struct { 29 | Excluded bool 30 | Dominating bool 31 | } 32 | 33 | // ParseIgnoreFileContent takes a reader over the content of a .terraformignore 34 | // file and returns the Ruleset described by that file, or an error if the 35 | // file is invalid. 36 | func ParseIgnoreFileContent(r io.Reader) (*Ruleset, error) { 37 | rules, err := readRules(r) 38 | if err != nil { 39 | return nil, err 40 | } 41 | return &Ruleset{rules: rules}, nil 42 | } 43 | 44 | // LoadPackageIgnoreRules implements reasonable default behavior for finding 45 | // ignore rules for a particular package root directory: if .terraformignore is 46 | // present then use it, or otherwise just return DefaultRuleset. 
47 | // 48 | // This function will return an error only if an ignore file is present but 49 | // unreadable, or if an ignore file is present but contains invalid syntax. 50 | func LoadPackageIgnoreRules(packageDir string) (*Ruleset, error) { 51 | file, err := os.Open(filepath.Join(packageDir, ".terraformignore")) 52 | if err != nil { 53 | if os.IsNotExist(err) { 54 | return DefaultRuleset, nil 55 | } 56 | return nil, fmt.Errorf("cannot read .terraformignore: %s", err) 57 | } 58 | defer file.Close() 59 | 60 | ret, err := ParseIgnoreFileContent(file) 61 | if err != nil { 62 | // The parse errors already mention that they were parsing ignore rules, 63 | // so don't need an additional prefix added. 64 | return nil, err 65 | } 66 | return ret, nil 67 | } 68 | 69 | // Excludes tests whether the given path matches the set of paths that are 70 | // excluded by the rules in the ruleset. 71 | // 72 | // If any of the rules in the ruleset have invalid syntax then Excludes will 73 | // return an error, but it will also still return a result which 74 | // considers all of the remaining valid rules, to support callers that want to 75 | // just ignore invalid exclusions. Such callers can safely ignore the error 76 | // result: 77 | // 78 | // exc, matching, _ = ruleset.Excludes(path) 79 | func (r *Ruleset) Excludes(path string) (ExcludesResult, error) { 80 | if r == nil { 81 | return ExcludesResult{}, nil 82 | } 83 | 84 | var retErr error 85 | foundMatch := false 86 | dominating := false 87 | // Iterate by index and take a pointer into the slice: rule.match lazily 88 | // compiles and caches its regexp on the rule (pointer receiver), so 89 | // ranging by value would store the cache on a throwaway copy and force a 90 | // recompile of every rule on every Excludes call. 91 | for i := range r.rules { 92 | rule := &r.rules[i] 93 | match, err := rule.match(path) 94 | if err != nil { 95 | // We'll remember the first error we encounter, but continue 96 | // matching anyway to support callers that want to ignore invalid 97 | // lines and just match with whatever's left.
93 | if retErr == nil { 94 | retErr = fmt.Errorf("invalid ignore rule %q", rule.val) 95 | } 96 | } 97 | if match { 98 | foundMatch = !rule.negated 99 | dominating = foundMatch && !rule.negationsAfter 100 | } 101 | } 102 | return ExcludesResult{ 103 | Excluded: foundMatch, 104 | Dominating: dominating, 105 | }, retErr 106 | } 107 | 108 | // Includes is the inverse of [Ruleset.Excludes]. 109 | func (r *Ruleset) Includes(path string) (bool, error) { 110 | result, err := r.Excludes(path) 111 | return !result.Excluded, err 112 | } 113 | 114 | var DefaultRuleset *Ruleset 115 | 116 | func init() { 117 | DefaultRuleset = &Ruleset{rules: defaultExclusions} 118 | } 119 | -------------------------------------------------------------------------------- /internal/ignorefiles/terraformignore.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package ignorefiles 5 | 6 | import ( 7 | "bufio" 8 | "fmt" 9 | "io" 10 | "os" 11 | "path/filepath" 12 | "regexp" 13 | "strings" 14 | "text/scanner" 15 | ) 16 | 17 | // readRules parses .terraformignore content into a rule list, seeded with the 18 | // default exclusions so they always apply. 19 | func readRules(input io.Reader) ([]rule, error) { 20 | // Copy the defaults rather than aliasing them: the negation bookkeeping 21 | // below writes rules[i].negationsAfter, which would otherwise mutate the 22 | // shared package-level defaultExclusions slice for every Ruleset (and 23 | // race if two ignore files are parsed concurrently). 24 | rules := append([]rule(nil), defaultExclusions...) 25 | scanner := bufio.NewScanner(input) 26 | scanner.Split(bufio.ScanLines) 27 | currentRuleIndex := len(defaultExclusions) - 1 28 | 29 | for scanner.Scan() { 30 | pattern := scanner.Text() 31 | // Ignore blank lines 32 | if len(pattern) == 0 { 33 | continue 34 | } 35 | // Trim spaces 36 | pattern = strings.TrimSpace(pattern) 37 | // Ignore comments 38 | if pattern[0] == '#' { 39 | continue 40 | } 41 | // New rule structure 42 | rule := rule{} 43 | // Exclusions 44 | if pattern[0] == '!'
{ 39 | rule.negated = true 40 | pattern = pattern[1:] 41 | // Mark all previous rules as having negations after it 42 | for i := currentRuleIndex; i >= 0; i-- { 43 | if rules[i].negationsAfter { 44 | break 45 | } 46 | rules[i].negationsAfter = true 47 | } 48 | } 49 | // If it is a directory, add ** so we catch descendants 50 | if pattern[len(pattern)-1] == os.PathSeparator { 51 | pattern = pattern + "**" 52 | } 53 | // If it starts with /, it is absolute 54 | if pattern[0] == os.PathSeparator { 55 | pattern = pattern[1:] 56 | } else { 57 | // Otherwise prepend **/ 58 | pattern = "**" + string(os.PathSeparator) + pattern 59 | } 60 | rule.val = pattern 61 | rules = append(rules, rule) 62 | currentRuleIndex += 1 63 | } 64 | 65 | if err := scanner.Err(); err != nil { 66 | return nil, fmt.Errorf("syntax error in .terraformignore: %w", err) 67 | } 68 | return rules, nil 69 | } 70 | 71 | type rule struct { 72 | val string // the value of the rule itself 73 | negated bool // prefixed by !, a negated rule 74 | negationsAfter bool // negatied rules appear after this rule 75 | regex *regexp.Regexp // regular expression to match for the rule 76 | } 77 | 78 | func (r *rule) match(path string) (bool, error) { 79 | if r.regex == nil { 80 | if err := r.compile(); err != nil { 81 | return false, filepath.ErrBadPattern 82 | } 83 | } 84 | 85 | b := r.regex.MatchString(path) 86 | return b, nil 87 | } 88 | 89 | func (r *rule) compile() error { 90 | regStr := "^" 91 | pattern := r.val 92 | // Go through the pattern and convert it to a regexp. 93 | // Use a scanner to support utf-8 chars. 
94 | var scan scanner.Scanner 95 | scan.Init(strings.NewReader(pattern)) 96 | 97 | sl := string(os.PathSeparator) 98 | escSL := sl 99 | if sl == `\` { 100 | escSL += `\` 101 | } 102 | 103 | for scan.Peek() != scanner.EOF { 104 | ch := scan.Next() 105 | if ch == '*' { 106 | if scan.Peek() == '*' { 107 | // is some flavor of "**" 108 | scan.Next() 109 | 110 | // Treat **/ as ** so eat the "/" 111 | if string(scan.Peek()) == sl { 112 | scan.Next() 113 | } 114 | 115 | if scan.Peek() == scanner.EOF { 116 | // is "**EOF" - to align with .gitignore just accept all 117 | regStr += ".*" 118 | } else { 119 | // is "**" 120 | // Note that this allows for any # of /'s (even 0) because 121 | // the .* will eat everything, even /'s 122 | regStr += "(.*" + escSL + ")?" 123 | } 124 | } else { 125 | // is "*" so map it to anything but "/" 126 | regStr += "[^" + escSL + "]*" 127 | } 128 | } else if ch == '?' { 129 | // "?" is any char except "/" 130 | regStr += "[^" + escSL + "]" 131 | } else if ch == '.' || ch == '$' { 132 | // Escape some regexp special chars that have no meaning 133 | // in golang's filepath.Match 134 | regStr += `\` + string(ch) 135 | } else if ch == '\\' { 136 | // escape next char. 
Note that a trailing \ in the pattern 137 | // will be left alone (but need to escape it) 138 | if sl == `\` { 139 | // On windows map "\" to "\\", meaning an escaped backslash, 140 | // and then just continue because filepath.Match on 141 | // Windows doesn't allow escaping at all 142 | regStr += escSL 143 | continue 144 | } 145 | if scan.Peek() != scanner.EOF { 146 | regStr += `\` + string(scan.Next()) 147 | } else { 148 | regStr += `\` 149 | } 150 | } else { 151 | regStr += string(ch) 152 | } 153 | } 154 | 155 | regStr += "$" 156 | re, err := regexp.Compile(regStr) 157 | if err != nil { 158 | return err 159 | } 160 | 161 | r.regex = re 162 | return nil 163 | } 164 | 165 | /* 166 | Default rules as they would appear in .terraformignore: 167 | .git/ 168 | .terraform/ 169 | !.terraform/modules/ 170 | */ 171 | 172 | var defaultExclusions = []rule{ 173 | { 174 | val: strings.Join([]string{"**", ".terraform", "**"}, string(os.PathSeparator)), 175 | negated: false, 176 | negationsAfter: true, 177 | }, 178 | // Place negation rules as high as possible in the list 179 | { 180 | val: strings.Join([]string{"**", ".terraform", "modules", "**"}, string(os.PathSeparator)), 181 | negated: true, 182 | negationsAfter: false, 183 | }, 184 | { 185 | val: strings.Join([]string{"**", ".git", "**"}, string(os.PathSeparator)), 186 | negated: false, 187 | negationsAfter: false, 188 | }, 189 | } 190 | 191 | func init() { 192 | // We'll precompile all of the default rules at initialization, so we 193 | // don't need to recompile them every time we encounter a package that 194 | // doesn't have any rules (the common case). 
195 | // Compile in place (by index, not on a range copy): compile has a 196 | // pointer receiver, so ranging by value would store the compiled regexp 197 | // on a throwaway copy and defeat this precompilation entirely. 198 | for i := range defaultExclusions { 199 | err := defaultExclusions[i].compile() 200 | if err != nil { 201 | panic(fmt.Sprintf("invalid default rule %q: %s", defaultExclusions[i].val, err)) 202 | } 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /internal/ignorefiles/terraformignore_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package ignorefiles 5 | 6 | import ( 7 | "testing" 8 | ) 9 | 10 | func TestTerraformIgnore(t *testing.T) { 11 | // path to directory without .terraformignore 12 | rs, err := LoadPackageIgnoreRules("testdata/external-dir") 13 | if err != nil { 14 | t.Fatal(err) 15 | } 16 | if len(rs.rules) != 3 { 17 | t.Fatal("A directory without .terraformignore should get the default patterns") 18 | } 19 | 20 | // load the .terraformignore file's patterns 21 | rs, err = LoadPackageIgnoreRules("testdata/archive-dir") 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | 26 | type file struct { 27 | // the actual path, should be file path format /dir/subdir/file.extension 28 | path string 29 | // should match 30 | match bool 31 | } 32 | paths := []file{ 33 | 0: { 34 | path: ".terraform/", 35 | match: true, 36 | }, 37 | 1: { 38 | path: "included.txt", 39 | match: false, 40 | }, 41 | 2: { 42 | path: ".terraform/foo/bar", 43 | match: true, 44 | }, 45 | 3: { 46 | path: ".terraform/foo/bar/more/directories/so/many", 47 | match: true, 48 | }, 49 | 4: { 50 | path: ".terraform/foo/ignored-subdirectory/", 51 | match: true, 52 | }, 53 | 5: { 54 | path: "baz.txt", 55 | match: true, 56 | }, 57 | 6: { 58 | path: "parent/foo/baz.txt", 59 | match: true, 60 | }, 61 | 7: { 62 | path: "parent/foo/bar.tf", 63 | match: true, 64 | }, 65 | 8: { 66 | path: "parent/bar/bar.tf", 67 | match: false, 68 | }, 69 | // baz.txt is ignored, but a file name including it should not be 70 | 9: { 71 | path: "something/with-baz.txt", 72 | match:
false, 73 | }, 74 | 10: { 75 | path: "something/baz.x", 76 | match: false, 77 | }, 78 | // Getting into * patterns 79 | 11: { 80 | path: "foo/ignored-doc.md", 81 | match: true, 82 | }, 83 | // Should match [a-z] group 84 | 12: { 85 | path: "bar/something-a.txt", 86 | match: true, 87 | }, 88 | // ignore sub- terraform.d paths... 89 | 13: { 90 | path: "some-module/terraform.d/x", 91 | match: true, 92 | }, 93 | // ...but not the root one 94 | 14: { 95 | path: "terraform.d/", 96 | match: false, 97 | }, 98 | 15: { 99 | path: "terraform.d/foo", 100 | match: false, 101 | }, 102 | // We ignore the directory, but a file of the same name could exist 103 | 16: { 104 | path: "terraform.d", 105 | match: false, 106 | }, 107 | // boop.txt is ignored everywhere... 108 | 17: { 109 | path: "baz/boop.txt", 110 | match: true, 111 | }, 112 | // ...except in root directory 113 | 18: { 114 | path: "boop.txt", 115 | match: false, 116 | }, 117 | } 118 | for i, p := range paths { 119 | result, err := rs.Excludes(p.path) 120 | if err != nil { 121 | t.Errorf("invalid rule syntax when checking %s at index %d", p.path, i) 122 | continue 123 | } 124 | if result.Excluded != p.match { 125 | t.Fatalf("%s at index %d should be %t", p.path, i, p.match) 126 | } 127 | } 128 | } 129 | 130 | func TestTerraformIgnoreNoExclusionOptimization(t *testing.T) { 131 | rs, err := LoadPackageIgnoreRules("testdata/with-exclusion") 132 | if err != nil { 133 | t.Fatal(err) 134 | } 135 | if len(rs.rules) != 7 { 136 | t.Fatalf("Expected 7 rules, got %d", len(rs.rules)) 137 | } 138 | 139 | // reflects that no negations follow the last rule 140 | afterValue := false 141 | for i := len(rs.rules) - 1; i >= 0; i-- { 142 | r := rs.rules[i] 143 | if r.negationsAfter != afterValue { 144 | t.Errorf("Expected exclusionsAfter to be %v at index %d", afterValue, i) 145 | } 146 | if r.negated { 147 | afterValue = true 148 | } 149 | } 150 | 151 | // last two will be dominating 152 | for _, r := range []string{"logs/", "tmp/"} { 153 | 
result, err := rs.Excludes(r) 154 | if err != nil { 155 | t.Fatal(err) 156 | } 157 | if !result.Dominating { 158 | t.Errorf("Expected %q to be a dominating rule", r) 159 | } 160 | } 161 | 162 | if actual, _ := rs.Excludes("src/baz/ignored"); !actual.Excluded { 163 | t.Errorf("Expected %q to be excluded, but it was included", "src/baz/ignored") 164 | } 165 | 166 | } 167 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/.terraform/file.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/internal/ignorefiles/testdata/archive-dir/.terraform/file.txt -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/.terraform/modules/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/.terraform/plugins/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/.terraformignore: -------------------------------------------------------------------------------- 1 | # comments are ignored 2 | # extra spaces are irrelevant 3 | # ignore a file 4 | baz.txt 5 | # below is an empty line 6 | 7 | # ignore a directory 8 | terraform.d/ 9 | # negate ignoring a directory at the root 10 | !/terraform.d/ 11 | # ignore a file at a subpath 12 | **/foo/bar.tf 13 | # ignore files with specific endings 14 | foo/*.md 15 | # character groups 16 | bar/something-[a-z].txt 17 | 
# ignore a file 18 | boop.txt 19 | # but not one at the current directory 20 | !/boop.txt 21 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/.terraformrc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/internal/ignorefiles/testdata/archive-dir/.terraformrc -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/bar.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/baz.txt: -------------------------------------------------------------------------------- 1 | baz -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/internal/ignorefiles/testdata/archive-dir/exe -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/foo.terraform/bar.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/internal/ignorefiles/testdata/archive-dir/foo.terraform/bar.txt -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/foo.txt: -------------------------------------------------------------------------------- 1 | ../external-dir/foo.txt -------------------------------------------------------------------------------- 
/internal/ignorefiles/testdata/archive-dir/sub/bar.txt: -------------------------------------------------------------------------------- 1 | ../bar.txt -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/archive-dir/sub/zip.txt: -------------------------------------------------------------------------------- 1 | zip 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/external-dir/foo.txt: -------------------------------------------------------------------------------- 1 | foo 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/with-exclusion/.terraformignore: -------------------------------------------------------------------------------- 1 | src/**/* 2 | # except at one directory 3 | !src/foo/bar.txt 4 | logs/ 5 | tmp/ 6 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/with-exclusion/logs/foo.txt: -------------------------------------------------------------------------------- 1 | foo 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/with-exclusion/src/baz/ignored.txt: -------------------------------------------------------------------------------- 1 | ignored 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/with-exclusion/src/foo/bar.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /internal/ignorefiles/testdata/with-exclusion/tmp/tmp.txt: -------------------------------------------------------------------------------- 1 | tmp 2 | -------------------------------------------------------------------------------- /internal/unpackinfo/lchtimes_darwin.go: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build darwin 5 | // +build darwin 6 | 7 | package unpackinfo 8 | 9 | import ( 10 | "golang.org/x/sys/unix" 11 | ) 12 | 13 | // Lchtimes modifies the access and modified timestamps on a target path 14 | // This capability is only available on Linux and Darwin as of now. 15 | func (i UnpackInfo) Lchtimes() error { 16 | return unix.Lutimes(i.Path, []unix.Timeval{ 17 | {Sec: i.OriginalAccessTime.Unix(), Usec: int32(i.OriginalAccessTime.Nanosecond() / 1e6 % 1e6)}, 18 | {Sec: i.OriginalModTime.Unix(), Usec: int32(i.OriginalModTime.Nanosecond() / 1e6 % 1e6)}}, 19 | ) 20 | } 21 | 22 | // CanMaintainSymlinkTimestamps determines whether is is possible to change 23 | // timestamps on symlinks for the the current platform. For regular files 24 | // and directories, attempts are made to restore permissions and timestamps 25 | // after extraction. But for symbolic links, go's cross-platform 26 | // packages (Chmod and Chtimes) are not capable of changing symlink info 27 | // because those methods follow the symlinks. However, a platform-dependent option 28 | // is provided for linux and darwin (see Lchtimes) 29 | func CanMaintainSymlinkTimestamps() bool { 30 | return true 31 | } 32 | -------------------------------------------------------------------------------- /internal/unpackinfo/lchtimes_linux32.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build linux_amd || linux_arm 5 | // +build linux_amd linux_arm 6 | 7 | package unpackinfo 8 | 9 | import ( 10 | "golang.org/x/sys/unix" 11 | ) 12 | 13 | // Lchtimes modifies the access and modified timestamps on a target path 14 | // This capability is only available on Linux and Darwin as of now. 
15 | func (i UnpackInfo) Lchtimes() error { 16 | return unix.Lutimes(i.Path, []unix.Timeval{ 17 | {Sec: i.OriginalAccessTime.Unix(), Usec: int32(i.OriginalAccessTime.Nanosecond() / 1e6 % 1e6)}, 18 | {Sec: i.OriginalModTime.Unix(), Usec: int32(i.OriginalModTime.Nanosecond() / 1e6 % 1e6)}}, 19 | ) 20 | } 21 | 22 | // CanMaintainSymlinkTimestamps determines whether is is possible to change 23 | // timestamps on symlinks for the the current platform. For regular files 24 | // and directories, attempts are made to restore permissions and timestamps 25 | // after extraction. But for symbolic links, go's cross-platform 26 | // packages (Chmod and Chtimes) are not capable of changing symlink info 27 | // because those methods follow the symlinks. However, a platform-dependent option 28 | // is provided for linux and darwin (see Lchtimes) 29 | func CanMaintainSymlinkTimestamps() bool { 30 | return true 31 | } 32 | -------------------------------------------------------------------------------- /internal/unpackinfo/lchtimes_linux64.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build linux_amd64 || linux_arm64 5 | // +build linux_amd64 linux_arm64 6 | 7 | package unpackinfo 8 | 9 | import ( 10 | "golang.org/x/sys/unix" 11 | ) 12 | 13 | // Lchtimes modifies the access and modified timestamps on a target path 14 | // This capability is only available on Linux and Darwin as of now. 15 | func (i UnpackInfo) Lchtimes() error { 16 | return unix.Lutimes(i.Path, []unix.Timeval{ 17 | {Sec: i.OriginalAccessTime.Unix(), Usec: int64(i.OriginalAccessTime.Nanosecond() / 1e6 % 1e6)}, 18 | {Sec: i.OriginalModTime.Unix(), Usec: int64(i.OriginalModTime.Nanosecond() / 1e6 % 1e6)}}, 19 | ) 20 | } 21 | 22 | // CanMaintainSymlinkTimestamps determines whether is is possible to change 23 | // timestamps on symlinks for the the current platform. 
For regular files 24 | // and directories, attempts are made to restore permissions and timestamps 25 | // after extraction. But for symbolic links, go's cross-platform 26 | // packages (Chmod and Chtimes) are not capable of changing symlink info 27 | // because those methods follow the symlinks. However, a platform-dependent option 28 | // is provided for linux and darwin (see Lchtimes) 29 | func CanMaintainSymlinkTimestamps() bool { 30 | return true 31 | } 32 | -------------------------------------------------------------------------------- /internal/unpackinfo/lchtimes_others.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | //go:build !darwin && !linux_amd64 && !linux_arm64 && !linux_amd && !linux_arm 5 | // +build !darwin,!linux_amd64,!linux_arm64,!linux_amd,!linux_arm 6 | 7 | package unpackinfo 8 | 9 | import ( 10 | "errors" 11 | ) 12 | 13 | // Lchtimes modifies the access and modified timestamps on a target path 14 | // This capability is only available on Linux and Darwin as of now. 15 | func (i UnpackInfo) Lchtimes() error { 16 | return errors.New("Lchtimes is not supported on this platform") 17 | } 18 | 19 | // CanMaintainSymlinkTimestamps determines whether is is possible to change 20 | // timestamps on symlinks for the the current platform. For regular files 21 | // and directories, attempts are made to restore permissions and timestamps 22 | // after extraction. But for symbolic links, go's cross-platform 23 | // packages (Chmod and Chtimes) are not capable of changing symlink info 24 | // because those methods follow the symlinks. 
However, a platform-dependent option 25 | // is provided for linux and darwin (see Lchtimes) 26 | func CanMaintainSymlinkTimestamps() bool { 27 | return false 28 | } 29 | -------------------------------------------------------------------------------- /internal/unpackinfo/unpackinfo.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package unpackinfo 5 | 6 | import ( 7 | "archive/tar" 8 | "errors" 9 | "fmt" 10 | "io/fs" 11 | "os" 12 | "path/filepath" 13 | "strings" 14 | "time" 15 | ) 16 | 17 | // UnpackInfo stores information about the file (or directory, or symlink) being 18 | // unpacked. UnpackInfo ensures certain malicious tar files are not unpacked. 19 | // The information can be used later to restore the original permissions 20 | // and timestamps based on the type of entry the info represents. 21 | type UnpackInfo struct { 22 | Path string 23 | OriginalAccessTime time.Time 24 | OriginalModTime time.Time 25 | OriginalMode fs.FileMode 26 | Typeflag byte 27 | } 28 | 29 | // NewUnpackInfo returns an UnpackInfo based on a destination root and a tar header. 30 | // It will return an error if the header represents an illegal symlink extraction 31 | // or if the entry type is not supported by go-slug. 
32 | func NewUnpackInfo(dst string, header *tar.Header) (UnpackInfo, error) { 33 | // Check for empty destination 34 | if len(dst) == 0 { 35 | return UnpackInfo{}, errors.New("empty destination is not allowed") 36 | } 37 | 38 | // Clean the destination path 39 | dst = filepath.Clean(dst) 40 | path := filepath.Clean(header.Name) 41 | 42 | path = filepath.Join(dst, path) 43 | target := filepath.Clean(path) 44 | 45 | // Check for path traversal by ensuring the target is within the destination 46 | rel, err := filepath.Rel(dst, target) 47 | if err != nil || strings.HasPrefix(rel, "..") { 48 | return UnpackInfo{}, errors.New("invalid filename, traversal with \"..\" outside of current directory") 49 | } 50 | 51 | // Ensure the destination is not through any symlinks. This prevents 52 | // any files from being deployed through symlinks defined in the slug. 53 | // There are malicious cases where this could be used to escape the 54 | // slug's boundaries (zipslip), and any legitimate use is questionable 55 | // and likely indicates a hand-crafted tar file, which we are not in 56 | // the business of supporting here. 57 | // 58 | // The strategy is to Lstat each path component from dst up to the 59 | // immediate parent directory of the file name in the tarball, checking 60 | // the mode on each to ensure we wouldn't be passing through any 61 | // symlinks. 62 | currentPath := dst // Start at the root of the unpacked tarball. 63 | components := strings.Split(header.Name, "/") 64 | 65 | for i := 0; i < len(components)-1; i++ { 66 | currentPath = filepath.Join(currentPath, components[i]) 67 | fi, err := os.Lstat(currentPath) 68 | if os.IsNotExist(err) { 69 | // Parent directory structure is incomplete. Technically this 70 | // means from here upward cannot be a symlink, so we cancel the 71 | // remaining path tests. 
72 | continue 73 | } 74 | if err != nil { 75 | return UnpackInfo{}, fmt.Errorf("failed to evaluate path %q: %w", header.Name, err) 76 | } 77 | if fi.Mode()&fs.ModeSymlink != 0 { 78 | return UnpackInfo{}, fmt.Errorf("cannot extract %q through symlink", header.Name) 79 | } 80 | } 81 | 82 | result := UnpackInfo{ 83 | Path: path, 84 | OriginalAccessTime: header.AccessTime, 85 | OriginalModTime: header.ModTime, 86 | OriginalMode: header.FileInfo().Mode(), 87 | Typeflag: header.Typeflag, 88 | } 89 | 90 | if !result.IsDirectory() && !result.IsSymlink() && !result.IsRegular() && !result.IsTypeX() { 91 | return UnpackInfo{}, fmt.Errorf("failed creating %q, unsupported file type %c", path, result.Typeflag) 92 | } 93 | 94 | return result, nil 95 | } 96 | 97 | // IsSymlink describes whether the file being unpacked is a symlink 98 | func (i UnpackInfo) IsSymlink() bool { 99 | return i.Typeflag == tar.TypeSymlink 100 | } 101 | 102 | // IsDirectory describes whether the file being unpacked is a directory 103 | func (i UnpackInfo) IsDirectory() bool { 104 | return i.Typeflag == tar.TypeDir 105 | } 106 | 107 | // IsTypeX describes whether the file being unpacked is a special TypeXHeader that can 108 | // be ignored by go-slug 109 | func (i UnpackInfo) IsTypeX() bool { 110 | return i.Typeflag == tar.TypeXGlobalHeader || i.Typeflag == tar.TypeXHeader 111 | } 112 | 113 | // IsRegular describes whether the file being unpacked is a regular file 114 | func (i UnpackInfo) IsRegular() bool { 115 | return i.Typeflag == tar.TypeReg || i.Typeflag == tar.TypeRegA 116 | } 117 | 118 | // RestoreInfo changes the file mode and timestamps for the given UnpackInfo data 119 | func (i UnpackInfo) RestoreInfo() error { 120 | switch { 121 | case i.IsDirectory(): 122 | return i.restoreDirectory() 123 | case i.IsSymlink(): 124 | if CanMaintainSymlinkTimestamps() { 125 | return i.restoreSymlink() 126 | } 127 | return nil 128 | default: // Normal file 129 | return i.restoreNormal() 130 | } 131 | } 132 | 133 
| func (i UnpackInfo) restoreDirectory() error { 134 | if err := os.Chmod(i.Path, i.OriginalMode); err != nil && !os.IsNotExist(err) { 135 | return fmt.Errorf("failed setting permissions on directory %q: %w", i.Path, err) 136 | } 137 | 138 | if err := os.Chtimes(i.Path, i.OriginalAccessTime, i.OriginalModTime); err != nil && !os.IsNotExist(err) { 139 | return fmt.Errorf("failed setting times on directory %q: %w", i.Path, err) 140 | } 141 | return nil 142 | } 143 | 144 | func (i UnpackInfo) restoreSymlink() error { 145 | if err := i.Lchtimes(); err != nil { 146 | return fmt.Errorf("failed setting times on symlink %q: %w", i.Path, err) 147 | } 148 | return nil 149 | } 150 | 151 | func (i UnpackInfo) restoreNormal() error { 152 | if err := os.Chmod(i.Path, i.OriginalMode); err != nil { 153 | return fmt.Errorf("failed setting permissions on %q: %w", i.Path, err) 154 | } 155 | 156 | if err := os.Chtimes(i.Path, i.OriginalAccessTime, i.OriginalModTime); err != nil { 157 | return fmt.Errorf("failed setting times on %q: %w", i.Path, err) 158 | } 159 | return nil 160 | } 161 | -------------------------------------------------------------------------------- /internal/unpackinfo/unpackinfo_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package unpackinfo 5 | 6 | import ( 7 | "archive/tar" 8 | "os" 9 | "path" 10 | "path/filepath" 11 | "strings" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | func TestNewUnpackInfo(t *testing.T) { 17 | t.Parallel() 18 | 19 | t.Run("disallow parent traversal", func(t *testing.T) { 20 | _, err := NewUnpackInfo("test", &tar.Header{ 21 | Name: "../off-limits", 22 | Typeflag: tar.TypeSymlink, 23 | }) 24 | 25 | if err == nil { 26 | t.Fatal("expected error, got nil") 27 | } 28 | 29 | expected := "invalid filename, traversal with \"..\"" 30 | if !strings.Contains(err.Error(), expected) { 31 | t.Fatalf("expected error to contain %q, got %q", expected, err) 32 | } 33 | }) 34 | 35 | t.Run("disallow zipslip", func(t *testing.T) { 36 | dst := t.TempDir() 37 | 38 | err := os.Symlink("..", path.Join(dst, "subdir")) 39 | if err != nil { 40 | t.Fatalf("failed to create temp symlink: %s", err) 41 | } 42 | 43 | _, err = NewUnpackInfo(dst, &tar.Header{ 44 | Name: "subdir/escapes", 45 | Typeflag: tar.TypeReg, 46 | }) 47 | 48 | if err == nil { 49 | t.Fatal("expected error, got nil") 50 | } 51 | 52 | expected := "through symlink" 53 | if !strings.Contains(err.Error(), expected) { 54 | t.Fatalf("expected error to contain %q, got %q", expected, err) 55 | } 56 | }) 57 | 58 | t.Run("disallow zipslip extended", func(t *testing.T) { 59 | dst := t.TempDir() 60 | 61 | err := os.Symlink("..", path.Join(dst, "subdir")) 62 | if err != nil { 63 | t.Fatalf("failed to create temp symlink: %s", err) 64 | } 65 | 66 | _, err = NewUnpackInfo(dst, &tar.Header{ 67 | Name: "foo/../subdir/escapes", 68 | Typeflag: tar.TypeReg, 69 | }) 70 | 71 | if err == nil { 72 | t.Fatal("expected error, got nil") 73 | } 74 | 75 | expected := "through symlink" 76 | if !strings.Contains(err.Error(), expected) { 77 | t.Fatalf("expected error to contain %q, got %q", expected, err) 78 | } 79 | }) 80 | 81 | t.Run("stay in dst", func(t *testing.T) { 82 | tmp := t.TempDir() 83 | dst := 
path.Join(tmp, "dst") 84 | 85 | _, err := NewUnpackInfo(dst, &tar.Header{ 86 | Name: "../dst2/escapes", 87 | Typeflag: tar.TypeReg, 88 | }) 89 | 90 | if err == nil { 91 | t.Fatal("expected error, got nil") 92 | } 93 | 94 | expected := "traversal with \"..\" outside of current" 95 | if !strings.Contains(err.Error(), expected) { 96 | t.Fatalf("expected error to contain %q, got %q", expected, err) 97 | } 98 | }) 99 | 100 | t.Run("disallow strange types", func(t *testing.T) { 101 | _, err := NewUnpackInfo("test", &tar.Header{ 102 | Name: "subdir/escapes", 103 | Typeflag: tar.TypeFifo, 104 | }) 105 | 106 | if err == nil { 107 | t.Fatal("expected error, got nil") 108 | } 109 | 110 | expected := "unsupported file type" 111 | if !strings.Contains(err.Error(), expected) { 112 | t.Fatalf("expected error to contain %q, got %q", expected, err) 113 | } 114 | }) 115 | t.Run("path starting with ./", func(t *testing.T) { 116 | dst := t.TempDir() 117 | result, err := NewUnpackInfo(dst, &tar.Header{ 118 | Name: "./test/foo.txt", 119 | Typeflag: tar.TypeSymlink, 120 | }) 121 | 122 | if err != nil { 123 | t.Fatalf("expected nil, got %q", err) 124 | } 125 | 126 | expected := dst + "/test/foo.txt" 127 | if result.Path != expected { 128 | t.Fatalf("expected error to contain %q, got %q", expected, result.Path) 129 | } 130 | }) 131 | t.Run("path starting with ./ followed with ../", func(t *testing.T) { 132 | dst := t.TempDir() 133 | _, err := NewUnpackInfo(dst, &tar.Header{ 134 | Name: "./../../test/foo.txt", 135 | Typeflag: tar.TypeSymlink, 136 | }) 137 | 138 | if err == nil { 139 | t.Fatal("expected error, got nil") 140 | } 141 | 142 | expected := "traversal with \"..\" outside of current" 143 | if !strings.Contains(err.Error(), expected) { 144 | t.Fatalf("expected error to contain %q, got %q", expected, err) 145 | } 146 | }) 147 | t.Run("destination starting with ./", func(t *testing.T) { 148 | dst := t.TempDir() 149 | outsideDst := "./" + dst 150 | result, err := 
NewUnpackInfo(outsideDst, &tar.Header{ 151 | Name: "foo.txt", 152 | Typeflag: tar.TypeSymlink, 153 | }) 154 | 155 | if err != nil { 156 | t.Fatalf("expected nil, got %q", err) 157 | } 158 | 159 | expected := filepath.Join(outsideDst, "foo.txt") 160 | if expected != result.Path { 161 | t.Fatalf("expected error to contain %q, got %q", expected, result.Path) 162 | } 163 | }) 164 | t.Run("empty destination", func(t *testing.T) { 165 | emptyDestination := "" 166 | _, err := NewUnpackInfo(emptyDestination, &tar.Header{ 167 | Name: "foo.txt", 168 | Typeflag: tar.TypeSymlink, 169 | }) 170 | 171 | if err == nil { 172 | t.Fatal("expected error, got nil") 173 | } 174 | 175 | expected := "empty destination is not allowed" 176 | if !strings.Contains(err.Error(), expected) { 177 | t.Fatalf("expected error to contain %q, got %q", expected, err) 178 | } 179 | }) 180 | t.Run("valid empty path", func(t *testing.T) { 181 | dst := t.TempDir() 182 | 183 | _, err := NewUnpackInfo(dst, &tar.Header{ 184 | Name: "", 185 | Typeflag: tar.TypeSymlink, 186 | }) 187 | 188 | if err != nil { 189 | t.Fatalf("expected nil, got %q", err) 190 | } 191 | }) 192 | t.Run("valid empty path with destination without the / sufix", func(t *testing.T) { 193 | dst := t.TempDir() 194 | dst = strings.TrimSuffix(dst, "/") 195 | 196 | _, err := NewUnpackInfo(dst, &tar.Header{ 197 | Name: "", 198 | Typeflag: tar.TypeSymlink, 199 | }) 200 | 201 | if err != nil { 202 | t.Fatalf("expected nil, got %q", err) 203 | } 204 | }) 205 | t.Run("valid path multiple / prefix", func(t *testing.T) { 206 | dst := t.TempDir() 207 | 208 | _, err := NewUnpackInfo(dst, &tar.Header{ 209 | Name: "///////foo", 210 | Typeflag: tar.TypeSymlink, 211 | }) 212 | 213 | if err != nil { 214 | t.Fatalf("expected nil, got %q", err) 215 | } 216 | }) 217 | t.Run("valid path with / sufix", func(t *testing.T) { 218 | dst := t.TempDir() 219 | 220 | _, err := NewUnpackInfo(dst, &tar.Header{ 221 | Name: "foo/", 222 | Typeflag: tar.TypeSymlink, 223 | }) 
224 | 225 | if err != nil { 226 | t.Fatalf("expected nil, got %q", err) 227 | } 228 | }) 229 | t.Run("valid destination with / prefix", func(t *testing.T) { 230 | dst := "/" + t.TempDir() 231 | 232 | _, err := NewUnpackInfo(dst, &tar.Header{ 233 | Name: "foo/", 234 | Typeflag: tar.TypeSymlink, 235 | }) 236 | 237 | if err != nil { 238 | t.Fatalf("expected nil, got %q", err) 239 | } 240 | }) 241 | t.Run("valid symlink", func(t *testing.T) { 242 | dst := t.TempDir() 243 | 244 | _, err := NewUnpackInfo(dst, &tar.Header{ 245 | Name: "foo.txt", 246 | Typeflag: tar.TypeSymlink, 247 | }) 248 | 249 | if err != nil { 250 | t.Fatalf("expected nil, got %q", err) 251 | } 252 | }) 253 | t.Run("valid file", func(t *testing.T) { 254 | dst := t.TempDir() 255 | 256 | _, err := NewUnpackInfo(dst, &tar.Header{ 257 | Name: "foo.txt", 258 | Typeflag: tar.TypeReg, 259 | }) 260 | 261 | if err != nil { 262 | t.Fatalf("expected nil, got %q", err) 263 | } 264 | }) 265 | t.Run("valid directory", func(t *testing.T) { 266 | dst := t.TempDir() 267 | 268 | _, err := NewUnpackInfo(dst, &tar.Header{ 269 | Name: "foo", 270 | Typeflag: tar.TypeDir, 271 | }) 272 | 273 | if err != nil { 274 | t.Fatalf("expected nil, got %q", err) 275 | } 276 | }) 277 | } 278 | 279 | func TestUnpackInfo_RestoreInfo(t *testing.T) { 280 | root := t.TempDir() 281 | 282 | err := os.Mkdir(path.Join(root, "subdir"), 0700) 283 | if err != nil { 284 | t.Fatalf("failed to create temp subdir: %s", err) 285 | } 286 | 287 | err = os.WriteFile(path.Join(root, "bar.txt"), []byte("Hello, World!"), 0700) 288 | if err != nil { 289 | t.Fatalf("failed to create temp file: %s", err) 290 | } 291 | 292 | err = os.Symlink(path.Join(root, "bar.txt"), path.Join(root, "foo.txt")) 293 | if err != nil { 294 | t.Fatalf("failed to create temp symlink: %s", err) 295 | } 296 | 297 | exampleAccessTime := time.Date(2023, time.April, 1, 11, 22, 33, 0, time.UTC) 298 | exampleModTime := time.Date(2023, time.May, 29, 11, 22, 33, 0, time.UTC) 299 | 300 | 
dirinfo, err := NewUnpackInfo(root, &tar.Header{ 301 | Name: "subdir", 302 | Typeflag: tar.TypeDir, 303 | AccessTime: exampleAccessTime, 304 | ModTime: exampleModTime, 305 | Mode: 0666, 306 | }) 307 | if err != nil { 308 | t.Fatalf("failed to define dirinfo: %s", err) 309 | } 310 | 311 | finfo, err := NewUnpackInfo(root, &tar.Header{ 312 | Name: "bar.txt", 313 | Typeflag: tar.TypeReg, 314 | AccessTime: exampleAccessTime, 315 | ModTime: exampleModTime, 316 | Mode: 0666, 317 | }) 318 | if err != nil { 319 | t.Fatalf("failed to define finfo: %s", err) 320 | } 321 | 322 | linfo, err := NewUnpackInfo(root, &tar.Header{ 323 | Name: "foo.txt", 324 | Typeflag: tar.TypeSymlink, 325 | AccessTime: exampleAccessTime, 326 | ModTime: exampleModTime, 327 | Mode: 0666, 328 | }) 329 | if err != nil { 330 | t.Fatalf("failed to define linfo: %s", err) 331 | } 332 | 333 | infoCollection := []UnpackInfo{dirinfo, finfo, linfo} 334 | 335 | for _, info := range infoCollection { 336 | err = info.RestoreInfo() 337 | if err != nil { 338 | t.Errorf("failed to restore %q: %s", info.Path, err) 339 | } 340 | stat, err := os.Lstat(info.Path) 341 | if err != nil { 342 | t.Errorf("failed to lstat %q: %s", info.Path, err) 343 | } 344 | 345 | if !info.IsSymlink() { 346 | if stat.Mode() != info.OriginalMode { 347 | t.Errorf("%q mode %q did not match expected header mode %q", info.Path, stat.Mode(), info.OriginalMode) 348 | } 349 | } else if CanMaintainSymlinkTimestamps() { 350 | if !stat.ModTime().Equal(exampleModTime) { 351 | t.Errorf("%q modtime %q did not match example", info.Path, stat.ModTime()) 352 | } 353 | } 354 | } 355 | } 356 | -------------------------------------------------------------------------------- /slug.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package slug 5 | 6 | import ( 7 | "archive/tar" 8 | "compress/gzip" 9 | "fmt" 10 | "io" 11 | "os" 12 | "path/filepath" 13 | "runtime" 14 | "strings" 15 | 16 | "github.com/hashicorp/go-slug/internal/escapingfs" 17 | "github.com/hashicorp/go-slug/internal/ignorefiles" 18 | "github.com/hashicorp/go-slug/internal/unpackinfo" 19 | ) 20 | 21 | // Meta provides detailed information about a slug. 22 | type Meta struct { 23 | // The list of files contained in the slug. 24 | Files []string 25 | 26 | // Total size of the slug in bytes. 27 | Size int64 28 | } 29 | 30 | // IllegalSlugError indicates the provided slug (io.Writer for Pack, io.Reader 31 | // for Unpack) violates a rule about its contents. For example, an absolute or 32 | // external symlink. It implements the error interface. 33 | type IllegalSlugError struct { 34 | Err error 35 | } 36 | 37 | func (e *IllegalSlugError) Error() string { 38 | return fmt.Sprintf("illegal slug error: %v", e.Err) 39 | } 40 | 41 | // Unwrap returns the underlying issue with the provided Slug into the error 42 | // chain. 43 | func (e *IllegalSlugError) Unwrap() error { return e.Err } 44 | 45 | // externalSymlink is a simple abstraction for a information about a symlink target 46 | type externalSymlink struct { 47 | absTarget string 48 | target string 49 | info os.FileInfo 50 | } 51 | 52 | // PackerOption is a functional option that can configure non-default Packers. 53 | type PackerOption func(*Packer) error 54 | 55 | // ApplyTerraformIgnore is a PackerOption that will apply the .terraformignore 56 | // rules and skip packing files it specifies. 
57 | func ApplyTerraformIgnore() PackerOption { 58 | return func(p *Packer) error { 59 | p.applyTerraformIgnore = true 60 | return nil 61 | } 62 | } 63 | 64 | // DereferenceSymlinks is a PackerOption that will allow symlinks that 65 | // reference a target outside of the source directory by copying the link 66 | // target, turning it into a normal file within the archive. 67 | func DereferenceSymlinks() PackerOption { 68 | return func(p *Packer) error { 69 | p.dereference = true 70 | return nil 71 | } 72 | } 73 | 74 | // AllowSymlinkTarget relaxes safety checks on symlinks with targets matching 75 | // path. Specifically, absolute symlink targets (e.g. "/foo/bar") and relative 76 | // targets (e.g. "../foo/bar") which resolve to a path outside of the 77 | // source/destination directories for pack/unpack operations respectively, may 78 | // be expressly permitted, whereas they are forbidden by default. Exercise 79 | // caution when using this option. A symlink matches path if its target 80 | // resolves to path exactly, or if path is a parent directory of target. 81 | // 82 | // Deprecated: This option is deprecated and will be removed in a future 83 | // release. 84 | func AllowSymlinkTarget(path string) PackerOption { 85 | return func(p *Packer) error { 86 | p.allowSymlinkTargets = append(p.allowSymlinkTargets, path) 87 | return nil 88 | } 89 | } 90 | 91 | // Packer holds options for the Pack function. 92 | type Packer struct { 93 | dereference bool 94 | applyTerraformIgnore bool 95 | allowSymlinkTargets []string // Deprecated 96 | } 97 | 98 | // NewPacker is a constructor for Packer. 
99 | func NewPacker(options ...PackerOption) (*Packer, error) { 100 | p := &Packer{ 101 | dereference: false, 102 | applyTerraformIgnore: false, 103 | } 104 | 105 | for _, opt := range options { 106 | if err := opt(p); err != nil { 107 | return nil, fmt.Errorf("option failed: %w", err) 108 | } 109 | } 110 | 111 | return p, nil 112 | } 113 | 114 | // Pack at the package level is used to maintain compatibility with existing 115 | // code that relies on this function signature. New options related to packing 116 | // slugs should be added to the Packer struct instead. 117 | func Pack(src string, w io.Writer, dereference bool) (*Meta, error) { 118 | p := Packer{ 119 | dereference: dereference, 120 | 121 | // This defaults to false in NewPacker, but is true here. This matches 122 | // the old behavior of Pack, which always used .terraformignore. 123 | applyTerraformIgnore: true, 124 | } 125 | return p.Pack(src, w) 126 | } 127 | 128 | // Pack creates a slug from a src directory, and writes the new slug 129 | // to w. Returns metadata about the slug and any errors. 130 | // 131 | // When dereference is set to true, symlinks with a target outside of 132 | // the src directory will be dereferenced. When dereference is set to 133 | // false symlinks with a target outside the src directory are omitted 134 | // from the slug. 135 | func (p *Packer) Pack(src string, w io.Writer) (*Meta, error) { 136 | // Gzip compress all the output data. 137 | gzipW, err := gzip.NewWriterLevel(w, gzip.BestSpeed) 138 | if err != nil { 139 | // This error is only raised when an incorrect gzip level is 140 | // specified. 141 | return nil, err 142 | } 143 | 144 | // Tar the file contents. 145 | tarW := tar.NewWriter(gzipW) 146 | 147 | // Track the metadata details as we go. 
148 | meta := &Meta{} 149 | 150 | info, err := os.Lstat(src) 151 | if err != nil { 152 | return nil, err 153 | } 154 | 155 | // Check if the root (src) is a symlink 156 | if isSymlink(info.Mode()) { 157 | src, err = os.Readlink(src) 158 | if err != nil { 159 | return nil, err 160 | } 161 | } 162 | 163 | // Load the ignore rule configuration, which will use 164 | // defaults if no .terraformignore is configured 165 | var ignoreRules *ignorefiles.Ruleset 166 | if p.applyTerraformIgnore { 167 | ignoreRules = parseIgnoreFile(src) 168 | } 169 | 170 | // Ensure the source path provided is absolute 171 | src, err = filepath.Abs(src) 172 | if err != nil { 173 | return nil, fmt.Errorf("failed to read absolute path for source: %w", err) 174 | } 175 | 176 | // Walk the tree of files. 177 | err = filepath.Walk(src, p.packWalkFn(src, src, src, tarW, meta, ignoreRules)) 178 | if err != nil { 179 | return nil, err 180 | } 181 | 182 | // Flush the tar writer. 183 | if err := tarW.Close(); err != nil { 184 | return nil, fmt.Errorf("failed to close the tar archive: %w", err) 185 | } 186 | 187 | // Flush the gzip writer. 188 | if err := gzipW.Close(); err != nil { 189 | return nil, fmt.Errorf("failed to close the gzip writer: %w", err) 190 | } 191 | 192 | return meta, nil 193 | } 194 | 195 | func (p *Packer) packWalkFn(root, src, dst string, tarW *tar.Writer, meta *Meta, ignoreRules *ignorefiles.Ruleset) filepath.WalkFunc { 196 | return func(path string, info os.FileInfo, err error) error { 197 | if err != nil { 198 | return err 199 | } 200 | 201 | // Get the relative path from the current src directory. 202 | subpath, err := filepath.Rel(src, path) 203 | if err != nil { 204 | return fmt.Errorf("failed to get relative path for file %q: %w", path, err) 205 | } 206 | if subpath == "." 
{ 207 | return nil 208 | } 209 | 210 | if r := matchIgnoreRules(subpath, ignoreRules); r.Excluded { 211 | return nil 212 | } 213 | 214 | // Catch directories so we don't end up with empty directories, 215 | // the files are ignored correctly 216 | if info.IsDir() { 217 | if r := matchIgnoreRules(subpath+string(os.PathSeparator), ignoreRules); r.Excluded { 218 | if r.Dominating { 219 | return filepath.SkipDir 220 | } else { 221 | return nil 222 | } 223 | } 224 | } 225 | 226 | // Get the relative path from the initial root directory. 227 | subpath, err = filepath.Rel(root, strings.Replace(path, src, dst, 1)) 228 | if err != nil { 229 | return fmt.Errorf("failed to get relative path for file %q: %w", path, err) 230 | } 231 | if subpath == "." { 232 | return nil 233 | } 234 | 235 | // Check the file type and if we need to write the body. 236 | keepFile, writeBody := checkFileMode(info.Mode()) 237 | if !keepFile { 238 | return nil 239 | } 240 | 241 | fm := info.Mode() 242 | // An "Unknown" format is imposed because this is the default but also because 243 | // it imposes the simplest behavior. Notably, the mod time is preserved by rounding 244 | // to the nearest second. During unpacking, these rounded timestamps are restored 245 | // upon the corresponding file/directory/symlink. 246 | header := &tar.Header{ 247 | Format: tar.FormatUnknown, 248 | Name: filepath.ToSlash(subpath), 249 | ModTime: info.ModTime(), 250 | Mode: int64(fm.Perm()), 251 | } 252 | 253 | switch { 254 | case info.IsDir(): 255 | header.Typeflag = tar.TypeDir 256 | header.Name += "/" 257 | 258 | case fm.IsRegular(): 259 | header.Typeflag = tar.TypeReg 260 | header.Size = info.Size() 261 | 262 | case isSymlink(info.Mode()): 263 | // Read the symlink file to find the destination. 264 | target, err := os.Readlink(path) 265 | if err != nil { 266 | return fmt.Errorf("failed to read symlink %q: %w", path, err) 267 | } 268 | 269 | // Check if the symlink's target falls within the root. 
270 | if ok, err := p.validSymlink(root, path, target); ok { 271 | // We can simply copy the link. 272 | header.Typeflag = tar.TypeSymlink 273 | header.Linkname = filepath.ToSlash(target) 274 | break 275 | } else if !p.dereference { 276 | // If the target does not fall within the root and dereference 277 | // is set to false, we can't resolve the target and copy its 278 | // contents. 279 | return err 280 | } 281 | 282 | // Attempt to follow the external target so we can copy its contents 283 | resolved, err := p.resolveExternalLink(root, path) 284 | if err != nil { 285 | return err 286 | } 287 | 288 | // If the target is a directory we can recurse into the target 289 | // directory by calling the packWalkFn with updated arguments. 290 | if resolved.info.IsDir() { 291 | return filepath.Walk(resolved.absTarget, p.packWalkFn(root, resolved.absTarget, path, tarW, meta, ignoreRules)) 292 | } 293 | 294 | // Dereference this symlink by updating the header with the target file 295 | // details and set writeBody to true so the body will be written. 296 | header.Typeflag = tar.TypeReg 297 | header.ModTime = resolved.info.ModTime() 298 | header.Mode = int64(resolved.info.Mode().Perm()) 299 | header.Size = resolved.info.Size() 300 | writeBody = true 301 | 302 | default: 303 | return fmt.Errorf("unexpected file mode %v", fm) 304 | } 305 | 306 | // Write the header first to the archive. 307 | if err := tarW.WriteHeader(header); err != nil { 308 | return fmt.Errorf("failed writing archive header for file %q: %w", path, err) 309 | } 310 | 311 | // Account for the file in the list. 312 | meta.Files = append(meta.Files, header.Name) 313 | 314 | // Skip writing file data for certain file types (above). 
315 | if !writeBody { 316 | return nil 317 | } 318 | 319 | f, err := os.Open(path) 320 | if err != nil { 321 | return fmt.Errorf("failed opening file %q for archiving: %w", path, err) 322 | } 323 | defer f.Close() 324 | 325 | size, err := io.Copy(tarW, f) 326 | if err != nil { 327 | return fmt.Errorf("failed copying file %q to archive: %w", path, err) 328 | } 329 | 330 | // Add the size we copied to the body. 331 | meta.Size += size 332 | 333 | return nil 334 | } 335 | } 336 | 337 | // resolveExternalSymlink attempts to recursively follow target paths if we 338 | // encounter a symbolic link chain. It returns path information about the final 339 | // target pointing to a regular file or directory. 340 | func (p *Packer) resolveExternalLink(root string, path string) (*externalSymlink, error) { 341 | // Read the symlink file to find the destination. 342 | target, err := os.Readlink(path) 343 | if err != nil { 344 | return nil, fmt.Errorf("failed to read symlink %q: %w", path, err) 345 | } 346 | 347 | // Get the absolute path of the symlink target. 348 | absTarget := target 349 | if !filepath.IsAbs(absTarget) { 350 | absTarget = filepath.Join(filepath.Dir(path), target) 351 | } 352 | if !filepath.IsAbs(absTarget) { 353 | absTarget = filepath.Join(root, absTarget) 354 | } 355 | 356 | // Get the file info for the target. 357 | info, err := os.Lstat(absTarget) 358 | if err != nil { 359 | return nil, fmt.Errorf("failed to get file info from file %q: %w", target, err) 360 | } 361 | 362 | // Recurse if the symlink resolves to another symlink 363 | if isSymlink(info.Mode()) { 364 | return p.resolveExternalLink(root, absTarget) 365 | } 366 | 367 | return &externalSymlink{ 368 | absTarget: absTarget, 369 | target: target, 370 | info: info, 371 | }, err 372 | } 373 | 374 | // Unpack is used to read and extract the contents of a slug to the dst 375 | // directory, which must be an absolute path. 
Symlinks within the slug 376 | // are supported, provided their targets are relative and point to paths 377 | // within the destination directory. 378 | func Unpack(r io.Reader, dst string) error { 379 | p := &Packer{} 380 | return p.Unpack(r, dst) 381 | } 382 | 383 | // Unpack unpacks the archive data in r into directory dst. 384 | func (p *Packer) Unpack(r io.Reader, dst string) error { 385 | // Track directory times and permissions so they can be restored after all files 386 | // are extracted. This metadata modification is delayed because extracting files 387 | // into a new directory would necessarily change its timestamps. By way of 388 | // comparison, see 389 | // https://www.gnu.org/software/tar/manual/html_node/Directory-Modification-Times-and-Permissions.html 390 | // for more details about how tar attempts to preserve file metadata. 391 | directoriesExtracted := []unpackinfo.UnpackInfo{} 392 | 393 | // Decompress as we read. 394 | uncompressed, err := gzip.NewReader(r) 395 | if err != nil { 396 | return fmt.Errorf("failed to decompress slug: %w", err) 397 | } 398 | 399 | // Untar as we read. 400 | untar := tar.NewReader(uncompressed) 401 | 402 | // Unpackage all the contents into the directory. 403 | for { 404 | header, err := untar.Next() 405 | if err == io.EOF { 406 | break 407 | } 408 | if err != nil { 409 | return fmt.Errorf("failed to untar slug: %w", err) 410 | } 411 | 412 | // If the entry has no name, ignore it. 413 | if header.Name == "" { 414 | continue 415 | } 416 | 417 | info, err := unpackinfo.NewUnpackInfo(dst, header) 418 | if err != nil { 419 | return &IllegalSlugError{Err: err} 420 | } 421 | 422 | // Make the directories to the path. 423 | dir := filepath.Dir(info.Path) 424 | 425 | // Timestamps and permissions will be restored after all files are extracted. 
426 | if err := os.MkdirAll(dir, 0755); err != nil { 427 | return fmt.Errorf("failed to create directory %q: %w", dir, err) 428 | } 429 | 430 | // Handle symlinks, directories, non-regular files 431 | if info.IsSymlink() { 432 | 433 | if ok, err := p.validSymlink(dst, header.Name, header.Linkname); ok { 434 | // Create the symlink. 435 | headerName := filepath.Clean(header.Name) 436 | headerLinkname := filepath.Clean(header.Linkname) 437 | if err = os.Symlink(headerLinkname, info.Path); err != nil { 438 | return fmt.Errorf("failed creating symlink (%q -> %q): %w", 439 | headerName, headerLinkname, err) 440 | } 441 | } else { 442 | return err 443 | } 444 | 445 | if err := info.RestoreInfo(); err != nil { 446 | return err 447 | } 448 | 449 | continue 450 | } 451 | 452 | if info.IsDirectory() { 453 | // Restore directory info after all files are extracted because 454 | // the extraction process changes directory's timestamps. 455 | directoriesExtracted = append(directoriesExtracted, info) 456 | continue 457 | } 458 | 459 | // The remaining logic only applies to regular files 460 | if !info.IsRegular() { 461 | continue 462 | } 463 | 464 | // Open a handle to the destination. 465 | fh, err := os.Create(info.Path) 466 | if err != nil { 467 | // This mimics tar's behavior wrt the tar file containing duplicate files 468 | // and it allowing later ones to clobber earlier ones even if the file 469 | // has perms that don't allow overwriting. The file permissions will be restored 470 | // once the file contents are copied. 471 | if os.IsPermission(err) { 472 | os.Chmod(info.Path, 0600) 473 | fh, err = os.Create(info.Path) 474 | } 475 | 476 | if err != nil { 477 | return fmt.Errorf("failed creating file %q: %w", info.Path, err) 478 | } 479 | } 480 | 481 | // Copy the contents of the file. 
482 | _, err = io.Copy(fh, untar) 483 | fh.Close() 484 | if err != nil { 485 | return fmt.Errorf("failed to copy slug file %q: %w", info.Path, err) 486 | } 487 | 488 | if err := info.RestoreInfo(); err != nil { 489 | return err 490 | } 491 | } 492 | 493 | for _, dir := range directoriesExtracted { 494 | if err := dir.RestoreInfo(); err != nil { 495 | return err 496 | } 497 | } 498 | 499 | return nil 500 | } 501 | 502 | // Given a "root" directory, the path to a symlink within said root, and the 503 | // target of said symlink, validSymlink checks that the target either falls 504 | // into root somewhere, or is explicitly allowed per the Packer's config. 505 | func (p *Packer) validSymlink(root, path, target string) (bool, error) { 506 | // Get the absolute path to root. 507 | absRoot, err := filepath.Abs(root) 508 | if err != nil { 509 | return false, fmt.Errorf("failed making path %q absolute: %w", root, err) 510 | } 511 | 512 | // Get the absolute path to the file path. 513 | absPath := path 514 | if !filepath.IsAbs(absPath) { 515 | absPath = filepath.Clean(filepath.Join(absRoot, path)) 516 | } 517 | 518 | // Get the absolute path of the symlink target. 519 | var absTarget string 520 | if filepath.IsAbs(target) { 521 | absTarget = filepath.Clean(target) 522 | } else { 523 | absTarget = filepath.Clean(filepath.Join(filepath.Dir(absPath), target)) 524 | } 525 | 526 | // Target falls within root. 527 | rel, err := escapingfs.TargetWithinRoot(absRoot, absTarget) 528 | if err != nil { 529 | return false, err 530 | } 531 | 532 | if rel { 533 | return true, nil 534 | } 535 | 536 | // The link target is outside of root. Check if it is allowed. 537 | for _, prefix := range p.allowSymlinkTargets { 538 | // Ensure prefix is absolute. 539 | if !filepath.IsAbs(prefix) { 540 | prefix = filepath.Join(absRoot, prefix) 541 | } 542 | prefix = filepath.Clean(prefix) 543 | 544 | // Exact match is allowed. 
545 | if absTarget == prefix { 546 | return true, nil 547 | } 548 | 549 | // Target falls within root. 550 | rel, err := escapingfs.TargetWithinRoot(prefix, absTarget) 551 | if err != nil { 552 | return false, err 553 | } 554 | 555 | if rel { 556 | return true, nil 557 | } 558 | 559 | } 560 | 561 | return false, &IllegalSlugError{ 562 | Err: fmt.Errorf( 563 | "invalid symlink (%q -> %q) has external target", 564 | path, target, 565 | ), 566 | } 567 | } 568 | 569 | // checkFileMode is used to examine an os.FileMode and determine if it should 570 | // be included in the archive, and if it has a data body which needs writing. 571 | func checkFileMode(m os.FileMode) (keep, body bool) { 572 | switch { 573 | case m.IsDir(): 574 | return true, false 575 | 576 | case m.IsRegular(): 577 | return true, true 578 | 579 | case isSymlink(m): 580 | return true, false 581 | } 582 | 583 | return false, false 584 | } 585 | 586 | // isSymlink checks if the provider file mode is a symlink 587 | // as of Go 1.23 Windows files with linked/mounted modes are considered irregular 588 | func isSymlink(m os.FileMode) bool { 589 | if runtime.GOOS == "windows" { 590 | return m&os.ModeSymlink != 0 || m&os.ModeIrregular != 0 591 | } 592 | return m&os.ModeSymlink != 0 593 | } 594 | -------------------------------------------------------------------------------- /sourceaddrs/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | // Package sourceaddrs deals with the various types of source code address 5 | // that Terraform can gather into a source bundle via the sibling package 6 | // "sourcebundle". 7 | // 8 | // NOTE WELL: Everything in this package is currently experimental and subject 9 | // to breaking changes even in patch releases. 
We will make stronger commitments 10 | // to backward-compatibility once we have more experience using this 11 | // functionality in real contexts. 12 | package sourceaddrs 13 | -------------------------------------------------------------------------------- /sourceaddrs/package_remote.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "net/url" 9 | ) 10 | 11 | type RemotePackage struct { 12 | sourceType string 13 | 14 | // NOTE: A remote package URL may never have a "userinfo" portion, and 15 | // all relevant fields are comparable, so it's safe to compare 16 | // RemotePackage using the == operator. 17 | url url.URL 18 | } 19 | 20 | // ParseRemotePackage parses a standalone remote package address, which is a 21 | // remote source address without any sub-path portion. 22 | func ParseRemotePackage(given string) (RemotePackage, error) { 23 | srcAddr, err := ParseRemoteSource(given) 24 | if err != nil { 25 | return RemotePackage{}, err 26 | } 27 | if srcAddr.subPath != "" { 28 | return RemotePackage{}, fmt.Errorf("remote package address may not have a sub-path") 29 | } 30 | return srcAddr.pkg, nil 31 | } 32 | 33 | func (p RemotePackage) String() string { 34 | // Our address normalization rules are a bit odd since we inherited the 35 | // fundamentals of this addressing scheme from go-getter. 36 | if p.url.Scheme == p.sourceType { 37 | // When scheme and source type match we don't actually mention the 38 | // source type in the stringification, because it looks redundant 39 | // and confusing. 40 | return p.url.String() 41 | } 42 | return p.sourceType + "::" + p.url.String() 43 | } 44 | 45 | // SourceAddr returns a remote source address referring to the given sub-path 46 | // inside the recieving package. 
47 | // 48 | // subPath must be a valid sub-path (as defined by [ValidSubPath]) or this 49 | // function will panic. An empty string is a valid sub-path representing the 50 | // root directory of the package. 51 | func (p RemotePackage) SourceAddr(subPath string) RemoteSource { 52 | finalPath, err := normalizeSubpath(subPath) 53 | if err != nil { 54 | panic(fmt.Sprintf("invalid subPath: %s", subPath)) 55 | } 56 | return RemoteSource{ 57 | pkg: p, 58 | subPath: finalPath, 59 | } 60 | } 61 | 62 | func (p RemotePackage) subPathString(subPath string) string { 63 | if subPath == "" { 64 | // Easy case... the package address is also the source address 65 | return p.String() 66 | } 67 | 68 | // The weird syntax we've inherited from go-getter expects the URL's 69 | // query string to appear after the subpath portion, so we need to 70 | // now tweak the package URL to be a sub-path URL instead. 71 | subURL := p.url // shallow copy 72 | subURL.Path += "//" + subPath 73 | if subURL.Scheme == p.sourceType { 74 | return subURL.String() 75 | } 76 | return p.sourceType + "::" + subURL.String() 77 | } 78 | 79 | // SourceType returns the source type component of the package address. 80 | func (p RemotePackage) SourceType() string { 81 | return p.sourceType 82 | } 83 | 84 | // URL returns the URL component of the package address. 85 | // 86 | // Callers MUST NOT mutate anything accessible through the returned pointer, 87 | // even though the Go type system cannot enforce that. 88 | func (p RemotePackage) URL() *url.URL { 89 | return &p.url 90 | } 91 | -------------------------------------------------------------------------------- /sourceaddrs/source.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "path" 9 | "strings" 10 | ) 11 | 12 | // Source acts as a tagged union over the three possible source address types, 13 | // for situations where all three are acceptable. 14 | // 15 | // Source is used to specify source addresses for installation. Once packages 16 | // have been resolved and installed we use [SourceFinal] instead to represent 17 | // those finalized selections, which allows capturing the selected version 18 | // number for a module registry source address. 19 | // 20 | // Only address types within this package can implement Source. 21 | type Source interface { 22 | sourceSigil() 23 | 24 | String() string 25 | SupportsVersionConstraints() bool 26 | } 27 | 28 | // ParseSource attempts to parse the given string as any one of the three 29 | // supported source address types, recognizing which type it belongs to based 30 | // on the syntax differences between the address forms. 31 | func ParseSource(given string) (Source, error) { 32 | if strings.TrimSpace(given) != given { 33 | return nil, fmt.Errorf("source address must not have leading or trailing spaces") 34 | } 35 | if len(given) == 0 { 36 | return nil, fmt.Errorf("a valid source address is required") 37 | } 38 | switch { 39 | case looksLikeLocalSource(given) || given == "." || given == "..": 40 | ret, err := ParseLocalSource(given) 41 | if err != nil { 42 | return nil, fmt.Errorf("invalid local source address %q: %w", given, err) 43 | } 44 | return ret, nil 45 | case looksLikeRegistrySource(given): 46 | ret, err := ParseRegistrySource(given) 47 | if err != nil { 48 | return nil, fmt.Errorf("invalid module registry source address %q: %w", given, err) 49 | } 50 | return ret, nil 51 | default: 52 | // If it's neither a local source nor a module registry source then 53 | // we'll assume it's intended to be a remote source. 
54 | // (This parser will return a suitable error if the given string 55 | // is not of any of the supported address types.) 56 | ret, err := ParseRemoteSource(given) 57 | if err != nil { 58 | return nil, fmt.Errorf("invalid remote source address %q: %w", given, err) 59 | } 60 | return ret, nil 61 | } 62 | } 63 | 64 | // MustParseSource is a thin wrapper around [ParseSource] that panics if it 65 | // returns an error, or returns its result if not. 66 | func MustParseSource(given string) Source { 67 | ret, err := ParseSource(given) 68 | if err != nil { 69 | panic(err) 70 | } 71 | return ret 72 | } 73 | 74 | // ResolveRelativeSource calculates a new source address from the combination 75 | // of two other source addresses. 76 | // 77 | // If "b" is already an absolute source address then the result is "b" verbatim. 78 | // 79 | // If "b" is a relative source then the result is an address of the same type 80 | // as "a", but with a different path component. If "a" is an absolute address 81 | // type then the result is guaranteed to also be an absolute address type. 82 | // 83 | // Returns an error if "b" is a relative path that attempts to traverse out 84 | // of the package of an absolute address given in "a". 85 | func ResolveRelativeSource(a, b Source) (Source, error) { 86 | if sourceIsAbs(b) { 87 | return b, nil 88 | } 89 | // If we get here then b is definitely a local source, because 90 | // otherwise it would have been absolute. 
91 | bRaw := b.(LocalSource).relPath 92 | 93 | switch a := a.(type) { 94 | case LocalSource: 95 | aRaw := a.relPath 96 | new := path.Join(aRaw, bRaw) 97 | if !looksLikeLocalSource(new) { 98 | new = "./" + new // preserve LocalSource's prefix invariant 99 | } 100 | return LocalSource{relPath: new}, nil 101 | case RegistrySource: 102 | aSub := a.subPath 103 | newSub, err := joinSubPath(aSub, bRaw) 104 | if err != nil { 105 | return nil, fmt.Errorf("invalid traversal from %s: %w", a.String(), err) 106 | } 107 | return RegistrySource{ 108 | pkg: a.pkg, 109 | subPath: newSub, 110 | }, nil 111 | case RemoteSource: 112 | aSub := a.subPath 113 | newSub, err := joinSubPath(aSub, bRaw) 114 | if err != nil { 115 | return nil, fmt.Errorf("invalid traversal from %s: %w", a.String(), err) 116 | } 117 | return RemoteSource{ 118 | pkg: a.pkg, 119 | subPath: newSub, 120 | }, nil 121 | default: 122 | // Should not get here, because the cases above are exhaustive for 123 | // all of our defined Source implementations. 124 | panic(fmt.Sprintf("unsupported Source implementation %T", a)) 125 | } 126 | } 127 | 128 | // SourceFilename returns the base name (in the same sense as [path.Base]) 129 | // of the sub-path or local path portion of the given source address. 130 | // 131 | // This only really makes sense for a source address that refers to an 132 | // individual file, and is intended for needs such as using the suffix of 133 | // the filename to decide how to parse a particular file. Passing a source 134 | // address that refers to a directory will not fail but its result is 135 | // unlikely to be useful. 
136 | func SourceFilename(addr Source) string { 137 | switch addr := addr.(type) { 138 | case LocalSource: 139 | return path.Base(addr.RelativePath()) 140 | case RemoteSource: 141 | return path.Base(addr.SubPath()) 142 | case RegistrySource: 143 | return path.Base(addr.SubPath()) 144 | default: 145 | // above should be exhaustive for all source types 146 | panic(fmt.Sprintf("cannot SourceFilename for %T", addr)) 147 | } 148 | } 149 | 150 | func sourceIsAbs(source Source) bool { 151 | _, isLocal := source.(LocalSource) 152 | return !isLocal 153 | } 154 | -------------------------------------------------------------------------------- /sourceaddrs/source_final.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "path" 9 | "strings" 10 | ) 11 | 12 | // FinalSource is a variant of [Source] that always refers to a single 13 | // specific package. 14 | // 15 | // Specifically this models the annoying oddity that while [LocalSource] and 16 | // [RemoteSource] fully specify what they refer to, [RegistrySource] only 17 | // gives partial information and must be qualified with a selected version 18 | // number to determine exactly what it refers to. 19 | type FinalSource interface { 20 | finalSourceSigil() 21 | 22 | String() string 23 | } 24 | 25 | // ParseFinalSource attempts to parse the given string as any one of the three 26 | // supported final source address types, recognizing which type it belongs to 27 | // based on the syntax differences between the address forms. 
28 | func ParseFinalSource(given string) (FinalSource, error) { 29 | if strings.TrimSpace(given) != given { 30 | return nil, fmt.Errorf("source address must not have leading or trailing spaces") 31 | } 32 | if len(given) == 0 { 33 | return nil, fmt.Errorf("a valid source address is required") 34 | } 35 | switch { 36 | case looksLikeLocalSource(given) || given == "." || given == "..": 37 | ret, err := ParseLocalSource(given) 38 | if err != nil { 39 | return nil, fmt.Errorf("invalid local source address %q: %w", given, err) 40 | } 41 | return ret, nil 42 | case looksLikeFinalRegistrySource(given): 43 | ret, err := ParseFinalRegistrySource(given) 44 | if err != nil { 45 | return nil, fmt.Errorf("invalid module registry source address %q: %w", given, err) 46 | } 47 | return ret, nil 48 | default: 49 | // If it's neither a local source nor a final module registry source 50 | // then we'll assume it's intended to be a remote source. 51 | // (This parser will return a suitable error if the given string 52 | // is not of any of the supported address types.) 53 | ret, err := ParseRemoteSource(given) 54 | if err != nil { 55 | return nil, fmt.Errorf("invalid remote source address %q: %w", given, err) 56 | } 57 | return ret, nil 58 | } 59 | } 60 | 61 | // FinalSourceFilename returns the base name (in the same sense as [path.Base]) 62 | // of the sub-path or local path portion of the given final source address. 63 | // 64 | // This only really makes sense for a source address that refers to an 65 | // individual file, and is intended for needs such as using the suffix of 66 | // the filename to decide how to parse a particular file. Passing a source 67 | // address that refers to a directory will not fail but its result is 68 | // unlikely to be useful. 
69 | func FinalSourceFilename(addr FinalSource) string { 70 | switch addr := addr.(type) { 71 | case LocalSource: 72 | return path.Base(addr.RelativePath()) 73 | case RemoteSource: 74 | return path.Base(addr.SubPath()) 75 | case RegistrySourceFinal: 76 | return path.Base(addr.SubPath()) 77 | default: 78 | // above should be exhaustive for all final source types 79 | panic(fmt.Sprintf("cannot FinalSourceFilename for %T", addr)) 80 | } 81 | } 82 | 83 | // ResolveRelativeFinalSource is like [ResolveRelativeSource] but for 84 | // [FinalSource] addresses instead of [Source] addresses. 85 | // 86 | // Aside from the address type difference its meaning and behavior rules 87 | // are the same. 88 | func ResolveRelativeFinalSource(a, b FinalSource) (FinalSource, error) { 89 | if finalSourceIsAbs(b) { 90 | return b, nil 91 | } 92 | // If we get here then b is definitely a local source, because 93 | // otherwise it would have been absolute. 94 | bRaw := b.(LocalSource).relPath 95 | 96 | switch a := a.(type) { 97 | case LocalSource: 98 | aRaw := a.relPath 99 | new := path.Join(aRaw, bRaw) 100 | if !looksLikeLocalSource(new) { 101 | new = "./" + new // preserve LocalSource's prefix invariant 102 | } 103 | return LocalSource{relPath: new}, nil 104 | case RegistrySourceFinal: 105 | aSub := a.src.subPath 106 | newSub, err := joinSubPath(aSub, bRaw) 107 | if err != nil { 108 | return nil, fmt.Errorf("invalid traversal from %s: %w", a.String(), err) 109 | } 110 | return RegistrySource{ 111 | pkg: a.Package(), 112 | subPath: newSub, 113 | }.Versioned(a.version), nil 114 | case RemoteSource: 115 | aSub := a.subPath 116 | newSub, err := joinSubPath(aSub, bRaw) 117 | if err != nil { 118 | return nil, fmt.Errorf("invalid traversal from %s: %w", a.String(), err) 119 | } 120 | return RemoteSource{ 121 | pkg: a.pkg, 122 | subPath: newSub, 123 | }, nil 124 | default: 125 | // Should not get here, because the cases above are exhaustive for 126 | // all of our defined Source implementations. 
127 | panic(fmt.Sprintf("unsupported Source implementation %T", a)) 128 | } 129 | } 130 | 131 | func finalSourceIsAbs(source FinalSource) bool { 132 | _, isLocal := source.(LocalSource) 133 | return !isLocal 134 | } 135 | -------------------------------------------------------------------------------- /sourceaddrs/source_final_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "reflect" 9 | "testing" 10 | 11 | "github.com/apparentlymart/go-versions/versions" 12 | ) 13 | 14 | func TestResolveRelativeFinalSource(t *testing.T) { 15 | onePointOh := versions.MustParseVersion("1.0.0") 16 | 17 | tests := []struct { 18 | Base FinalSource 19 | Rel FinalSource 20 | Want FinalSource 21 | WantErr string 22 | }{ 23 | { 24 | Base: MustParseSource("./a/b").(FinalSource), 25 | Rel: MustParseSource("../c").(FinalSource), 26 | Want: MustParseSource("./a/c").(FinalSource), 27 | }, 28 | { 29 | Base: MustParseSource("./a").(FinalSource), 30 | Rel: MustParseSource("../c").(FinalSource), 31 | Want: MustParseSource("./c").(FinalSource), 32 | }, 33 | { 34 | Base: MustParseSource("./a").(FinalSource), 35 | Rel: MustParseSource("../../c").(FinalSource), 36 | Want: MustParseSource("../c").(FinalSource), 37 | }, 38 | { 39 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 40 | Rel: MustParseSource("git::https://github.com/hashicorp/go-slug.git//blah/blah").(FinalSource), 41 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//blah/blah").(FinalSource), 42 | }, 43 | { 44 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 45 | Rel: MustParseSource("git::https://example.com/foo.git").(FinalSource), 46 | Want: MustParseSource("git::https://example.com/foo.git").(FinalSource), 47 | }, 48 | { 49 | Base: 
MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 50 | Rel: MustParseSource("../bloop").(FinalSource), 51 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/bloop").(FinalSource), 52 | }, 53 | { 54 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 55 | Rel: MustParseSource("../").(FinalSource), 56 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep").(FinalSource), 57 | }, 58 | { 59 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 60 | Rel: MustParseSource("../..").(FinalSource), 61 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git").(FinalSource), 62 | }, 63 | { 64 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 65 | Rel: MustParseSource("../../../baz").(FinalSource), 66 | WantErr: `invalid traversal from git::https://github.com/hashicorp/go-slug.git//beep/boop: relative path ../../../baz traverses up too many levels from source path beep/boop`, 67 | }, 68 | { 69 | Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git").(FinalSource), 70 | Rel: MustParseSource("./boop").(FinalSource), 71 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//boop").(FinalSource), 72 | }, 73 | { 74 | Base: MustParseSource("example.com/foo/bar/baz//beep/boop").(RegistrySource).Versioned(onePointOh), 75 | Rel: MustParseSource("../").(FinalSource), 76 | Want: MustParseSource("example.com/foo/bar/baz//beep").(RegistrySource).Versioned(onePointOh), 77 | }, 78 | } 79 | 80 | for _, test := range tests { 81 | t.Run(fmt.Sprintf("%s + %s", test.Base, test.Rel), func(t *testing.T) { 82 | got, gotErr := ResolveRelativeFinalSource(test.Base, test.Rel) 83 | 84 | if test.WantErr != "" { 85 | if gotErr == nil { 86 | t.Fatalf("unexpected success\ngot result: %s (%T)\nwant error: %s", got, got, test.WantErr) 
87 | } 88 | if got, want := gotErr.Error(), test.WantErr; got != want { 89 | t.Fatalf("wrong error\ngot error: %s\nwant error: %s", got, want) 90 | } 91 | return 92 | } 93 | 94 | if gotErr != nil { 95 | t.Fatalf("unexpected error: %s", gotErr) 96 | } 97 | 98 | // Two addresses are equal if they have the same string representation 99 | // and the same dynamic type. 100 | gotStr := got.String() 101 | wantStr := test.Want.String() 102 | if gotStr != wantStr { 103 | t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) 104 | } 105 | 106 | if gotType, wantType := reflect.TypeOf(got), reflect.TypeOf(test.Want); gotType != wantType { 107 | t.Errorf("wrong result type\ngot: %s\nwant: %s", gotType, wantType) 108 | } 109 | }) 110 | } 111 | } 112 | 113 | func TestParseFinalSource(t *testing.T) { 114 | onePointOh := versions.MustParseVersion("1.0.0") 115 | 116 | tests := []struct { 117 | Addr string 118 | Want FinalSource 119 | WantErr string 120 | }{ 121 | { 122 | Addr: "./a/b", 123 | Want: MustParseSource("./a/b").(FinalSource), 124 | }, 125 | { 126 | Addr: "git::https://github.com/hashicorp/go-slug.git//beep/boop", 127 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop").(FinalSource), 128 | }, 129 | { 130 | Addr: "git::https://github.com/hashicorp/go-slug.git//beep@1.2.3/boop", 131 | Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep@1.2.3/boop").(FinalSource), 132 | }, 133 | { 134 | Addr: "example.com/foo/bar/baz@1.0.0//beep", 135 | Want: MustParseSource("example.com/foo/bar/baz//beep").(RegistrySource).Versioned(onePointOh), 136 | }, 137 | { 138 | Addr: "example.com/foo/bar/baz@1.0.0", 139 | Want: MustParseSource("example.com/foo/bar/baz").(RegistrySource).Versioned(onePointOh), 140 | }, 141 | { 142 | Addr: "gitlab.com/hashicorp/go-slug/bleep@1.0.0", 143 | Want: MustParseSource("gitlab.com/hashicorp/go-slug/bleep").(RegistrySource).Versioned(onePointOh), 144 | }, 145 | { 146 | Addr: "./a/b@1.0.0", 147 | Want: 
MustParseSource("./a/b@1.0.0").(FinalSource), 148 | }, 149 | { 150 | Addr: " ./a/b", 151 | WantErr: "source address must not have leading or trailing spaces", 152 | }, 153 | { 154 | Addr: "", 155 | WantErr: "a valid source address is required", 156 | }, 157 | { 158 | Addr: "example.com/foo/bar/baz@1.0.x//beep", 159 | WantErr: `invalid module registry source address "example.com/foo/bar/baz@1.0.x//beep": invalid version: can't use wildcard for patch number; an exact version is required`, 160 | }, 161 | } 162 | 163 | for _, test := range tests { 164 | t.Run(test.Addr, func(t *testing.T) { 165 | got, gotErr := ParseFinalSource(test.Addr) 166 | 167 | if test.WantErr != "" { 168 | if gotErr == nil { 169 | t.Fatalf("unexpected success\ngot result: %#v (%T)\nwant error: %s", got, got, test.WantErr) 170 | } 171 | if got, want := gotErr.Error(), test.WantErr; got != want { 172 | t.Fatalf("wrong error\ngot error: %s\nwant error: %s", got, want) 173 | } 174 | return 175 | } 176 | 177 | if gotErr != nil { 178 | t.Fatalf("unexpected error: %s", gotErr) 179 | } 180 | 181 | // Two addresses are equal if they have the same string representation 182 | // and the same dynamic type. 183 | gotStr := got.String() 184 | wantStr := test.Want.String() 185 | if gotStr != wantStr { 186 | t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) 187 | } 188 | 189 | if gotType, wantType := reflect.TypeOf(got), reflect.TypeOf(test.Want); gotType != wantType { 190 | t.Errorf("wrong result type\ngot: %s\nwant: %s", gotType, wantType) 191 | } 192 | }) 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /sourceaddrs/source_local.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "path" 9 | "strings" 10 | ) 11 | 12 | // LocalSource represents a relative traversal to another path within the same 13 | // source package as whatever source artifact included this path. 14 | // 15 | // LocalSource sources will typically need to be resolved into either 16 | // [RemoteSource] or [RegistrySource] addresses by reference to the address 17 | // of whatever artifact declared them, because otherwise they cannot be 18 | // mapped onto any real source location. 19 | type LocalSource struct { 20 | // relPath is a slash-separate path in the style of the Go standard 21 | // library package "path", which should always be stored in its "Clean" 22 | // form, aside from the mandatory "./" or "../" prefixes. 23 | relPath string 24 | } 25 | 26 | var _ Source = LocalSource{} 27 | var _ FinalSource = LocalSource{} 28 | 29 | // sourceSigil implements Source 30 | func (s LocalSource) sourceSigil() {} 31 | 32 | // finalSourceSigil implements FinalSource 33 | func (s LocalSource) finalSourceSigil() {} 34 | 35 | func looksLikeLocalSource(given string) bool { 36 | return strings.HasPrefix(given, "./") || strings.HasPrefix(given, "../") 37 | } 38 | 39 | // ParseLocalSource interprets the given path as a local source address, or 40 | // returns an error if it cannot be interpreted as such. 41 | func ParseLocalSource(given string) (LocalSource, error) { 42 | // First we'll catch some situations that seem likely to suggest that 43 | // the caller was trying to use a real filesystem path instead of 44 | // just a virtual relative path within a source package. 
45 | if strings.ContainsAny(given, ":\\") { 46 | return LocalSource{}, fmt.Errorf("must be a relative path using forward-slash separators between segments, like in a relative URL") 47 | } 48 | 49 | // We distinguish local source addresses from other address types by them 50 | // starting with some kind of relative path prefix. 51 | if !looksLikeLocalSource(given) && given != "." && given != ".." { 52 | return LocalSource{}, fmt.Errorf("must start with either ./ or ../ to indicate a local path") 53 | } 54 | 55 | clean := path.Clean(given) 56 | 57 | // We use the "path" package's definition of "clean" aside from two 58 | // exceptions: 59 | // - we need to retain the leading "./", if it was originally present, to 60 | // disambiguate from module registry addresses. 61 | // - If the cleaned path is just "." or ".." then we need a slash on the end 62 | // because that's part of how we recognize an address as a relative path. 63 | if clean == ".." { 64 | clean = "../" 65 | } else if clean == "." { 66 | clean = "./" 67 | } 68 | if !looksLikeLocalSource(clean) { 69 | clean = "./" + clean 70 | } 71 | 72 | if clean != given { 73 | return LocalSource{}, fmt.Errorf("relative path must be written in canonical form %q", clean) 74 | } 75 | 76 | return LocalSource{relPath: clean}, nil 77 | } 78 | 79 | // String implements Source 80 | func (s LocalSource) String() string { 81 | return s.relPath 82 | } 83 | 84 | // SupportsVersionConstraints implements Source 85 | func (s LocalSource) SupportsVersionConstraints() bool { 86 | return false 87 | } 88 | 89 | // RelativePath returns the effective relative path for this source address, 90 | // in our platform-agnostic slash-separated canonical syntax. 
91 | func (s LocalSource) RelativePath() string { 92 | return s.relPath 93 | } 94 | -------------------------------------------------------------------------------- /sourceaddrs/source_registry.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "path" 9 | 10 | "github.com/apparentlymart/go-versions/versions" 11 | regaddr "github.com/hashicorp/terraform-registry-address" 12 | ) 13 | 14 | // RegistrySource represents a source address referring to a set of versions 15 | // published in a Module Registry. 16 | // 17 | // A RegistrySource is an extra indirection over a set of [RemoteSource] 18 | // addresses, which Terraform chooses from based on version constraints given 19 | // alongside the registry source address. 20 | type RegistrySource struct { 21 | pkg regaddr.ModulePackage 22 | 23 | // subPath is an optional subdirectory or sub-file path beneath the 24 | // prefix of the selected underlying source address. 25 | // 26 | // Sub-paths are always slash-separated paths interpreted relative to 27 | // the root of the package, and may not include ".." or "." segments. 28 | // The sub-path is empty to indicate the root directory of the package. 29 | subPath string 30 | } 31 | 32 | // sourceSigil implements Source 33 | func (s RegistrySource) sourceSigil() {} 34 | 35 | var _ Source = RegistrySource{} 36 | 37 | func looksLikeRegistrySource(given string) bool { 38 | _, err := regaddr.ParseModuleSource(given) 39 | return err == nil 40 | } 41 | 42 | // ParseRegistrySource parses the given string as a registry source address, 43 | // or returns an error if it does not use the correct syntax for interpretation 44 | // as a registry source address. 
45 | func ParseRegistrySource(given string) (RegistrySource, error) { 46 | pkgRaw, subPathRaw := splitSubPath(given) 47 | subPath, err := normalizeSubpath(subPathRaw) 48 | if err != nil { 49 | return RegistrySource{}, fmt.Errorf("invalid sub-path: %w", err) 50 | } 51 | 52 | // We delegate the package address parsing to the shared library 53 | // terraform-registry-address, but then we'll impose some additional 54 | // validation and normalization over that since we're intentionally 55 | // being a little stricter than Terraform has historically been, 56 | // prioritizing "one obvious way to do it" over many esoteric variations. 57 | pkgOnlyAddr, err := regaddr.ParseModuleSource(pkgRaw) 58 | if err != nil { 59 | return RegistrySource{}, err 60 | } 61 | if pkgOnlyAddr.Subdir != "" { 62 | // Should never happen, because we split the subpath off above. 63 | panic("post-split registry address still has subdir") 64 | } 65 | 66 | return RegistrySource{ 67 | pkg: pkgOnlyAddr.Package, 68 | subPath: subPath, 69 | }, nil 70 | } 71 | 72 | // ParseRegistryPackage parses the given string as a registry package address, 73 | // which is the same syntax as a registry source address with no sub-path 74 | // portion. 
75 | func ParseRegistryPackage(given string) (regaddr.ModulePackage, error) { 76 | srcAddr, err := ParseRegistrySource(given) 77 | if err != nil { 78 | return regaddr.ModulePackage{}, err 79 | } 80 | if srcAddr.subPath != "" { 81 | return regaddr.ModulePackage{}, fmt.Errorf("remote package address may not have a sub-path") 82 | } 83 | return srcAddr.pkg, nil 84 | } 85 | 86 | func (s RegistrySource) String() string { 87 | if s.subPath != "" { 88 | return s.pkg.String() + "//" + s.subPath 89 | } 90 | return s.pkg.String() 91 | } 92 | 93 | func (s RegistrySource) SupportsVersionConstraints() bool { 94 | return true 95 | } 96 | 97 | func (s RegistrySource) Package() regaddr.ModulePackage { 98 | return s.pkg 99 | } 100 | 101 | func (s RegistrySource) SubPath() string { 102 | return s.subPath 103 | } 104 | 105 | // Versioned combines the receiver with a specific selected version number to 106 | // produce a final source address that can be used to resolve to a single 107 | // source package. 108 | func (s RegistrySource) Versioned(selectedVersion versions.Version) RegistrySourceFinal { 109 | return RegistrySourceFinal{ 110 | src: s, 111 | version: selectedVersion, 112 | } 113 | } 114 | 115 | // FinalSourceAddr takes the result of looking up the package portion of the 116 | // receiver in a module registry and appends the reciever's sub-path to the 117 | // returned sub-path to produce the final fully-qualified remote source address. 118 | func (s RegistrySource) FinalSourceAddr(realSource RemoteSource) RemoteSource { 119 | if s.subPath == "" { 120 | return realSource // Easy case 121 | } 122 | if realSource.subPath == "" { 123 | return RemoteSource{ 124 | pkg: realSource.pkg, 125 | subPath: s.subPath, 126 | } 127 | } 128 | // If we get here then both addresses have a sub-path, so we need to 129 | // combine them together. 
This assumes that the "real source" from the 130 | // module registry will always refer to a directory, which is a fundamental 131 | // assumption of the module registry protocol. 132 | return RemoteSource{ 133 | pkg: realSource.pkg, 134 | subPath: path.Join(realSource.subPath, s.subPath), 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /sourceaddrs/source_registry_final.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "regexp" 9 | 10 | "github.com/apparentlymart/go-versions/versions" 11 | regaddr "github.com/hashicorp/terraform-registry-address" 12 | ) 13 | 14 | // RegistrySourceFinal annotates a [RegistrySource] with a specific version 15 | // selection, thereby making it sufficient for selecting a single real source 16 | // package. 17 | // 18 | // Registry sources are weird in comparison to others in that they must be 19 | // combined with a version constraint to select from possibly many available 20 | // versions. After completing the version selection process, the result can 21 | // be represented as a RegistrySourceFinal that carries the selected version 22 | // number along with the originally-specified source address. 23 | type RegistrySourceFinal struct { 24 | src RegistrySource 25 | version versions.Version 26 | } 27 | 28 | // NOTE: RegistrySourceFinal is intentionally not a Source, because it isn't 29 | // possible to represent a final registry source as a single source address 30 | // string. 
31 | var _ FinalSource = RegistrySourceFinal{} 32 | 33 | func looksLikeFinalRegistrySource(given string) bool { 34 | var addr string 35 | if matches := finalRegistrySourcePattern.FindStringSubmatch(given); len(matches) != 0 { 36 | addr = matches[1] 37 | if len(matches) == 5 { 38 | addr = fmt.Sprintf("%s//%s", addr, matches[4]) 39 | } 40 | } 41 | return looksLikeRegistrySource(addr) 42 | } 43 | 44 | // finalSourceSigil implements FinalSource 45 | func (s RegistrySourceFinal) finalSourceSigil() {} 46 | 47 | // ParseFinalRegistrySource parses the given string as a final registry source 48 | // address, or returns an error if it does not use the correct syntax for 49 | // interpretation as a final registry source address. 50 | func ParseFinalRegistrySource(given string) (RegistrySourceFinal, error) { 51 | var addr, ver string 52 | if matches := finalRegistrySourcePattern.FindStringSubmatch(given); len(matches) != 0 { 53 | addr = matches[1] 54 | ver = matches[2] 55 | if len(matches) == 5 { 56 | addr = fmt.Sprintf("%s//%s", addr, matches[4]) 57 | } 58 | } 59 | version, err := versions.ParseVersion(ver) 60 | if err != nil { 61 | return RegistrySourceFinal{}, fmt.Errorf("invalid version: %w", err) 62 | } 63 | regSrc, err := ParseRegistrySource(addr) 64 | if err != nil { 65 | return RegistrySourceFinal{}, fmt.Errorf("invalid registry source: %w", err) 66 | } 67 | return regSrc.Versioned(version), nil 68 | } 69 | 70 | // Unversioned returns the address of the registry package that this final 71 | // address is a version of. 
72 | func (s RegistrySourceFinal) Unversioned() RegistrySource { 73 | return s.src 74 | } 75 | 76 | func (s RegistrySourceFinal) Package() regaddr.ModulePackage { 77 | return s.src.Package() 78 | } 79 | 80 | func (s RegistrySourceFinal) SubPath() string { 81 | return s.src.SubPath() 82 | } 83 | 84 | func (s RegistrySourceFinal) SelectedVersion() versions.Version { 85 | return s.version 86 | } 87 | 88 | func (s RegistrySourceFinal) String() string { 89 | pkgAddr := s.src.Package() 90 | subPath := s.src.SubPath() 91 | if subPath != "" { 92 | return pkgAddr.String() + "@" + s.version.String() + "//" + subPath 93 | } 94 | return pkgAddr.String() + "@" + s.version.String() 95 | } 96 | 97 | // FinalSourceAddr takes the result of looking up the package portion of the 98 | // receiver in a module registry and appends the reciever's sub-path to the 99 | // returned sub-path to produce the final fully-qualified remote source address. 100 | func (s RegistrySourceFinal) FinalSourceAddr(realSource RemoteSource) RemoteSource { 101 | // The version number doesn't have any impact on how we combine the 102 | // paths together, so we can just delegate to our unversioned equivalent. 103 | return s.Unversioned().FinalSourceAddr(realSource) 104 | } 105 | 106 | // finalRegistrySourcePattern is a non-exhaustive regexp which looks only for 107 | // the expected three components of a RegistrySourceFinal string encoding: the 108 | // package address, version, and subpath. The subpath is optional. 109 | var finalRegistrySourcePattern = regexp.MustCompile(`^(.+)@([^/]+)(//(.+))?$`) 110 | -------------------------------------------------------------------------------- /sourceaddrs/source_remote.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "net/url" 9 | "regexp" 10 | "strings" 11 | ) 12 | 13 | type RemoteSource struct { 14 | pkg RemotePackage 15 | subPath string 16 | } 17 | 18 | var _ Source = RemoteSource{} 19 | var _ FinalSource = RemoteSource{} 20 | 21 | // sourceSigil implements Source 22 | func (RemoteSource) sourceSigil() {} 23 | 24 | // finalSourceSigil implements FinalSource 25 | func (RemoteSource) finalSourceSigil() {} 26 | 27 | // ParseRemoteSource parses the given string as a remote source address, 28 | // or returns an error if it does not use the correct syntax for interpretation 29 | // as a remote source address. 30 | func ParseRemoteSource(given string) (RemoteSource, error) { 31 | expandedGiven := given 32 | for _, shorthand := range remoteSourceShorthands { 33 | replacement, ok, err := shorthand(given) 34 | if err != nil { 35 | return RemoteSource{}, err 36 | } 37 | if ok { 38 | expandedGiven = replacement 39 | } 40 | } 41 | 42 | pkgRaw, subPathRaw := splitSubPath(expandedGiven) 43 | subPath, err := normalizeSubpath(subPathRaw) 44 | if err != nil { 45 | return RemoteSource{}, fmt.Errorf("invalid sub-path: %w", err) 46 | } 47 | 48 | // Once we've dealt with all the "shorthand" business, our address 49 | // should be in the form sourcetype::url, where "sourcetype::" is 50 | // optional and defaults to matching the URL scheme if not present. 
51 | var sourceType string 52 | if matches := remoteSourceTypePattern.FindStringSubmatch(pkgRaw); len(matches) != 0 { 53 | sourceType = matches[1] 54 | pkgRaw = matches[2] 55 | } 56 | 57 | u, err := url.Parse(pkgRaw) 58 | if err != nil { 59 | return RemoteSource{}, fmt.Errorf("invalid URL syntax in %q: %w", pkgRaw, err) 60 | } 61 | if u.Scheme == "" { 62 | return RemoteSource{}, fmt.Errorf("must contain an absolute URL with a scheme") 63 | } 64 | if u.User != nil { 65 | return RemoteSource{}, fmt.Errorf("must not use username or password in URL portion") 66 | } 67 | 68 | u.Scheme = strings.ToLower(u.Scheme) 69 | sourceType = strings.ToLower(sourceType) 70 | 71 | if sourceType == "" { 72 | // sourceType defaults to the URL scheme if not explicitly set. 73 | sourceType = u.Scheme 74 | } else if sourceType == u.Scheme { 75 | // This catches weirdo constructions like: https::https://example.com/ 76 | return RemoteSource{}, fmt.Errorf("don't specify redundant %q source type for %q URL", sourceType, u.Scheme) 77 | } 78 | 79 | _, err = url.ParseQuery(u.RawQuery) 80 | if err != nil { 81 | return RemoteSource{}, fmt.Errorf("invalid URL query string syntax in %q: %w", pkgRaw, err) 82 | } 83 | 84 | return makeRemoteSource(sourceType, u, subPath) 85 | } 86 | 87 | // MakeRemoteSource constructs a [RemoteSource] from its component parts. 88 | // 89 | // This is useful for deriving one remote source from another, by disassembling 90 | // the original address into its component parts, modifying those parts, and 91 | // then combining the modified parts back together with this function. 
92 | func MakeRemoteSource(sourceType string, u *url.URL, subPath string) (RemoteSource, error) { 93 | var err error 94 | subPath, err = normalizeSubpath(subPath) 95 | if err != nil { 96 | return RemoteSource{}, fmt.Errorf("invalid sub-path: %w", err) 97 | } 98 | 99 | copyU := *u // shallow copy so we can safely modify 100 | 101 | return makeRemoteSource(sourceType, ©U, subPath) 102 | } 103 | 104 | func makeRemoteSource(sourceType string, u *url.URL, subPath string) (RemoteSource, error) { 105 | typeImpl, ok := remoteSourceTypes[sourceType] 106 | if !ok { 107 | if sourceType == u.Scheme { 108 | // In this case the user didn't actually specify a source type, 109 | // so we won't confuse them by mentioning it. 110 | return RemoteSource{}, fmt.Errorf("unsupported URL scheme %q", u.Scheme) 111 | } else { 112 | return RemoteSource{}, fmt.Errorf("unsupported package source type %q", sourceType) 113 | } 114 | } 115 | 116 | err := typeImpl.PrepareURL(u) 117 | if err != nil { 118 | return RemoteSource{}, err 119 | } 120 | 121 | return RemoteSource{ 122 | pkg: RemotePackage{ 123 | sourceType: sourceType, 124 | url: *u, 125 | }, 126 | subPath: subPath, 127 | }, nil 128 | } 129 | 130 | // String implements Source 131 | func (s RemoteSource) String() string { 132 | return s.pkg.subPathString(s.subPath) 133 | } 134 | 135 | func (s RemoteSource) SupportsVersionConstraints() bool { 136 | return false 137 | } 138 | 139 | func (s RemoteSource) Package() RemotePackage { 140 | return s.pkg 141 | } 142 | 143 | func (s RemoteSource) SubPath() string { 144 | return s.subPath 145 | } 146 | 147 | type remoteSourceShorthand func(given string) (normed string, ok bool, err error) 148 | 149 | var remoteSourceShorthands = []remoteSourceShorthand{ 150 | func(given string) (string, bool, error) { 151 | // Allows a github.com repository to be presented in a scheme-less 152 | // format like github.com/organization/repository/path, which we'll 153 | // turn into a git:: source string selecting the 
repository's main 154 | // branch. 155 | // 156 | // This is intentionally compatible with what's accepted by the 157 | // "GitHub detector" in the go-getter library, so that module authors 158 | // can specify GitHub repositories in the same way both for the 159 | // old Terraform module installer and the newer source bundle builder. 160 | 161 | if !strings.HasPrefix(given, "github.com/") { 162 | return "", false, nil 163 | } 164 | 165 | parts := strings.Split(given, "/") 166 | if len(parts) < 3 { 167 | return "", false, fmt.Errorf("GitHub.com shorthand addresses must start with github.com/organization/repository") 168 | } 169 | 170 | urlStr := "https://" + strings.Join(parts[:3], "/") 171 | if !strings.HasSuffix(urlStr, "git") { 172 | urlStr += ".git" 173 | } 174 | 175 | if len(parts) > 3 { 176 | // The remaining parts will become the sub-path portion, since 177 | // the repository as a whole is the source package. 178 | urlStr += "//" + strings.Join(parts[3:], "/") 179 | } 180 | 181 | return "git::" + urlStr, true, nil 182 | }, 183 | func(given string) (string, bool, error) { 184 | // Allows a gitlab.com repository to be presented in a scheme-less 185 | // format like gitlab.com/organization/repository/path, which we'll 186 | // turn into a git:: source string selecting the repository's main 187 | // branch. 188 | // 189 | // This is intentionally compatible with what's accepted by the 190 | // "GitLab detector" in the go-getter library, so that module authors 191 | // can specify GitHub repositories in the same way both for the 192 | // old Terraform module installer and the newer source bundle builder. 
193 | 194 | if !strings.HasPrefix(given, "gitlab.com/") { 195 | return "", false, nil 196 | } 197 | 198 | parts := strings.Split(given, "/") 199 | if len(parts) < 3 { 200 | return "", false, fmt.Errorf("GitLab.com shorthand addresses must start with gitlab.com/organization/repository") 201 | } 202 | 203 | urlStr := "https://" + strings.Join(parts[:3], "/") 204 | if !strings.HasSuffix(urlStr, "git") { 205 | urlStr += ".git" 206 | } 207 | 208 | if len(parts) > 3 { 209 | // The remaining parts will become the sub-path portion, since 210 | // the repository as a whole is the source package. 211 | urlStr += "//" + strings.Join(parts[3:], "/") 212 | // NOTE: We can't actually get here if there are exactly four 213 | // parts, because gitlab.com is also a Terraform module registry 214 | // and so gitlab.com/a/b/c must be interpreted as a registry 215 | // module address instead of a GitLab repository address. Users 216 | // must write an explicit git source address if they intend to 217 | // refer to a Git repository. 218 | } 219 | 220 | return "git::" + urlStr, true, nil 221 | }, 222 | } 223 | 224 | var remoteSourceTypePattern = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) 225 | -------------------------------------------------------------------------------- /sourceaddrs/source_remote_types.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "net/url" 9 | "strings" 10 | ) 11 | 12 | type remoteSourceType interface { 13 | PrepareURL(u *url.URL) error 14 | } 15 | 16 | var remoteSourceTypes = map[string]remoteSourceType{ 17 | "git": gitSourceType{}, 18 | "http": httpSourceType{}, 19 | "https": httpSourceType{}, 20 | } 21 | 22 | type gitSourceType struct{} 23 | 24 | func (gitSourceType) PrepareURL(u *url.URL) error { 25 | // The Git source type requires one of the URL schemes that Git itself 26 | // supports. 
We're also currently being more rigid than Git to ease 27 | // initial implementation. We will extend this over time as the source 28 | // bundle mechanism graduates from experimental to real use. 29 | 30 | if u.Scheme != "ssh" && u.Scheme != "https" { 31 | // NOTE: We don't support "git" or "http" here because we require 32 | // source code to originate from sources that can support 33 | // authentication and encryption, to reduce the risk of mitm attacks 34 | // introducing malicious code. 35 | return fmt.Errorf("a Git repository URL must use either the https or ssh scheme") 36 | } 37 | 38 | qs := u.Query() 39 | for k, vs := range qs { 40 | if k != "ref" { 41 | return fmt.Errorf("a Git repository URL's query string may include only the argument 'ref'") 42 | } 43 | if len(vs) > 1 { 44 | return fmt.Errorf("a Git repository URL's query string may include only one 'ref' argument") 45 | } 46 | } 47 | 48 | return nil 49 | } 50 | 51 | type httpSourceType struct{} 52 | 53 | func (httpSourceType) PrepareURL(u *url.URL) error { 54 | if u.Scheme == "http" { 55 | return fmt.Errorf("source package addresses may not use unencrypted HTTP") 56 | } 57 | if u.Scheme != "https" { 58 | return fmt.Errorf("invalid scheme %q for https source type", u.Scheme) 59 | } 60 | 61 | // For our initial implementation the address must be something that 62 | // go-getter would've recognized as referring to a gzipped tar archive, 63 | // to reduce the scope of the initial source bundler fetcher 64 | // implementations. We may extend this later, but if we do then we should 65 | // use go-getter's syntax for anything go-getter also supports. 66 | // 67 | // Go-getter's treatment of HTTP is quite odd, because by default it does 68 | // an extra module-registry-like indirection where it expects the 69 | // given URL to return a header pointing to another source address type. 
70 | // We don't intend to support that here, but we do want to support the 71 | // behavior of go-getter's special case for URLs whose paths end with 72 | // suffixes that match those typically used for archives, and its magical 73 | // treatment of the "archive" query string argument as a way to force 74 | // treatment of archives. This does mean that we can't fetch from any 75 | // URL that _really_ needs an "archive" query string parameter, but that's 76 | // been true for Terraform for many years and hasn't been a problem, so 77 | // we'll accept that for now and wait to see if any need for it arises. 78 | // 79 | // Ideally we'd just make an HTTP request and then decide what to do based 80 | // on the Content-Type of the response, like a sensible HTTP client would, 81 | // but for now compatibility with go-getter is more important than being 82 | // sensible. 83 | 84 | qs := u.Query() 85 | if vs := qs["archive"]; len(vs) > 0 { 86 | if len(vs) > 1 { 87 | return fmt.Errorf("a HTTPS URL's query string may include only one 'archive' argument") 88 | } 89 | if vs[0] != "tar.gz" && vs[0] != "tgz" { 90 | return fmt.Errorf("the special 'archive' query string argument must be set to 'tgz' if present") 91 | } 92 | if vs[0] == "tar.gz" { 93 | qs.Set("archive", "tgz") // normalize on the shorter form 94 | } 95 | // NOTE: We don't remove the "archive" argument here because the code 96 | // which eventually fetches this will need it to understand what kind 97 | // of archive it's supposed to be fetching, but that final client ought 98 | // to remove this argument itself to avoid potentially confusing the 99 | // remote server, since this is an argument reserved for go-getter and 100 | // for the subset of go-getter's syntax we're implementing here. 
101 | u.RawQuery = qs.Encode() 102 | } else { 103 | p := u.EscapedPath() 104 | if !(strings.HasSuffix(p, ".tar.gz") || strings.HasSuffix(p, ".tgz")) { 105 | return fmt.Errorf("a HTTPS URL's path must end with either .tar.gz or .tgz") 106 | } 107 | } 108 | 109 | if len(qs["checksum"]) != 0 { 110 | // This is another go-getter oddity. go-getter would treat this as 111 | // a request to verify that the result matches the given checksum 112 | // and not send this argument to the server. However, go-getter actually 113 | // doesn't support this (it returns an error) when it's dealing with 114 | // an archive. We'll explicitly reject it to avoid folks being 115 | // misled into thinking that it _is_ working, and thus believing 116 | // they've achieved a verification that isn't present, though we 117 | // might relax this later since go-getter wouldn't have allowed this 118 | // anyway. 119 | return fmt.Errorf("a HTTPS URL's query string must not include 'checksum' argument") 120 | } 121 | 122 | return nil 123 | } 124 | -------------------------------------------------------------------------------- /sourceaddrs/source_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package sourceaddrs

import (
	"fmt"
	"net/url"
	"reflect"
	"testing"

	regaddr "github.com/hashicorp/terraform-registry-address"
	svchost "github.com/hashicorp/terraform-svchost"
)

// TestParseSource exercises ParseSource across all three address types
// (local, registry, and remote), checking both successful parses and the
// exact error messages produced for invalid addresses.
func TestParseSource(t *testing.T) {
	tests := []struct {
		Given   string
		Want    Source
		WantErr string
	}{
		{
			Given:   "",
			WantErr: `a valid source address is required`,
		},
		{
			Given:   " hello",
			WantErr: `source address must not have leading or trailing spaces`,
		},
		{
			Given:   "hello ",
			WantErr: `source address must not have leading or trailing spaces`,
		},
		{
			Given: "./boop",
			Want: LocalSource{
				relPath: "./boop",
			},
		},
		{
			Given:   "./boop/../beep",
			WantErr: `invalid local source address "./boop/../beep": relative path must be written in canonical form "./beep"`,
		},
		{
			Given: "../boop",
			Want: LocalSource{
				relPath: "../boop",
			},
		},
		{
			Given: "../",
			Want: LocalSource{
				relPath: "../",
			},
		},
		{
			Given:   "..",
			WantErr: `invalid local source address "..": relative path must be written in canonical form "../"`,
		},
		{
			Given: "./",
			Want: LocalSource{
				relPath: "./",
			},
		},
		{
			Given:   ".",
			WantErr: `invalid local source address ".": relative path must be written in canonical form "./"`,
		},
		{
			Given:   "./.",
			WantErr: `invalid local source address "./.": relative path must be written in canonical form "./"`,
		},
		{
			Given:   "../boop/../beep",
			WantErr: `invalid local source address "../boop/../beep": relative path must be written in canonical form "../beep"`,
		},
		{
			Given: "hashicorp/subnets/cidr",
			Want: RegistrySource{
				pkg: regaddr.ModulePackage{
					Host:         regaddr.DefaultModuleRegistryHost,
					Namespace:    "hashicorp",
					Name:         "subnets",
					TargetSystem: "cidr",
				},
			},
		},
		{
			Given: "hashicorp/subnets/cidr//blah/blah",
			Want: RegistrySource{
				pkg: regaddr.ModulePackage{
					Host:         regaddr.DefaultModuleRegistryHost,
					Namespace:    "hashicorp",
					Name:         "subnets",
					TargetSystem: "cidr",
				},
				subPath: "blah/blah",
			},
		},
		{
			Given:   "hashicorp/subnets/cidr//blah/blah/../bloop",
			WantErr: `invalid module registry source address "hashicorp/subnets/cidr//blah/blah/../bloop": invalid sub-path: must be slash-separated relative path without any .. or . segments`,
		},
		{
			Given: "terraform.example.com/bleep/bloop/blorp",
			Want: RegistrySource{
				pkg: regaddr.ModulePackage{
					Host:         svchost.Hostname("terraform.example.com"),
					Namespace:    "bleep",
					Name:         "bloop",
					TargetSystem: "blorp",
				},
			},
		},
		{
			// Non-ASCII registry hostnames are normalized to punycode form.
			Given: "テラフォーム.example.com/bleep/bloop/blorp",
			Want: RegistrySource{
				pkg: regaddr.ModulePackage{
					Host:         svchost.Hostname("xn--jckxc1b4b2b6g.example.com"),
					Namespace:    "bleep",
					Name:         "bloop",
					TargetSystem: "blorp",
				},
			},
		},
		{
			Given: "git::https://github.com/hashicorp/go-slug.git",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "git::https://github.com/hashicorp/go-slug.git//blah/blah",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git"),
				},
				subPath: "blah/blah",
			},
		},
		{
			Given: "git::https://github.com/hashicorp/go-slug.git?ref=main",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git?ref=main"),
				},
			},
		},
		{
			Given:   "git::https://github.com/hashicorp/go-slug.git?ref=main&ref=main",
			WantErr: `invalid remote source address "git::https://github.com/hashicorp/go-slug.git?ref=main&ref=main": a Git repository URL's query string may include only one 'ref' argument`,
		},
		{
			Given: "git::https://github.com/hashicorp/go-slug.git//blah/blah?ref=main",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git?ref=main"),
				},
				subPath: "blah/blah",
			},
		},
		{
			Given:   "git::https://github.com/hashicorp/go-slug.git?sshkey=blahblah",
			WantErr: `invalid remote source address "git::https://github.com/hashicorp/go-slug.git?sshkey=blahblah": a Git repository URL's query string may include only the argument 'ref'`,
		},
		{
			Given:   "git::https://github.com/hashicorp/go-slug.git?depth=1",
			WantErr: `invalid remote source address "git::https://github.com/hashicorp/go-slug.git?depth=1": a Git repository URL's query string may include only the argument 'ref'`,
		},
		{
			Given:   "git::https://git@github.com/hashicorp/go-slug.git",
			WantErr: `invalid remote source address "git::https://git@github.com/hashicorp/go-slug.git": must not use username or password in URL portion`,
		},
		{
			Given:   "git::https://git:blit@github.com/hashicorp/go-slug.git",
			WantErr: `invalid remote source address "git::https://git:blit@github.com/hashicorp/go-slug.git": must not use username or password in URL portion`,
		},
		{
			Given:   "git::https://:blit@github.com/hashicorp/go-slug.git",
			WantErr: `invalid remote source address "git::https://:blit@github.com/hashicorp/go-slug.git": must not use username or password in URL portion`,
		},
		{
			Given: "git::ssh://github.com/hashicorp/go-slug.git",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("ssh://github.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "git::ssh://github.com/hashicorp/go-slug.git//blah/blah?ref=main",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("ssh://github.com/hashicorp/go-slug.git?ref=main"),
				},
				subPath: "blah/blah",
			},
		},
		{
			Given:   "git://github.com/hashicorp/go-slug.git",
			WantErr: `invalid remote source address "git://github.com/hashicorp/go-slug.git": a Git repository URL must use either the https or ssh scheme`,
		},
		{
			Given:   "git::git://github.com/hashicorp/go-slug.git",
			WantErr: `invalid remote source address "git::git://github.com/hashicorp/go-slug.git": don't specify redundant "git" source type for "git" URL`,
		},
		{
			Given: "github.com/hashicorp/go-slug.git",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "github.com/hashicorp/go-slug",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "github.com/hashicorp/go-slug/bleep",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://github.com/hashicorp/go-slug.git"),
				},
				subPath: "bleep",
			},
		},
		{
			Given: "gitlab.com/hashicorp/go-slug.git",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://gitlab.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "gitlab.com/hashicorp/go-slug",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://gitlab.com/hashicorp/go-slug.git"),
				},
			},
		},
		{
			Given: "gitlab.com/hashicorp/go-slug/bleep",
			// NOTE: gitlab.com _also_ hosts a Terraform Module registry, and so
			// the registry address interpretation takes precedence if it
			// matches. Users must write an explicit git:: source address if
			// they want this to be interpreted as a Git source address.
			Want: RegistrySource{
				pkg: regaddr.ModulePackage{
					Host:         svchost.Hostname("gitlab.com"),
					Namespace:    "hashicorp",
					Name:         "go-slug",
					TargetSystem: "bleep",
				},
			},
		},
		{
			// This is the explicit Git source address version of the previous
			// case, overriding the default interpretation as module registry.
			Given: "git::https://gitlab.com/hashicorp/go-slug//bleep",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://gitlab.com/hashicorp/go-slug"),
				},
				subPath: "bleep",
			},
		},
		{
			Given: "gitlab.com/hashicorp/go-slug/bleep/bloop",
			// Two or more subpath portions is fine for Git interpretation,
			// because that's not ambiguous with module registry. This is
			// an annoying inconsistency but necessary for backward
			// compatibility with go-getter's interpretations.
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "git",
					url:        *mustParseURL("https://gitlab.com/hashicorp/go-slug.git"),
				},
				subPath: "bleep/bloop",
			},
		},
		{
			Given: "https://example.com/foo.tar.gz",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo.tar.gz"),
				},
			},
		},
		{
			Given: "https://example.com/foo.tar.gz//bleep/bloop",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo.tar.gz"),
				},
				subPath: "bleep/bloop",
			},
		},
		{
			Given: "https://example.com/foo.tar.gz?something=anything",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo.tar.gz?something=anything"),
				},
			},
		},
		{
			Given: "https://example.com/foo.tar.gz//bleep/bloop?something=anything",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo.tar.gz?something=anything"),
				},
				subPath: "bleep/bloop",
			},
		},
		{
			Given: "https://example.com/foo.tgz",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo.tgz"),
				},
			},
		},
		{
			// "archive=tar.gz" is normalized to the shorter "archive=tgz".
			Given: "https://example.com/foo?archive=tar.gz",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo?archive=tgz"),
				},
			},
		},
		{
			Given: "https://example.com/foo?archive=tgz",
			Want: RemoteSource{
				pkg: RemotePackage{
					sourceType: "https",
					url:        *mustParseURL("https://example.com/foo?archive=tgz"),
				},
			},
		},
		{
			Given:   "https://example.com/foo.zip",
			WantErr: `invalid remote source address "https://example.com/foo.zip": a HTTPS URL's path must end with either .tar.gz or .tgz`,
		},
		{
			Given:   "https://example.com/foo?archive=zip",
			WantErr: `invalid remote source address "https://example.com/foo?archive=zip": the special 'archive' query string argument must be set to 'tgz' if present`,
		},
		{
			Given:   "http://example.com/foo.tar.gz",
			WantErr: `invalid remote source address "http://example.com/foo.tar.gz": source package addresses may not use unencrypted HTTP`,
		},
		{
			Given:   "http::http://example.com/foo.tar.gz",
			WantErr: `invalid remote source address "http::http://example.com/foo.tar.gz": don't specify redundant "http" source type for "http" URL`,
		},
		{
			Given:   "https::https://example.com/foo.tar.gz",
			WantErr: `invalid remote source address "https::https://example.com/foo.tar.gz": don't specify redundant "https" source type for "https" URL`,
		},
		{
			Given:   "https://foo@example.com/foo.tgz",
			WantErr: `invalid remote source address "https://foo@example.com/foo.tgz": must not use username or password in URL portion`,
		},
		{
			Given:   "https://foo:bar@example.com/foo.tgz",
			WantErr: `invalid remote source address "https://foo:bar@example.com/foo.tgz": must not use username or password in URL portion`,
		},
		{
			Given:   "https://:bar@example.com/foo.tgz",
			WantErr: `invalid remote source address "https://:bar@example.com/foo.tgz": must not use username or password in URL portion`,
		},
	}

	for _, test := range tests {
		t.Run(test.Given, func(t *testing.T) {
			got, gotErr := ParseSource(test.Given)

			if test.WantErr != "" {
				if gotErr == nil {
					t.Fatalf("unexpected success\ngot result: %s (%T)\nwant error: %s", got, got, test.WantErr)
				}
				if got, want := gotErr.Error(), test.WantErr; got != want {
					t.Fatalf("wrong error\ngot error: %s\nwant error: %s", got, want)
				}
				return
			}

			if gotErr != nil {
				t.Fatalf("unexpected error: %s", gotErr)
			}

			// Two addresses are equal if they have the same string representation
			// and the same dynamic type.
			gotStr := got.String()
			wantStr := test.Want.String()
			if gotStr != wantStr {
				t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr)
			}

			if gotType, wantType := reflect.TypeOf(got), reflect.TypeOf(test.Want); gotType != wantType {
				t.Errorf("wrong result type\ngot: %s\nwant: %s", gotType, wantType)
			}
		})
	}
}

// TestResolveRelativeSource exercises ResolveRelativeSource, which resolves
// a possibly-relative source address against an absolute base address.
func TestResolveRelativeSource(t *testing.T) {
	tests := []struct {
		Base    Source
		Rel     Source
		Want    Source
		WantErr string
	}{
		{
			Base: MustParseSource("./a/b"),
			Rel:  MustParseSource("../c"),
			Want: MustParseSource("./a/c"),
		},
		{
			Base: MustParseSource("./a"),
			Rel:  MustParseSource("../c"),
			Want: MustParseSource("./c"),
		},
		{
			Base: MustParseSource("./a"),
			Rel:  MustParseSource("../../c"),
			Want: MustParseSource("../c"),
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:  MustParseSource("git::https://github.com/hashicorp/go-slug.git//blah/blah"),
			Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//blah/blah"),
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:  MustParseSource("git::https://example.com/foo.git"),
			Want: MustParseSource("git::https://example.com/foo.git"),
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:  MustParseSource("../bloop"),
			Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/bloop"),
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:  MustParseSource("../"),
			Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep"),
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:  MustParseSource("../.."),
			Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git"),
		},
		{
			Base:    MustParseSource("git::https://github.com/hashicorp/go-slug.git//beep/boop"),
			Rel:     MustParseSource("../../../baz"),
			WantErr: `invalid traversal from git::https://github.com/hashicorp/go-slug.git//beep/boop: relative path ../../../baz traverses up too many levels from source path beep/boop`,
		},
		{
			Base: MustParseSource("git::https://github.com/hashicorp/go-slug.git"),
			Rel:  MustParseSource("./boop"),
			Want: MustParseSource("git::https://github.com/hashicorp/go-slug.git//boop"),
		},
		{
			Base: MustParseSource("example.com/foo/bar/baz//beep/boop"),
			Rel:  MustParseSource("../"),
			Want: MustParseSource("example.com/foo/bar/baz//beep"),
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%s + %s", test.Base, test.Rel), func(t *testing.T) {
			got, gotErr := ResolveRelativeSource(test.Base, test.Rel)

			if test.WantErr != "" {
				if gotErr == nil {
					t.Fatalf("unexpected success\ngot result: %s (%T)\nwant error: %s", got, got, test.WantErr)
				}
				if got, want := gotErr.Error(), test.WantErr; got != want {
					t.Fatalf("wrong error\ngot error: %s\nwant error: %s", got, want)
				}
				return
			}

			if gotErr != nil {
				t.Fatalf("unexpected error: %s", gotErr)
			}

			// Two addresses are equal if they have the same string representation
			// and the same dynamic type.
			gotStr := got.String()
			wantStr := test.Want.String()
			if gotStr != wantStr {
				t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr)
			}

			if gotType, wantType := reflect.TypeOf(got), reflect.TypeOf(test.Want); gotType != wantType {
				t.Errorf("wrong result type\ngot: %s\nwant: %s", gotType, wantType)
			}
		})
	}
}

// TestSourceFilename exercises SourceFilename, which returns the base
// filename of the file that a source address refers to.
func TestSourceFilename(t *testing.T) {
	tests := []struct {
		Addr Source
		Want string
	}{
		{
			MustParseSource("./foo.tf"),
			"foo.tf",
		},
		{
			MustParseSource("./boop/foo.tf"),
			"foo.tf",
		},
		{
			MustParseSource("git::https://example.com/foo.git//foo.tf"),
			"foo.tf",
		},
		{
			MustParseSource("git::https://example.com/foo.git//boop/foo.tf"),
			"foo.tf",
		},
		{
			MustParseSource("git::https://example.com/foo.git//boop/foo.tf?ref=main"),
			"foo.tf",
		},
		{
			MustParseSource("hashicorp/subnets/cidr//main.tf"),
			"main.tf",
		},
		{
			MustParseSource("hashicorp/subnets/cidr//test/simple.tf"),
			"simple.tf",
		},
	}

	for _, test := range tests {
		t.Run(test.Addr.String(), func(t *testing.T) {
			got := SourceFilename(test.Addr)
			if got != test.Want {
				t.Errorf(
					"wrong result\naddr: %s\ngot: %s\nwant: %s",
					test.Addr, got, test.Want,
				)
			}
		})
	}
}

// mustParseURL is a test helper that parses the given URL string, panicking
// if it is not valid.
func mustParseURL(s string) *url.URL {
	ret, err := url.Parse(s)
	if err != nil {
		panic(err)
	}
	return ret
}

--------------------------------------------------------------------------------
/sourceaddrs/subpath.go:
--------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourceaddrs 5 | 6 | import ( 7 | "fmt" 8 | "io/fs" 9 | "path" 10 | "strings" 11 | ) 12 | 13 | // ValidSubPath returns true if the given string is a valid sub-path string 14 | // as could be included in either a remote or registry source address. 15 | // 16 | // A sub-path is valid if it's a slash-separated sequence of path segments 17 | // without a leading or trailing slash and without any "." or ".." segments, 18 | // since a sub-path can only traverse downwards from the root of a package. 19 | func ValidSubPath(s string) bool { 20 | _, err := normalizeSubpath(s) 21 | return err == nil 22 | } 23 | 24 | // normalizeSubpath interprets the given string as a package "sub-path", 25 | // returning a normalized form of the path or an error if the string does 26 | // not use correct syntax. 27 | func normalizeSubpath(given string) (string, error) { 28 | if given == "" { 29 | // The empty string is how we represent the absense of a subpath, 30 | // which represents the root directory of a package. 31 | return "", nil 32 | } 33 | 34 | // Our definition of "sub-path" aligns with the definition used by Go's 35 | // virtual filesystem abstraction, since our "module package" idea 36 | // is also essentially just a virtual filesystem. 37 | // This definition prohibits "." and ".." segments and therefore prevents 38 | // upward path traversal. 39 | if !fs.ValidPath(given) { 40 | return "", fmt.Errorf("must be slash-separated relative path without any .. or . segments") 41 | } 42 | 43 | clean := path.Clean(given) 44 | 45 | // Go's path wrangling uses "." to represent "root directory", but 46 | // we represent that by omitting the subpath entirely, so we forbid that 47 | // too even though Go would consider it valid. 48 | if clean == "." { 49 | return "", fmt.Errorf("must be slash-separated relative path without any .. or . 
segments") 50 | } 51 | 52 | return clean, nil 53 | } 54 | 55 | // subPathAsLocalSource interprets the given subpath (which should be a value 56 | // previously returned from [normalizeSubpath]) as a local source address 57 | // relative to the root of the package that the sub-path was presented against. 58 | func subPathAsLocalSource(p string) LocalSource { 59 | // Local source addresses are _mostly_ a superset of what we allow in 60 | // sub-paths, except that downward traversals must always start with 61 | // "./" to disambiguate from other address types. 62 | return LocalSource{relPath: "./" + p} 63 | } 64 | 65 | // splitSubPath takes a source address that would be accepted either as a 66 | // remote source address or a registry source address and returns a tuple of 67 | // its package address and its sub-path portion. 68 | // 69 | // For example: 70 | // dom.com/path/?q=p => "dom.com/path/?q=p", "" 71 | // proto://dom.com/path//*?q=p => "proto://dom.com/path?q=p", "*" 72 | // proto://dom.com/path//path2?q=p => "proto://dom.com/path?q=p", "path2" 73 | // 74 | // This function DOES NOT validate or normalize the sub-path. Pass the second 75 | // return value to [normalizeSubpath] to check if it is valid and to obtain 76 | // its normalized form. 77 | func splitSubPath(src string) (string, string) { 78 | // This is careful to handle the query string portion of a remote source 79 | // address. That's not actually necessary for a module registry address 80 | // because those don't have query strings anyway, but it doesn't _hurt_ 81 | // to check for a query string in that case and allows us to reuse this 82 | // function for both cases. 83 | 84 | // URL might contains another url in query parameters 85 | stop := len(src) 86 | if idx := strings.Index(src, "?"); idx > -1 { 87 | stop = idx 88 | } 89 | 90 | // Calculate an offset to avoid accidentally marking the scheme 91 | // as the dir. 
92 | var offset int 93 | if idx := strings.Index(src[:stop], "://"); idx > -1 { 94 | offset = idx + 3 95 | } 96 | 97 | // First see if we even have an explicit subdir 98 | idx := strings.Index(src[offset:stop], "//") 99 | if idx == -1 { 100 | return src, "" 101 | } 102 | 103 | idx += offset 104 | subdir := src[idx+2:] 105 | src = src[:idx] 106 | 107 | // Next, check if we have query parameters and push them onto the 108 | // URL. 109 | if idx = strings.Index(subdir, "?"); idx > -1 { 110 | query := subdir[idx:] 111 | subdir = subdir[:idx] 112 | src += query 113 | } 114 | 115 | return src, subdir 116 | } 117 | 118 | func joinSubPath(subPath, rel string) (string, error) { 119 | new := path.Join(subPath, rel) 120 | if new == "." { 121 | return "", nil // the root of the package 122 | } 123 | // If subPath was a valid sub-path (no "." or ".." segments) then the 124 | // appearance of such segments in our result suggests that "rel" has 125 | // too many upward traversals and would thus escape from its containing 126 | // package. 127 | if !fs.ValidPath(new) { 128 | return "", fmt.Errorf("relative path %s traverses up too many levels from source path %s", rel, subPath) 129 | } 130 | return new, nil 131 | } 132 | -------------------------------------------------------------------------------- /sourcebundle/bundle.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | import ( 7 | "crypto/sha256" 8 | "encoding/hex" 9 | "encoding/json" 10 | "fmt" 11 | "io" 12 | "io/fs" 13 | "os" 14 | "path" 15 | "path/filepath" 16 | "sort" 17 | "strings" 18 | 19 | "github.com/apparentlymart/go-versions/versions" 20 | "github.com/hashicorp/go-slug" 21 | "github.com/hashicorp/go-slug/sourceaddrs" 22 | regaddr "github.com/hashicorp/terraform-registry-address" 23 | ) 24 | 25 | const manifestFilename = "terraform-sources.json" 26 | 27 | type Bundle struct { 28 | rootDir string 29 | 30 | manifestChecksum string 31 | 32 | remotePackageDirs map[sourceaddrs.RemotePackage]string 33 | remotePackageMeta map[sourceaddrs.RemotePackage]*PackageMeta 34 | 35 | registryPackageSources map[regaddr.ModulePackage]map[versions.Version]sourceaddrs.RemoteSource 36 | registryPackageVersionDeprecations map[regaddr.ModulePackage]map[versions.Version]*RegistryVersionDeprecation 37 | } 38 | 39 | // OpenDir opens a bundle rooted at the given base directory. 40 | // 41 | // If OpenDir succeeds then nothing else (inside or outside the calling program) 42 | // may modify anything under the given base directory for the lifetime of 43 | // the returned [Bundle] object. If the bundle directory is modified while the 44 | // object is still alive then behavior is undefined. 45 | func OpenDir(baseDir string) (*Bundle, error) { 46 | // We'll take the absolute form of the directory to be resilient in case 47 | // something else in this program rudely changes the current working 48 | // directory while the bundle is still alive. 
49 | rootDir, err := filepath.Abs(baseDir) 50 | if err != nil { 51 | return nil, fmt.Errorf("cannot resolve base directory: %w", err) 52 | } 53 | 54 | ret := &Bundle{ 55 | rootDir: rootDir, 56 | remotePackageDirs: make(map[sourceaddrs.RemotePackage]string), 57 | remotePackageMeta: make(map[sourceaddrs.RemotePackage]*PackageMeta), 58 | registryPackageSources: make(map[regaddr.ModulePackage]map[versions.Version]sourceaddrs.RemoteSource), 59 | registryPackageVersionDeprecations: make(map[regaddr.ModulePackage]map[versions.Version]*RegistryVersionDeprecation), 60 | } 61 | 62 | manifestSrc, err := os.ReadFile(filepath.Join(rootDir, manifestFilename)) 63 | if err != nil { 64 | return nil, fmt.Errorf("cannot read manifest: %w", err) 65 | } 66 | 67 | hash := sha256.New() 68 | ret.manifestChecksum = hex.EncodeToString(hash.Sum(manifestSrc)) 69 | 70 | var manifest manifestRoot 71 | err = json.Unmarshal(manifestSrc, &manifest) 72 | if err != nil { 73 | return nil, fmt.Errorf("invalid manifest: %w", err) 74 | } 75 | if manifest.FormatVersion != 1 { 76 | return nil, fmt.Errorf("invalid manifest: unsupported format version %d", manifest.FormatVersion) 77 | } 78 | 79 | for _, rpm := range manifest.Packages { 80 | // We'll be quite fussy about the local directory name to avoid a 81 | // crafted manifest sending us to other random places in the filesystem. 82 | // It must be just a single directory name, without any path separators 83 | // or any traversals. 84 | localDir := filepath.ToSlash(rpm.LocalDir) 85 | if !fs.ValidPath(localDir) || localDir == "." 
|| strings.IndexByte(localDir, '/') >= 0 { 86 | return nil, fmt.Errorf("invalid package directory name %q", rpm.LocalDir) 87 | } 88 | 89 | pkgAddr, err := sourceaddrs.ParseRemotePackage(rpm.SourceAddr) 90 | if err != nil { 91 | return nil, fmt.Errorf("invalid remote package address %q: %w", rpm.SourceAddr, err) 92 | } 93 | ret.remotePackageDirs[pkgAddr] = localDir 94 | 95 | if rpm.Meta.GitCommitID != "" { 96 | ret.remotePackageMeta[pkgAddr] = PackageMetaWithGitMetadata( 97 | rpm.Meta.GitCommitID, 98 | rpm.Meta.GitCommitMessage, 99 | ) 100 | } 101 | } 102 | 103 | for _, rpm := range manifest.RegistryMeta { 104 | pkgAddr, err := sourceaddrs.ParseRegistryPackage(rpm.SourceAddr) 105 | if err != nil { 106 | return nil, fmt.Errorf("invalid registry package address %q: %w", rpm.SourceAddr, err) 107 | } 108 | vs := ret.registryPackageSources[pkgAddr] 109 | if vs == nil { 110 | vs = make(map[versions.Version]sourceaddrs.RemoteSource) 111 | ret.registryPackageSources[pkgAddr] = vs 112 | } 113 | deprecations := ret.registryPackageVersionDeprecations[pkgAddr] 114 | if deprecations == nil { 115 | deprecations = make(map[versions.Version]*RegistryVersionDeprecation) 116 | ret.registryPackageVersionDeprecations[pkgAddr] = deprecations 117 | } 118 | for versionStr, mv := range rpm.Versions { 119 | version, err := versions.ParseVersion(versionStr) 120 | if err != nil { 121 | return nil, fmt.Errorf("invalid registry package version %q: %w", versionStr, err) 122 | } 123 | deprecations[version] = mv.Deprecation 124 | sourceAddr, err := sourceaddrs.ParseRemoteSource(mv.SourceAddr) 125 | if err != nil { 126 | return nil, fmt.Errorf("invalid registry package source address %q: %w", mv.SourceAddr, err) 127 | } 128 | vs[version] = sourceAddr 129 | } 130 | } 131 | 132 | return ret, nil 133 | } 134 | 135 | // LocalPathForSource takes either a remote or registry final source address 136 | // and returns the local path within the bundle that corresponds with it. 
137 | // 138 | // It doesn't make sense to pass a [sourceaddrs.LocalSource] to this function 139 | // because a source bundle cannot contain anything other than remote packages, 140 | // but as a concession to convenience this function will return a 141 | // filepath-shaped relative path in that case, assuming that the source was 142 | // intended to be a local filesystem path relative to the current working 143 | // directory. The result will therefore not necessarily be a subdirectory of 144 | // the recieving bundle in that case. 145 | func (b *Bundle) LocalPathForSource(addr sourceaddrs.FinalSource) (string, error) { 146 | switch addr := addr.(type) { 147 | case sourceaddrs.RemoteSource: 148 | return b.LocalPathForRemoteSource(addr) 149 | case sourceaddrs.RegistrySourceFinal: 150 | return b.LocalPathForRegistrySource(addr.Unversioned(), addr.SelectedVersion()) 151 | case sourceaddrs.LocalSource: 152 | return filepath.FromSlash(addr.RelativePath()), nil 153 | default: 154 | // If we get here then it's probably a bug: the above cases should be 155 | // exhaustive for all sourceaddrs.FinalSource implementations. 156 | return "", fmt.Errorf("cannot produce local path for source address of type %T", addr) 157 | } 158 | } 159 | 160 | // LocalPathForRemoteSource returns the local path within the bundle that 161 | // corresponds with the given source address, or an error if the source address 162 | // is within a source package not included in the bundle. 
163 | func (b *Bundle) LocalPathForRemoteSource(addr sourceaddrs.RemoteSource) (string, error) { 164 | pkgAddr := addr.Package() 165 | localName, ok := b.remotePackageDirs[pkgAddr] 166 | if !ok { 167 | return "", fmt.Errorf("source bundle does not include %s", pkgAddr) 168 | } 169 | subPath := filepath.FromSlash(addr.SubPath()) 170 | return filepath.Join(b.rootDir, localName, subPath), nil 171 | } 172 | 173 | // LocalPathForRegistrySource returns the local path within the bundle that 174 | // corresponds with the given registry address and version, or an error if the 175 | // source address is within a source package not included in the bundle. 176 | func (b *Bundle) LocalPathForRegistrySource(addr sourceaddrs.RegistrySource, version versions.Version) (string, error) { 177 | pkgAddr := addr.Package() 178 | vs, ok := b.registryPackageSources[pkgAddr] 179 | if !ok { 180 | return "", fmt.Errorf("source bundle does not include %s", pkgAddr) 181 | } 182 | baseSourceAddr, ok := vs[version] 183 | if !ok { 184 | return "", fmt.Errorf("source bundle does not include %s v%s", pkgAddr, version) 185 | } 186 | 187 | // The address we were given might have its own source address, so we need 188 | // to incorporate that into our result. 189 | finalSourceAddr := addr.FinalSourceAddr(baseSourceAddr) 190 | return b.LocalPathForRemoteSource(finalSourceAddr) 191 | } 192 | 193 | // LocalPathForFinalRegistrySource is a variant of 194 | // [Bundle.LocalPathForRegistrySource] which passes the source address and 195 | // selected version together as a single address value. 196 | func (b *Bundle) LocalPathForFinalRegistrySource(addr sourceaddrs.RegistrySourceFinal) (string, error) { 197 | return b.LocalPathForRegistrySource(addr.Unversioned(), addr.SelectedVersion()) 198 | } 199 | 200 | // SourceForLocalPath is the inverse of [Bundle.LocalPathForSource], 201 | // translating a local path beneath the bundle's base directory back into 202 | // a source address that it's a snapshot of. 
203 | // 204 | // Returns an error if the given directory is not within the bundle's base 205 | // directory, or is not within one of the subdirectories of the bundle 206 | // that represents a source package. A caller using this to present more 207 | // user-friendly file paths in error messages etc could reasonably choose 208 | // to just retain the source string if this function returns an error, and 209 | // not show the error to the user. 210 | // 211 | // The [Bundle] implementation is optimized for forward lookups from source 212 | // address to local path rather than the other way around, so this function 213 | // may be considerably more expensive than the forward lookup and is intended 214 | // primarily for reporting friendly source locations in diagnostic messages 215 | // instead of exposing the opaque internal directory names from the source 216 | // bundle. This function should not typically be used in performance-sensitive 217 | // portions of the happy path. 218 | func (b *Bundle) SourceForLocalPath(p string) (sourceaddrs.FinalSource, error) { 219 | // This implementation is a best effort sort of thing, and might not 220 | // always succeed in awkward cases. 221 | 222 | // We'll start by making our path absolute because that'll make it 223 | // more comparable with b.rootDir, which is also absolute. 224 | absPath, err := filepath.Abs(p) 225 | if err != nil { 226 | return nil, fmt.Errorf("can't determine absolute path for %q: %w", p, err) 227 | } 228 | 229 | // Now we'll reinterpret the path as relative to our base directory, 230 | // so we can see what local directory name it starts with. 231 | relPath, err := filepath.Rel(b.rootDir, absPath) 232 | if err != nil { 233 | // If the path can't be made relative then that suggests it's on a 234 | // different volume, such as a different drive letter on Windows. 
235 | return nil, fmt.Errorf("path %q does not belong to the source bundle", absPath) 236 | } 237 | 238 | // We'll do all of our remaining work in the abstract "forward-slash-path" 239 | // mode, matching how we represent "sub-paths" for our source addresses. 240 | subPath := path.Clean(filepath.ToSlash(relPath)) 241 | if !fs.ValidPath(subPath) || subPath == "." { 242 | // If the path isn't "valid" by now then that suggests it's a 243 | // path outside of our source bundle which would appear as a 244 | // path with a ".." segment on the front, or to the root of 245 | // our source bundle which would appear as "." and isn't part 246 | // of any particular package. 247 | return nil, fmt.Errorf("path %q does not belong to the source bundle", absPath) 248 | } 249 | 250 | // If all of the above passed then we should now have one or more 251 | // slash-separated path segments. The first one should be one of the 252 | // local directories we know from our manifest, and then the rest is 253 | // the sub-path in the associated package. 254 | localDir, subPath, _ := strings.Cut(subPath, "/") 255 | 256 | // There can be potentially several packages all referring to the same 257 | // directory, so to make the result deterministic we'll just take the 258 | // one whose stringified source address is shortest. 259 | var pkgAddr sourceaddrs.RemotePackage 260 | found := false 261 | for candidateAddr, candidateDir := range b.remotePackageDirs { 262 | if candidateDir != localDir { 263 | continue 264 | } 265 | if found { 266 | // We've found multiple possible source addresses, so we 267 | // need to decide which one to keep. 
268 | if len(candidateAddr.String()) > len(pkgAddr.String()) { 269 | continue 270 | } 271 | } 272 | pkgAddr = candidateAddr 273 | found = true 274 | } 275 | 276 | if !found { 277 | return nil, fmt.Errorf("path %q does not belong to the source bundle", absPath) 278 | } 279 | 280 | return pkgAddr.SourceAddr(subPath), nil 281 | } 282 | 283 | // ChecksumV1 returns a checksum of the contents of the source bundle that 284 | // can be used to determine if another source bundle is equivalent to this one. 285 | // 286 | // "Equivalent" means that it contains all of the same source packages with 287 | // identical content each. 288 | // 289 | // A successful result is a string with the prefix "h1:" to indicate that 290 | // it was built with checksum algorithm version 1. Future versions may 291 | // introduce other checksum formats. 292 | func (b *Bundle) ChecksumV1() (string, error) { 293 | // Our first checksum format assumes that the checksum of the manifest 294 | // is sufficient to cover the entire archive, which in turn assumes that 295 | // the builder either directly or indirectly encodes the checksum of 296 | // each package into the manifest. For the initial implementation of 297 | // Builder we achieve that by using the checksum as the directory name 298 | // for each package, which avoids the need to redundantly store the 299 | // checksum again. If a future Builder implementation moves away from 300 | // using checksums as directory names then the builder will need to 301 | // introduce explicit checksums as a separate property into the manifest 302 | // in order to preserve our assumptions here. 303 | return "h1:" + b.manifestChecksum, nil 304 | } 305 | 306 | // RemotePackages returns a slice of all of the remote source packages that 307 | // contributed to this source bundle. 308 | // 309 | // The result is sorted into a consistent but unspecified order. 
310 | func (b *Bundle) RemotePackages() []sourceaddrs.RemotePackage { 311 | ret := make([]sourceaddrs.RemotePackage, 0, len(b.remotePackageDirs)) 312 | for pkgAddr := range b.remotePackageDirs { 313 | ret = append(ret, pkgAddr) 314 | } 315 | sort.Slice(ret, func(i, j int) bool { 316 | return ret[i].String() < ret[j].String() 317 | }) 318 | return ret 319 | } 320 | 321 | // RemotePackageMeta returns the package metadata for the given package address, 322 | // or nil if there is no metadata for that package tracked in the bundle. 323 | func (b *Bundle) RemotePackageMeta(pkgAddr sourceaddrs.RemotePackage) *PackageMeta { 324 | return b.remotePackageMeta[pkgAddr] 325 | } 326 | 327 | // RegistryPackages returns a list of all of the distinct registry packages 328 | // that contributed to this bundle. 329 | // 330 | // The result is in a consistent but unspecified sorted order. 331 | func (b *Bundle) RegistryPackages() []regaddr.ModulePackage { 332 | ret := make([]regaddr.ModulePackage, 0, len(b.remotePackageDirs)) 333 | for pkgAddr := range b.registryPackageSources { 334 | ret = append(ret, pkgAddr) 335 | } 336 | sort.Slice(ret, func(i, j int) bool { 337 | return ret[i].String() < ret[j].String() 338 | }) 339 | return ret 340 | } 341 | 342 | // RegistryPackageVersions returns a list of all of the versions of the given 343 | // module registry package that this bundle has package content for. 344 | // 345 | // This result can be used as a substitute for asking the remote registry which 346 | // versions are available in any situation where a caller is interested only 347 | // in what's bundled, and will not consider installing anything new from 348 | // the origin registry. 349 | // 350 | // The result is guaranteed to be sorted with lower-precedence version numbers 351 | // placed earlier in the list. 
352 | func (b *Bundle) RegistryPackageVersions(pkgAddr regaddr.ModulePackage) versions.List { 353 | vs := b.registryPackageSources[pkgAddr] 354 | if len(vs) == 0 { 355 | return nil 356 | } 357 | ret := make(versions.List, 0, len(vs)) 358 | for v := range vs { 359 | ret = append(ret, v) 360 | } 361 | ret.Sort() 362 | return ret 363 | } 364 | 365 | func (b *Bundle) RegistryPackageVersionDeprecation(pkgAddr regaddr.ModulePackage, version versions.Version) *RegistryVersionDeprecation { 366 | return b.registryPackageVersionDeprecations[pkgAddr][version] 367 | } 368 | 369 | // RegistryPackageSourceAddr returns the remote source address corresponding 370 | // to the given version of the given module package, or sets its second return 371 | // value to false if no such version is included in the bundle. 372 | func (b *Bundle) RegistryPackageSourceAddr(pkgAddr regaddr.ModulePackage, version versions.Version) (sourceaddrs.RemoteSource, bool) { 373 | sourceAddr, ok := b.registryPackageSources[pkgAddr][version] 374 | return sourceAddr, ok 375 | } 376 | 377 | // WriteArchive writes a source bundle archive containing the same contents 378 | // as the bundle to the given writer. 379 | // 380 | // A source bundle archive is a gzip-compressed tar stream that can then 381 | // be extracted in some other location to produce an equivalent source 382 | // bundle directory. 383 | func (b *Bundle) WriteArchive(w io.Writer) error { 384 | // For this part we just delegate to the main slug packer, since a 385 | // source bundle archive is effectively just a slug with multiple packages 386 | // (and a manifest) inside it. 
387 | packer, err := slug.NewPacker(slug.DereferenceSymlinks()) 388 | if err != nil { 389 | return fmt.Errorf("can't instantiate archive packer: %w", err) 390 | } 391 | _, err = packer.Pack(b.rootDir, w) 392 | return err 393 | } 394 | 395 | // ExtractArchive reads a source bundle archive from the given reader and 396 | // extracts it into the given target directory, which must already exist and 397 | // must be empty. 398 | // 399 | // If successful, it returns a [Bundle] value representing the created bundle, 400 | // as if the given target directory were passed to [OpenDir]. 401 | func ExtractArchive(r io.Reader, targetDir string) (*Bundle, error) { 402 | // A bundle archive is just a slug archive created over a bundle 403 | // directory, so we can use the normal unpack function to deal with it. 404 | err := slug.Unpack(r, targetDir) 405 | if err != nil { 406 | return nil, err 407 | } 408 | return OpenDir(targetDir) 409 | } 410 | -------------------------------------------------------------------------------- /sourcebundle/dependency_finder.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | import ( 7 | "io/fs" 8 | 9 | "github.com/apparentlymart/go-versions/versions" 10 | "github.com/hashicorp/go-slug/sourceaddrs" 11 | ) 12 | 13 | // A DependencyFinder analyzes a file or directory inside a source package 14 | // and reports any dependencies described in that location. 15 | // 16 | // The same location could potentially be analyzed by multiple different 17 | // DependencyFinder implementations if e.g. it's a directory containing 18 | // a mixture of different kinds of artifact where each artifact has a 19 | // disjoint set of relevant files. 
//
// All DependencyFinder implementations must be comparable in the sense of
// supporting the == operator without panicking, and should typically be
// singletons, because [Builder] will use values of this type as part of
// the unique key for tracking whether a particular dependency has already
// been analyzed. A typical DependencyFinder implementation is an empty
// struct type with the FindDependencies method implemented on it.
type DependencyFinder interface {
	// FindDependencies should analyze the file or directory at the given
	// sub-path of the given filesystem and then call the given callback
	// once for each detected dependency, providing both its source
	// address and the appropriate [DependencyFinder] for whatever kind
	// of source artifact is expected at that source address.
	//
	// The same source address can potentially contain artifacts of multiple
	// different types. The calling [Builder] will visit each distinct
	// (source, finder) pair only once for analysis, and will also aim to
	// avoid redundantly re-fetching the same source package more than once.
	//
	// If an implementer sends a local source address to the callback function,
	// the calling [Builder] will automatically resolve that relative to
	// the source address being analyzed. Implementers should typically first
	// validate that the local address does not traverse up (with "..") more
	// levels than are included in subPath, because implementers can return
	// higher-quality error diagnostics (with source location information)
	// than the calling Builder can.
	//
	// If the implementer emits diagnostics with source location information
	// then the filenames in the source ranges must be strings that would
	// pass [fs.ValidPath] describing a path from the root of the given fs
	// to the file containing the error. The builder will then translate those
	// paths into remote source address strings within the containing package.
	FindDependencies(fsys fs.FS, subPath string, deps *Dependencies) Diagnostics
}

// Dependencies is part of the callback API for [DependencyFinder]. Dependency
// finders use the methods of this type to report the dependencies they find
// in the source artifact being analyzed.
type Dependencies struct {
	// baseAddr is the remote source address that local source addresses
	// reported via AddLocalSource are resolved against.
	baseAddr sourceaddrs.RemoteSource

	// The following callbacks receive the reported dependencies; disable
	// sets them all to nil once analysis of this artifact is over.
	remoteCb          func(source sourceaddrs.RemoteSource, depFinder DependencyFinder)
	registryCb        func(source sourceaddrs.RegistrySource, allowedVersions versions.Set, depFinder DependencyFinder)
	localResolveErrCb func(err error)
}

// AddRemoteSource reports a dependency on the given remote source, whose
// artifact is to be analyzed in turn using the given [DependencyFinder].
func (d *Dependencies) AddRemoteSource(source sourceaddrs.RemoteSource, depFinder DependencyFinder) {
	d.remoteCb(source, depFinder)
}

// AddRegistrySource reports a dependency on the given registry source,
// constrained to the given version set, whose artifact is to be analyzed in
// turn using the given [DependencyFinder].
func (d *Dependencies) AddRegistrySource(source sourceaddrs.RegistrySource, allowedVersions versions.Set, depFinder DependencyFinder) {
	d.registryCb(source, allowedVersions, depFinder)
}

// AddLocalSource reports a dependency on the given local source, which is
// first resolved relative to the artifact currently being analyzed and then
// reported as a remote source in the same package.
func (d *Dependencies) AddLocalSource(source sourceaddrs.LocalSource, depFinder DependencyFinder) {
	// A local source always becomes a remote source in the same package as
	// the current base address.
	realSource, err := sourceaddrs.ResolveRelativeSource(d.baseAddr, source)
	if err != nil {
		d.localResolveErrCb(err)
		return
	}
	// realSource is guaranteed to be a RemoteSource because source is
	// a LocalSource and so the ResolveRelativeSource address is guaranteed
	// to have the same source type as d.baseAddr.
	d.remoteCb(realSource.(sourceaddrs.RemoteSource), depFinder)
}

// disable ensures that a [DependencyFinder] implementation can't incorrectly
// hold on to its given Dependencies object and continue calling it after it
// returns.
func (d *Dependencies) disable() {
	d.remoteCb = nil
	d.registryCb = nil
	d.localResolveErrCb = nil
}
-------------------------------------------------------------------------------- /sourcebundle/diagnostics.go: --------------------------------------------------------------------------------
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package sourcebundle

import (
	"github.com/hashicorp/go-slug/sourceaddrs"
)

// Diagnostics is a collection of problems (errors and warnings) that occurred
// during an operation.
type Diagnostics []Diagnostic

// Diagnostic represents a single problem (error or warning) that has occurred
// during an operation.
//
// This interface has no concrete implementations in this package.
// Implementors of [DependencyFinder] will need to implement this interface
// to report any problems they find while analyzing the designated source
// artifact. For example, a [DependencyFinder] that uses the HCL library
// to analyze an HCL-based language would probably implement this interface
// in terms of HCL's Diagnostic type.
type Diagnostic interface {
	Severity() DiagSeverity
	Description() DiagDescription
	Source() DiagSource

	// ExtraInfo returns the raw extra information value. This is a low-level
	// API which requires some work on the part of the caller to properly
	// access associated information. This convention comes from HCL and
	// Terraform and this is here primarily for their benefit; sourcebundle
	// passes through these values verbatim without trying to interpret them.
	ExtraInfo() interface{}
}

// HasErrors returns true if the collection contains at least one diagnostic
// of error severity.
func (diags Diagnostics) HasErrors() bool {
	for _, diag := range diags {
		if diag.Severity() == DiagError {
			return true
		}
	}
	return false
}

// DiagSeverity describes how severe a diagnostic is.
type DiagSeverity rune

const (
	// DiagError is the severity for problems that prevented the operation
	// from completing.
	DiagError DiagSeverity = 'E'
	// DiagWarning is the severity for concerns that did not block the
	// operation but that a user should know about.
	DiagWarning DiagSeverity = 'W'
)

// DiagDescription is the human-readable description of a diagnostic,
// as a short summary plus optional additional detail.
type DiagDescription struct {
	Summary string
	Detail string
}

// DiagSource describes where in the analyzed source material a diagnostic
// originated.
type DiagSource struct {
	Subject *SourceRange
	Context *SourceRange
}

// SourceRange identifies a contiguous span of characters within a file.
type SourceRange struct {
	// Filename is a human-oriented label for the file that the range belongs
	// to. This is often the string representation of a source address, but
	// isn't guaranteed to be.
	Filename string
	Start, End SourcePos
}

// SourcePos is a single position within a source file.
type SourcePos struct {
	Line, Column, Byte int
}

// diagnosticInSourcePackage is a thin wrapper around diagnostic that
// reinterprets the filenames in any source ranges to be relative to a
// particular remote source package, so it's unambiguous which remote
// source package the diagnostic originated in.
type diagnosticInSourcePackage struct {
	wrapped Diagnostic
	pkg sourceaddrs.RemotePackage
}

// inRemoteSourcePackage modifies the receiver in-place so that all of the
// diagnostics will have their source filenames (if any) interpreted as
// sub-paths within the given source package.
//
// For convenience, returns the same diags slice whose backing array has now
// been modified with different diagnostics.
func (diags Diagnostics) inRemoteSourcePackage(pkg sourceaddrs.RemotePackage) Diagnostics {
	for i, diag := range diags {
		diags[i] = diagnosticInSourcePackage{
			wrapped: diag,
			pkg: pkg,
		}
	}
	return diags
}

var _ Diagnostic = diagnosticInSourcePackage{}

// Description implements Diagnostic by delegating to the wrapped diagnostic.
func (diag diagnosticInSourcePackage) Description() DiagDescription {
	return diag.wrapped.Description()
}

// ExtraInfo implements Diagnostic by delegating to the wrapped diagnostic.
func (diag diagnosticInSourcePackage) ExtraInfo() interface{} {
	return diag.wrapped.ExtraInfo()
}

// Severity implements Diagnostic by delegating to the wrapped diagnostic.
func (diag diagnosticInSourcePackage) Severity() DiagSeverity {
	return diag.wrapped.Severity()
}

// Source implements Diagnostic, rewriting any filenames that are valid
// package sub-paths into full source address strings within the package.
func (diag diagnosticInSourcePackage) Source() DiagSource {
	ret := diag.wrapped.Source()
	if ret.Subject != nil && sourceaddrs.ValidSubPath(ret.Subject.Filename) {
		newRng := *ret.Subject // shallow copy
		newRng.Filename = diag.pkg.SourceAddr(newRng.Filename).String()
		ret.Subject = &newRng
	}
	if ret.Context != nil && sourceaddrs.ValidSubPath(ret.Context.Filename) {
		newRng := *ret.Context // shallow copy
		newRng.Filename = diag.pkg.SourceAddr(newRng.Filename).String()
		ret.Context = &newRng
	}
	return ret
}

// internalDiagnostic is a diagnostic type used to report this package's own
// errors as diagnostics.
//
// This package doesn't ever work directly with individual source file contents,
// so an internal diagnostic never has source location information.
133 | type internalDiagnostic struct { 134 | severity DiagSeverity 135 | summary string 136 | detail string 137 | } 138 | 139 | var _ Diagnostic = (*internalDiagnostic)(nil) 140 | 141 | // Description implements Diagnostic 142 | func (d *internalDiagnostic) Description() DiagDescription { 143 | return DiagDescription{ 144 | Summary: d.summary, 145 | Detail: d.detail, 146 | } 147 | } 148 | 149 | // ExtraInfo implements Diagnostic 150 | func (d *internalDiagnostic) ExtraInfo() interface{} { 151 | return nil 152 | } 153 | 154 | // Severity implements Diagnostic 155 | func (d *internalDiagnostic) Severity() DiagSeverity { 156 | return d.severity 157 | } 158 | 159 | // Source implements Diagnostic 160 | func (d *internalDiagnostic) Source() DiagSource { 161 | return DiagSource{ 162 | // Never any source location information for internal diagnostics. 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /sourcebundle/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | // Package sourcebundle deals with the construction of and later consumption of 5 | // "source bundles", which are in some sense "meta-slugs" that capture a 6 | // variety of different source packages together into a single working 7 | // directory, which can optionally be bundled up into an archive for insertion 8 | // into a blob storage system. 9 | // 10 | // Whereas single slugs (as implemented in the parent package) have very little 11 | // predefined structure aside from the possibility of a .terraformignore file, 12 | // source bundles have a more prescriptive structure that allows callers to 13 | // use a source bundle as a direct substitute for fetching the individual 14 | // source packages it was built from. 
15 | // 16 | // NOTE WELL: Everything in this package is currently experimental and subject 17 | // to breaking changes even in patch releases. We will make stronger commitments 18 | // to backward-compatibility once we have more experience using this 19 | // functionality in real contexts. 20 | package sourcebundle 21 | -------------------------------------------------------------------------------- /sourcebundle/manifest_json.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | // This file contains some internal-only types used to help with marshalling 7 | // and unmarshalling our manifest file format. The manifest format is not 8 | // itself a public interface, so these should stay unexported and any caller 9 | // that needs to interact with previously-generated source bundle manifests 10 | // should do so via the Bundle type. 11 | 12 | type manifestRoot struct { 13 | // FormatVersion should always be 1 for now, because there is only 14 | // one version of this format. 15 | FormatVersion uint64 `json:"terraform_source_bundle"` 16 | 17 | Packages []manifestRemotePackage `json:"packages,omitempty"` 18 | RegistryMeta []manifestRegistryMeta `json:"registry,omitempty"` 19 | } 20 | 21 | type manifestRemotePackage struct { 22 | // SourceAddr is the address of an entire remote package, meaning that 23 | // it must not have a sub-path portion. 24 | SourceAddr string `json:"source"` 25 | 26 | // LocalDir is the name of the subdirectory of the bundle containing the 27 | // source code for this package. 28 | LocalDir string `json:"local"` 29 | 30 | Meta manifestPackageMeta `json:"meta,omitempty"` 31 | } 32 | 33 | type manifestRegistryMeta struct { 34 | // SourceAddr is the address of an entire registry package, meaning that 35 | // it must not have a sub-path portion. 
36 | SourceAddr string `json:"source"` 37 | 38 | // Versions is a map from string representations of [versions.Version] to the metadata for each version. 39 | Versions map[string]manifestRegistryVersion `json:"versions,omitempty"` 40 | } 41 | 42 | type manifestRegistryVersion struct { 43 | // This SourceAddr is a full source address, so it might potentially 44 | // have a sub-path portion. If it does then it must be combined with 45 | // any sub-path included in the user's registry module source address. 46 | SourceAddr string `json:"source"` 47 | Deprecation *RegistryVersionDeprecation `json:"deprecation"` 48 | } 49 | 50 | type manifestPackageMeta struct { 51 | GitCommitID string `json:"git_commit_id,omitempty"` 52 | GitCommitMessage string `json:"git_commit_message,omitempty"` 53 | } 54 | -------------------------------------------------------------------------------- /sourcebundle/package_fetcher.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | import ( 7 | "context" 8 | "net/url" 9 | ) 10 | 11 | // A PackageFetcher knows how to fetch remote source packages into a local 12 | // filesystem directory. 13 | type PackageFetcher interface { 14 | // FetchSourcePackage retrieves a source package from the given 15 | // location and extracts it into the given local filesystem directory. 16 | // 17 | // A package fetcher is responsible for ensuring that nothing gets written 18 | // outside of the given target directory. However, a fetcher can assume that 19 | // nothing should be modifying or moving targetDir or any of its contents 20 | // concurrently with the fetcher running. 21 | // 22 | // If the function returns with a nil error then the target directory must be 23 | // a complete copy of the designated remote package, ready for further analysis. 
24 | // 25 | // Package fetchers should respond to cancellation of the given 26 | // [context.Context] to a reasonable extent, so that the source bundle build 27 | // process can be interrupted relatively promptly. Return a non-nil error when 28 | // cancelled to allow the caller to detect that the target directory might not 29 | // be in a consistent state. 30 | // 31 | // PackageFetchers should not have any persistent mutable state: each call 32 | // should be independent of all past, concurrent, and future calls. In 33 | // particular, a fetcher should not attempt to implement any caching behavior, 34 | // because it's [Builder]'s responsibility to handle caching and request 35 | // coalescing during bundle construction to ensure that it will happen 36 | // consistently across different fetcher implementations. 37 | FetchSourcePackage(ctx context.Context, sourceType string, url *url.URL, targetDir string) (FetchSourcePackageResponse, error) 38 | } 39 | 40 | // FetchSourcePackageResponse is a structure which represents metadata about 41 | // the fetch operation. This type may grow to add more data over time in later 42 | // minor releases. 43 | type FetchSourcePackageResponse struct { 44 | PackageMeta *PackageMeta 45 | } 46 | -------------------------------------------------------------------------------- /sourcebundle/package_meta.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | // PackageMeta is a collection of metadata about how the content of a 7 | // particular remote package was derived. 8 | // 9 | // A nil value of this type represents no metadata. A non-nil value will 10 | // typically omit some or all of the fields if they are not relevant. 
11 | type PackageMeta struct { 12 | // NOTE: Everything in here is unexported for now because it's not clear 13 | // how this is going to evolve in future and whether it's a good idea 14 | // to just have a separate field for each piece of metadata. This will 15 | // give some freedom to switch to other storage strategies in future if 16 | // this struct ends up getting too big and is only sparsely used by most 17 | // fetchers. 18 | 19 | gitCommitID string 20 | gitCommitMessage string 21 | } 22 | 23 | type RegistryVersionDeprecation struct { 24 | Version string 25 | Reason string 26 | Link string 27 | } 28 | 29 | // PackageMetaWithGitMetadata returns a [PackageMeta] object with a Git Commit 30 | // ID and message tracked. 31 | // 32 | // The given commit ID must be a fully-qualified ID, and never an 33 | // abbreviated commit ID, the name of a ref, or any other proxy-for-commit 34 | // identifier. 35 | func PackageMetaWithGitMetadata( 36 | commitID string, 37 | commitMessage string, 38 | ) *PackageMeta { 39 | return &PackageMeta{ 40 | gitCommitID: commitID, 41 | gitCommitMessage: commitMessage, 42 | } 43 | } 44 | 45 | // If the content of this package was derived from a particular commit 46 | // from a Git repository, GitCommitID returns the fully-qualified ID of 47 | // that commit. This is never an abbreviated commit ID, the name of a ref, 48 | // or anything else that could serve as a proxy for a commit ID. 49 | // 50 | // If there is no relevant commit ID for this package, returns an empty string. 51 | func (m *PackageMeta) GitCommitID() string { 52 | return m.gitCommitID 53 | } 54 | 55 | // GitCommitMessage returns a commit message for the commit this package was 56 | // derived from. 
57 | func (m *PackageMeta) GitCommitMessage() string { 58 | return m.gitCommitMessage 59 | } 60 | -------------------------------------------------------------------------------- /sourcebundle/registry_client.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/apparentlymart/go-versions/versions" 10 | "github.com/hashicorp/go-slug/sourceaddrs" 11 | regaddr "github.com/hashicorp/terraform-registry-address" 12 | ) 13 | 14 | // RegistryClient provides a minimal client for the Terraform module registry 15 | // protocol, sufficient to find the available versions for a particular 16 | // registry entry and then to find the real remote package for a particular 17 | // version. 18 | // 19 | // An implementation should not itself attempt to cache the direct results of 20 | // the client methods, but it can (and probably should) cache prerequisite 21 | // information such as the results of performing service discovery against 22 | // the hostname in a module package address. 23 | type RegistryClient interface { 24 | // ModulePackageVersions fetches all of the known exact versions 25 | // available for the given package in its module registry. 26 | ModulePackageVersions(ctx context.Context, pkgAddr regaddr.ModulePackage) (ModulePackageVersionsResponse, error) 27 | 28 | // ModulePackageSourceAddr fetches the real remote source address for the 29 | // given version of the given module registry package. 30 | ModulePackageSourceAddr(ctx context.Context, pkgAddr regaddr.ModulePackage, version versions.Version) (ModulePackageSourceAddrResponse, error) 31 | } 32 | 33 | // ModulePackageVersionsResponse is an opaque type which represents the result 34 | // of the package versions client operation. This type may grow to add more 35 | // functionality over time in later minor releases. 
36 | type ModulePackageVersionsResponse struct { 37 | Versions []ModulePackageInfo `json:"versions"` 38 | } 39 | 40 | type ModulePackageInfo struct { 41 | Version versions.Version 42 | Deprecation *ModulePackageVersionDeprecation `json:"deprecation"` 43 | } 44 | 45 | type ModulePackageVersionDeprecation struct { 46 | Reason string `json:"reason"` 47 | Link string `json:"link"` 48 | } 49 | 50 | // ModulePackageSourceAddrResponse is an opaque type which represents the 51 | // result of the source address client operation. This type may grow to add 52 | // more functionality over time in later minor releases. 53 | type ModulePackageSourceAddrResponse struct { 54 | SourceAddr sourceaddrs.RemoteSource 55 | } 56 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/hello/hello: -------------------------------------------------------------------------------- 1 | Hello, world! 2 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/subdirs/a/b/beepbeep: -------------------------------------------------------------------------------- 1 | BEEP! 2 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/terraformignore/.terraformignore: -------------------------------------------------------------------------------- 1 | excluded 2 | excluded-dir/ 3 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/terraformignore/excluded: -------------------------------------------------------------------------------- 1 | This file is ignored. 
2 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/terraformignore/excluded-dir/excluded: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/sourcebundle/testdata/pkgs/terraformignore/excluded-dir/excluded -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/terraformignore/included: -------------------------------------------------------------------------------- 1 | This file is included. 2 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/with-remote-deps/dependencies: -------------------------------------------------------------------------------- 1 | https://example.com/dependency1.tgz 2 | https://example.com/dependency2.tgz 3 | -------------------------------------------------------------------------------- /sourcebundle/testdata/pkgs/with-remote-deps/self_dependency: -------------------------------------------------------------------------------- 1 | https://example.com/self_dependency.tgz -------------------------------------------------------------------------------- /sourcebundle/trace.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 
2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package sourcebundle 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/apparentlymart/go-versions/versions" 10 | "github.com/hashicorp/go-slug/sourceaddrs" 11 | regaddr "github.com/hashicorp/terraform-registry-address" 12 | ) 13 | 14 | // BuildTracer contains a set of callbacks that a caller can optionally provide 15 | // to [Builder] methods via their [context.Context] arguments to be notified 16 | // when various long-running events are starting and stopping, to allow both 17 | // for debugging and for UI feedback about progress. 18 | // 19 | // Any or all of the callbacks may be left as nil, in which case no event 20 | // will be delivered for the corresponding event. 21 | // 22 | // The [context.Context] passed to each trace function is guaranteed to be a 23 | // child of the one passed to whatever [Builder] method caused the event 24 | // to occur, and so it can carry cross-cutting information such as distributed 25 | // tracing clients. 26 | // 27 | // The "Start"-suffixed methods all allow returning a new context which will 28 | // then be passed to the corresponding "Success"-suffixed or "Failure"-suffixed 29 | // function, and also used for outgoing requests within the scope of that 30 | // operation. This allows carrying values such as tracing spans between the 31 | // start and end, so they can properly bracket the operation in question. If 32 | // your tracer doesn't need this then just return the given context. 33 | type BuildTracer struct { 34 | // The RegistryPackageVersions... callbacks frame any requests to 35 | // fetch the list of available versions for a module registry package. 
36 | RegistryPackageVersionsStart func(ctx context.Context, pkgAddr regaddr.ModulePackage) context.Context 37 | RegistryPackageVersionsSuccess func(ctx context.Context, pkgAddr regaddr.ModulePackage, versions versions.List) 38 | RegistryPackageVersionsFailure func(ctx context.Context, pkgAddr regaddr.ModulePackage, err error) 39 | RegistryPackageVersionsAlready func(ctx context.Context, pkgAddr regaddr.ModulePackage, versions versions.List) 40 | 41 | // The RegistryPackageSource... callbacks frame any requests to fetch 42 | // the real underlying source address for a selected registry package 43 | // version. 44 | RegistryPackageSourceStart func(ctx context.Context, pkgAddr regaddr.ModulePackage, version versions.Version) context.Context 45 | RegistryPackageSourceSuccess func(ctx context.Context, pkgAddr regaddr.ModulePackage, version versions.Version, sourceAddr sourceaddrs.RemoteSource) 46 | RegistryPackageSourceFailure func(ctx context.Context, pkgAddr regaddr.ModulePackage, version versions.Version, err error) 47 | RegistryPackageSourceAlready func(ctx context.Context, pkgAddr regaddr.ModulePackage, version versions.Version, sourceAddr sourceaddrs.RemoteSource) 48 | 49 | // The RemotePackageDownload... callbacks frame any requests to download 50 | // remote source packages. 51 | RemotePackageDownloadStart func(ctx context.Context, pkgAddr sourceaddrs.RemotePackage) context.Context 52 | RemotePackageDownloadSuccess func(ctx context.Context, pkgAddr sourceaddrs.RemotePackage) 53 | RemotePackageDownloadFailure func(ctx context.Context, pkgAddr sourceaddrs.RemotePackage, err error) 54 | RemotePackageDownloadAlready func(ctx context.Context, pkgAddr sourceaddrs.RemotePackage) 55 | 56 | // Diagnostics will be called for any diagnostics that describe problems 57 | // that aren't also reported by calling one of the "Failure" callbacks 58 | // above. 
A recipient that is going to report the errors itself using 59 | // the Failure callbacks anyway should consume diagnostics from this 60 | // event, rather than from the return values of the [Builder] methods, 61 | // to avoid redundantly reporting the same errors twice. 62 | // 63 | // Diagnostics might be called multiple times during an operation. Callers 64 | // should consider each new call to represent additional diagnostics, 65 | // not replacing any previously returned. 66 | Diagnostics func(ctx context.Context, diags Diagnostics) 67 | } 68 | 69 | // OnContext takes a context and returns a derived context which has everything 70 | // the given context already had plus also the receiving BuildTracer object, 71 | // so that passing the resulting context to methods of [Builder] will cause 72 | // the trace object's callbacks to be called. 73 | // 74 | // Each context can have only one tracer, so if the given context already has 75 | // a tracer then it will be overridden by the new one. 76 | func (bt *BuildTracer) OnContext(ctx context.Context) context.Context { 77 | return context.WithValue(ctx, buildTraceKey, bt) 78 | } 79 | 80 | func buildTraceFromContext(ctx context.Context) *BuildTracer { 81 | ret, ok := ctx.Value(buildTraceKey).(*BuildTracer) 82 | if !ok { 83 | // We'll always return a non-nil pointer just because that reduces 84 | // the amount of boilerplate required in the caller when announcing 85 | // events. 86 | ret = &noopBuildTrace 87 | } 88 | return ret 89 | } 90 | 91 | type buildTraceKeyType int 92 | 93 | const buildTraceKey buildTraceKeyType = 0 94 | 95 | // noopBuildTrace is an all-nil [BuildTracer] we return a pointer to if we're 96 | // asked for a BuildTracer from a context that doesn't have one. 
97 | var noopBuildTrace BuildTracer 98 | -------------------------------------------------------------------------------- /terraformignore.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package slug 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | 11 | "github.com/hashicorp/go-slug/internal/ignorefiles" 12 | ) 13 | 14 | func parseIgnoreFile(rootPath string) *ignorefiles.Ruleset { 15 | // Look for .terraformignore at our root path/src 16 | file, err := os.Open(filepath.Join(rootPath, ".terraformignore")) 17 | defer file.Close() 18 | 19 | // If there's any kind of file error, punt and use the default ignore patterns 20 | if err != nil { 21 | // Only show the error debug if an error *other* than IsNotExist 22 | if !os.IsNotExist(err) { 23 | fmt.Fprintf(os.Stderr, "Error reading .terraformignore, default exclusions will apply: %v \n", err) 24 | } 25 | return ignorefiles.DefaultRuleset 26 | } 27 | 28 | ret, err := ignorefiles.ParseIgnoreFileContent(file) 29 | if err != nil { 30 | fmt.Fprintf(os.Stderr, "Error reading .terraformignore, default exclusions will apply: %v \n", err) 31 | return ignorefiles.DefaultRuleset 32 | } 33 | 34 | return ret 35 | } 36 | 37 | func matchIgnoreRules(path string, ruleset *ignorefiles.Ruleset) ignorefiles.ExcludesResult { 38 | // Ruleset.Excludes explicitly allows ignoring its error, in which 39 | // case we are ignoring any individual invalid rules in the set 40 | // but still taking all of the others into account. 41 | ret, _ := ruleset.Excludes(path) 42 | return ret 43 | } 44 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/_common/extra-files/bar.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) HashiCorp, Inc. 
3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | echo "bar" 6 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/_common/extra-files/foo.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | echo "foo" 6 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/_common/locals.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | locals { 5 | app = "service-01" 6 | } 7 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/_common/output.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | locals { 5 | files = fileset("${path.module}/extra-files", "*.sh") 6 | } 7 | 8 | output "scripts" { 9 | value = local.files 10 | } 11 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/_common/versions.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | terraform { 5 | required_version = "~> 1.2" 6 | } 7 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/backend.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | terraform { 5 | remote "backend" { 6 | hostname = "foobar.terraform.io" 7 | organization = "hashicorp" 8 | 9 | workspaces { 10 | name = "dev-service-02" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/extra-files: -------------------------------------------------------------------------------- 1 | ../_common/extra-files -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/locals.tf: -------------------------------------------------------------------------------- 1 | ../_common/locals.tf -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/output.tf: -------------------------------------------------------------------------------- 1 | ../_common/output.tf -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/variables.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | locals { 5 | env = "dev" 6 | region = "us-east-2" 7 | } 8 | -------------------------------------------------------------------------------- /testdata/archive-dir-absolute/dev/versions.tf: -------------------------------------------------------------------------------- 1 | ../_common/versions.tf -------------------------------------------------------------------------------- /testdata/archive-dir-defaults-only/.terraform/modules/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-defaults-only/.terraform/modules/subdir/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-defaults-only/.terraform/plugins/foo.txt: -------------------------------------------------------------------------------- 1 | This file should be ignored 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-defaults-only/bar.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/.terraform/file.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir-no-external/.terraform/file.txt -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/.terraform/modules/README: 
-------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/.terraform/plugins/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/.terraformignore: -------------------------------------------------------------------------------- 1 | # comments are ignored 2 | # extra spaces are irrelevant 3 | # ignore a file 4 | baz.txt 5 | # below is an empty line 6 | 7 | # ignore a directory 8 | terraform.d/ 9 | # negate ignoring a directory at the root 10 | !/terraform.d/ 11 | # ignore a file at a subpath 12 | **/foo/bar.tf 13 | # ignore files with specific endings 14 | foo/*.md 15 | # character groups 16 | bar/something-[a-z].txt 17 | # ignore a file 18 | boop.txt 19 | # but not one at the current directory 20 | !/boop.txt 21 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/.terraformrc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir-no-external/.terraformrc -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/bar.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/baz.txt: -------------------------------------------------------------------------------- 1 | baz 
-------------------------------------------------------------------------------- /testdata/archive-dir-no-external/exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir-no-external/exe -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/foo.terraform/bar.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir-no-external/foo.terraform/bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/sub/bar.txt: -------------------------------------------------------------------------------- 1 | ../bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/sub/zip.txt: -------------------------------------------------------------------------------- 1 | zip 2 | -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/sub2/bar.txt: -------------------------------------------------------------------------------- 1 | ../sub/bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir-no-external/sub2/zip.txt: -------------------------------------------------------------------------------- 1 | zip 2 | -------------------------------------------------------------------------------- /testdata/archive-dir/.terraform/file.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir/.terraform/file.txt 
-------------------------------------------------------------------------------- /testdata/archive-dir/.terraform/modules/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /testdata/archive-dir/.terraform/plugins/README: -------------------------------------------------------------------------------- 1 | Keep this file and directory here to test if its properly ignored 2 | 3 | -------------------------------------------------------------------------------- /testdata/archive-dir/.terraformignore: -------------------------------------------------------------------------------- 1 | # comments are ignored 2 | # extra spaces are irrelevant 3 | # ignore a file 4 | baz.txt 5 | # below is an empty line 6 | 7 | # ignore a directory 8 | terraform.d/ 9 | # negate ignoring a directory at the root 10 | !/terraform.d/ 11 | # ignore a file at a subpath 12 | **/foo/bar.tf 13 | # ignore files with specific endings 14 | foo/*.md 15 | # character groups 16 | bar/something-[a-z].txt 17 | # ignore a file 18 | boop.txt 19 | # but not one at the current directory 20 | !/boop.txt 21 | -------------------------------------------------------------------------------- /testdata/archive-dir/.terraformrc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir/.terraformrc -------------------------------------------------------------------------------- /testdata/archive-dir/bar.txt: -------------------------------------------------------------------------------- 1 | bar 2 | -------------------------------------------------------------------------------- /testdata/archive-dir/baz.txt: -------------------------------------------------------------------------------- 
1 | baz -------------------------------------------------------------------------------- /testdata/archive-dir/example.tf: -------------------------------------------------------------------------------- 1 | ../example.tf -------------------------------------------------------------------------------- /testdata/archive-dir/exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir/exe -------------------------------------------------------------------------------- /testdata/archive-dir/foo.terraform/bar.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/archive-dir/foo.terraform/bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir/sub/bar.txt: -------------------------------------------------------------------------------- 1 | ../bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir/sub/zip.txt: -------------------------------------------------------------------------------- 1 | zip 2 | -------------------------------------------------------------------------------- /testdata/archive-dir/sub2/bar.txt: -------------------------------------------------------------------------------- 1 | ../sub/bar.txt -------------------------------------------------------------------------------- /testdata/archive-dir/sub2/zip.txt: -------------------------------------------------------------------------------- 1 | zip 2 | -------------------------------------------------------------------------------- /testdata/example.tf: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | resource "null_resource" "foo" {} 5 | -------------------------------------------------------------------------------- /testdata/subdir-appears-first.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/go-slug/5dc4856e86ae2426722f0eb4b0d4ec8233ab5494/testdata/subdir-appears-first.tar.gz -------------------------------------------------------------------------------- /testdata/subdir-ordering/README.md: -------------------------------------------------------------------------------- 1 | Builds the `subdir-appears-first.tar.gz` test dependency for TestUnpack_HeaderOrdering 2 | -------------------------------------------------------------------------------- /testdata/subdir-ordering/main.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) HashiCorp, Inc. 2 | // SPDX-License-Identifier: MPL-2.0 3 | 4 | package main 5 | 6 | import ( 7 | "archive/tar" 8 | "compress/gzip" 9 | "fmt" 10 | "io" 11 | "log" 12 | "os" 13 | ) 14 | 15 | func main() { 16 | w, err := os.Create("../subdir-appears-first.tar.gz") 17 | if err != nil { 18 | log.Fatal(err) 19 | } 20 | defer w.Close() 21 | 22 | gzipW, err := gzip.NewWriterLevel(w, gzip.BestSpeed) 23 | if err != nil { 24 | log.Fatal(err) 25 | } 26 | defer gzipW.Close() 27 | 28 | tarW := tar.NewWriter(gzipW) 29 | defer tarW.Close() 30 | 31 | // The order of headers to write to the output file 32 | targets := []string{ 33 | "super/duper", 34 | "super/duper/trooper", 35 | "super", 36 | "super/duper/trooper/foo.txt", 37 | } 38 | 39 | for _, t := range targets { 40 | info, err := os.Stat(t) 41 | if err != nil { 42 | log.Fatal(err) 43 | } 44 | 45 | header := &tar.Header{ 46 | Format: tar.FormatUnknown, 47 | Name: t, 48 | ModTime: info.ModTime(), 49 | Mode: int64(info.Mode()), 50 | } 51 | 52 | switch { 53 | case info.IsDir(): 54 | header.Typeflag = tar.TypeDir 55 | 
header.Name += "/" 56 | default: 57 | header.Typeflag = tar.TypeReg 58 | header.Size = info.Size() 59 | } 60 | 61 | // Write the header first to the archive. 62 | if err := tarW.WriteHeader(header); err != nil { 63 | log.Fatal(err) 64 | } 65 | 66 | fmt.Printf("Added %q, unix nano mtime %d / %d\n", header.Name, info.ModTime().Unix(), info.ModTime().UnixNano()) 67 | 68 | if info.IsDir() { 69 | continue 70 | } 71 | 72 | f, err := os.Open(t) 73 | if err != nil { 74 | log.Fatal(err) 75 | } 76 | defer f.Close() 77 | 78 | _, err = io.Copy(tarW, f) 79 | if err != nil { 80 | log.Fatal(err) 81 | } 82 | } 83 | 84 | fmt.Printf("Copy these values into the 85 | } 86 | -------------------------------------------------------------------------------- /testdata/subdir-ordering/super/duper/trooper/foo.txt: -------------------------------------------------------------------------------- 1 | placeholder 2 | --------------------------------------------------------------------------------