├── .circleci └── config.yml ├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ └── test.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── codecov.yml ├── go.mod ├── go.sum ├── hugoreleaser.env ├── hugoreleaser.toml ├── lib ├── cloudfront.go ├── cloudfront_test.go ├── config.go ├── config_test.go ├── deployer.go ├── deployer_test.go ├── files.go ├── files_test.go ├── s3.go ├── s3_test.go ├── session.go ├── session_test.go ├── stats.go ├── store.go ├── store_test.go ├── testdata │ ├── .hidden │ │ ├── .s3deploy.ignore.yml │ │ └── hidden.txt │ ├── .s3deploy.yml │ ├── ab.txt │ ├── index.html │ └── main.css ├── url.go └── url_test.go ├── main.go ├── main_test.go ├── testscripts ├── basic.txt ├── flag_strip-index-html.txt ├── flags.txt ├── routes.txt ├── skipdirs_custom.txt ├── skipdirs_default.txt └── unfinished │ └── empty.txt └── watch_testscripts.sh /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | docker: 5 | - image: bepsays/ci-goreleaser:1.21900.20200 6 | working_directory: /src 7 | steps: 8 | - checkout 9 | - run: 10 | command: | 11 | go mod download 12 | workflows: 13 | version: 2 14 | release: 15 | jobs: 16 | - build: 17 | context: org-global 18 | filters: 19 | tags: 20 | only: /disabled-v[0-9]+(\.[0-9]+)*(-.*)*/ 21 | branches: 22 | ignore: /.*/ 23 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [bep] -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # See https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#package-ecosystem 2 | version: 2 3 | updates: 4 | - package-ecosystem: "gomod" 5 | directory: "/" 6 | 
schedule: 7 | interval: "daily" 8 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ master ] 4 | pull_request: 5 | name: Test 6 | permissions: 7 | contents: read 8 | jobs: 9 | test: 10 | strategy: 11 | matrix: 12 | go-version: [1.22.x,1.23.x] 13 | platform: [macos-latest, ubuntu-latest, windows-latest] 14 | runs-on: ${{ matrix.platform }} 15 | steps: 16 | - name: Install Go 17 | uses: actions/setup-go@v4 18 | with: 19 | go-version: ${{ matrix.go-version }} 20 | - name: Install staticcheck 21 | run: go install honnef.co/go/tools/cmd/staticcheck@latest 22 | shell: bash 23 | - name: Install golint 24 | run: go install golang.org/x/lint/golint@latest 25 | shell: bash 26 | - name: Update PATH 27 | run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH 28 | shell: bash 29 | - name: Checkout code 30 | uses: actions/checkout@v3 31 | - name: Fmt 32 | if: matrix.platform != 'windows-latest' # :( 33 | run: "diff <(gofmt -d .) <(printf '')" 34 | shell: bash 35 | - name: Vet 36 | run: go vet ./... 37 | - name: Staticcheck 38 | run: staticcheck ./... 39 | #- name: Lint 40 | # run: golint ./... 41 | - name: Test 42 | env: 43 | S3DEPLOY_TEST_KEY: ${{ secrets.S3DEPLOY_TEST_KEY }} 44 | S3DEPLOY_TEST_SECRET: ${{ secrets.S3DEPLOY_TEST_SECRET }} 45 | run: go test -race ./... -coverpkg=./... 
-coverprofile=coverage.txt -covermode=atomic 46 | - name: Upload coverage 47 | if: success() && matrix.platform == 'ubuntu-latest' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' 48 | run: | 49 | curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import # One-time step 50 | curl -Os https://uploader.codecov.io/latest/linux/codecov 51 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM 52 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig 53 | gpgv codecov.SHA256SUM.sig codecov.SHA256SUM 54 | shasum -a 256 -c codecov.SHA256SUM 55 | chmod +x codecov 56 | ./codecov 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | s3deploy 26 | *.log 27 | cover.out 28 | nohup.out 29 | 30 | dist 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Bjørn Erik Pedersen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, 
subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | go build 3 | 4 | install: 5 | go install 6 | 7 | release: 8 | git tag -a ${version} -m "Release ${version}" 9 | git push --follow-tags 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # s3deploy 2 | 3 | [![Project status: active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) 4 | [![GoDoc](https://godoc.org/github.com/bep/s3deploy?status.svg)](https://godoc.org/github.com/bep/s3deploy) 5 | [![Test](https://github.com/bep/s3deploy/actions/workflows/test.yml/badge.svg)](https://github.com/bep/s3deploy/actions/workflows/test.yml) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/bep/s3deploy)](https://goreportcard.com/report/github.com/bep/s3deploy) 7 | [![codecov](https://codecov.io/gh/bep/s3deploy/branch/master/graph/badge.svg)](https://codecov.io/gh/bep/s3deploy) 8 | 
[![Release](https://img.shields.io/github/release/bep/s3deploy.svg?style=flat-square)](https://github.com/bep/s3deploy/releases/latest) 9 | 10 | A simple tool to deploy static websites to Amazon S3 and CloudFront with Gzip and custom headers support (e.g. "Cache-Control"). It uses ETag hashes to check if a file has changed, which makes it optimal in combination with static site generators like [Hugo](https://github.com/gohugoio/hugo). 11 | 12 | * [Install](#install) 13 | * [Configuration](#configuration) 14 | * [Flags](#flags) 15 | * [Routes](#routes) 16 | * [Global AWS Configuration](#global-aws-configuration) 17 | * [Example IAM Policy](#example-iam-policy) 18 | * [CloudFront CDN Cache Invalidation](#cloudfront-cdn-cache-invalidation) 19 | * [Example IAM Policy With CloudFront Config](#example-iam-policy-with-cloudfront-config) 20 | * [Background Information](#background-information) 21 | * [Alternatives](#alternatives) 22 | * [Stargazers over time](#stargazers-over-time) 23 | 24 | ## Install 25 | 26 | Pre-built binaries can be found [here](https://github.com/bep/s3deploy/releases/latest). 27 | 28 | **s3deploy** is a [Go application](https://golang.org/doc/install), so you can also install the latest version with: 29 | 30 | ```bash 31 | go install github.com/bep/s3deploy/v2@latest 32 | ``` 33 | 34 | To install on MacOS using Homebrew: 35 | 36 | ```bash 37 | brew install bep/tap/s3deploy 38 | ``` 39 | 40 | **Note** The brew tap above currently stops at v2.8.1; see [this issue](https://github.com/bep/s3deploy/issues/312) for more info. 41 | 42 | Note that `s3deploy` is a perfect tool to use with a continuous integration tool such as [CircleCI](https://circleci.com/). See [this](https://mostlygeek.com/posts/hugo-circle-s3-hosting/) for a tutorial that uses s3deploy with CircleCI. 
43 | 44 | ## Configuration 45 | 46 | ### Flags 47 | 48 | The list of flags from running `s3deploy -h`: 49 | 50 | ``` 51 | -V print version and exit 52 | -acl string 53 | provide an ACL for uploaded objects. to make objects public, set to 'public-read'. all possible values are listed here: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl (default "private") 54 | -bucket string 55 | destination bucket name on AWS 56 | -config string 57 | optional config file (default ".s3deploy.yml") 58 | -distribution-id value 59 | optional CDN distribution ID for cache invalidation, repeat flag for multiple distributions 60 | -endpoint-url string 61 | optional endpoint URL 62 | -force 63 | upload even if the etags match 64 | -h help 65 | -ignore value 66 | regexp pattern for ignoring files, repeat flag for multiple patterns, 67 | -key string 68 | access key ID for AWS 69 | -max-delete int 70 | maximum number of files to delete per deploy (default 256) 71 | -path string 72 | optional bucket sub path 73 | -public-access 74 | DEPRECATED: please set -acl='public-read' 75 | -quiet 76 | enable silent mode 77 | -region string 78 | name of AWS region 79 | -secret string 80 | secret access key for AWS 81 | -skip-local-dirs value 82 | regexp pattern of files of directories to ignore when walking the local directory, repeat flag for multiple patterns, default "^\\/?(?:\\w+\\/)*(\\.\\w+)" 83 | -skip-local-files value 84 | regexp pattern of files to ignore when walking the local directory, repeat flag for multiple patterns, default "^(.*/)?/?.DS_Store$" 85 | -source string 86 | path of files to upload (default ".") 87 | -strip-index-html 88 | strip index.html from all directories expect for the root entry 89 | -try 90 | trial run, no remote updates 91 | -v enable verbose logging 92 | -workers int 93 | number of workers to upload files (default -1) 94 | ``` 95 | 96 | The flags can be set in one of (in priority order): 97 | 98 | 1. As a flag, e.g. 
`s3deploy -path public/` 99 | 1. As an OS environment variable prefixed with `S3DEPLOY_`, e.g. `S3DEPLOY_PATH="public/"`. 100 | 1. As a key/value in `.s3deploy.yml`, e.g. `path: "public/"` 101 | 1. For `key` and `secret` resolution, the OS environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` (and `AWS_SESSION_TOKEN`) will also be checked. This way you don't need to do anything special to make it work with [AWS Vault](https://github.com/99designs/aws-vault) and similar tools. 102 | 103 | 104 | Environment variable expressions in `.s3deploy.yml` of the form `${VAR}` will be expanded before it's parsed: 105 | 106 | ```yaml 107 | path: "${MYVARS_PATH}" 108 | max-delete: "${MYVARS_MAX_DELETE@U}" 109 | ``` 110 | 111 | Note the special `@U` (_Unquote_) syntax for the int field. 112 | 113 | #### Skip local files and directories 114 | 115 | The options `-skip-local-dirs` and `-skip-local-files` will match against a relative path from the source directory with Unix-style path separators. The source directory is represented by `.`, the rest starts with a `/`. 116 | 117 | #### Strip index.html 118 | 119 | The option `-strip-index-html` strips index.html from all directories except for the root entry. This matches the option with the (almost) same name in [hugo deploy](https://gohugo.io/hosting-and-deployment/hugo-deploy/). This simplifies the cloud configuration needed for some use cases, such as CloudFront distributions with S3 bucket origins. See this [PR](https://github.com/gohugoio/hugo/pull/12608) for more information. 120 | 121 | ### Routes 122 | 123 | The `.s3deploy.yml` configuration file can also contain one or more routes. A route matches files given a regexp. Each route can apply: 124 | 125 | `header` 126 | : Header values, the most notable is probably `Cache-Control`. 
Note that the list of [system-defined metadata](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#object-metadata) that S3 currently supports and returns as HTTP headers when hosting a static site is very short. If you have more advanced requirements (e.g. security headers), see [this comment](https://github.com/bep/s3deploy/issues/57#issuecomment-991782098). 127 | 128 | `gzip` 129 | : Set to true to gzip the content when stored in S3. This will also set the correct `Content-Encoding` when fetching the object from S3. 130 | 131 | Example: 132 | 133 | ```yaml 134 | routes: 135 | - route: "^.+\\.(js|css|svg|ttf)$" 136 | # cache static assets for 1 year. 137 | headers: 138 | Cache-Control: "max-age=31536000, no-transform, public" 139 | gzip: true 140 | - route: "^.+\\.(png|jpg)$" 141 | headers: 142 | Cache-Control: "max-age=31536000, no-transform, public" 143 | gzip: false 144 | - route: "^.+\\.(html|xml|json)$" 145 | gzip: true 146 | ``` 147 | 148 | 149 | 150 | 151 | ## Global AWS Configuration 152 | 153 | See https://docs.aws.amazon.com/sdk-for-go/api/aws/session/#hdr-Sessions_from_Shared_Config 154 | 155 | The `AWS SDK` will fall back to credentials from `~/.aws/credentials`. 156 | 157 | If you set the `AWS_SDK_LOAD_CONFIG` environment variable, it will also load shared config from `~/.aws/config` where you can set the global `region` to use if not provided etc. 158 | 159 | ## Example IAM Policy 160 | 161 | ```json 162 | { 163 | "Version": "2012-10-17", 164 | "Statement":[ 165 | { 166 | "Effect":"Allow", 167 | "Action":[ 168 | "s3:ListBucket", 169 | "s3:GetBucketLocation" 170 | ], 171 | "Resource":"arn:aws:s3:::<bucketname>" 172 | }, 173 | { 174 | "Effect":"Allow", 175 | "Action":[ 176 | "s3:PutObject", 177 | "s3:PutObjectAcl", 178 | "s3:DeleteObject" 179 | ], 180 | "Resource":"arn:aws:s3:::<bucketname>/*" 181 | } 182 | ] 183 | } 184 | ``` 185 | 186 | Replace `<bucketname>` with your own. 
187 | 188 | ## CloudFront CDN Cache Invalidation 189 | 190 | If you have configured CloudFront CDN in front of your S3 bucket, you can supply the `distribution-id` as a flag. This will make sure to invalidate the cache for the updated files after the deployment to S3. Note that the AWS user must have the needed access rights. 191 | 192 | Note that CloudFront allows [1,000 paths per month at no charge](https://aws.amazon.com/blogs/aws/simplified-multiple-object-invalidation-for-amazon-cloudfront/), so S3deploy tries to be smart about the invalidation strategy; we try to reduce the number of paths to 8. If that isn't possible, we will fall back to a full invalidation, e.g. "/*". 193 | 194 | ### Example IAM Policy With CloudFront Config 195 | 196 | ```json 197 | { 198 | "Version": "2012-10-17", 199 | "Statement": [ 200 | { 201 | "Effect": "Allow", 202 | "Action": [ 203 | "s3:ListBucket", 204 | "s3:GetBucketLocation" 205 | ], 206 | "Resource": "arn:aws:s3:::<bucketname>" 207 | }, 208 | { 209 | "Effect": "Allow", 210 | "Action": [ 211 | "s3:PutObject", 212 | "s3:DeleteObject", 213 | "s3:PutObjectAcl" 214 | ], 215 | "Resource": "arn:aws:s3:::<bucketname>/*" 216 | }, 217 | { 218 | "Effect": "Allow", 219 | "Action": [ 220 | "cloudfront:GetDistribution", 221 | "cloudfront:CreateInvalidation" 222 | ], 223 | "Resource": "*" 224 | } 225 | ] 226 | } 227 | ``` 228 | 229 | ## Background Information 230 | 231 | If you're looking at `s3deploy` then you've probably already seen the [`aws s3 sync` command](https://docs.aws.amazon.com/cli/latest/reference/s3/sync.html) - this command has a sync-strategy that is not optimised for static sites, it compares the **timestamp** and **size** of your files to decide whether to upload the file. 232 | 233 | Because static-site generators can recreate **every** file (even if identical) the timestamp is updated and thus `aws s3 sync` will needlessly upload every single file. 
`s3deploy` on the other hand checks the etag hash to check for actual changes, and uses that instead. 234 | 235 | ## Alternatives 236 | 237 | * [go3up](https://github.com/alexaandru/go3up) by Alexandru Ungur 238 | * [s3up](https://github.com/nathany/s3up) by Nathan Youngman (the starting-point of this project) 239 | 240 | ## Stargazers over time 241 | 242 | [![Stargazers over time](https://starchart.cc/bep/s3deploy.svg)](https://starchart.cc/bep/s3deploy) 243 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: auto 6 | threshold: 0.5% 7 | patch: off 8 | 9 | comment: 10 | require_changes: true 11 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/bep/s3deploy/v2 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.18.1 7 | github.com/aws/aws-sdk-go-v2/credentials v1.13.22 8 | github.com/aws/aws-sdk-go-v2/service/cloudfront v1.26.7 9 | github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 10 | github.com/bep/helpers v0.5.0 11 | github.com/bep/predicate v0.2.0 12 | github.com/dsnet/golib/memfile v1.0.0 13 | github.com/frankban/quicktest v1.14.6 14 | github.com/oklog/ulid/v2 v2.1.0 15 | github.com/peterbourgon/ff/v3 v3.4.0 16 | github.com/rogpeppe/go-internal v1.12.0 17 | golang.org/x/sync v0.8.0 18 | golang.org/x/text v0.19.0 19 | gopkg.in/yaml.v2 v2.4.0 20 | ) 21 | 22 | require ( 23 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect 24 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect 25 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect 26 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect 27 | 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect 28 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect 29 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect 30 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect 31 | github.com/aws/smithy-go v1.13.5 // indirect 32 | github.com/davecgh/go-spew v1.1.1 // indirect 33 | github.com/google/go-cmp v0.6.0 // indirect 34 | github.com/jmespath/go-jmespath v0.4.0 // indirect 35 | github.com/kr/pretty v0.3.1 // indirect 36 | github.com/kr/text v0.2.0 // indirect 37 | golang.org/x/sys v0.20.0 // indirect 38 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect 39 | ) 40 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= 2 | github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo= 3 | github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= 4 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= 5 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= 6 | github.com/aws/aws-sdk-go-v2/credentials v1.13.22 h1:Hp9rwJS4giQ48xqonRV/s7QcDf/wxF6UY7osRmBabvI= 7 | github.com/aws/aws-sdk-go-v2/credentials v1.13.22/go.mod h1:BfNcm6A9nSd+bzejDcMJ5RE+k6WbkCwWkQil7q4heRk= 8 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= 9 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= 10 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo= 11 | 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= 12 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= 13 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4= 14 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= 15 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX4cKk5rAvUAl4Qu+c= 16 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0= 17 | github.com/aws/aws-sdk-go-v2/service/cloudfront v1.26.7 h1:tKOqS6lQgQQfGxHmTIb16YyVmT0YDCS4g0wwyOzOtVA= 18 | github.com/aws/aws-sdk-go-v2/service/cloudfront v1.26.7/go.mod h1:YTd4wGn2beCF9wkSTpEcupk79zDFYJk2Ca76B8YyvJg= 19 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= 20 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= 21 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 h1:zZSLP3v3riMOP14H7b4XP0uyfREDQOYv2cqIrvTXDNQ= 22 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29/go.mod h1:z7EjRjVwZ6pWcWdI2H64dKttvzaP99jRIj5hphW0M5U= 23 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= 24 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s= 25 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU= 26 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 h1:dBL3StFxHtpBzJJ/mNEsjXVgfO+7jR0dAIEwLqMapEA= 27 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3/go.mod 
h1:f1QyiAsvIv4B49DmCqrhlXqyaR+0IxMmyX+1P+AnzOM= 28 | github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0 h1:ya7fmrN2fE7s1P2gaPbNg5MTkERVWfsH8ToP1YC4Z9o= 29 | github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw= 30 | github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= 31 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= 32 | github.com/aws/aws-sdk-go-v2/service/sts v1.18.11/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= 33 | github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= 34 | github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= 35 | github.com/bep/helpers v0.5.0 h1:rneezhnG7GzLFlsEWO/EnleaBRuluBDGFimalO6Y50o= 36 | github.com/bep/helpers v0.5.0/go.mod h1:dSqCzIvHbzsk5YOesp1M7sKAq5xUcvANsRoKdawxH4Q= 37 | github.com/bep/predicate v0.2.0 h1:+jHhIbj1UOZn1POqZNKDryuJoi/9wPYg83siaRPb2b0= 38 | github.com/bep/predicate v0.2.0/go.mod h1:MQHXILk/U5Dg7eazQsAB69BrQrYSsl5jLlEejgBQyzg= 39 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 40 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 41 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 42 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 43 | github.com/dsnet/golib/memfile v1.0.0 h1:J9pUspY2bDCbF9o+YGwcf3uG6MdyITfh/Fk3/CaEiFs= 44 | github.com/dsnet/golib/memfile v1.0.0/go.mod h1:tXGNW9q3RwvWt1VV2qrRKlSSz0npnh12yftCSCy2T64= 45 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 46 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 47 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 48 | github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 49 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 50 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 51 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 52 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 53 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 54 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 55 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 56 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 57 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 58 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 59 | github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= 60 | github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= 61 | github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= 62 | github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= 63 | github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= 64 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 65 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 66 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 67 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 68 | github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 69 | github.com/rogpeppe/go-internal 
v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= 70 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 71 | golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= 72 | golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 73 | golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= 74 | golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 75 | golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= 76 | golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= 77 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= 78 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= 79 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 80 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 81 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 82 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 83 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 84 | -------------------------------------------------------------------------------- /hugoreleaser.env: -------------------------------------------------------------------------------- 1 | HUGORELEASER_COMMITISH=master 2 | -------------------------------------------------------------------------------- /hugoreleaser.toml: -------------------------------------------------------------------------------- 1 | project = "s3deploy" 2 | 3 | # Create archive aliases (copies) for base archive filenames 4 | # which contains the string on the left, e.g. 5 | # { "linux-64bit" = "linux-amd64" }. 
6 | # 7 | # The common use case is to preserve some backward compatibility on name changes, 8 | # the example above is taken from Hugo. 9 | # You can include any extension in the above to limit this to e.g. only .deb archives. 10 | archive_alias_replacements = {} 11 | 12 | # Go settings can be set on any of Project > Build. 13 | # See Build settings for merge rules. 14 | [go_settings] 15 | # go_proxy will be used to set GOPROXY when running Go. 16 | # It will default to 'https://proxy.golang.org' if not set. 17 | # 18 | # See https://proxy.golang.org/ for more information. 19 | go_proxy = "https://proxy.golang.org" 20 | go_exe = "go" 21 | 22 | # Build settings can be set on any of Project > Build > Goos > Goarch. 23 | # Zero values (empty strings, 0 numbers) and nil slices/maps will inherit values from the nearest non-zero value above for a key. 24 | # Empty slices and maps will stay empty (e.g. `env = []`) 25 | [build_settings] 26 | binary = "s3deploy" 27 | flags = ["-buildmode", "exe"] 28 | env = ["CGO_ENABLED=0"] 29 | ldflags = "-s -w -X main.tag=${HUGORELEASER_TAG}" 30 | 31 | # Archive settings can be set on any of Project > Archive. 32 | # Follows the same merge rules as Build settings. 33 | [archive_settings] 34 | name_template = "{{ .Project }}_{{ .Tag | trimPrefix `v` }}_{{ .Goos }}-{{ .Goarch }}" 35 | # Extra, as in: In addition to the binary. 36 | extra_files = [ 37 | { source_path = "README.md", target_path = "README.md" }, 38 | { source_path = "LICENSE", target_path = "LICENSE" }, 39 | ] 40 | [archive_settings.type] 41 | format = "tar.gz" 42 | extension = ".tar.gz" 43 | 44 | [release_settings] 45 | name = "${HUGORELEASER_TAG}" 46 | type = "github" 47 | repository = "s3deploy" 48 | repository_owner = "bep" 49 | 50 | draft = true 51 | prerelease = false 52 | 53 | [release_settings.release_notes_settings] 54 | # Use Hugoreleaser's autogenerated release notes. 55 | generate = true 56 | # Enable this to use GitHub's autogenerated release notes. 
57 | generate_on_host = false 58 | 59 | # Set this if you have release notes file ready to use. 60 | filename = "" 61 | 62 | # A custom template filename for Hugoreleaser's autogenerated release notes. 63 | # Will fall back to the default if not set. 64 | template_filename = "" 65 | 66 | # Collapse releases with < 10 changes below one title. 67 | short_threshold = 10 68 | short_title = "What's Changed" 69 | 70 | groups = [ 71 | # Group the changes in the release notes by title. 72 | # You need at least one. 73 | # The groups will be tested in order until a match is found. 74 | # The titles will, by default, be listed in the given order in the release note. 75 | # You can set an optional ordinal to adjust the order (as in the setup below). 76 | # Any match with ignore=true will be dropped. 77 | { regexp = "snapcraft:|Merge commit|Squashed", ignore = true }, 78 | { title = "Bug fixes", regexp = "fix", ordinal = 20 }, 79 | { title = "Dependency Updates", regexp = "deps", ordinal = 30 }, 80 | { title = "Documentation", regexp = "doc", ordinal = 40 }, 81 | { title = "Improvements", regexp = ".*", ordinal = 10 }, 82 | ] 83 | 84 | [[builds]] 85 | # User-defined path. Can be used to partition the build/archive step, e.g.: 86 | # 87 | # hugoreleaser build -paths "builds/unix/**" 88 | # 89 | # The above would only build binaries matching the given path ("Unix binaries"). 
90 | path = "unix" 91 | 92 | [[builds.os]] 93 | goos = "linux" 94 | [[builds.os.archs]] 95 | goarch = "amd64" 96 | 97 | [[builds]] 98 | path = "macos" 99 | 100 | [[builds.os]] 101 | goos = "darwin" 102 | [[builds.os.archs]] 103 | goarch = "universal" 104 | 105 | [[builds]] 106 | path = "windows" 107 | 108 | [[builds.os]] 109 | goos = "windows" 110 | [builds.os.build_settings] 111 | binary = "s3deploy.exe" 112 | [[builds.os.archs]] 113 | goarch = "amd64" 114 | 115 | [[archives]] 116 | paths = ["builds/{unix,macos}/**"] 117 | [[archives]] 118 | paths = ["builds/macos/**"] 119 | [archives.archive_settings] 120 | extra_files = [] 121 | [archives.archive_settings.type] 122 | # When format is _plugin, 123 | # archive_settings.plugin must also be configured. 124 | format = "_plugin" 125 | extension = ".pkg" 126 | [archives.archive_settings.plugin] 127 | id = "macospkgremote" 128 | type = "gorun" 129 | # Note there is a "local" variant of this that may be simpler to set up, see 130 | # https://github.com/gohugoio/hugoreleaser-archive-plugins/tree/main/macospkg 131 | command = "github.com/gohugoio/hugoreleaser-archive-plugins/macospkgremote@v0.1.1" 132 | [archives.archive_settings.custom_settings] 133 | # Package settings 134 | package_identifier = "io.gohugo.hugoreleaser" 135 | package_version = "${HUGORELEASER_TAG}" 136 | 137 | # Apple settings. Only needed in the "local" variant of this plugin. 
138 | # apple_signing_identity = "${BUILDPKG_APPLE_DEVELOPER_SIGNING_IDENTITY}" 139 | 140 | # AWS Settings 141 | bucket = "s3fptest" 142 | queue = "https://sqs.eu-north-1.amazonaws.com/656975317043/s3fptest_client" 143 | access_key_id = "${S3RPC_CLIENT_ACCESS_KEY_ID}" 144 | secret_access_key = "${S3RPC_CLIENT_SECRET_ACCESS_KEY}" 145 | 146 | [[archives]] 147 | paths = ["builds/**/linux/amd64"] 148 | [archives.archive_settings] 149 | binary_dir = "/usr/local/bin" 150 | extra_files = [] 151 | [archives.archive_settings.type] 152 | # When format is _plugin, 153 | # archive_settings.plugin must also be configured. 154 | format = "_plugin" 155 | extension = ".deb" 156 | [archives.archive_settings.plugin] 157 | id = "deb" 158 | # gorun is currently the only type. 159 | type = "gorun" 160 | # If the plugin source lives locally, this can also be a file path, 161 | # e.g. './myplugin'. In those setups you may need to set dir to the directory path, 162 | # and use "." for the command. 163 | command = "github.com/gohugoio/hugoreleaser-archive-plugins/deb@v0.6.1" 164 | [archives.archive_settings.custom_settings] 165 | # Custom settings as defined by the Deb plugin. 166 | vendor = "gohugo.io" 167 | homepage = "https://github.com/gohugoio/hugoreleaser" 168 | maintainer = "Bjørn Erik Pedersen " 169 | description = "Build, archive and release Go programs." 
170 | license = "Apache-2.0" 171 | [[archives]] 172 | paths = ["builds/windows/**"] 173 | [archives.archive_settings] 174 | [archives.archive_settings.type] 175 | format = "zip" 176 | extension = ".zip" 177 | 178 | [[releases]] 179 | paths = ["archives/**"] 180 | # In this file we have only one release, but path can be used to partition the release step, e.g.: 181 | # hugoreleaser release -paths "releases/myrelease" 182 | path = "myrelease" 183 | -------------------------------------------------------------------------------- /lib/cloudfront.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "errors" 11 | "path" 12 | "sort" 13 | "strings" 14 | "time" 15 | 16 | "github.com/aws/aws-sdk-go-v2/service/cloudfront" 17 | "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" 18 | ) 19 | 20 | var _ remoteCDN = (*cloudFrontClient)(nil) 21 | 22 | type cloudFrontClient struct { 23 | // The CloudFront distribution IDs 24 | distributionIDs Strings 25 | 26 | // Will invalidate the entire cache, e.g. 
"/*" 27 | force bool 28 | bucketPath string 29 | 30 | logger printer 31 | cf cloudfrontHandler 32 | } 33 | 34 | func newCloudFrontClient( 35 | handler cloudfrontHandler, 36 | logger printer, 37 | cfg *Config, 38 | ) (*cloudFrontClient, error) { 39 | if len(cfg.CDNDistributionIDs) == 0 { 40 | return nil, errors.New("must provide one or more distribution ID") 41 | } 42 | return &cloudFrontClient{ 43 | distributionIDs: cfg.CDNDistributionIDs, 44 | force: cfg.Force, 45 | bucketPath: cfg.BucketPath, 46 | logger: logger, 47 | cf: handler, 48 | }, nil 49 | } 50 | 51 | type cloudfrontHandler interface { 52 | GetDistribution(ctx context.Context, params *cloudfront.GetDistributionInput, optFns ...func(*cloudfront.Options)) (*cloudfront.GetDistributionOutput, error) 53 | CreateInvalidation(ctx context.Context, params *cloudfront.CreateInvalidationInput, optFns ...func(*cloudfront.Options)) (*cloudfront.CreateInvalidationOutput, error) 54 | } 55 | 56 | func (c *cloudFrontClient) InvalidateCDNCache(ctx context.Context, paths ...string) error { 57 | if len(paths) == 0 { 58 | return nil 59 | } 60 | 61 | invalidateForID := func(id string) error { 62 | dcfg, err := c.cf.GetDistribution(ctx, &cloudfront.GetDistributionInput{ 63 | Id: &id, 64 | }) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | originPath := *dcfg.Distribution.DistributionConfig.Origins.Items[0].OriginPath 70 | var root string 71 | if originPath != "" || c.bucketPath != "" { 72 | var subPath string 73 | root, subPath = c.determineRootAndSubPath(c.bucketPath, originPath) 74 | if subPath != "" { 75 | for i, p := range paths { 76 | paths[i] = strings.TrimPrefix(p, subPath) 77 | } 78 | } 79 | } 80 | 81 | // This will try to reduce the number of invaldation paths to maximum 8. 82 | // If that isn't possible it will fall back to a full invalidation, e.g. "/*". 83 | // CloudFront allows 1000 free invalidations per month. After that they 84 | // cost money, so we want to keep this down. 
85 | paths = c.normalizeInvalidationPaths(root, 8, c.force, paths...) 86 | 87 | if len(paths) > 10 { 88 | c.logger.Printf("Create CloudFront invalidation request for %d paths", len(paths)) 89 | } else { 90 | c.logger.Printf("Create CloudFront invalidation request for %v", paths) 91 | } 92 | 93 | in := &cloudfront.CreateInvalidationInput{ 94 | DistributionId: &id, 95 | InvalidationBatch: c.pathsToInvalidationBatch(time.Now().Format("20060102150405"), paths...), 96 | } 97 | 98 | _, err = c.cf.CreateInvalidation( 99 | ctx, 100 | in, 101 | ) 102 | 103 | return err 104 | } 105 | 106 | for _, id := range c.distributionIDs { 107 | if err := invalidateForID(id); err != nil { 108 | return err 109 | } 110 | } 111 | 112 | return nil 113 | } 114 | 115 | func (*cloudFrontClient) pathsToInvalidationBatch(ref string, paths ...string) *types.InvalidationBatch { 116 | cfpaths := &types.Paths{} 117 | for _, p := range paths { 118 | cfpaths.Items = append(cfpaths.Items, pathEscapeRFC1738(p)) 119 | } 120 | 121 | qty := int32(len(paths)) 122 | cfpaths.Quantity = &qty 123 | 124 | return &types.InvalidationBatch{ 125 | CallerReference: &ref, 126 | Paths: cfpaths, 127 | } 128 | } 129 | 130 | // determineRootAndSubPath takes the bucketPath, as set as a flag, 131 | // and the originPath, as set in the CDN config, and 132 | // determines the web context root and the sub path below this context. 
133 | func (c *cloudFrontClient) determineRootAndSubPath(bucketPath, originPath string) (webContextRoot string, subPath string) { 134 | if bucketPath == "" && originPath == "" { 135 | panic("one of bucketPath or originPath must be set") 136 | } 137 | bucketPath = strings.Trim(bucketPath, "/") 138 | originPath = strings.Trim(originPath, "/") 139 | 140 | webContextRoot = strings.TrimPrefix(bucketPath, originPath) 141 | if webContextRoot == "" { 142 | webContextRoot = "/" 143 | } 144 | 145 | if originPath != bucketPath { 146 | // If the bucket path is a prefix of the origin, these resources 147 | // are served from a sub path, e.g. https://example.com/foo. 148 | subPath = strings.TrimPrefix(originPath, bucketPath) 149 | } else { 150 | // Served from the root. 151 | subPath = bucketPath 152 | } 153 | 154 | return 155 | } 156 | 157 | // For path rules, see https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html 158 | func (c *cloudFrontClient) normalizeInvalidationPaths( 159 | root string, 160 | threshold int, 161 | force bool, 162 | paths ...string, 163 | ) []string { 164 | if !strings.HasPrefix(root, "/") { 165 | root = "/" + root 166 | } 167 | 168 | matchAll := path.Join(root, "*") 169 | clearAll := []string{matchAll} 170 | 171 | if force { 172 | return clearAll 173 | } 174 | 175 | var normalized []string 176 | var maxlevels int 177 | 178 | for _, p := range paths { 179 | p = pathClean(p) 180 | if !strings.HasPrefix(p, "/") { 181 | p = "/" + p 182 | } 183 | levels := strings.Count(p, "/") 184 | if levels > maxlevels { 185 | maxlevels = levels 186 | } 187 | 188 | if strings.HasSuffix(p, "index.html") { 189 | dir := path.Dir(p) 190 | if !strings.HasSuffix(dir, "/") { 191 | dir += "/" 192 | } 193 | normalized = append(normalized, dir) 194 | } else { 195 | normalized = append(normalized, p) 196 | } 197 | } 198 | 199 | normalized = uniqueStrings(normalized) 200 | sort.Strings(normalized) 201 | 202 | if len(normalized) > threshold { 203 | if 
len(normalized) > threshold { 204 | for k := maxlevels; k > 0; k-- { 205 | for i, p := range normalized { 206 | if strings.Count(p, "/") > k { 207 | parts := strings.Split(strings.TrimPrefix(path.Dir(p), "/"), "/") 208 | if len(parts) > 1 { 209 | parts = parts[:len(parts)-1] 210 | } 211 | normalized[i] = "/" + path.Join(parts...) + "/*" 212 | } 213 | } 214 | normalized = uniqueStrings(normalized) 215 | if len(normalized) <= threshold { 216 | break 217 | } 218 | } 219 | 220 | if len(normalized) > threshold { 221 | // Give up. 222 | return clearAll 223 | } 224 | } 225 | } 226 | 227 | for _, pattern := range normalized { 228 | if pattern == matchAll { 229 | return clearAll 230 | } 231 | } 232 | 233 | return normalized 234 | } 235 | 236 | func uniqueStrings(s []string) []string { 237 | var unique []string 238 | set := map[string]interface{}{} 239 | for _, val := range s { 240 | if _, ok := set[val]; !ok { 241 | unique = append(unique, val) 242 | set[val] = val 243 | } 244 | } 245 | return unique 246 | } 247 | -------------------------------------------------------------------------------- /lib/cloudfront_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 
5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "io" 12 | "path" 13 | "testing" 14 | 15 | "github.com/aws/aws-sdk-go-v2/aws" 16 | "github.com/aws/aws-sdk-go-v2/service/cloudfront" 17 | "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" 18 | qt "github.com/frankban/quicktest" 19 | ) 20 | 21 | func TestReduceInvalidationPaths(t *testing.T) { 22 | c := qt.New(t) 23 | 24 | var client *cloudFrontClient 25 | 26 | c.Assert(client.normalizeInvalidationPaths("root", 5, false, "/root/index.html"), qt.DeepEquals, []string{"/root/"}) 27 | c.Assert(client.normalizeInvalidationPaths("", 5, false, "/index.html"), qt.DeepEquals, []string{"/"}) 28 | c.Assert(client.normalizeInvalidationPaths("", 5, true, "/a", "/b"), qt.DeepEquals, []string{"/*"}) 29 | c.Assert(client.normalizeInvalidationPaths("root", 5, true, "/a", "/b"), qt.DeepEquals, []string{"/root/*"}) 30 | c.Assert(client.normalizeInvalidationPaths("root", 5, false, "/root/b/"), qt.DeepEquals, []string{"/root/b/"}) 31 | 32 | rootPlusMany := append([]string{"/index.html", "/styles.css"}, createFiles("css", false, 20)...) 33 | normalized := client.normalizeInvalidationPaths("", 5, false, rootPlusMany...) 34 | c.Assert(len(normalized), qt.DeepEquals, 3) 35 | c.Assert(normalized, qt.DeepEquals, []string{"/", "/css/*", "/styles.css"}) 36 | 37 | rootPlusManyInDifferentFolders := append([]string{"/index.html", "/styles.css"}, createFiles("css", true, 20)...) 38 | c.Assert(client.normalizeInvalidationPaths("", 5, false, rootPlusManyInDifferentFolders...), qt.DeepEquals, []string{"/*"}) 39 | 40 | rootPlusManyInDifferentFoldersNested := append([]string{"/index.html", "/styles.css"}, createFiles("blog", false, 10)...) 41 | rootPlusManyInDifferentFoldersNested = append(rootPlusManyInDifferentFoldersNested, createFiles("blog/l1", false, 10)...) 42 | rootPlusManyInDifferentFoldersNested = append(rootPlusManyInDifferentFoldersNested, createFiles("blog/l1/l2/l3/l5", false, 10)...) 
43 | rootPlusManyInDifferentFoldersNested = append(rootPlusManyInDifferentFoldersNested, createFiles("blog/l1/l3", false, 10)...) 44 | rootPlusManyInDifferentFoldersNested = append(rootPlusManyInDifferentFoldersNested, createFiles("about/l1", true, 10)...) 45 | rootPlusManyInDifferentFoldersNested = append(rootPlusManyInDifferentFoldersNested, createFiles("about/l1/l2/l3", false, 10)...) 46 | 47 | // avoid situations where many changes in some HTML template triggers update in /images and similar 48 | normalized = client.normalizeInvalidationPaths("", 5, false, rootPlusManyInDifferentFoldersNested...) 49 | c.Assert(len(normalized), qt.Equals, 4) 50 | c.Assert(normalized, qt.DeepEquals, []string{"/", "/about/*", "/blog/*", "/styles.css"}) 51 | 52 | changes := []string{"/hugoscss/categories/index.html", "/hugoscss/index.html", "/hugoscss/tags/index.html", "/hugoscss/post/index.html", "/hugoscss/post/hello-scss/index.html", "/hugoscss/styles/main.min.36816b22057425f8a5f66b73918446b0cd793c0c6125406c285948f507599d1e.css"} 53 | normalized = client.normalizeInvalidationPaths("/hugoscss", 3, false, changes...) 54 | c.Assert(normalized, qt.DeepEquals, []string{"/hugoscss/*"}) 55 | 56 | changes = []string{"/a/b1/a.css", "/a/b2/b.css"} 57 | normalized = client.normalizeInvalidationPaths("/", 3, false, changes...) 58 | c.Assert(normalized, qt.DeepEquals, []string{"/a/b1/a.css", "/a/b2/b.css"}) 59 | 60 | normalized = client.normalizeInvalidationPaths("/", 1, false, changes...) 61 | c.Assert(normalized, qt.DeepEquals, []string{"/a/*"}) 62 | 63 | // Force 64 | normalized = client.normalizeInvalidationPaths("", 5, true, rootPlusManyInDifferentFoldersNested...) 65 | c.Assert(normalized, qt.DeepEquals, []string{"/*"}) 66 | normalized = client.normalizeInvalidationPaths("root", 5, true, rootPlusManyInDifferentFoldersNested...) 
67 | c.Assert(normalized, qt.DeepEquals, []string{"/root/*"}) 68 | } 69 | 70 | func TestDetermineRootAndSubPath(t *testing.T) { 71 | c := qt.New(t) 72 | 73 | var client *cloudFrontClient 74 | 75 | check := func(bucketPath, originPath, expectWebContextRoot, expectSubPath string) { 76 | t.Helper() 77 | s1, s2 := client.determineRootAndSubPath(bucketPath, originPath) 78 | c.Assert(s1, qt.Equals, expectWebContextRoot) 79 | c.Assert(s2, qt.Equals, expectSubPath) 80 | } 81 | 82 | check("temp/forsale", "temp", "/forsale", "temp") 83 | check("/temp/forsale/", "temp", "/forsale", "temp") 84 | check("root", "root", "/", "root") 85 | check("root", "/root", "/", "root") 86 | } 87 | 88 | func TestPathsToInvalidationBatch(t *testing.T) { 89 | c := qt.New(t) 90 | 91 | var client *cloudFrontClient 92 | 93 | batch := client.pathsToInvalidationBatch("myref", "/path1/", "/path2/") 94 | 95 | c.Assert(batch, qt.IsNotNil) 96 | c.Assert(*batch.CallerReference, qt.Equals, "myref") 97 | c.Assert(int(*batch.Paths.Quantity), qt.Equals, 2) 98 | } 99 | 100 | func TestNewCloudFrontClient(t *testing.T) { 101 | c := qt.New(t) 102 | client, err := newCloudFrontClient( 103 | &mockCloudfrontHandler{}, 104 | newPrinter(io.Discard), 105 | &Config{ 106 | CDNDistributionIDs: Strings{"12345"}, 107 | Force: true, 108 | BucketPath: "/mypath", 109 | }, 110 | ) 111 | c.Assert(err, qt.IsNil) 112 | c.Assert(client, qt.IsNotNil) 113 | c.Assert(client.distributionIDs[0], qt.Equals, "12345") 114 | c.Assert(client.bucketPath, qt.Equals, "/mypath") 115 | c.Assert(client.force, qt.Equals, true) 116 | } 117 | 118 | func createFiles(root string, differentFolders bool, num int) []string { 119 | files := make([]string, num) 120 | 121 | for i := 0; i < num; i++ { 122 | nroot := root 123 | if differentFolders { 124 | nroot = fmt.Sprintf("%s-%d", root, i) 125 | } 126 | files[i] = path.Join(nroot, fmt.Sprintf("file%d.css", i+1)) 127 | } 128 | 129 | return files 130 | } 131 | 132 | type mockCloudfrontHandler struct{} 133 | 
134 | func (c *mockCloudfrontHandler) GetDistribution(ctx context.Context, params *cloudfront.GetDistributionInput, optFns ...func(*cloudfront.Options)) (*cloudfront.GetDistributionOutput, error) { 135 | return &cloudfront.GetDistributionOutput{ 136 | Distribution: &types.Distribution{ 137 | DomainName: aws.String("example.com"), 138 | }, 139 | }, nil 140 | } 141 | 142 | func (c *mockCloudfrontHandler) CreateInvalidation(ctx context.Context, params *cloudfront.CreateInvalidationInput, optFns ...func(*cloudfront.Options)) (*cloudfront.CreateInvalidationOutput, error) { 143 | return &cloudfront.CreateInvalidationOutput{}, nil 144 | } 145 | -------------------------------------------------------------------------------- /lib/config.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "bytes" 10 | "errors" 11 | "flag" 12 | "fmt" 13 | "io" 14 | "log" 15 | "os" 16 | "path/filepath" 17 | "regexp" 18 | "strconv" 19 | "strings" 20 | "sync" 21 | 22 | "github.com/bep/helpers/envhelpers" 23 | "github.com/bep/predicate" 24 | "github.com/peterbourgon/ff/v3" 25 | "gopkg.in/yaml.v2" 26 | ) 27 | 28 | var errUnsupportedFlagType = errors.New("unsupported flag type") 29 | 30 | // Parse the flags in the flag set from the provided (presumably commandline) 31 | // args. Additional flags may be provided to parse from a config file and/or 32 | // environment variables in that priority order. 33 | // The Config needs to be initialized with Init before it's used. 
34 | func ConfigFromArgs(args []string) (*Config, error) { 35 | fs := flag.NewFlagSet("s3deploy", flag.ContinueOnError) 36 | cfg := flagsToConfig(fs) 37 | 38 | if err := ff.Parse(fs, args, 39 | ff.WithEnvVarPrefix("S3DEPLOY"), 40 | ff.WithConfigFileFlag("config"), 41 | ff.WithConfigFileParser(parserYAMLConfig), 42 | ff.WithAllowMissingConfigFile(true), 43 | ); err != nil { 44 | return nil, err 45 | } 46 | 47 | return cfg, nil 48 | } 49 | 50 | // Config configures a deployment. 51 | type Config struct { 52 | fileConf fileConfig 53 | 54 | AccessKey string 55 | SecretKey string 56 | 57 | SourcePath string 58 | BucketName string 59 | 60 | // To have multiple sites in one bucket. 61 | BucketPath string 62 | RegionName string 63 | 64 | // When set, will invalidate the CDN cache(s) for the updated files. 65 | CDNDistributionIDs Strings 66 | 67 | // When set, will override the default AWS endpoint. 68 | EndpointURL string 69 | 70 | // Optional configFile 71 | ConfigFile string 72 | 73 | NumberOfWorkers int 74 | MaxDelete int 75 | ACL string 76 | PublicReadACL bool 77 | StripIndexHTML bool 78 | Verbose bool 79 | Silent bool 80 | Force bool 81 | Try bool 82 | Ignore Strings 83 | 84 | // One or more regular expressions of files to ignore when walking the local directory. 85 | // If not set, defaults to ".DS_Store". 86 | // Note that the path given will have Unix separators, regardless of the OS. 87 | SkipLocalFiles Strings 88 | 89 | // A list of regular expressions of directories to ignore when walking the local directory. 90 | // If not set, defaults to ignoring hidden directories. 91 | // Note that the path given will have Unix separators, regardless of the OS. 92 | SkipLocalDirs Strings 93 | 94 | // CLI state 95 | PrintVersion bool 96 | 97 | // Print help 98 | Help bool 99 | 100 | // Mostly useful for testing. 101 | baseStore remoteStore 102 | 103 | fs *flag.FlagSet 104 | 105 | initOnce sync.Once 106 | 107 | // Compiled values. 
108 | skipLocalFiles predicate.P[string] 109 | skipLocalDirs predicate.P[string] 110 | ignore predicate.P[string] 111 | } 112 | 113 | func (cfg *Config) Usage() { 114 | cfg.fs.Usage() 115 | } 116 | 117 | func (cfg *Config) Init() error { 118 | var err error 119 | cfg.initOnce.Do(func() { 120 | err = cfg.init() 121 | }) 122 | return err 123 | } 124 | 125 | func (cfg *Config) loadFileConfig() error { 126 | if cfg.ConfigFile != "" { 127 | data, err := os.ReadFile(cfg.ConfigFile) 128 | if err != nil { 129 | if !os.IsNotExist(err) { 130 | return err 131 | } 132 | } else { 133 | s := envhelpers.Expand(string(data), func(k string) string { 134 | return os.Getenv(k) 135 | }) 136 | data = []byte(s) 137 | 138 | err = yaml.Unmarshal(data, &cfg.fileConf) 139 | if err != nil { 140 | return err 141 | } 142 | } 143 | } 144 | 145 | return cfg.fileConf.init() 146 | } 147 | 148 | func (cfg *Config) shouldIgnoreLocal(key string) bool { 149 | return cfg.ignore(key) 150 | } 151 | 152 | func (cfg *Config) shouldIgnoreRemote(key string) bool { 153 | sub := key[len(cfg.BucketPath):] 154 | sub = strings.TrimPrefix(sub, "/") 155 | 156 | for _, r := range cfg.fileConf.Routes { 157 | if r.Ignore && r.routerRE.MatchString(sub) { 158 | return true 159 | } 160 | } 161 | 162 | return cfg.ignore(sub) 163 | } 164 | 165 | const ( 166 | defaultSkipLocalFiles = `^(.*/)?/?.DS_Store$` 167 | defaultSkipLocalDirs = `^\/?(?:\w+\/)*(\.\w+)` 168 | ) 169 | 170 | func (cfg *Config) init() error { 171 | if cfg.BucketName == "" { 172 | return errors.New("AWS bucket is required") 173 | } 174 | 175 | // The region may be possible for the AWS SDK to figure out from the context. 
176 | 177 | if cfg.AccessKey == "" { 178 | cfg.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") 179 | } 180 | if cfg.SecretKey == "" { 181 | cfg.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") 182 | } 183 | 184 | if cfg.AccessKey == "" && cfg.SecretKey == "" { 185 | // The AWS SDK will fall back to other ways of finding credentials, so we cannot throw an error here; it will eventually fail. 186 | } else if cfg.AccessKey == "" || cfg.SecretKey == "" { 187 | return errors.New("both AWS access key and secret key must be provided") 188 | } 189 | 190 | cfg.SourcePath = filepath.Clean(cfg.SourcePath) 191 | 192 | // Sanity check to prevent people from uploading their entire disk. 193 | // The returned path from filepath.Clean ends in a slash only if it represents 194 | // a root directory, such as "/" on Unix or `C:\` on Windows. 195 | if strings.HasSuffix(cfg.SourcePath, string(os.PathSeparator)) { 196 | return errors.New("invalid source path: Cannot deploy from root") 197 | } 198 | 199 | if cfg.PublicReadACL { 200 | log.Print("WARNING: the 'public-access' flag is deprecated. Please use -acl='public-read' instead.") 201 | } 202 | 203 | if cfg.PublicReadACL && cfg.ACL != "" { 204 | return errors.New("you passed a value for the flags public-access and acl, which is not supported. the public-access flag is deprecated. 
please use the acl flag moving forward") 205 | } 206 | 207 | if cfg.Ignore != nil { 208 | for _, pattern := range cfg.Ignore { 209 | re, err := regexp.Compile(pattern) 210 | if err != nil { 211 | return errors.New("cannot compile 'ignore' flag pattern " + err.Error()) 212 | } 213 | fn := func(s string) bool { 214 | return re.MatchString(s) 215 | } 216 | cfg.ignore = cfg.ignore.Or(fn) 217 | } 218 | } else { 219 | cfg.ignore = predicate.P[string](func(s string) bool { 220 | return false 221 | }) 222 | } 223 | 224 | if cfg.SkipLocalFiles == nil { 225 | cfg.SkipLocalFiles = Strings{defaultSkipLocalFiles} 226 | } 227 | if cfg.SkipLocalDirs == nil { 228 | cfg.SkipLocalDirs = Strings{defaultSkipLocalDirs} 229 | } 230 | 231 | for _, pattern := range cfg.SkipLocalFiles { 232 | re, err := regexp.Compile(pattern) 233 | if err != nil { 234 | return err 235 | } 236 | fn := func(s string) bool { 237 | return re.MatchString(s) 238 | } 239 | cfg.skipLocalFiles = cfg.skipLocalFiles.Or(fn) 240 | } 241 | 242 | for _, pattern := range cfg.SkipLocalDirs { 243 | re, err := regexp.Compile(pattern) 244 | if err != nil { 245 | return err 246 | } 247 | fn := func(s string) bool { 248 | return re.MatchString(s) 249 | } 250 | cfg.skipLocalDirs = cfg.skipLocalDirs.Or(fn) 251 | } 252 | 253 | // load additional config (routes) from file if it exists. 
254 | err := cfg.loadFileConfig() 255 | if err != nil { 256 | return fmt.Errorf("failed to load config from %s: %s", cfg.ConfigFile, err) 257 | } 258 | 259 | return nil 260 | } 261 | 262 | type Strings []string 263 | 264 | func (i *Strings) String() string { 265 | return strings.Join(*i, ",") 266 | } 267 | 268 | func (i *Strings) Set(value string) error { 269 | *i = append(*i, value) 270 | return nil 271 | } 272 | 273 | func flagsToConfig(f *flag.FlagSet) *Config { 274 | cfg := &Config{} 275 | cfg.fs = f 276 | f.StringVar(&cfg.AccessKey, "key", "", "access key ID for AWS") 277 | f.StringVar(&cfg.SecretKey, "secret", "", "secret access key for AWS") 278 | f.StringVar(&cfg.RegionName, "region", "", "name of AWS region") 279 | f.StringVar(&cfg.BucketName, "bucket", "", "destination bucket name on AWS") 280 | f.StringVar(&cfg.BucketPath, "path", "", "optional bucket sub path") 281 | f.StringVar(&cfg.SourcePath, "source", ".", "path of files to upload") 282 | f.Var(&cfg.CDNDistributionIDs, "distribution-id", "optional CDN distribution ID for cache invalidation, repeat flag for multiple distributions") 283 | f.StringVar(&cfg.EndpointURL, "endpoint-url", "", "optional endpoint URL") 284 | f.StringVar(&cfg.ConfigFile, "config", ".s3deploy.yml", "optional config file") 285 | f.IntVar(&cfg.MaxDelete, "max-delete", 256, "maximum number of files to delete per deploy") 286 | f.BoolVar(&cfg.PublicReadACL, "public-access", false, "DEPRECATED: please set -acl='public-read'") 287 | f.BoolVar(&cfg.StripIndexHTML, "strip-index-html", false, "strip index.html from all directories expect for the root entry") 288 | f.StringVar(&cfg.ACL, "acl", "", "provide an ACL for uploaded objects. to make objects public, set to 'public-read'. 
all possible values are listed here: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl (default \"private\")") 289 | f.BoolVar(&cfg.Force, "force", false, "upload even if the etags match") 290 | f.Var(&cfg.Ignore, "ignore", "regexp pattern for ignoring files, repeat flag for multiple patterns,") 291 | f.Var(&cfg.SkipLocalFiles, "skip-local-files", fmt.Sprintf("regexp pattern of files to ignore when walking the local directory, repeat flag for multiple patterns, default %q", defaultSkipLocalFiles)) 292 | f.Var(&cfg.SkipLocalDirs, "skip-local-dirs", fmt.Sprintf("regexp pattern of files of directories to ignore when walking the local directory, repeat flag for multiple patterns, default %q", defaultSkipLocalDirs)) 293 | f.BoolVar(&cfg.Try, "try", false, "trial run, no remote updates") 294 | f.BoolVar(&cfg.Verbose, "v", false, "enable verbose logging") 295 | f.BoolVar(&cfg.Silent, "quiet", false, "enable silent mode") 296 | f.BoolVar(&cfg.PrintVersion, "V", false, "print version and exit") 297 | f.IntVar(&cfg.NumberOfWorkers, "workers", -1, "number of workers to upload files") 298 | f.BoolVar(&cfg.Help, "h", false, "help") 299 | 300 | return cfg 301 | } 302 | 303 | // parserYAMLConfig is a parser for YAML file format. Flags and their values are read 304 | // from the key/value pairs defined in the config file. 305 | // YAML types that cannot easily be represented as a string gets skipped (e.g. maps). 306 | // This is based on https://github.com/peterbourgon/ff/blob/main/ffyaml/ffyaml.go 307 | func parserYAMLConfig(r io.Reader, set func(name, value string) error) error { 308 | // We need to buffer the Reader so we can expand any environment variables. 
309 | var b bytes.Buffer 310 | if _, err := io.Copy(&b, r); err != nil { 311 | return err 312 | } 313 | 314 | s := envhelpers.Expand(b.String(), func(k string) string { 315 | return os.Getenv(k) 316 | }) 317 | 318 | r = strings.NewReader(s) 319 | 320 | var m map[string]interface{} 321 | d := yaml.NewDecoder(r) 322 | if err := d.Decode(&m); err != nil && err != io.EOF { 323 | return err 324 | } 325 | for key, val := range m { 326 | values, err := valsToStrs(val) 327 | if err != nil { 328 | if err == errUnsupportedFlagType { 329 | continue 330 | } 331 | return err 332 | } 333 | for _, value := range values { 334 | if err := set(key, value); err != nil { 335 | return err 336 | } 337 | } 338 | } 339 | return nil 340 | } 341 | 342 | func valToStr(val interface{}) (string, error) { 343 | switch v := val.(type) { 344 | case byte: 345 | return string([]byte{v}), nil 346 | case string: 347 | return v, nil 348 | case bool: 349 | return strconv.FormatBool(v), nil 350 | case uint64: 351 | return strconv.FormatUint(v, 10), nil 352 | case int: 353 | return strconv.Itoa(v), nil 354 | case int64: 355 | return strconv.FormatInt(v, 10), nil 356 | case float64: 357 | return strconv.FormatFloat(v, 'g', -1, 64), nil 358 | case nil: 359 | return "", nil 360 | default: 361 | return "", errUnsupportedFlagType 362 | } 363 | } 364 | 365 | func valsToStrs(val interface{}) ([]string, error) { 366 | if vals, ok := val.([]interface{}); ok { 367 | ss := make([]string, len(vals)) 368 | for i := range vals { 369 | s, err := valToStr(vals[i]) 370 | if err != nil { 371 | return nil, err 372 | } 373 | ss[i] = s 374 | } 375 | return ss, nil 376 | } 377 | s, err := valToStr(val) 378 | if err != nil { 379 | return nil, err 380 | } 381 | return []string{s}, nil 382 | } 383 | -------------------------------------------------------------------------------- /lib/config_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 
2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "os" 10 | "path/filepath" 11 | "testing" 12 | 13 | qt "github.com/frankban/quicktest" 14 | ) 15 | 16 | func TestConfigFromArgs(t *testing.T) { 17 | c := qt.New(t) 18 | args := []string{ 19 | "-bucket=mybucket", 20 | "-config=myconfig", 21 | "-force=true", 22 | "-key=mykey", 23 | "-secret=mysecret", 24 | "-max-delete=42", 25 | "-acl=public-read", 26 | "-path=mypath", 27 | "-quiet=true", 28 | "-region=myregion", 29 | "-source=mysource", 30 | "-endpoint-url=http://localhost:9000", 31 | "-distribution-id=mydistro1", 32 | "-distribution-id=mydistro2", 33 | "-ignore=^ignored-prefix.*", 34 | "-try=true", 35 | } 36 | 37 | cfg, err := ConfigFromArgs(args) 38 | c.Assert(err, qt.IsNil) 39 | c.Assert(cfg.Init(), qt.IsNil) 40 | c.Assert(cfg.BucketName, qt.Equals, "mybucket") 41 | c.Assert(cfg.ConfigFile, qt.Equals, "myconfig") 42 | c.Assert(cfg.Force, qt.Equals, true) 43 | c.Assert(cfg.AccessKey, qt.Equals, "mykey") 44 | c.Assert(cfg.SecretKey, qt.Equals, "mysecret") 45 | c.Assert(cfg.MaxDelete, qt.Equals, 42) 46 | c.Assert(cfg.ACL, qt.Equals, "public-read") 47 | c.Assert(cfg.BucketPath, qt.Equals, "mypath") 48 | c.Assert(cfg.Silent, qt.Equals, true) 49 | c.Assert(cfg.SourcePath, qt.Equals, "mysource") 50 | c.Assert(cfg.EndpointURL, qt.Equals, "http://localhost:9000") 51 | c.Assert(cfg.Try, qt.Equals, true) 52 | c.Assert(cfg.RegionName, qt.Equals, "myregion") 53 | c.Assert(cfg.CDNDistributionIDs, qt.DeepEquals, Strings{"mydistro1", "mydistro2"}) 54 | c.Assert(cfg.Ignore, qt.DeepEquals, Strings{"^ignored-prefix.*"}) 55 | } 56 | 57 | func TestConfigFromEnvAndFile(t *testing.T) { 58 | c := qt.New(t) 59 | dir := t.TempDir() 60 | os.Setenv("S3DEPLOY_REGION", "myenvregion") 61 | os.Setenv("S3TEST_MYPATH", "mypath") 62 | os.Setenv("S3TEST_GZIP", "true") 63 | os.Setenv("S3TEST_CACHE_CONTROL", "max-age=1234") 64 | cfgFile := 
filepath.Join(dir, "config.yml") 65 | c.Assert(os.WriteFile(cfgFile, []byte(` 66 | bucket: mybucket 67 | region: myregion 68 | path: ${S3TEST_MYPATH} 69 | ignore: foo 70 | skip-local-dirs: ["a", "b"] 71 | skip-local-files: c 72 | 73 | routes: 74 | - route: "^.+\\.(a)$" 75 | headers: 76 | Cache-Control: "${S3TEST_CACHE_CONTROL}" 77 | gzip: true 78 | - route: "^.+\\.(b)$" 79 | headers: 80 | Cache-Control: "max-age=630720000, no-transform, public" 81 | gzip: false 82 | - route: "^.+\\.(c)$" 83 | gzip: "${S3TEST_GZIP@U}" 84 | `), 0o644), qt.IsNil) 85 | 86 | args := []string{ 87 | "-config=" + cfgFile, 88 | } 89 | 90 | cfg, err := ConfigFromArgs(args) 91 | c.Assert(err, qt.IsNil) 92 | c.Assert(cfg.Init(), qt.IsNil) 93 | c.Assert(cfg.BucketName, qt.Equals, "mybucket") 94 | c.Assert(cfg.BucketPath, qt.Equals, "mypath") 95 | c.Assert(cfg.RegionName, qt.Equals, "myenvregion") 96 | c.Assert(cfg.Ignore, qt.DeepEquals, Strings{"foo"}) 97 | c.Assert(cfg.SkipLocalDirs, qt.DeepEquals, Strings{"a", "b"}) 98 | c.Assert(cfg.SkipLocalFiles, qt.DeepEquals, Strings{"c"}) 99 | routes := cfg.fileConf.Routes 100 | c.Assert(routes, qt.HasLen, 3) 101 | c.Assert(routes[0].Route, qt.Equals, "^.+\\.(a)$") 102 | c.Assert(routes[0].Headers["Cache-Control"], qt.Equals, "max-age=1234") 103 | c.Assert(routes[0].Gzip, qt.IsTrue) 104 | c.Assert(routes[2].Gzip, qt.IsTrue) 105 | } 106 | 107 | func TestConfigFromFileErrors(t *testing.T) { 108 | c := qt.New(t) 109 | dir := t.TempDir() 110 | cfgFileInvalidYaml := filepath.Join(dir, "config_invalid_yaml.yml") 111 | c.Assert(os.WriteFile(cfgFileInvalidYaml, []byte(` 112 | bucket=foo 113 | `), 0o644), qt.IsNil) 114 | 115 | args := []string{ 116 | "-config=" + cfgFileInvalidYaml, 117 | } 118 | 119 | _, err := ConfigFromArgs(args) 120 | c.Assert(err, qt.IsNotNil) 121 | 122 | cfgFileInvalidRoute := filepath.Join(dir, "config_invalid_route.yml") 123 | c.Assert(os.WriteFile(cfgFileInvalidRoute, []byte(` 124 | bucket: foo 125 | routes: 126 | - route: "*" # invalid 
regexp. 127 | `), 0o644), qt.IsNil) 128 | 129 | args = []string{ 130 | "-config=" + cfgFileInvalidRoute, 131 | } 132 | 133 | cfg, err := ConfigFromArgs(args) 134 | c.Assert(err, qt.IsNil) 135 | err = cfg.Init() 136 | c.Assert(err, qt.IsNotNil) 137 | } 138 | 139 | func TestSetAclAndPublicAccessFlag(t *testing.T) { 140 | c := qt.New(t) 141 | args := []string{ 142 | "-bucket=mybucket", 143 | "-acl=public-read", 144 | "-public-access=true", 145 | } 146 | 147 | cfg, err := ConfigFromArgs(args) 148 | c.Assert(err, qt.IsNil) 149 | 150 | err = cfg.Init() 151 | c.Assert(err, qt.IsNotNil) 152 | c.Assert(err.Error(), qt.Contains, "you passed a value for the flags public-access and acl") 153 | } 154 | 155 | func TestIgnoreFlagError(t *testing.T) { 156 | c := qt.New(t) 157 | args := []string{ 158 | "-bucket=mybucket", 159 | "-ignore=((INVALID_PATTERN", 160 | } 161 | 162 | cfg, err := ConfigFromArgs(args) 163 | c.Assert(err, qt.IsNil) 164 | 165 | err = cfg.Init() 166 | c.Assert(err, qt.IsNotNil) 167 | c.Assert(err.Error(), qt.Contains, "cannot compile 'ignore' flag pattern") 168 | } 169 | 170 | func TestShouldIgnore(t *testing.T) { 171 | c := qt.New(t) 172 | 173 | argsDefault := []string{ 174 | "-bucket=mybucket", 175 | "-path=my/path", 176 | } 177 | argsIgnore := []string{ 178 | "-bucket=mybucket", 179 | "-path=my/path", 180 | "-ignore=^ignored-prefix.*", 181 | } 182 | 183 | cfgDefault, err := ConfigFromArgs(argsDefault) 184 | c.Assert(err, qt.IsNil) 185 | cfgIgnore, err := ConfigFromArgs(argsIgnore) 186 | c.Assert(err, qt.IsNil) 187 | 188 | c.Assert(cfgDefault.Init(), qt.IsNil) 189 | c.Assert(cfgIgnore.Init(), qt.IsNil) 190 | 191 | c.Assert(cfgDefault.shouldIgnoreLocal("any"), qt.IsFalse) 192 | c.Assert(cfgDefault.shouldIgnoreLocal("ignored-prefix/file.txt"), qt.IsFalse) 193 | 194 | c.Assert(cfgIgnore.shouldIgnoreLocal("any"), qt.IsFalse) 195 | c.Assert(cfgIgnore.shouldIgnoreLocal("ignored-prefix/file.txt"), qt.IsTrue) 196 | 197 | 
c.Assert(cfgDefault.shouldIgnoreRemote("my/path/any"), qt.IsFalse) 198 | c.Assert(cfgDefault.shouldIgnoreRemote("my/path/ignored-prefix/file.txt"), qt.IsFalse) 199 | 200 | c.Assert(cfgIgnore.shouldIgnoreRemote("my/path/any"), qt.IsFalse) 201 | c.Assert(cfgIgnore.shouldIgnoreRemote("my/path/ignored-prefix/file.txt"), qt.IsTrue) 202 | } 203 | 204 | func TestSkipLocalDefault(t *testing.T) { 205 | c := qt.New(t) 206 | 207 | args := []string{ 208 | "-bucket=mybucket", 209 | } 210 | 211 | cfg, err := ConfigFromArgs(args) 212 | c.Assert(err, qt.IsNil) 213 | c.Assert(cfg.Init(), qt.IsNil) 214 | 215 | c.Assert(cfg.skipLocalFiles("foo"), qt.IsFalse) 216 | c.Assert(cfg.skipLocalDirs("foo"), qt.IsFalse) 217 | c.Assert(cfg.skipLocalFiles(".DS_Store"), qt.IsTrue) 218 | c.Assert(cfg.skipLocalFiles("a.DS_Store"), qt.IsFalse) 219 | c.Assert(cfg.skipLocalFiles("foo/bar/.DS_Store"), qt.IsTrue) 220 | 221 | c.Assert(cfg.skipLocalDirs("foo/bar/.git"), qt.IsTrue) 222 | c.Assert(cfg.skipLocalDirs(".git"), qt.IsTrue) 223 | c.Assert(cfg.skipLocalDirs("a.b"), qt.IsFalse) 224 | } 225 | -------------------------------------------------------------------------------- /lib/deployer.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "io" 12 | "os" 13 | "path" 14 | "path/filepath" 15 | "runtime" 16 | "strings" 17 | "sync/atomic" 18 | "time" 19 | 20 | "golang.org/x/sync/errgroup" 21 | "golang.org/x/text/unicode/norm" 22 | ) 23 | 24 | const up = `↑` 25 | 26 | // Deployer deploys. 27 | type Deployer struct { 28 | cfg *Config 29 | stats *DeployStats 30 | 31 | g *errgroup.Group 32 | 33 | filesToUpload chan *osFile 34 | filesToDelete []string 35 | 36 | // Verbose output. 37 | outv io.Writer 38 | // Regular output. 
39 | printer 40 | 41 | store remoteStore 42 | } 43 | 44 | // Deploy deploys to the remote based on the given config. 45 | func Deploy(cfg *Config) (DeployStats, error) { 46 | if err := cfg.Init(); err != nil { 47 | return DeployStats{}, err 48 | } 49 | var outv, out io.Writer = io.Discard, os.Stdout 50 | if cfg.Silent { 51 | out = io.Discard 52 | } else { 53 | if cfg.Verbose { 54 | outv = os.Stdout 55 | } 56 | start := time.Now() 57 | defer func() { 58 | fmt.Printf("\nTotal in %.2f seconds\n", time.Since(start).Seconds()) 59 | }() 60 | } 61 | 62 | var g *errgroup.Group 63 | ctx, cancel := context.WithCancel(context.Background()) 64 | g, ctx = errgroup.WithContext(ctx) 65 | defer cancel() 66 | 67 | d := &Deployer{ 68 | g: g, 69 | outv: outv, 70 | printer: newPrinter(out), 71 | filesToUpload: make(chan *osFile), 72 | cfg: cfg, 73 | stats: &DeployStats{}, 74 | } 75 | 76 | numberOfWorkers := cfg.NumberOfWorkers 77 | if numberOfWorkers <= 0 { 78 | numberOfWorkers = runtime.NumCPU() 79 | } 80 | 81 | baseStore := d.cfg.baseStore 82 | if baseStore == nil { 83 | var err error 84 | baseStore, err = newRemoteStore(d.cfg, d) 85 | if err != nil { 86 | return *d.stats, err 87 | } 88 | } 89 | if d.cfg.Try { 90 | baseStore = newNoUpdateStore(baseStore) 91 | d.Println("This is a trial run, with no remote updates.") 92 | } 93 | d.store = newStore(d.cfg, baseStore) 94 | 95 | for i := 0; i < numberOfWorkers; i++ { 96 | g.Go(func() error { 97 | return d.upload(ctx) 98 | }) 99 | } 100 | 101 | err := d.plan(ctx) 102 | if err != nil { 103 | cancel() 104 | } 105 | 106 | errg := g.Wait() 107 | 108 | if err != nil { 109 | return *d.stats, err 110 | } 111 | 112 | if errg != nil && errg != context.Canceled { 113 | return *d.stats, errg 114 | } 115 | 116 | err = d.store.DeleteObjects( 117 | context.Background(), 118 | d.filesToDelete, 119 | withDeleteStats(d.stats), 120 | withMaxDelete(d.cfg.MaxDelete)) 121 | 122 | if err == nil { 123 | err = d.store.Finalize(context.Background()) 124 | } 125 | 
126 | return *d.stats, err 127 | } 128 | 129 | type printer interface { 130 | Println(a ...interface{}) (n int, err error) 131 | Printf(format string, a ...interface{}) (n int, err error) 132 | } 133 | 134 | type print struct { 135 | out io.Writer 136 | } 137 | 138 | func newPrinter(out io.Writer) printer { 139 | return print{out: out} 140 | } 141 | 142 | func (p print) Println(a ...interface{}) (n int, err error) { 143 | return fmt.Fprintln(p.out, a...) 144 | } 145 | 146 | func (p print) Printf(format string, a ...interface{}) (n int, err error) { 147 | return fmt.Fprintf(p.out, format, a...) 148 | } 149 | 150 | func (d *Deployer) printf(format string, a ...interface{}) { 151 | fmt.Fprintf(d.outv, format, a...) 152 | } 153 | 154 | func (d *Deployer) enqueueUpload(ctx context.Context, f *osFile) { 155 | d.Printf("%s (%s) %s ", f.keyPath, f.reason, up) 156 | select { 157 | case <-ctx.Done(): 158 | case d.filesToUpload <- f: 159 | } 160 | } 161 | 162 | func (d *Deployer) skipFile(f *osFile) { 163 | d.printf("%s skipping …\n", f.relPath) 164 | atomic.AddUint64(&d.stats.Skipped, uint64(1)) 165 | } 166 | 167 | func (d *Deployer) enqueueDelete(key string) { 168 | d.printf("%s not found in source, deleting.\n", key) 169 | d.filesToDelete = append(d.filesToDelete, key) 170 | } 171 | 172 | type uploadReason string 173 | 174 | const ( 175 | reasonNotFound uploadReason = "not found" 176 | reasonForce uploadReason = "force" 177 | reasonSize uploadReason = "size" 178 | reasonETag uploadReason = "ETag" 179 | ) 180 | 181 | // plan figures out which files need to be uploaded. 
182 | func (d *Deployer) plan(ctx context.Context) error { 183 | remoteFiles, err := d.store.FileMap(ctx) 184 | if err != nil { 185 | return err 186 | } 187 | d.printf("Found %d remote files\n", len(remoteFiles)) 188 | 189 | // All local files at sourcePath 190 | localFiles := make(chan *osFile) 191 | d.g.Go(func() error { 192 | return d.walk(ctx, d.cfg.SourcePath, localFiles) 193 | }) 194 | 195 | for f := range localFiles { 196 | // default: upload because local file not found on remote. 197 | up := true 198 | reason := reasonNotFound 199 | 200 | bucketPath := f.keyPath 201 | if d.cfg.BucketPath != "" { 202 | bucketPath = pathJoin(d.cfg.BucketPath, bucketPath) 203 | } 204 | 205 | if remoteFile, ok := remoteFiles[bucketPath]; ok { 206 | if d.cfg.Force { 207 | up = true 208 | reason = reasonForce 209 | } else { 210 | up, reason = f.shouldThisReplace(remoteFile) 211 | } 212 | // remove from map, whatever is leftover should be deleted: 213 | delete(remoteFiles, bucketPath) 214 | } 215 | 216 | f.reason = reason 217 | 218 | if up { 219 | d.enqueueUpload(ctx, f) 220 | } else { 221 | d.skipFile(f) 222 | } 223 | } 224 | close(d.filesToUpload) 225 | 226 | // any remote files not found locally should be removed: 227 | // except for ignored files 228 | for key := range remoteFiles { 229 | if d.cfg.shouldIgnoreRemote(key) { 230 | d.printf("%s ignored …\n", key) 231 | continue 232 | } 233 | d.enqueueDelete(key) 234 | } 235 | 236 | return nil 237 | } 238 | 239 | // walk a local directory 240 | func (d *Deployer) walk(ctx context.Context, basePath string, files chan<- *osFile) error { 241 | err := filepath.Walk(basePath, func(fpath string, info os.FileInfo, err error) error { 242 | if err != nil { 243 | return err 244 | } 245 | 246 | pathUnix := path.Clean(filepath.ToSlash(strings.TrimPrefix(fpath, basePath))) 247 | 248 | if info.IsDir() { 249 | if d.cfg.skipLocalDirs(pathUnix) { 250 | return filepath.SkipDir 251 | } 252 | return nil 253 | } else { 254 | if 
d.cfg.skipLocalFiles(pathUnix) { 255 | return nil 256 | } 257 | } 258 | 259 | if runtime.GOOS == "darwin" { 260 | // When a file system is HFS+, its filepath is in NFD form. 261 | fpath = norm.NFC.String(fpath) 262 | } 263 | 264 | abs, err := filepath.Abs(fpath) 265 | if err != nil { 266 | return err 267 | } 268 | rel, err := filepath.Rel(basePath, fpath) 269 | if err != nil { 270 | return err 271 | } 272 | 273 | if d.cfg.shouldIgnoreLocal(rel) { 274 | return nil 275 | } 276 | 277 | f, err := newOSFile(d.cfg, rel, abs, info) 278 | if err != nil { 279 | return err 280 | } 281 | 282 | if f.route != nil && f.route.Ignore { 283 | return nil 284 | } 285 | 286 | select { 287 | case <-ctx.Done(): 288 | return ctx.Err() 289 | case files <- f: 290 | } 291 | 292 | return nil 293 | }) 294 | 295 | close(files) 296 | 297 | return err 298 | } 299 | 300 | func (d *Deployer) upload(ctx context.Context) error { 301 | for { 302 | select { 303 | case f, ok := <-d.filesToUpload: 304 | if !ok { 305 | return nil 306 | } 307 | err := d.store.Put(ctx, f, withUploadStats(d.stats)) 308 | if err != nil { 309 | return err 310 | } 311 | case <-ctx.Done(): 312 | return ctx.Err() 313 | } 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /lib/deployer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 
5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "errors" 11 | "fmt" 12 | "os" 13 | "path" 14 | "path/filepath" 15 | "runtime" 16 | "sync" 17 | "testing" 18 | 19 | qt "github.com/frankban/quicktest" 20 | ) 21 | 22 | var _ remoteStore = (*testStore)(nil) 23 | 24 | func TestDeploy(t *testing.T) { 25 | c := qt.New(t) 26 | store, m := newTestStore(0, "") 27 | source := testSourcePath() 28 | configFile := filepath.Join(source, ".s3deploy.yml") 29 | 30 | cfg := &Config{ 31 | BucketName: "example.com", 32 | RegionName: "eu-west-1", 33 | ConfigFile: configFile, 34 | MaxDelete: 300, 35 | ACL: "public-read", 36 | Silent: true, 37 | SourcePath: source, 38 | baseStore: store, 39 | } 40 | 41 | stats, err := Deploy(cfg) 42 | c.Assert(err, qt.IsNil) 43 | c.Assert(stats.Summary(), qt.Equals, "Deleted 1 of 1, uploaded 3, skipped 1 (80% changed)") 44 | assertKeys(t, m, ".s3deploy.yml", "main.css", "index.html", "ab.txt") 45 | 46 | mainCss := m["main.css"] 47 | c.Assert(mainCss.(*osFile).ContentType(), qt.Equals, "text/css; charset=utf-8") 48 | headers := mainCss.(*osFile).Headers() 49 | c.Assert(headers["Content-Encoding"], qt.Equals, "gzip") 50 | c.Assert(headers["Cache-Control"], qt.Equals, "max-age=630720000, no-transform, public") 51 | } 52 | 53 | func TestDeployWithBucketPath(t *testing.T) { 54 | c := qt.New(t) 55 | root := "my/path" 56 | store, m := newTestStore(0, root) 57 | source := testSourcePath() 58 | configFile := filepath.Join(source, ".s3deploy.yml") 59 | 60 | cfg := &Config{ 61 | BucketName: "example.com", 62 | RegionName: "eu-west-1", 63 | ConfigFile: configFile, 64 | BucketPath: root, 65 | MaxDelete: 300, 66 | Silent: false, 67 | SourcePath: source, 68 | baseStore: store, 69 | } 70 | 71 | stats, err := Deploy(cfg) 72 | c.Assert(err, qt.IsNil) 73 | c.Assert(stats.Summary(), qt.Equals, "Deleted 1 of 1, uploaded 3, skipped 1 (80% changed)") 74 | assertKeys(t, m, "my/path/.s3deploy.yml", "my/path/main.css", "my/path/index.html", "my/path/ab.txt") 75 | mainCss := 
m["my/path/main.css"] 76 | c.Assert(mainCss.(*osFile).Key(), qt.Equals, "my/path/main.css") 77 | headers := mainCss.(*osFile).Headers() 78 | c.Assert(headers["Content-Encoding"], qt.Equals, "gzip") 79 | } 80 | 81 | func TestDeployForce(t *testing.T) { 82 | c := qt.New(t) 83 | store, _ := newTestStore(0, "") 84 | source := testSourcePath() 85 | 86 | cfg := &Config{ 87 | BucketName: "example.com", 88 | RegionName: "eu-west-1", 89 | Force: true, 90 | MaxDelete: 300, 91 | Silent: true, 92 | SourcePath: source, 93 | baseStore: store, 94 | } 95 | 96 | stats, err := Deploy(cfg) 97 | c.Assert(err, qt.IsNil) 98 | c.Assert(stats.Summary(), qt.Equals, "Deleted 1 of 1, uploaded 4, skipped 0 (100% changed)") 99 | } 100 | 101 | func TestDeployWitIgnorePattern(t *testing.T) { 102 | c := qt.New(t) 103 | root := "my/path" 104 | re := `^(main\.css|deleteme\.txt)$` 105 | 106 | store, m := newTestStore(0, root) 107 | source := testSourcePath() 108 | configFile := filepath.Join(source, ".s3deploy.yml") 109 | 110 | cfg := &Config{ 111 | BucketName: "example.com", 112 | RegionName: "eu-west-1", 113 | ConfigFile: configFile, 114 | BucketPath: root, 115 | MaxDelete: 300, 116 | Silent: false, 117 | SourcePath: source, 118 | baseStore: store, 119 | Ignore: Strings{re}, 120 | } 121 | 122 | prevCss := m["my/path/main.css"] 123 | prevTag := prevCss.ETag() 124 | 125 | stats, err := Deploy(cfg) 126 | c.Assert(err, qt.IsNil) 127 | c.Assert(stats.Summary(), qt.Equals, "Deleted 0 of 0, uploaded 2, skipped 1 (67% changed)") 128 | assertKeys(t, m, 129 | "my/path/.s3deploy.yml", 130 | "my/path/index.html", 131 | "my/path/ab.txt", 132 | "my/path/deleteme.txt", // ignored: stale 133 | "my/path/main.css", // ignored: not updated 134 | ) 135 | mainCss := m["my/path/main.css"] 136 | c.Assert(prevTag, qt.Equals, mainCss.ETag()) 137 | } 138 | 139 | func TestDeployWitRoutesIgnore(t *testing.T) { 140 | c := qt.New(t) 141 | root := "my/path" 142 | 143 | store, m := newTestStore(0, root) 144 | source := 
testSourcePath() 145 | configFile := filepath.Join(source, ".hidden/.s3deploy.ignore.yml") 146 | 147 | cfg := &Config{ 148 | BucketName: "example.com", 149 | RegionName: "eu-west-1", 150 | ConfigFile: configFile, 151 | BucketPath: root, 152 | MaxDelete: 300, 153 | Silent: false, 154 | SourcePath: source, 155 | baseStore: store, 156 | } 157 | 158 | // same as TestDeployWitIgnorePattern 159 | 160 | prevCss := m["my/path/main.css"] 161 | prevTag := prevCss.ETag() 162 | 163 | stats, err := Deploy(cfg) 164 | c.Assert(err, qt.IsNil) 165 | c.Assert(stats.Summary(), qt.Equals, "Deleted 0 of 0, uploaded 2, skipped 1 (67% changed)") 166 | assertKeys(t, m, 167 | "my/path/.s3deploy.yml", 168 | "my/path/index.html", 169 | "my/path/ab.txt", 170 | "my/path/deleteme.txt", // ignored: stale 171 | "my/path/main.css", // ignored: not updated 172 | ) 173 | mainCss := m["my/path/main.css"] 174 | c.Assert(prevTag, qt.Equals, mainCss.ETag()) 175 | } 176 | 177 | func TestDeploySourceNotFound(t *testing.T) { 178 | c := qt.New(t) 179 | store, _ := newTestStore(0, "") 180 | wd, _ := os.Getwd() 181 | source := filepath.Join(wd, "thisdoesnotexist") 182 | 183 | cfg := &Config{ 184 | BucketName: "example.com", 185 | RegionName: "eu-west-1", 186 | MaxDelete: 300, 187 | Silent: true, 188 | SourcePath: source, 189 | baseStore: store, 190 | } 191 | 192 | stats, err := Deploy(cfg) 193 | c.Assert(err, qt.IsNotNil) 194 | c.Assert(err.Error(), qt.Contains, "thisdoesnotexist") 195 | c.Assert(stats.Summary(), qt.Contains, "Deleted 0 of 0, uploaded 0, skipped 0") 196 | } 197 | 198 | func TestDeployInvalidSourcePath(t *testing.T) { 199 | c := qt.New(t) 200 | store, _ := newTestStore(0, "") 201 | root := "/" 202 | 203 | if runtime.GOOS == "windows" { 204 | root = `C:\` 205 | } 206 | 207 | cfg := &Config{ 208 | BucketName: "example.com", 209 | RegionName: "eu-west-1", 210 | MaxDelete: 300, 211 | Silent: true, 212 | SourcePath: root, 213 | baseStore: store, 214 | } 215 | 216 | stats, err := Deploy(cfg) 217 | 
c.Assert(err, qt.IsNotNil) 218 | c.Assert(err.Error(), qt.Contains, "invalid source path") 219 | c.Assert(stats.Summary(), qt.Contains, "Deleted 0 of 0, uploaded 0, skipped 0") 220 | } 221 | 222 | func TestDeployNoBucket(t *testing.T) { 223 | c := qt.New(t) 224 | _, err := Deploy(&Config{Silent: true}) 225 | c.Assert(err, qt.IsNotNil) 226 | } 227 | 228 | func TestDeployStoreFailures(t *testing.T) { 229 | for i := 1; i <= 3; i++ { 230 | c := qt.New(t) 231 | 232 | store, _ := newTestStore(i, "") 233 | source := testSourcePath() 234 | 235 | cfg := &Config{ 236 | BucketName: "example.com", 237 | RegionName: "eu-west-1", 238 | MaxDelete: 300, 239 | Silent: true, 240 | SourcePath: source, 241 | baseStore: store, 242 | } 243 | 244 | message := fmt.Sprintf("Failure %d", i) 245 | 246 | stats, err := Deploy(cfg) 247 | c.Assert(err, qt.IsNotNil) 248 | 249 | if i == 3 { 250 | // Fail delete step 251 | c.Assert(stats.Summary(), qt.Contains, "Deleted 0 of 0, uploaded 3", qt.Commentf(message)) 252 | } else { 253 | c.Assert(stats.Summary(), qt.Contains, "Deleted 0 of 0, uploaded 0", qt.Commentf(message)) 254 | } 255 | } 256 | } 257 | 258 | func TestDeployMaxDelete(t *testing.T) { 259 | c := qt.New(t) 260 | 261 | m := make(map[string]file) 262 | 263 | for i := 0; i < 200; i++ { 264 | m[fmt.Sprintf("file%d.css", i)] = &testFile{} 265 | } 266 | 267 | store := newTestStoreFrom(m, 0) 268 | 269 | cfg := &Config{ 270 | BucketName: "example.com", 271 | RegionName: "eu-west-1", 272 | Silent: true, 273 | SourcePath: testSourcePath(), 274 | MaxDelete: 42, 275 | baseStore: store, 276 | } 277 | 278 | stats, err := Deploy(cfg) 279 | c.Assert(err, qt.IsNil) 280 | c.Assert(len(m), qt.Equals, 158+4) 281 | c.Assert(stats.Summary(), qt.Equals, "Deleted 42 of 200, uploaded 4, skipped 0 (100% changed)") 282 | } 283 | 284 | func testSourcePath() string { 285 | wd, _ := os.Getwd() 286 | return filepath.Join(wd, "testdata") + "/" 287 | } 288 | 289 | func newTestStore(failAt int, root string) 
(remoteStore, map[string]file) { 290 | m := map[string]file{ 291 | path.Join(root, "ab.txt"): &testFile{key: path.Join(root, "ab.txt"), etag: `"b86fc6b051f63d73de262d4c34e3a0a9"`, size: int64(2)}, 292 | path.Join(root, "main.css"): &testFile{key: path.Join(root, "main.css"), etag: `"changed"`, size: int64(27)}, 293 | path.Join(root, "deleteme.txt"): &testFile{}, 294 | } 295 | 296 | return newTestStoreFrom(m, failAt), m 297 | } 298 | 299 | func newTestStoreFrom(m map[string]file, failAt int) remoteStore { 300 | return &testStore{m: m, failAt: failAt} 301 | } 302 | 303 | type testStore struct { 304 | failAt int 305 | m map[string]file 306 | 307 | sync.Mutex 308 | } 309 | 310 | func assertKeys(t *testing.T, m map[string]file, keys ...string) { 311 | for _, k := range keys { 312 | if _, found := m[k]; !found { 313 | t.Fatal("key not found:", k) 314 | } 315 | } 316 | 317 | if len(keys) != len(m) { 318 | t.Log(m) 319 | t.Fatalf("map length mismatch: %d vs %d", len(keys), len(m)) 320 | } 321 | } 322 | 323 | func (s *testStore) FileMap(ctx context.Context, opts ...opOption) (map[string]file, error) { 324 | s.Lock() 325 | defer s.Unlock() 326 | 327 | if s.failAt == 1 { 328 | return nil, errors.New("fail") 329 | } 330 | c := make(map[string]file) 331 | for k, v := range s.m { 332 | c[k] = v 333 | } 334 | return c, nil 335 | } 336 | 337 | func (s *testStore) Put(ctx context.Context, f localFile, opts ...opOption) error { 338 | s.Lock() 339 | defer s.Unlock() 340 | 341 | if s.failAt == 2 { 342 | return errors.New("fail") 343 | } 344 | s.m[f.Key()] = f 345 | return nil 346 | } 347 | 348 | func (s *testStore) DeleteObjects(ctx context.Context, keys []string, opts ...opOption) error { 349 | s.Lock() 350 | defer s.Unlock() 351 | 352 | if s.failAt == 3 { 353 | return errors.New("fail") 354 | } 355 | for _, k := range keys { 356 | delete(s.m, k) 357 | } 358 | return nil 359 | } 360 | 361 | func (s *testStore) Finalize(ctx context.Context) error { 362 | return nil 363 | } 364 | 
-------------------------------------------------------------------------------- /lib/files.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "bytes" 10 | "compress/gzip" 11 | "crypto/md5" 12 | "encoding/hex" 13 | "fmt" 14 | "io" 15 | "mime" 16 | "net/http" 17 | "os" 18 | "path/filepath" 19 | "regexp" 20 | "sync" 21 | 22 | "github.com/dsnet/golib/memfile" 23 | ) 24 | 25 | var ( 26 | _ file = (*osFile)(nil) 27 | _ localFile = (*osFile)(nil) 28 | _ reasoner = (*osFile)(nil) 29 | ) 30 | 31 | type file interface { 32 | // Key represents the key on the target file store. 33 | Key() string 34 | ETag() string 35 | Size() int64 36 | } 37 | 38 | type reasoner interface { 39 | UploadReason() uploadReason 40 | } 41 | 42 | type localFile interface { 43 | file 44 | shouldThisReplace(other file) (bool, uploadReason) 45 | 46 | // Content returns the content to be stored remotely. If this file 47 | // configured to be gzipped, then that is what you get. 48 | Content() io.ReadSeeker 49 | 50 | ContentType() string 51 | 52 | Headers() map[string]string 53 | } 54 | 55 | type osFile struct { 56 | relPath string 57 | keyPath string // may be different from relPath if StripIndexHTML is set. 58 | 59 | // Filled when BucketPath is provided. Will store files in a sub-path 60 | // of the target file store. 
61 | targetRoot string 62 | 63 | reason uploadReason 64 | 65 | absPath string 66 | size int64 67 | 68 | etag string 69 | etagInit sync.Once 70 | 71 | contentType string 72 | 73 | f *memfile.File 74 | 75 | route *route 76 | } 77 | 78 | func (f *osFile) Key() string { 79 | if f.targetRoot != "" { 80 | return pathJoin(f.targetRoot, f.keyPath) 81 | } 82 | return f.keyPath 83 | } 84 | 85 | func (f *osFile) UploadReason() uploadReason { 86 | return f.reason 87 | } 88 | 89 | func (f *osFile) ETag() string { 90 | f.etagInit.Do(func() { 91 | var err error 92 | f.etag, err = calculateETag(f.Content()) 93 | if err != nil { 94 | panic(err) 95 | } 96 | }) 97 | return f.etag 98 | } 99 | 100 | func (f *osFile) Size() int64 { 101 | return f.size 102 | } 103 | 104 | func (f *osFile) ContentType() string { 105 | return f.contentType 106 | } 107 | 108 | func (f *osFile) Content() io.ReadSeeker { 109 | f.f.Seek(0, 0) 110 | return f.f 111 | } 112 | 113 | func (f *osFile) Headers() map[string]string { 114 | headers := map[string]string{} 115 | 116 | if f.route != nil { 117 | if f.route.Gzip { 118 | headers["Content-Encoding"] = "gzip" 119 | } 120 | 121 | if f.route.Headers != nil { 122 | for k, v := range f.route.Headers { 123 | headers[k] = v 124 | } 125 | } 126 | } 127 | 128 | return headers 129 | } 130 | 131 | func (f *osFile) initContentType(peek []byte) error { 132 | if f.route != nil { 133 | if contentType, found := f.route.Headers["Content-Type"]; found { 134 | f.contentType = contentType 135 | return nil 136 | } 137 | } 138 | 139 | contentType := mime.TypeByExtension(filepath.Ext(f.relPath)) 140 | if contentType != "" { 141 | f.contentType = contentType 142 | return nil 143 | } 144 | 145 | // Have to look inside the file itself. 
146 | if peek != nil { 147 | f.contentType = detectContentTypeFromContent(peek) 148 | } else { 149 | f.contentType = detectContentTypeFromContent(f.f.Bytes()) 150 | } 151 | 152 | return nil 153 | } 154 | 155 | func detectContentTypeFromContent(b []byte) string { 156 | const magicSize = 512 // Size that DetectContentType expects 157 | var peek []byte 158 | 159 | if len(b) > magicSize { 160 | peek = b[:magicSize] 161 | } else { 162 | peek = b 163 | } 164 | 165 | return http.DetectContentType(peek) 166 | } 167 | 168 | func (f *osFile) shouldThisReplace(other file) (bool, uploadReason) { 169 | if f.Size() != other.Size() { 170 | return true, reasonSize 171 | } 172 | 173 | if f.ETag() != other.ETag() { 174 | return true, reasonETag 175 | } 176 | 177 | return false, "" 178 | } 179 | 180 | func newOSFile(cfg *Config, relPath, absPath string, fi os.FileInfo) (*osFile, error) { 181 | targetRoot := cfg.BucketPath 182 | routes := cfg.fileConf.Routes 183 | 184 | relPath = filepath.ToSlash(relPath) 185 | 186 | file, err := os.Open(absPath) 187 | if err != nil { 188 | return nil, fmt.Errorf("failed to open %q: %s", absPath, err) 189 | } 190 | defer file.Close() 191 | 192 | var ( 193 | mFile *memfile.File 194 | size = fi.Size() 195 | peek []byte 196 | ) 197 | 198 | route := routes.get(relPath) 199 | 200 | if route != nil && route.Gzip { 201 | var b bytes.Buffer 202 | gz := gzip.NewWriter(&b) 203 | io.Copy(gz, file) 204 | gz.Close() 205 | mFile = memfile.New(b.Bytes()) 206 | size = int64(b.Len()) 207 | peek = make([]byte, 512) 208 | file.Read(peek) 209 | } else { 210 | b, err := io.ReadAll(file) 211 | if err != nil { 212 | return nil, err 213 | } 214 | mFile = memfile.New(b) 215 | } 216 | 217 | keyPath := relPath 218 | if cfg.StripIndexHTML { 219 | keyPath = trimIndexHTML(keyPath) 220 | } 221 | 222 | of := &osFile{route: route, f: mFile, targetRoot: targetRoot, absPath: absPath, relPath: relPath, keyPath: keyPath, size: size} 223 | 224 | if err := of.initContentType(peek); err != 
nil { 225 | return nil, err 226 | } 227 | 228 | return of, nil 229 | } 230 | 231 | type routes []*route 232 | 233 | func (r routes) get(path string) *route { 234 | for _, route := range r { 235 | if route.routerRE.MatchString(path) { 236 | return route 237 | } 238 | } 239 | 240 | // no route found 241 | return nil 242 | } 243 | 244 | // read config from .s3deploy.yml if found. 245 | type fileConfig struct { 246 | Routes routes `yaml:"routes"` 247 | } 248 | 249 | func (c *fileConfig) init() error { 250 | for _, r := range c.Routes { 251 | var err error 252 | r.routerRE, err = regexp.Compile(r.Route) 253 | if err != nil { 254 | return err 255 | } 256 | } 257 | 258 | return nil 259 | } 260 | 261 | type route struct { 262 | Route string `yaml:"route"` 263 | Headers map[string]string `yaml:"headers"` 264 | Gzip bool `yaml:"gzip"` 265 | Ignore bool `yaml:"ignore"` 266 | 267 | routerRE *regexp.Regexp // compiled version of Route 268 | } 269 | 270 | func calculateETag(r io.Reader) (string, error) { 271 | h := md5.New() 272 | 273 | _, err := io.Copy(h, r) 274 | if err != nil { 275 | return "", err 276 | } 277 | return "\"" + hex.EncodeToString(h.Sum(nil)) + "\"", nil 278 | } 279 | -------------------------------------------------------------------------------- /lib/files_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 
5 | 6 | package lib 7 | 8 | import ( 9 | "io" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | "testing" 14 | 15 | qt "github.com/frankban/quicktest" 16 | ) 17 | 18 | func TestOSFile(t *testing.T) { 19 | c := qt.New(t) 20 | 21 | of, err := openTestFile("main.css") 22 | c.Assert(err, qt.IsNil) 23 | 24 | c.Assert(of.Size(), qt.Equals, int64(3)) 25 | c.Assert(of.ETag(), qt.Equals, `"902fbdd2b1df0c4f70b4a5d23525e932"`) 26 | c.Assert(of.Content(), qt.IsNotNil) 27 | b, err := io.ReadAll(of.Content()) 28 | c.Assert(err, qt.IsNil) 29 | c.Assert(string(b), qt.Equals, "ABC") 30 | c.Assert(of.ContentType(), qt.Equals, "text/css; charset=utf-8") 31 | } 32 | 33 | func TestShouldThisReplace(t *testing.T) { 34 | c := qt.New(t) 35 | 36 | of, err := openTestFile("main.css") 37 | c.Assert(err, qt.IsNil) 38 | 39 | correctETag := `"902fbdd2b1df0c4f70b4a5d23525e932"` 40 | 41 | for _, test := range []struct { 42 | testFile 43 | expect bool 44 | expectReason string 45 | }{ 46 | {testFile{"k1", int64(123), correctETag}, true, "size"}, 47 | {testFile{"k2", int64(3), "FOO"}, true, "ETag"}, 48 | {testFile{"k3", int64(3), correctETag}, false, ""}, 49 | } { 50 | b, reason := of.shouldThisReplace(test.testFile) 51 | c.Assert(b, qt.Equals, test.expect) 52 | c.Assert(reason, qt.Equals, uploadReason(test.expectReason)) 53 | } 54 | } 55 | 56 | func TestDetectContentTypeFromContent(t *testing.T) { 57 | c := qt.New(t) 58 | 59 | c.Assert(detectContentTypeFromContent([]byte("foo")), qt.Equals, "text/html; charset=utf-8") 60 | c.Assert(detectContentTypeFromContent([]byte(""+strings.Repeat("abc", 300)+"")), qt.Equals, "text/html; charset=utf-8") 61 | } 62 | 63 | type testFile struct { 64 | key string 65 | size int64 66 | etag string 67 | } 68 | 69 | func (f testFile) Key() string { 70 | return f.key 71 | } 72 | 73 | func (f testFile) ETag() string { 74 | return f.etag 75 | } 76 | 77 | func (f testFile) Size() int64 { 78 | return f.size 79 | } 80 | 81 | func openTestFile(name string) (*osFile, error) { 
82 | wd, err := os.Getwd() 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | relPath := filepath.Join("testdata", name) 88 | absPath := filepath.Join(wd, relPath) 89 | fi, err := os.Stat(absPath) 90 | if err != nil { 91 | return nil, err 92 | } 93 | 94 | args := []string{ 95 | "-bucket=mybucket", 96 | } 97 | 98 | cfg, err := ConfigFromArgs(args) 99 | if err != nil { 100 | return nil, err 101 | } 102 | 103 | return newOSFile(cfg, relPath, absPath, fi) 104 | } 105 | -------------------------------------------------------------------------------- /lib/s3.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "time" 12 | 13 | "github.com/aws/aws-sdk-go-v2/service/cloudfront" 14 | 15 | "github.com/aws/aws-sdk-go-v2/aws" 16 | "github.com/aws/aws-sdk-go-v2/service/s3" 17 | "github.com/aws/aws-sdk-go-v2/service/s3/types" 18 | ) 19 | 20 | var ( 21 | _ remoteStore = (*s3Store)(nil) 22 | _ remoteCDN = (*s3Store)(nil) 23 | _ file = (*s3File)(nil) 24 | ) 25 | 26 | type s3Store struct { 27 | bucket string 28 | bucketPath string 29 | r routes 30 | svc *s3.Client 31 | acl string 32 | cfc *cloudFrontClient 33 | } 34 | 35 | type s3File struct { 36 | o types.Object 37 | } 38 | 39 | func (f *s3File) Key() string { 40 | return *f.o.Key 41 | } 42 | 43 | func (f *s3File) ETag() string { 44 | return *f.o.ETag 45 | } 46 | 47 | func (f *s3File) Size() int64 { 48 | return f.o.Size 49 | } 50 | 51 | func newRemoteStore(cfg *Config, logger printer) (*s3Store, error) { 52 | var s *s3Store 53 | var cfc *cloudFrontClient 54 | 55 | awsConfig, err := newAWSConfig(cfg) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | cf := cloudfront.NewFromConfig(awsConfig) 61 | 62 | if len(cfg.CDNDistributionIDs) > 0 { 63 | cfc, err = 
newCloudFrontClient(cf, logger, cfg) 64 | if err != nil { 65 | return nil, err 66 | } 67 | } 68 | 69 | acl := "private" 70 | if cfg.ACL != "" { 71 | acl = cfg.ACL 72 | } else if cfg.PublicReadACL { 73 | acl = "public-read" 74 | } 75 | 76 | client := s3.NewFromConfig(awsConfig) 77 | 78 | s = &s3Store{svc: client, cfc: cfc, acl: acl, bucket: cfg.BucketName, r: cfg.fileConf.Routes, bucketPath: cfg.BucketPath} 79 | 80 | return s, nil 81 | } 82 | 83 | func (s *s3Store) FileMap(ctx context.Context, opts ...opOption) (map[string]file, error) { 84 | m := make(map[string]file) 85 | 86 | listObjectsV2Response, err := s.svc.ListObjectsV2(ctx, 87 | &s3.ListObjectsV2Input{ 88 | Bucket: aws.String(s.bucket), 89 | Prefix: aws.String(s.bucketPath), 90 | }) 91 | 92 | for { 93 | if err != nil { 94 | return nil, err 95 | } 96 | 97 | for _, o := range listObjectsV2Response.Contents { 98 | m[*o.Key] = &s3File{o: o} 99 | } 100 | 101 | if listObjectsV2Response.IsTruncated { 102 | listObjectsV2Response, err = s.svc.ListObjectsV2(ctx, 103 | &s3.ListObjectsV2Input{ 104 | Bucket: aws.String(s.bucket), 105 | Prefix: aws.String(s.bucketPath), 106 | ContinuationToken: listObjectsV2Response.NextContinuationToken, 107 | }, 108 | ) 109 | } else { 110 | break 111 | } 112 | 113 | } 114 | 115 | return m, nil 116 | } 117 | 118 | func (s *s3Store) Put(ctx context.Context, f localFile, opts ...opOption) error { 119 | input := &s3.PutObjectInput{ 120 | Bucket: aws.String(s.bucket), 121 | Key: aws.String(f.Key()), 122 | Body: f.Content(), 123 | ACL: types.ObjectCannedACL(s.acl), 124 | ContentType: aws.String(f.ContentType()), 125 | ContentLength: f.Size(), 126 | } 127 | 128 | if err := s.applyMetadataToPutObjectInput(input, f); err != nil { 129 | return err 130 | } 131 | 132 | _, err := s.svc.PutObject(ctx, input) 133 | 134 | return err 135 | } 136 | 137 | func (s *s3Store) applyMetadataToPutObjectInput(input *s3.PutObjectInput, f localFile) error { 138 | m := f.Headers() 139 | if len(m) == 0 { 140 | 
return nil 141 | } 142 | 143 | if input.Metadata == nil { 144 | input.Metadata = make(map[string]string) 145 | } 146 | 147 | for k, v := range m { 148 | switch k { 149 | case "Cache-Control": 150 | input.CacheControl = aws.String(v) 151 | case "Content-Disposition": 152 | input.ContentDisposition = aws.String(v) 153 | case "Content-Encoding": 154 | input.ContentEncoding = aws.String(v) 155 | case "Content-Language": 156 | input.ContentLanguage = aws.String(v) 157 | case "Content-Type": 158 | // ContentType is already set. 159 | case "Expires": 160 | t, err := time.Parse(time.RFC1123, v) 161 | if err != nil { 162 | return fmt.Errorf("invalid Expires header: %s", err) 163 | } 164 | input.Expires = &t 165 | default: 166 | input.Metadata[k] = v 167 | } 168 | } 169 | 170 | return nil 171 | } 172 | 173 | func (s *s3Store) DeleteObjects(ctx context.Context, keys []string, opts ...opOption) error { 174 | ids := make([]types.ObjectIdentifier, len(keys)) 175 | for i := 0; i < len(keys); i++ { 176 | ids[i] = types.ObjectIdentifier{Key: aws.String(keys[i])} 177 | } 178 | 179 | _, err := s.svc.DeleteObjects(ctx, &s3.DeleteObjectsInput{ 180 | Bucket: aws.String(s.bucket), 181 | Delete: &types.Delete{ 182 | Objects: ids, 183 | }, 184 | }) 185 | return err 186 | } 187 | 188 | func (s *s3Store) Finalize(ctx context.Context) error { 189 | return nil 190 | } 191 | 192 | func (s *s3Store) InvalidateCDNCache(ctx context.Context, paths ...string) error { 193 | if s.cfc == nil { 194 | return nil 195 | } 196 | return s.cfc.InvalidateCDNCache(ctx, paths...) 
197 | } 198 | -------------------------------------------------------------------------------- /lib/s3_test.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "io" 5 | "testing" 6 | 7 | qt "github.com/frankban/quicktest" 8 | ) 9 | 10 | func TestNewRemoteStoreNoAclProvided(t *testing.T) { 11 | c := qt.New(t) 12 | 13 | cfg := &Config{ 14 | BucketName: "example.com", 15 | RegionName: "us-east-1", 16 | ACL: "", 17 | Silent: true, 18 | } 19 | 20 | s, err := newRemoteStore(cfg, newPrinter(io.Discard)) 21 | c.Assert(err, qt.IsNil) 22 | 23 | c.Assert("private", qt.Equals, s.acl) 24 | } 25 | 26 | func TestNewRemoteStoreAclProvided(t *testing.T) { 27 | c := qt.New(t) 28 | 29 | cfg := &Config{ 30 | BucketName: "example.com", 31 | RegionName: "us-east-1", 32 | ACL: "public-read", 33 | Silent: true, 34 | } 35 | 36 | s, err := newRemoteStore(cfg, newPrinter(io.Discard)) 37 | c.Assert(err, qt.IsNil) 38 | 39 | c.Assert("public-read", qt.Equals, s.acl) 40 | } 41 | 42 | func TestNewRemoteStoreOtherCannedAclProvided(t *testing.T) { 43 | c := qt.New(t) 44 | 45 | cfg := &Config{ 46 | BucketName: "example.com", 47 | RegionName: "us-east-1", 48 | ACL: "bucket-owner-full-control", 49 | Silent: true, 50 | } 51 | 52 | s, err := newRemoteStore(cfg, newPrinter(io.Discard)) 53 | c.Assert(err, qt.IsNil) 54 | 55 | c.Assert("bucket-owner-full-control", qt.Equals, s.acl) 56 | } 57 | 58 | func TestNewRemoteStoreDeprecatedPublicReadACLFlaglProvided(t *testing.T) { 59 | c := qt.New(t) 60 | 61 | cfg := &Config{ 62 | BucketName: "example.com", 63 | RegionName: "us-east-1", 64 | PublicReadACL: true, 65 | ACL: "", 66 | Silent: true, 67 | } 68 | 69 | s, err := newRemoteStore(cfg, newPrinter(io.Discard)) 70 | c.Assert(err, qt.IsNil) 71 | 72 | c.Assert("public-read", qt.Equals, s.acl) 73 | } 74 | -------------------------------------------------------------------------------- /lib/session.go: 
-------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "os" 10 | 11 | "github.com/aws/aws-sdk-go-v2/aws" 12 | "github.com/aws/aws-sdk-go-v2/credentials" 13 | ) 14 | 15 | func newAWSConfig(cfg *Config) (aws.Config, error) { 16 | config := aws.Config{ 17 | Region: cfg.RegionName, 18 | Credentials: createCredentials(cfg), 19 | } 20 | 21 | if cfg.EndpointURL != "" { 22 | resolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { 23 | return aws.Endpoint{ 24 | URL: cfg.EndpointURL, 25 | }, nil 26 | }) 27 | config.EndpointResolverWithOptions = resolver 28 | } 29 | 30 | return config, nil 31 | } 32 | 33 | func createCredentials(cfg *Config) aws.CredentialsProvider { 34 | 35 | if cfg.AccessKey != "" { 36 | return credentials.NewStaticCredentialsProvider(cfg.AccessKey, cfg.SecretKey, os.Getenv("AWS_SESSION_TOKEN")) 37 | } 38 | 39 | // Use AWS default 40 | return nil 41 | } 42 | -------------------------------------------------------------------------------- /lib/session_test.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "testing" 5 | 6 | qt "github.com/frankban/quicktest" 7 | ) 8 | 9 | func TestNewAWSConfigWithCustomEndpoint(t *testing.T) { 10 | c := qt.New(t) 11 | 12 | cfg := &Config{ 13 | BucketName: "example.com", 14 | RegionName: "us-east-1", 15 | EndpointURL: "http://localhost:9000", 16 | Silent: true, 17 | } 18 | 19 | awsCfg, err := newAWSConfig(cfg) 20 | c.Assert(err, qt.IsNil) 21 | 22 | endpoint, err := awsCfg.EndpointResolverWithOptions.ResolveEndpoint("s3", "us-east-1") 23 | c.Assert(err, qt.IsNil) 24 | 25 | c.Assert("http://localhost:9000", qt.Equals, endpoint.URL) 26 | } 27 | 
-------------------------------------------------------------------------------- /lib/stats.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "fmt" 10 | ) 11 | 12 | // DeployStats contains some simple stats about the deployment. 13 | type DeployStats struct { 14 | // Number of files deleted. 15 | Deleted uint64 16 | // Number of files on remote not present locally (-max-delete threshold reached) 17 | Stale uint64 18 | // Number of files uploaded. 19 | Uploaded uint64 20 | // Number of files skipped (i.e. not changed) 21 | Skipped uint64 22 | } 23 | 24 | // Summary returns formatted summary of the stats. 25 | func (d DeployStats) Summary() string { 26 | return fmt.Sprintf("Deleted %d of %d, uploaded %d, skipped %d (%.0f%% changed)", d.Deleted, (d.Deleted + d.Stale), d.Uploaded, d.Skipped, d.PercentageChanged()) 27 | } 28 | 29 | // FileCountChanged returns the total number of files changed on server. 30 | func (d DeployStats) FileCountChanged() uint64 { 31 | return d.Deleted + d.Uploaded 32 | } 33 | 34 | // FileCount returns the total number of files both locally and remote. 35 | func (d DeployStats) FileCount() uint64 { 36 | return d.FileCountChanged() + d.Skipped 37 | } 38 | 39 | // PercentageChanged returns the percentage of files that have changed. 40 | func (d DeployStats) PercentageChanged() float32 { 41 | if d.FileCount() == 0 { 42 | return 0.0 43 | } 44 | return (float32(d.FileCountChanged()) / float32(d.FileCount()) * 100) 45 | } 46 | -------------------------------------------------------------------------------- /lib/store.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 
2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "sort" 12 | "sync" 13 | "sync/atomic" 14 | ) 15 | 16 | var ( 17 | _ remoteStore = (*store)(nil) 18 | _ remoteCDN = (*noUpdateStore)(nil) 19 | ) 20 | 21 | type remoteStore interface { 22 | FileMap(ctx context.Context, opts ...opOption) (map[string]file, error) 23 | Put(ctx context.Context, f localFile, opts ...opOption) error 24 | DeleteObjects(ctx context.Context, keys []string, opts ...opOption) error 25 | Finalize(ctx context.Context) error 26 | } 27 | 28 | type remoteCDN interface { 29 | InvalidateCDNCache(ctx context.Context, paths ...string) error 30 | } 31 | 32 | type store struct { 33 | cfg *Config 34 | delegate remoteStore 35 | 36 | changedKeys []string 37 | changedMu sync.Mutex 38 | } 39 | 40 | func newStore(cfg *Config, s remoteStore) remoteStore { 41 | return &store{cfg: cfg, delegate: s} 42 | } 43 | 44 | func (s *store) trackChanged(keys ...string) { 45 | s.changedMu.Lock() 46 | defer s.changedMu.Unlock() 47 | s.changedKeys = append(s.changedKeys, keys...) 48 | } 49 | 50 | func (s *store) FileMap(ctx context.Context, opts ...opOption) (map[string]file, error) { 51 | return s.delegate.FileMap(ctx, opts...) 52 | } 53 | 54 | func (s *store) Finalize(ctx context.Context) error { 55 | if cdn, ok := s.delegate.(remoteCDN); ok { 56 | return cdn.InvalidateCDNCache(ctx, s.changedKeys...) 57 | } 58 | return nil 59 | } 60 | 61 | func (s *store) Put(ctx context.Context, f localFile, opts ...opOption) error { 62 | conf, err := optsToConfig(opts...) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | err = s.delegate.Put(ctx, f, opts...) 
68 | 69 | if err == nil { 70 | s.trackChanged(f.Key()) 71 | conf.statsCollector(1, 0) 72 | } 73 | 74 | return err 75 | } 76 | 77 | func (s *store) DeleteObjects(ctx context.Context, keys []string, opts ...opOption) error { 78 | if len(keys) == 0 { 79 | return nil 80 | } 81 | 82 | conf, err := optsToConfig(opts...) 83 | if err != nil { 84 | return err 85 | } 86 | 87 | if conf.maxDelete <= 0 { 88 | // Nothing to do. 89 | return nil 90 | } 91 | 92 | chunkSize := 1000 // This is the maximum supported by the AWS SDK. 93 | if conf.maxDelete < chunkSize { 94 | chunkSize = conf.maxDelete 95 | } 96 | 97 | keyChunks := chunkStrings(keys, chunkSize) 98 | deleted := 0 99 | 100 | for i := 0; i < len(keyChunks); i++ { 101 | keyChunk := keyChunks[i] 102 | 103 | err := s.delegate.DeleteObjects(ctx, keyChunk, opts...) 104 | if err != nil { 105 | return err 106 | } 107 | 108 | s.trackChanged(keyChunk...) 109 | deleted += len(keyChunk) 110 | conf.statsCollector(deleted, 0) 111 | if deleted >= conf.maxDelete { 112 | conf.statsCollector(0, len(keys)-deleted) 113 | break 114 | } 115 | } 116 | 117 | return nil 118 | } 119 | 120 | type noUpdateStore struct { 121 | readOps remoteStore 122 | } 123 | 124 | func newNoUpdateStore(base remoteStore) remoteStore { 125 | return &noUpdateStore{readOps: base} 126 | } 127 | 128 | func (s *noUpdateStore) FileMap(ctx context.Context, opts ...opOption) (map[string]file, error) { 129 | if s.readOps != nil { 130 | return s.readOps.FileMap(ctx, opts...) 
131 | } 132 | return make(map[string]file), nil 133 | } 134 | 135 | func (s *noUpdateStore) Put(ctx context.Context, f localFile, opts ...opOption) error { 136 | return nil 137 | } 138 | 139 | func (s *noUpdateStore) DeleteObjects(ctx context.Context, keys []string, opts ...opOption) error { 140 | return nil 141 | } 142 | 143 | func (s *noUpdateStore) Finalize(ctx context.Context) error { 144 | if s.readOps != nil { 145 | return s.readOps.Finalize(ctx) 146 | } 147 | return nil 148 | } 149 | 150 | func (s *noUpdateStore) InvalidateCDNCache(ctx context.Context, paths ...string) error { 151 | sort.Strings(paths) 152 | fmt.Println("\nInvalidate CDN:", paths) 153 | return nil 154 | } 155 | 156 | type opConfig struct { 157 | maxDelete int 158 | statsCollector func(handled, skipped int) 159 | } 160 | 161 | type opOption func(c *opConfig) error 162 | 163 | func withMaxDelete(count int) opOption { 164 | return func(c *opConfig) error { 165 | c.maxDelete = count 166 | return nil 167 | } 168 | } 169 | 170 | func withUploadStats(stats *DeployStats) opOption { 171 | return func(c *opConfig) error { 172 | c.statsCollector = func(handled, skipped int) { 173 | atomic.AddUint64(&stats.Uploaded, uint64(handled)) 174 | atomic.AddUint64(&stats.Skipped, uint64(skipped)) 175 | } 176 | return nil 177 | } 178 | } 179 | 180 | func withDeleteStats(stats *DeployStats) opOption { 181 | return func(c *opConfig) error { 182 | c.statsCollector = func(handled, skipped int) { 183 | atomic.AddUint64(&stats.Deleted, uint64(handled)) 184 | atomic.AddUint64(&stats.Stale, uint64(skipped)) 185 | } 186 | return nil 187 | } 188 | } 189 | 190 | func optsToConfig(opts ...opOption) (*opConfig, error) { 191 | c := &opConfig{} 192 | for _, opt := range opts { 193 | if err := opt(c); err != nil { 194 | return c, err 195 | } 196 | } 197 | 198 | if c.statsCollector == nil { 199 | c.statsCollector = func(handled, skipped int) {} 200 | } 201 | 202 | return c, nil 203 | } 204 | 205 | func chunkStrings(s []string, 
size int) [][]string { 206 | if len(s) == 0 { 207 | return nil 208 | } 209 | 210 | var chunks [][]string 211 | 212 | for i := 0; i < len(s); i += size { 213 | end := i + size 214 | 215 | if end > len(s) { 216 | end = len(s) 217 | } 218 | 219 | chunks = append(chunks, s[i:end]) 220 | } 221 | 222 | return chunks 223 | } 224 | -------------------------------------------------------------------------------- /lib/store_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package lib 7 | 8 | import ( 9 | "context" 10 | "testing" 11 | 12 | qt "github.com/frankban/quicktest" 13 | ) 14 | 15 | func TestChunkStrings(t *testing.T) { 16 | c := qt.New(t) 17 | 18 | c1 := chunkStrings([]string{"a", "b", "c", "d"}, 2) 19 | c2 := chunkStrings([]string{"a", "b", "c", "d"}, 3) 20 | c3 := chunkStrings([]string{}, 2) 21 | c.Assert(c1, qt.DeepEquals, [][]string{{"a", "b"}, {"c", "d"}}) 22 | c.Assert(c2, qt.DeepEquals, [][]string{{"a", "b", "c"}, {"d"}}) 23 | c.Assert(len(c3), qt.Equals, 0) 24 | } 25 | 26 | func TestNoUpdateStore(t *testing.T) { 27 | store := new(noUpdateStore) 28 | c := qt.New(t) 29 | m, err := store.FileMap(context.Background()) 30 | c.Assert(err, qt.IsNil) 31 | c.Assert(len(m), qt.Equals, 0) 32 | c.Assert(store.DeleteObjects(context.Background(), nil), qt.IsNil) 33 | c.Assert(store.Put(context.Background(), nil), qt.IsNil) 34 | } 35 | -------------------------------------------------------------------------------- /lib/testdata/.hidden/.s3deploy.ignore.yml: -------------------------------------------------------------------------------- 1 | routes: 2 | - route: "^main\\.css|deleteme\\.txt$" 3 | ignore: true 4 | - route: "^.+\\.(js|css|svg|ttf)$" 5 | # cache static assets for 20 years 6 | headers: 7 | Cache-Control: "max-age=630720000, no-transform, public" 8 | 
gzip: true 9 | - route: "^.+\\.(png|jpg)$" 10 | headers: 11 | Cache-Control: "max-age=630720000, no-transform, public" 12 | gzip: true 13 | - route: "^.+\\.(html|xml|json)$" 14 | gzip: true 15 | -------------------------------------------------------------------------------- /lib/testdata/.hidden/hidden.txt: -------------------------------------------------------------------------------- 1 | hidden 2 | -------------------------------------------------------------------------------- /lib/testdata/.s3deploy.yml: -------------------------------------------------------------------------------- 1 | routes: 2 | - route: "^.+\\.(js|css|svg|ttf)$" 3 | # cache static assets for 20 years 4 | headers: 5 | Cache-Control: "max-age=630720000, no-transform, public" 6 | gzip: true 7 | - route: "^.+\\.(png|jpg)$" 8 | headers: 9 | Cache-Control: "max-age=630720000, no-transform, public" 10 | gzip: true 11 | - route: "^.+\\.(html|xml|json)$" 12 | gzip: true 13 | 14 | -------------------------------------------------------------------------------- /lib/testdata/ab.txt: -------------------------------------------------------------------------------- 1 | AB -------------------------------------------------------------------------------- /lib/testdata/index.html: -------------------------------------------------------------------------------- 1 | s3deploy 2 | -------------------------------------------------------------------------------- /lib/testdata/main.css: -------------------------------------------------------------------------------- 1 | ABC -------------------------------------------------------------------------------- /lib/url.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "path" 5 | "strings" 6 | ) 7 | 8 | // [RFC 1738](https://www.ietf.org/rfc/rfc1738.txt) 9 | // §2.2 10 | func shouldEscape(c byte) bool { 11 | // alphanum 12 | if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { 
13 | return false 14 | } 15 | 16 | switch c { 17 | case '$', '-', '_', '.', '+', '!', '*', '\'', '(', ')', ',': // Special characters 18 | return false 19 | 20 | case '/', '?', ':', '@', '=', '&': // Reserved characters 21 | return c == '?' 22 | } 23 | // Everything else must be escaped. 24 | return true 25 | } 26 | 27 | // pathEscapeRFC1738 escapes the string so it can be safely placed 28 | // inside a URL path segment according to RFC1738. 29 | // Based on golang native implementation of `url.PathEscape` 30 | // https://golang.org/src/net/url/url.go?s=7976:8008#L276 31 | func pathEscapeRFC1738(s string) string { 32 | spaceCount, hexCount := 0, 0 33 | for i := 0; i < len(s); i++ { 34 | c := s[i] 35 | if shouldEscape(c) { 36 | hexCount++ 37 | } 38 | } 39 | 40 | if spaceCount == 0 && hexCount == 0 { 41 | return s 42 | } 43 | 44 | var buf [64]byte 45 | var t []byte 46 | 47 | required := len(s) + 2*hexCount 48 | if required <= len(buf) { 49 | t = buf[:required] 50 | } else { 51 | t = make([]byte, required) 52 | } 53 | 54 | if hexCount == 0 { 55 | copy(t, s) 56 | for i := 0; i < len(s); i++ { 57 | if s[i] == ' ' { 58 | t[i] = '+' 59 | } 60 | } 61 | return string(t) 62 | } 63 | 64 | j := 0 65 | for i := 0; i < len(s); i++ { 66 | switch c := s[i]; { 67 | case shouldEscape(c): 68 | t[j] = '%' 69 | t[j+1] = "0123456789ABCDEF"[c>>4] 70 | t[j+2] = "0123456789ABCDEF"[c&15] 71 | j += 3 72 | default: 73 | t[j] = s[i] 74 | j++ 75 | } 76 | } 77 | return string(t) 78 | } 79 | 80 | // Like path.Join, but preserves trailing slash.. 81 | func pathJoin(elem ...string) string { 82 | if len(elem) == 0 { 83 | return "" 84 | } 85 | hadSlash := strings.HasSuffix(elem[len(elem)-1], "/") 86 | p := path.Join(elem...) 87 | if hadSlash { 88 | p += "/" 89 | } 90 | return p 91 | } 92 | 93 | // pathClean works like path.Clean but will always preserve a trailing slash. 
94 | func pathClean(p string) string { 95 | hadSlash := strings.HasSuffix(p, "/") 96 | p = path.Clean(p) 97 | if hadSlash && !strings.HasSuffix(p, "/") { 98 | p += "/" 99 | } 100 | return p 101 | } 102 | 103 | // trimIndexHTML remaps paths matching "/index.html" to "/". 104 | func trimIndexHTML(p string) string { 105 | const suffix = "/index.html" 106 | if strings.HasSuffix(p, suffix) { 107 | return p[:len(p)-len(suffix)+1] 108 | } 109 | return p 110 | } 111 | -------------------------------------------------------------------------------- /lib/url_test.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "testing" 5 | 6 | qt "github.com/frankban/quicktest" 7 | ) 8 | 9 | func TestPathEscapeRFC1738(t *testing.T) { 10 | c := qt.New(t) 11 | 12 | testCases := []struct { 13 | input string 14 | expected string 15 | }{ 16 | // should NOT encode 17 | {"/path/", "/path/"}, 18 | {"/path/-/", "/path/-/"}, 19 | {"/path/_/", "/path/_/"}, 20 | {"/path/*", "/path/*"}, 21 | {"/path*", "/path*"}, 22 | {"/path/*.ext", "/path/*.ext"}, 23 | {"/path/filename*", "/path/filename*"}, 24 | 25 | // should encode 26 | {"/path/tilde~file", "/path/tilde%7Efile"}, // https://github.com/bep/s3deploy/issues/46 27 | {"/path/世界", "/path/%E4%B8%96%E7%95%8C"}, // non-ascii 28 | } 29 | 30 | for _, tc := range testCases { 31 | actual := pathEscapeRFC1738(tc.input) 32 | c.Assert(actual, qt.Equals, tc.expected) 33 | } 34 | } 35 | 36 | func TestPathJoin(t *testing.T) { 37 | c := qt.New(t) 38 | 39 | testCases := []struct { 40 | elements []string 41 | expected string 42 | }{ 43 | {[]string{"a", "b"}, "a/b"}, 44 | {[]string{"a", "b/"}, "a/b/"}, 45 | {[]string{"/a", "b/"}, "/a/b/"}, 46 | } 47 | 48 | for _, tc := range testCases { 49 | actual := pathJoin(tc.elements...) 
50 | c.Assert(actual, qt.Equals, tc.expected) 51 | } 52 | } 53 | 54 | func TestPathClean(t *testing.T) { 55 | c := qt.New(t) 56 | 57 | testCases := []struct { 58 | in string 59 | expected string 60 | }{ 61 | {"/path/", "/path/"}, 62 | {"/path/./", "/path/"}, 63 | {"/path", "/path"}, 64 | } 65 | 66 | for _, tc := range testCases { 67 | actual := pathClean(tc.in) 68 | c.Assert(actual, qt.Equals, tc.expected) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package main 7 | 8 | import ( 9 | "fmt" 10 | "log" 11 | "os" 12 | "runtime/debug" 13 | 14 | "github.com/bep/s3deploy/v2/lib" 15 | ) 16 | 17 | var ( 18 | commit = "none" 19 | tag = "(devel)" 20 | date = "unknown" 21 | ) 22 | 23 | func main() { 24 | log.SetFlags(0) 25 | 26 | if err := parseAndRun(os.Args[1:]); err != nil { 27 | log.Fatal(err) 28 | } 29 | } 30 | 31 | func parseAndRun(args []string) error { 32 | cfg, err := lib.ConfigFromArgs(args) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | initVersionInfo() 38 | 39 | if !cfg.Silent { 40 | fmt.Printf("s3deploy %v, commit %v, built at %v\n", tag, commit, date) 41 | } 42 | 43 | if cfg.Help { 44 | cfg.Usage() 45 | return nil 46 | } 47 | 48 | if cfg.PrintVersion { 49 | return nil 50 | } 51 | 52 | stats, err := lib.Deploy(cfg) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | if !cfg.Silent { 58 | fmt.Println(stats.Summary()) 59 | } 60 | 61 | return nil 62 | } 63 | 64 | func initVersionInfo() { 65 | bi, ok := debug.ReadBuildInfo() 66 | if !ok { 67 | return 68 | } 69 | 70 | for _, s := range bi.Settings { 71 | switch s.Key { 72 | case "vcs": 73 | case "vcs.revision": 74 | commit = s.Value 75 | case "vcs.time": 76 | date = s.Value 77 | case 
"vcs.modified": 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2022 Bjørn Erik Pedersen . 2 | // 3 | // Use of this source code is governed by an MIT-style 4 | // license that can be found in the LICENSE file. 5 | 6 | package main 7 | 8 | import ( 9 | "bytes" 10 | "context" 11 | "fmt" 12 | "net/http" 13 | "os" 14 | "sort" 15 | "strings" 16 | "testing" 17 | 18 | "github.com/aws/aws-sdk-go-v2/aws" 19 | "github.com/aws/aws-sdk-go-v2/credentials" 20 | "github.com/aws/aws-sdk-go-v2/service/s3" 21 | "github.com/oklog/ulid/v2" 22 | 23 | "github.com/rogpeppe/go-internal/testscript" 24 | ) 25 | 26 | const s3IntegrationTestHttpRoot = "http://s3deployintegrationtest.s3-website.eu-north-1.amazonaws.com" 27 | 28 | func TestIntegration(t *testing.T) { 29 | if os.Getenv("S3DEPLOY_TEST_KEY") == "" { 30 | t.Skip("S3DEPLOY_TEST_KEY not set") 31 | } 32 | p := commonTestScriptsParam 33 | p.Dir = "testscripts" 34 | testscript.Run(t, p) 35 | } 36 | 37 | // Tests in development can be put in "testscripts/unfinished". 38 | func TestUnfinished(t *testing.T) { 39 | if os.Getenv("CI") != "" { 40 | t.Skip("skip unfinished tests on CI") 41 | } 42 | p := commonTestScriptsParam 43 | p.Dir = "testscripts/unfinished" 44 | testscript.Run(t, p) 45 | } 46 | 47 | func TestMain(m *testing.M) { 48 | os.Exit( 49 | testscript.RunMain(m, map[string]func() int{ 50 | // The main program. 
51 | "s3deploy": func() int { 52 | if err := parseAndRun(os.Args[1:]); err != nil { 53 | fmt.Fprintln(os.Stderr, err) 54 | return 1 55 | } 56 | return 0 57 | }, 58 | }), 59 | ) 60 | } 61 | 62 | const ( 63 | testBucket = "s3deployintegrationtest" 64 | testRegion = "eu-north-1" 65 | ) 66 | 67 | func setup(env *testscript.Env) error { 68 | env.Setenv("S3DEPLOY_TEST_KEY", os.Getenv("S3DEPLOY_TEST_KEY")) 69 | env.Setenv("S3DEPLOY_TEST_SECRET", os.Getenv("S3DEPLOY_TEST_SECRET")) 70 | env.Setenv("S3DEPLOY_TEST_BUCKET", testBucket) 71 | env.Setenv("S3DEPLOY_TEST_REGION", testRegion) 72 | env.Setenv("S3DEPLOY_TEST_URL", s3IntegrationTestHttpRoot) 73 | env.Setenv("S3DEPLOY_TEST_ID", strings.ToLower(ulid.Make().String())) 74 | return nil 75 | } 76 | 77 | func gtKeySecret(ts *testscript.TestScript) (string, string) { 78 | key := ts.Getenv("S3DEPLOY_TEST_KEY") 79 | secret := ts.Getenv("S3DEPLOY_TEST_SECRET") 80 | if key == "" || secret == "" { 81 | ts.Fatalf("S3DEPLOY_TEST_KEY and S3DEPLOY_TEST_SECRET must be set") 82 | } 83 | return key, secret 84 | } 85 | 86 | var commonTestScriptsParam = testscript.Params{ 87 | Setup: func(env *testscript.Env) error { 88 | return setup(env) 89 | }, 90 | Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){ 91 | "s3get": func(ts *testscript.TestScript, neg bool, args []string) { 92 | key := args[0] 93 | testKey, testSecret := gtKeySecret(ts) 94 | config := aws.Config{ 95 | Region: testRegion, 96 | Credentials: credentials.NewStaticCredentialsProvider(testKey, testSecret, os.Getenv("AWS_SESSION_TOKEN")), 97 | } 98 | 99 | client := s3.NewFromConfig(config) 100 | 101 | obj, err := client.GetObject( 102 | context.Background(), 103 | &s3.GetObjectInput{ 104 | Bucket: aws.String(testBucket), 105 | Key: aws.String(key), 106 | }, 107 | ) 108 | if err != nil { 109 | ts.Fatalf("failed to get object: %v", err) 110 | } 111 | defer obj.Body.Close() 112 | var buf bytes.Buffer 113 | if _, err := buf.ReadFrom(obj.Body); err != nil { 114 
| ts.Fatalf("failed to read object: %v", err) 115 | } 116 | var ( 117 | contentEncoding string 118 | contentType string 119 | ) 120 | if obj.ContentEncoding != nil { 121 | contentEncoding = *obj.ContentEncoding 122 | } 123 | if obj.ContentType != nil { 124 | contentType = *obj.ContentType 125 | } 126 | fmt.Fprintf(ts.Stdout(), "s3get %s: ContentEncoding: %s ContentType: %s %s\n", key, contentEncoding, contentType, buf.String()) 127 | for k, v := range obj.Metadata { 128 | fmt.Fprintf(ts.Stdout(), "s3get metadata: %s: %s\n", k, v) 129 | } 130 | }, 131 | 132 | // head executes HTTP HEAD on the given URL and prints the response status code and 133 | // headers to stdout. 134 | "head": func(ts *testscript.TestScript, neg bool, args []string) { 135 | url := s3IntegrationTestHttpRoot + args[0] 136 | fmt.Fprintln(ts.Stdout(), "head", url) 137 | resp, err := http.DefaultClient.Head(url) 138 | if err != nil { 139 | ts.Fatalf("failed to HEAD %s: %v", url, err) 140 | } 141 | path := strings.ReplaceAll(args[0], ts.Getenv("S3DEPLOY_TEST_ID"), "S3DEPLOY_TEST_ID") 142 | fmt.Fprintf(ts.Stdout(), "Head: %s;Status: %d;", path, resp.StatusCode) 143 | // Print headers 144 | var headers []string 145 | for k, v := range resp.Header { 146 | headers = append(headers, fmt.Sprintf("%s: %s", k, v[0])) 147 | } 148 | sort.Strings(headers) 149 | fmt.Fprintf(ts.Stdout(), "Headers: %s", strings.Join(headers, ";")) 150 | }, 151 | 152 | // append appends to a file with a leaading newline. 
153 | "append": func(ts *testscript.TestScript, neg bool, args []string) { 154 | if len(args) < 2 { 155 | ts.Fatalf("usage: append FILE TEXT") 156 | } 157 | 158 | filename := ts.MkAbs(args[0]) 159 | words := args[1:] 160 | for i, word := range words { 161 | words[i] = strings.Trim(word, "\"") 162 | } 163 | text := strings.Join(words, " ") 164 | 165 | _, err := os.Stat(filename) 166 | if err != nil { 167 | if os.IsNotExist(err) { 168 | ts.Fatalf("file does not exist: %s", filename) 169 | } 170 | ts.Fatalf("failed to stat file: %v", err) 171 | } 172 | 173 | f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0o644) 174 | if err != nil { 175 | ts.Fatalf("failed to open file: %v", err) 176 | } 177 | defer f.Close() 178 | 179 | _, err = f.WriteString("\n" + text) 180 | if err != nil { 181 | ts.Fatalf("failed to write to file: %v", err) 182 | } 183 | }, 184 | // replace replaces a string in a file. 185 | "replace": func(ts *testscript.TestScript, neg bool, args []string) { 186 | if len(args) < 3 { 187 | ts.Fatalf("usage: replace FILE OLD NEW") 188 | } 189 | filename := ts.MkAbs(args[0]) 190 | oldContent, err := os.ReadFile(filename) 191 | if err != nil { 192 | ts.Fatalf("failed to read file %v", err) 193 | } 194 | newContent := bytes.Replace(oldContent, []byte(args[1]), []byte(args[2]), -1) 195 | err = os.WriteFile(filename, newContent, 0o644) 196 | if err != nil { 197 | ts.Fatalf("failed to write file: %v", err) 198 | } 199 | }, 200 | }, 201 | } 202 | -------------------------------------------------------------------------------- /testscripts/basic.txt: -------------------------------------------------------------------------------- 1 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 2 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 3 | 4 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ 5 | 6 | stdout 'Deleted 0 of 0, uploaded 1, skipped 0.*100% changed' 7 | 8 | head /$S3DEPLOY_TEST_ID/ 9 
| stdout 'Status: 200' 10 | 11 | -- public/index.html -- 12 | Test

Test

13 | -------------------------------------------------------------------------------- /testscripts/flag_strip-index-html.txt: -------------------------------------------------------------------------------- 1 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 2 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 3 | 4 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ -strip-index-html 5 | 6 | stdout 'Deleted 0 of 0, uploaded 3, skipped 0.*100% changed' 7 | stdout 'foo/ \(not found\) ↑ index.html \(not found\)' 8 | 9 | head /$S3DEPLOY_TEST_ID/index.html 10 | stdout 'Status: 200' 11 | s3get $S3DEPLOY_TEST_ID/foo/ 12 | stdout 's3get.*/foo/:.*ContentType: text/html.*foo' 13 | s3get $S3DEPLOY_TEST_ID/bar/ 14 | stdout 's3get.*/bar/:.*ContentType: text/html.*bar' 15 | s3get $S3DEPLOY_TEST_ID/index.html 16 | stdout 's3get.*/index.html:.*ContentType: text/html.*root' 17 | 18 | # Repeat the same command without any changes 19 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -source=public/ -strip-index-html 20 | stdout 'uploaded 0.*\(0% changed' 21 | 22 | -- public/index.html -- 23 | root 24 | -- public/foo/index.html -- 25 | deliberately no HTML in foo 26 | -- public/bar/index.html -- 27 |

bar

-------------------------------------------------------------------------------- /testscripts/flags.txt: -------------------------------------------------------------------------------- 1 | # No flags. 2 | ! s3deploy 3 | stderr 'AWS bucket is required' 4 | 5 | # Missing keys. 6 | ! s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID 7 | stderr 'Access Denied' 8 | 9 | # Invalid keys. 10 | ! s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -key foo -secret bar 11 | stderr 'InvalidAccessKeyId' 12 | 13 | # Only key. 14 | ! s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -key foo 15 | stderr 'both AWS access key and secret key must be provided' 16 | 17 | # Print help. 18 | s3deploy -h 19 | stderr 'Usage of s3deploy' 20 | 21 | # Print help, flag from env. 22 | env S3DEPLOY_H=true 23 | s3deploy 24 | stderr 'Usage of s3deploy' 25 | env S3DEPLOY_H='' 26 | 27 | # Print help, flag from config file. 28 | s3deploy -config myconfig.yml 29 | stderr 'Usage of s3deploy' 30 | 31 | # Print version. 32 | s3deploy -V 33 | stdout 's3deploy \(devel\), commit none, built at unknown' 34 | 35 | s3deploy -V -quiet 36 | ! stdout . 37 | 38 | # Try 39 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 40 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 41 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -try -acl 'public-read' -source=public/ 42 | stdout 'This is a trial run' 43 | stdout 'Invalidate CDN: \[\w*/index.html \w*/styles.css\]' 44 | stdout 'Deleted 0 of 0, uploaded 2, skipped 0' 45 | 46 | -- public/index.html -- 47 | Test

Test

48 | -- public/styles.css -- 49 | body { background: #fff; } 50 | 51 | -- myconfig.yml -- 52 | # This isn't a very realistic use case, but it's possible ... 53 | h: true -------------------------------------------------------------------------------- /testscripts/routes.txt: -------------------------------------------------------------------------------- 1 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 2 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 3 | 4 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ 5 | 6 | stdout 'Deleted 0 of 0, uploaded 2, skipped 0.*100% changed' 7 | 8 | head /$S3DEPLOY_TEST_ID/ 9 | stdout 'Head: /S3DEPLOY_TEST_ID/;Status: 200;Headers: Content-Disposition: inline;Content-Encoding: gzip;Content-Language: nn;Content-Length: 10\d;Content-Type: text/html; charset=utf-8;.*;Expires: Mon, 01 Dec 2098 16:00:00 GMT;' 10 | 11 | head /$S3DEPLOY_TEST_ID/styles.css 12 | stdout 'Head: /S3DEPLOY_TEST_ID/styles.css;Status: 200;Headers: Cache-Control: max-age=630720000, no-transform, public;Content-Encoding: gzip;Content-Length: 5\d;Content-Type: text/css; charset=utf-8;' 13 | 14 | # This is added as a system defined property. 15 | ! stdout 'X-Amz-Meta-Content-Encoding: gzip' 16 | 17 | # Change 1 file and redeploy. 18 | append public/styles.css 'p { color: red; }' 19 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ 20 | 21 | stdout 'styles.css \(size\)' 22 | stdout 'Deleted 0 of 0, uploaded 1, skipped 1.*50% changed' 23 | 24 | # Delete 1 file and redeploy. 25 | rm public/styles.css 26 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ 27 | stdout 'Deleted 1 of 1, uploaded 0, skipped 1.*50% changed' 28 | 29 | -- public/index.html -- 30 | Test

Test

31 | -- public/styles.css -- 32 | body { background: #fff; } 33 | 34 | -- .s3deploy.yml -- 35 | routes: 36 | - route: "^.+\\.(js|css|svg|ttf)$" 37 | # cache static assets for 20 years 38 | headers: 39 | Cache-Control: "max-age=630720000, no-transform, public" 40 | gzip: true 41 | - route: "^.+\\.(png|jpg)$" 42 | headers: 43 | Cache-Control: "max-age=630720000, no-transform, public" 44 | gzip: false 45 | - route: "^.+\\.(html|xml|json)$" 46 | headers: 47 | Content-Language: "nn" 48 | Content-Disposition: "inline" 49 | Expires: "Thu, 01 Dec 2098 16:00:00 GMT" 50 | gzip: true 51 | -------------------------------------------------------------------------------- /testscripts/skipdirs_custom.txt: -------------------------------------------------------------------------------- 1 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 2 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 3 | 4 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ -skip-local-files 'foo' -skip-local-files bar -skip-local-dirs baz 5 | 6 | stdout 'Deleted 0 of 0, uploaded 2, skipped 0.*100% changed' 7 | stdout 'baz.txt \(not found\) ↑ index.html \(not found\) ↑ $' 8 | 9 | head /$S3DEPLOY_TEST_ID/ 10 | stdout 'Status: 200' 11 | 12 | # By default we skip all . directories and the .DS_Store file. 13 | -- public/index.html -- 14 | Test

Test

15 | -- public/foo.txt -- 16 | foo content. 17 | -- public/bar.txt -- 18 | bar content. 19 | -- public/baz.txt -- 20 | baz content. 21 | -- public/baz/moo.txt -- 22 | moo content. 23 | 24 | -------------------------------------------------------------------------------- /testscripts/skipdirs_default.txt: -------------------------------------------------------------------------------- 1 | env AWS_ACCESS_KEY_ID=$S3DEPLOY_TEST_KEY 2 | env AWS_SECRET_ACCESS_KEY=$S3DEPLOY_TEST_SECRET 3 | 4 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=$WORK/public/ 5 | 6 | stdout 'Deleted 0 of 0, uploaded 1, skipped 0.*100% changed' 7 | 8 | head /$S3DEPLOY_TEST_ID/ 9 | stdout 'Status: 200' 10 | 11 | 12 | # Do the same with relative path. 13 | s3deploy -bucket $S3DEPLOY_TEST_BUCKET -region $S3DEPLOY_TEST_REGION -path $S3DEPLOY_TEST_ID -acl 'public-read' -source=public/ 14 | 15 | stdout 'Deleted 0 of 0, uploaded 0, skipped 1 .0% changed' 16 | 17 | head /$S3DEPLOY_TEST_ID/ 18 | stdout 'Status: 200' 19 | 20 | 21 | # By default we skip all . directories and the .DS_Store file. 22 | -- public/index.html -- 23 | Test

Test

24 | -- public/.hidden/foo.txt -- 25 | foo content. 26 | -- public/.DS_Store -- 27 | binary 28 | -- public/foo/.DS_Store -- 29 | binary 30 | 31 | -------------------------------------------------------------------------------- /testscripts/unfinished/empty.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bep/s3deploy/f951c0dd5743194faee8fb13d4c1954228c9ddf2/testscripts/unfinished/empty.txt -------------------------------------------------------------------------------- /watch_testscripts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # I use "run tests on save" in my editor. 4 | # Unfortunately, changes to text files do not trigger this. Hence this workaround. 5 | while true; do find testscripts -type f -name "*.txt" | entr -pd touch main_test.go; done --------------------------------------------------------------------------------