├── .dockerignore
├── .github
│   └── FUNDING.yml
├── .gitignore
├── CODEOWNERS
├── Dockerfile
├── LICENSE.md
├── NOTICE.md
├── README.md
├── build_tools.go
├── check
│   ├── check_suite_test.go
│   ├── command.go
│   ├── command_test.go
│   └── models.go
├── cmd
│   ├── check
│   │   └── main.go
│   ├── in
│   │   └── main.go
│   └── out
│       └── main.go
├── fakes
│   └── fake_s3client.go
├── go.mod
├── go.sum
├── in
│   ├── archive.go
│   ├── command.go
│   ├── command_test.go
│   ├── in_suite_test.go
│   └── models.go
├── integration
│   ├── check_test.go
│   ├── in_test.go
│   ├── integration_suite_test.go
│   ├── out_test.go
│   └── s3client_test.go
├── models.go
├── out
│   ├── command.go
│   ├── command_test.go
│   ├── models.go
│   └── out_suite_test.go
├── progress_writer_at.go
├── s3client.go
├── s3client_test.go
├── scripts
│   └── test
├── suite_test.go
├── utils.go
└── versions
    ├── versions.go
    ├── versions_suite_test.go
    └── versions_test.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | Dockerfile
2 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [taylorsilva]
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | built-*
2 | *.test
3 | assets/*
4 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @pivotal-jwinters
2 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG base_image=cgr.dev/chainguard/wolfi-base
2 | ARG builder_image=concourse/golang-builder
3 |
4 | ARG BUILDPLATFORM
5 | FROM --platform=${BUILDPLATFORM} ${builder_image} AS builder
6 |
7 | ARG TARGETOS
8 | ARG TARGETARCH
9 | ENV GOOS=$TARGETOS
10 | ENV GOARCH=$TARGETARCH
11 |
12 | WORKDIR /go/src/github.com/concourse/s3-resource
13 | COPY . .
14 | ENV CGO_ENABLED=0
15 | RUN go mod download
16 | RUN go build -o /assets/in ./cmd/in
17 | RUN go build -o /assets/out ./cmd/out
18 | RUN go build -o /assets/check ./cmd/check
19 | RUN set -e; for pkg in $(go list ./...); do \
20 | go test -o "/tests/$(basename $pkg).test" -c $pkg; \
21 | done
22 |
23 | FROM ${base_image} AS resource
24 | RUN apk --no-cache add \
25 | tzdata \
26 | ca-certificates \
27 | cmd:unzip \
28 | cmd:tar \
29 | cmd:gunzip
30 | COPY --from=builder assets/ /opt/resource/
31 | RUN chmod +x /opt/resource/*
32 |
33 | FROM resource AS tests
34 | ARG S3_TESTING_ACCESS_KEY_ID
35 | ARG S3_TESTING_SECRET_ACCESS_KEY
36 | ARG S3_TESTING_SESSION_TOKEN
37 | ARG S3_TESTING_AWS_ROLE_ARN
38 | ARG S3_VERSIONED_TESTING_BUCKET
39 | ARG S3_TESTING_BUCKET
40 | ARG S3_TESTING_REGION
41 | ARG S3_ENDPOINT
42 | ARG S3_USE_PATH_STYLE
43 | ARG TEST_SESSION_TOKEN
44 | COPY --from=builder /tests /go-tests
45 | WORKDIR /go-tests
46 | RUN set -e; for test in /go-tests/*.test; do \
47 | $test; \
48 | done
49 |
50 | FROM resource
51 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/NOTICE.md:
--------------------------------------------------------------------------------
1 | Copyright 2014-2016 Alex Suraci, Chris Brown, and Pivotal Software, Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use
4 | this file except in compliance with the License. You may obtain a copy of the
5 | License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software distributed
10 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
11 | CONDITIONS OF ANY KIND, either express or implied. See the License for the
12 | specific language governing permissions and limitations under the License.
13 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # S3 Resource
2 |
3 | Versions objects in an S3 bucket, by pattern-matching filenames to identify
4 | version numbers.
5 |
10 | ## Source Configuration
11 |
12 | * `bucket`: *Required.* The name of the bucket.
13 |
14 | * `access_key_id`: *Optional.* The AWS access key to use when accessing the
15 | bucket.
16 |
17 | * `secret_access_key`: *Optional.* The AWS secret key to use when accessing
18 | the bucket.
19 |
20 | * `session_token`: *Optional.* The AWS STS session token to use when
21 | accessing the bucket.
22 |
 23 | * `aws_role_arn`: *Optional.* The AWS role ARN to be assumed by the resource.
 24 | The role will be assumed using the AWS SDK's default authentication chain. If
 25 | `access_key_id` and `secret_access_key` are provided, those will be used
 26 | instead to assume the role. If no role is provided, then the resource
 27 | will use the AWS SDK's `AnonymousCredentials` for authentication.
28 |
 29 | * `enable_aws_creds_provider`: *Optional.* Do not fall back to `AnonymousCredentials`
 30 | if no other credentials are provided. This allows the use of the AWS SDK's default
 31 | credentials provider chain, e.g. an EC2 instance profile configured on the underlying worker.
32 |
33 | * `region_name`: *Optional.* The region the bucket is in. Defaults to
34 | `us-east-1`.
35 |
36 | * `private`: *Optional.* Indicates that the bucket is private, so that any
37 | URLs provided by this resource are presigned. Otherwise this resource will
38 | generate generic Virtual-Hosted style URLs. If you're using a custom
 39 | endpoint, you should include the bucket name in the endpoint URL.
40 |
 41 | * `cloudfront_url`: *Optional. Deprecated.* The URL (scheme and domain) of your CloudFront
42 | distribution that is fronting this bucket (e.g
43 | `https://d5yxxxxx.cloudfront.net`). This will affect `in` but not `check`
44 | and `put`. `in` will ignore the `bucket` name setting, exclusively using the
45 | `cloudfront_url`. When configuring CloudFront with versioned buckets, set
46 | `Query String Forwarding and Caching` to `Forward all, cache based on all` to
47 | ensure S3 calls succeed. _Deprecated: Since upgrading this resource to the v2
48 | AWS Go SDK there is no need to specify this along with `endpoint`._
49 |
50 | * `endpoint`: *Optional.* Custom endpoint for using an S3 compatible provider. Can
51 | be just a hostname or include the scheme (e.g. `https://eu1.my-endpoint.com`
 52 | or `eu1.my-endpoint.com`).
53 |
54 | * `disable_ssl`: *Optional.* Disable SSL for the endpoint, useful for S3
55 | compatible providers without SSL.
56 |
57 | * `skip_ssl_verification`: *Optional.* Skip SSL verification for S3 endpoint.
58 | Useful for S3 compatible providers using self-signed SSL certificates.
59 |
 60 | * `skip_download`: *Optional.* Skip downloading the object from S3. Useful to
 61 | only trigger the pipeline without using the object.
62 |
63 | * `server_side_encryption`: *Optional.* The encryption algorithm to use when
 64 | storing objects in S3. One of `AES256`, `aws:kms`, or `aws:kms:dsse`.
65 |
66 | * `sse_kms_key_id`: *Optional.* The ID of the AWS KMS master encryption key
67 | used for the object.
68 |
 69 | * `disable_multipart`: *Optional.* Disable multipart upload. Useful for S3
 70 | compatible providers that do not support multipart uploads.
71 |
 72 | * `use_path_style`: *Optional.* Enables legacy path-style access for S3
 73 | compatible providers. The default behavior is virtual-hosted style (see the example below).
74 |
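As a sketch, a source for an S3-compatible provider might combine `endpoint`, `use_path_style`, and `disable_multipart` (the bucket name, endpoint, and credentials below are placeholders):

``` yaml
- name: release
  type: s3
  source:
    bucket: releases
    regexp: directory_on_s3/release-(.*).tgz
    endpoint: https://eu1.my-endpoint.com
    use_path_style: true
    disable_multipart: true
    access_key_id: ACCESS-KEY
    secret_access_key: SECRET
```
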
75 | ### File Names
76 |
77 | One of the following two options must be specified:
78 |
79 | * `regexp`: *Optional.* The forward-slash (`/`) delimited sequence of patterns to
80 | match against the sub-directories and filenames of the objects stored within
81 | the S3 bucket. The first grouped match is used to extract the version, or if
82 | a group is explicitly named `version`, that group is used. At least one
83 | capture group must be specified, with parentheses.
84 |
85 | The version extracted from this pattern is used to version the resource.
 86 | Semantic versions, or just numbers, are supported. Full regular
 87 | expressions are supported for specifying the capture groups.
88 |
 89 | The full `regexp` will be matched against the S3 objects as if it were anchored
 90 | on both ends, even if you don't specify `^` and `$` explicitly (see the example below).
91 |
 92 | * `versioned_file`: *Optional.* If you enable versioning for your S3 bucket, then
93 | you can keep the file name the same and upload new versions of your file
94 | without resorting to version numbers. This property is the path to the file
95 | in your S3 bucket.
96 |
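For example (an illustrative sketch, not taken from a real pipeline), a pattern with a group explicitly named `version` (Go's `(?P<version>...)` syntax) extracts the version from that group even when another capture group appears first:

``` yaml
- name: release
  type: s3
  source:
    bucket: releases
    # the (?P<version>...) group is used as the version, not the first group
    regexp: directory_on_s3/(beta|stable)/release-(?P<version>.*).tgz
    access_key_id: ACCESS-KEY
    secret_access_key: SECRET
```
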
97 | ### Initial state
98 |
 99 | If no resource versions exist, you can set up this resource to emit an initial version with specified content. This won't create an actual object in S3; it only provides an initial version to Concourse. The resource file will be created as usual when you `get` a resource with an initial version (see the example at the end of this section).
100 |
101 | You can define one of the following two options:
102 |
103 | * `initial_path`: *Optional.* Must be used with the `regexp` option. You should set this to the file path containing the initial version which would match the given regexp. E.g. if `regexp` is `file/build-(.*).zip`, then `initial_path` might be `file/build-0.0.0.zip`. The resource version will be `0.0.0` in this case.
104 |
105 | * `initial_version`: *Optional.* Must be used with the `versioned_file` option. This will be the resource version.
106 |
107 | By default the resource file will be created with no content when `get` runs. You can set the content by using one of the following options:
108 |
109 | * `initial_content_text`: *Optional.* Initial content as a string.
110 |
111 | * `initial_content_binary`: *Optional.* You can pass binary content as a base64 encoded string.
112 |
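A minimal sketch combining `regexp`, `initial_path`, and `initial_content_text` (all values are illustrative):

``` yaml
- name: release
  type: s3
  source:
    bucket: releases
    regexp: file/build-(.*).zip
    initial_path: file/build-0.0.0.zip
    initial_content_text: "initial placeholder, no real build yet"
```
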
113 | ## Behavior
114 |
115 | ### `check`: Extract versions from the bucket.
116 |
117 | Objects will be found via the pattern configured by `regexp`. The versions
118 | will be used to order them (using [semver](http://semver.org/)). Each
119 | object's filename is the resulting version.
120 |
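As an illustration (the object names below mirror the ones used in this repo's tests), ordering is numeric/semver rather than lexical:

``` yaml
# with regexp: files/abc-(.*).tgz the extracted versions order as
# 0.0.1 < 2.4.3 < 2.33.333 < 3.53, so files/abc-3.53.tgz is the latest,
# even though "2.33.333" sorts before "2.4.3" lexically
#   files/abc-0.0.1.tgz
#   files/abc-2.4.3.tgz
#   files/abc-2.33.333.tgz
#   files/abc-3.53.tgz
```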
121 |
122 | ### `in`: Fetch an object from the bucket.
123 |
124 | Places the following files in the destination:
125 |
126 | * `(filename)`: The file fetched from the bucket (if `skip_download` is not `true`).
127 |
128 | * `url`: A file containing the URL of the object in Virtual-Hosted style. If
129 | `private` is `true` this URL will be presigned.
130 |
131 | * `s3_uri`: A file containing the S3 URI (`s3://...`) of the object (for use with `aws s3 cp`, etc.).
132 |
133 | * `version`: The version identified in the file name.
134 |
135 | * `tags.json`: The object's tags represented as a JSON object. Only written if `download_tags` is set to true.
136 |
137 | #### Parameters
138 |
139 | * `skip_download`: *Optional.* Skip downloading the object from S3. Same as the source configuration option, but can be set or overridden on the `get` step. The value must be a true/false string.
140 |
141 | * `unpack`: *Optional.* If true and the file is an archive (tar, gzipped tar, other gzipped file, or zip), unpack the file. Gzipped tarballs will be both ungzipped and untarred. It is ignored when `get` is running on the initial version.
142 |
143 | * `download_tags`: *Optional.* Write object tags to `tags.json`. Value needs to be a true/false string.
144 |
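For instance, a `get` step that unpacks the fetched archive and writes `tags.json` might look like this (a sketch; the resource name matches the examples further below):

``` yaml
- get: release
  params:
    unpack: true
    download_tags: "true"
```
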
145 | ### `out`: Upload an object to the bucket.
146 |
147 | Given a file specified by `file`, upload it to the S3 bucket. If `regexp` is
148 | specified, the new file will be uploaded to the directory that the regex
149 | searches in. If `versioned_file` is specified, the new file will be uploaded as
150 | a new version of that file.
151 |
152 | #### Parameters
153 |
154 | * `file`: *Required.* Path to the file to upload, provided by an output of a task.
155 | If multiple files are matched by the glob, an error is raised. The file which
156 | matches will be placed into the directory structure on S3 as defined in `regexp`
157 | in the resource definition. The matching syntax is bash glob expansion, so
158 | no capture groups, etc.
159 |
160 | * `acl`: *Optional.* [Canned Acl](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
161 | for the uploaded object.
162 |
163 | * `content_type`: *Optional.* MIME [Content-Type](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17)
164 | describing the contents of the uploaded object (see the example below).
165 |
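A `put` step that sets an explicit MIME type might look like this (a sketch; the content type shown is just an example):

``` yaml
- put: release
  params:
    file: path/to/release-*.tgz
    content_type: application/gzip
```
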
166 | ## Example Configuration
167 |
168 | ### Resource
169 |
170 | When the file has the version name in the filename
171 |
172 | ``` yaml
173 | - name: release
174 | type: s3
175 | source:
176 | bucket: releases
177 | regexp: directory_on_s3/release-(.*).tgz
178 | access_key_id: ACCESS-KEY
179 | secret_access_key: SECRET
180 | ```
181 |
182 | or
183 |
184 | When the file is being [versioned by s3](http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html)
185 |
186 | ``` yaml
187 | - name: release
188 | type: s3
189 | source:
190 | bucket: releases
191 | versioned_file: directory_on_s3/release.tgz
192 | access_key_id: ACCESS-KEY
193 | secret_access_key: SECRET
194 | ```
195 |
196 | ### Plan
197 |
198 | ``` yaml
199 | - get: release
200 | ```
201 |
202 | ``` yaml
203 | - put: release
204 | params:
205 | file: path/to/release-*.tgz
206 | acl: public-read
207 | ```
208 |
209 | ## Required IAM Permissions
210 |
211 | ### Non-versioned Buckets
212 |
213 | The bucket itself (e.g. `"arn:aws:s3:::your-bucket"`):
214 | * `s3:ListBucket`
215 |
216 | The objects in the bucket (e.g. `"arn:aws:s3:::your-bucket/*"`):
217 | * `s3:PutObject`
218 | * `s3:PutObjectAcl`
219 | * `s3:GetObject`
220 | * `s3:GetObjectTagging` (if using the `download_tags` option)
221 |
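As a sketch, the non-versioned permissions above could be expressed as an IAM policy document (shown here in CloudFormation-style YAML; substitute your own bucket name):

``` yaml
PolicyDocument:
  Version: "2012-10-17"
  Statement:
    - Effect: Allow
      Action:
        - s3:ListBucket
      Resource: arn:aws:s3:::your-bucket
    - Effect: Allow
      Action:
        - s3:PutObject
        - s3:PutObjectAcl
        - s3:GetObject
        - s3:GetObjectTagging   # only needed with the download_tags option
      Resource: arn:aws:s3:::your-bucket/*
```
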
222 | ### Versioned Buckets
223 |
224 | Everything above and...
225 |
226 | The bucket itself (e.g. `"arn:aws:s3:::your-bucket"`):
227 | * `s3:ListBucketVersions`
228 | * `s3:GetBucketVersioning`
229 |
230 | The objects in the bucket (e.g. `"arn:aws:s3:::your-bucket/*"`):
231 | * `s3:GetObjectVersion`
232 | * `s3:PutObjectVersionAcl`
233 | * `s3:GetObjectVersionTagging` (if using the `download_tags` option)
234 |
235 | ## Development
236 |
237 | ### Prerequisites
238 |
239 | * Go is *required* - the module currently targets Go 1.24 (see `go.mod`); older
240 | releases may not build it.
241 | * docker is *required* - version 17.06.x is tested; earlier versions may also
242 | work.
243 |
244 | ### Running the tests
245 |
246 | The tests are embedded in the `Dockerfile`, ensuring that the testing
247 | environment is consistent across any `docker`-enabled platform. When the docker
248 | image builds, the tests are run inside the docker container; on failure they
249 | will stop the build.
250 |
251 | Run the tests with the following command:
252 |
253 | ```sh
254 | docker build -t s3-resource --target tests .
255 | ```
256 |
257 | #### Integration tests
258 |
259 | The integration tests require two AWS S3 buckets: one without versioning and
260 | another with versioning enabled. The `docker build` step requires setting
261 | `--build-arg` values so the integration tests will run.
262 |
263 | Run the tests with the following command:
264 |
265 | ```sh
266 | docker build . -t s3-resource --target tests \
267 | --build-arg S3_TESTING_ACCESS_KEY_ID="access-key" \
268 | --build-arg S3_TESTING_SECRET_ACCESS_KEY="some-secret" \
269 | --build-arg S3_TESTING_BUCKET="bucket-non-versioned" \
270 | --build-arg S3_VERSIONED_TESTING_BUCKET="bucket-versioned" \
271 | --build-arg S3_TESTING_REGION="us-east-1" \
272 | --build-arg S3_ENDPOINT="https://s3.amazonaws.com" \
273 | --build-arg S3_USE_PATH_STYLE=""
274 | ```
275 |
276 | ##### Speeding up integration tests by skipping large file upload
277 |
278 | One of the integration tests uploads a large file (>40GB) and so can be slow.
279 | It can be skipped by adding the following option when running the tests:
280 | ```
281 | --build-arg S3_TESTING_NO_LARGE_UPLOAD=true
282 | ```
283 |
284 | ##### Integration tests using role assumption
285 |
286 | If `S3_TESTING_AWS_ROLE_ARN` is set to a role ARN, this role will be assumed for accessing
287 | the S3 bucket during integration tests. The whole integration test suite runs either
288 | entirely with role assumption or entirely with direct access via the credentials.
289 |
290 | ##### Required IAM permissions
291 |
292 | In addition to the required permissions above, the `s3:PutObjectTagging` permission is required to run integration tests.
293 |
294 | ### Contributing
295 |
296 | Please make all pull requests to the `master` branch and ensure tests pass
297 | locally.
298 |
299 |
--------------------------------------------------------------------------------
/build_tools.go:
--------------------------------------------------------------------------------
1 | //go:build tools
2 |
3 | package s3resource
4 |
5 | import (
6 | _ "github.com/maxbrunsfeld/counterfeiter/v6"
7 | )
8 |
9 | // This file imports packages that are used when running go generate, or used
10 | // during the development process but not otherwise depended on by built code.
11 |
--------------------------------------------------------------------------------
/check/check_suite_test.go:
--------------------------------------------------------------------------------
1 | package check_test
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "testing"
7 |
8 | . "github.com/onsi/ginkgo/v2"
9 | . "github.com/onsi/gomega"
10 |
11 | "github.com/onsi/gomega/gexec"
12 | )
13 |
14 | var checkPath string
15 |
16 | var _ = BeforeSuite(func() {
17 | var err error
18 |
19 | if _, err = os.Stat("/opt/resource/check"); err == nil {
20 | checkPath = "/opt/resource/check"
21 | } else {
22 | checkPath, err = gexec.Build("github.com/concourse/s3-resource/cmd/check")
23 | Ω(err).ShouldNot(HaveOccurred())
24 | }
25 |
26 | })
27 |
28 | var _ = AfterSuite(func() {
29 | gexec.CleanupBuildArtifacts()
30 | })
31 |
32 | func TestCheck(t *testing.T) {
33 | RegisterFailHandler(Fail)
34 | RunSpecs(t, "Check Suite")
35 | }
36 |
37 | func Fixture(filename string) string {
38 | path := filepath.Join("fixtures", filename)
39 | contents, err := os.ReadFile(path)
40 | if err != nil {
41 | panic(err)
42 | }
43 |
44 | return string(contents)
45 | }
46 |
--------------------------------------------------------------------------------
/check/command.go:
--------------------------------------------------------------------------------
1 | package check
2 |
3 | import (
4 | "errors"
5 |
6 | s3resource "github.com/concourse/s3-resource"
7 | "github.com/concourse/s3-resource/versions"
8 | )
9 |
10 | type Command struct {
11 | s3client s3resource.S3Client
12 | }
13 |
14 | func NewCommand(s3client s3resource.S3Client) *Command {
15 | return &Command{
16 | s3client: s3client,
17 | }
18 | }
19 |
20 | func (command *Command) Run(request Request) (Response, error) {
21 | if ok, message := request.Source.IsValid(); !ok {
22 | return Response{}, errors.New(message)
23 | }
24 |
25 | if request.Source.Regexp != "" {
26 | return command.checkByRegex(request), nil
27 | } else {
28 | return command.checkByVersionedFile(request), nil
29 | }
30 | }
31 |
32 | func (command *Command) checkByRegex(request Request) Response {
33 | extractions := versions.GetBucketFileVersions(command.s3client, request.Source)
34 |
35 | if request.Source.InitialPath != "" {
36 | extraction, ok := versions.Extract(request.Source.InitialPath, request.Source.Regexp)
37 | if ok {
38 | extractions = append([]versions.Extraction{extraction}, extractions...)
39 | }
40 | }
41 |
42 | if len(extractions) == 0 {
43 | return nil
44 | }
45 |
46 | lastVersion, matched := versions.Extract(request.Version.Path, request.Source.Regexp)
47 | if !matched {
48 | return latestVersion(extractions)
49 | } else {
50 | return newVersions(lastVersion, extractions)
51 | }
52 | }
53 |
54 | func (command *Command) checkByVersionedFile(request Request) Response {
55 | response := Response{}
56 |
57 | bucketVersions, err := command.s3client.BucketFileVersions(request.Source.Bucket, request.Source.VersionedFile)
58 |
59 | if err != nil {
60 | s3resource.Fatal("finding versions", err)
61 | }
62 |
63 | if request.Source.InitialVersion != "" {
64 | bucketVersions = append(bucketVersions, request.Source.InitialVersion)
65 | }
66 |
67 | if len(bucketVersions) == 0 {
68 | return response
69 | }
70 |
71 | requestVersionIndex := -1
72 |
73 | if request.Version.VersionID != "" {
74 | for i, bucketVersion := range bucketVersions {
75 | if bucketVersion == request.Version.VersionID {
76 | requestVersionIndex = i
77 | break
78 | }
79 | }
80 | }
81 |
82 | if requestVersionIndex == -1 {
83 | version := s3resource.Version{
84 | VersionID: bucketVersions[0],
85 | }
86 | response = append(response, version)
87 | } else {
88 | for i := requestVersionIndex; i >= 0; i-- {
89 | version := s3resource.Version{
90 | VersionID: bucketVersions[i],
91 | }
92 | response = append(response, version)
93 | }
94 | }
95 |
96 | return response
97 | }
98 |
99 | func latestVersion(extractions versions.Extractions) Response {
100 | lastExtraction := extractions[len(extractions)-1]
101 | return []s3resource.Version{{Path: lastExtraction.Path}}
102 | }
103 |
104 | func newVersions(lastVersion versions.Extraction, extractions versions.Extractions) Response {
105 | response := Response{}
106 |
107 | for _, extraction := range extractions {
108 | if extraction.Version.Compare(lastVersion.Version) >= 0 {
109 | version := s3resource.Version{
110 | Path: extraction.Path,
111 | }
112 | response = append(response, version)
113 | }
114 | }
115 |
116 | return response
117 | }
118 |
--------------------------------------------------------------------------------
/check/command_test.go:
--------------------------------------------------------------------------------
1 | package check_test
2 |
3 | import (
4 | "os"
5 |
6 | . "github.com/onsi/ginkgo/v2"
7 | . "github.com/onsi/gomega"
8 |
9 | s3resource "github.com/concourse/s3-resource"
10 | "github.com/concourse/s3-resource/fakes"
11 |
12 | . "github.com/concourse/s3-resource/check"
13 | )
14 |
15 | var _ = Describe("Check Command", func() {
16 | Describe("running the command", func() {
17 | var (
18 | tmpPath string
19 | request Request
20 |
21 | s3client *fakes.FakeS3Client
22 | command *Command
23 | )
24 |
25 | BeforeEach(func() {
26 | var err error
27 | tmpPath, err = os.MkdirTemp("", "check_command")
28 | Ω(err).ShouldNot(HaveOccurred())
29 |
30 | request = Request{
31 | Source: s3resource.Source{
32 | Bucket: "bucket-name",
33 | },
34 | }
35 |
36 | s3client = &fakes.FakeS3Client{}
37 | command = NewCommand(s3client)
38 |
39 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
40 | Truncated: false,
41 | ContinuationToken: nil,
42 | CommonPrefixes: []string{"files/abc-3/"},
43 | Paths: []string{
44 | "files/abc-0.0.1.tgz",
45 | "files/abc-2.33.333.tgz",
46 | "files/abc-2.4.3.tgz",
47 | "files/abc-3.53.tgz",
48 | },
49 | }, nil)
50 | s3client.ChunkedBucketListReturnsOnCall(1, s3resource.BucketListChunk{
51 | Truncated: false,
52 | ContinuationToken: nil,
53 | Paths: []string{
54 | "files/abc-3/53.tgz",
55 | "files/abc-3/no-magic",
56 | },
57 | }, nil)
58 | })
59 |
60 | AfterEach(func() {
61 | err := os.RemoveAll(tmpPath)
62 | Ω(err).ShouldNot(HaveOccurred())
63 | })
64 |
65 | Context("when there is no previous version", func() {
66 | It("includes the latest version only", func() {
67 | request.Version.Path = ""
68 | request.Source.Regexp = "files/abc-(.*).tgz"
69 |
70 | response, err := command.Run(request)
71 | Ω(err).ShouldNot(HaveOccurred())
72 |
73 | Ω(response).Should(HaveLen(1))
74 | Ω(response).Should(ConsistOf(
75 | s3resource.Version{
76 | Path: "files/abc-3.53.tgz",
77 | },
78 | ))
79 | })
80 |
81 | Context("when the initial version is set", func() {
82 | It("still returns the latest version", func() {
83 | request.Version.Path = ""
84 | request.Source.InitialPath = "files/abc-0.0.tgz"
85 | request.Source.Regexp = "files/abc-(.*).tgz"
86 |
87 | response, err := command.Run(request)
88 | Ω(err).ShouldNot(HaveOccurred())
89 |
90 | Ω(response).Should(HaveLen(1))
91 | Ω(response).Should(ConsistOf(
92 | s3resource.Version{
93 | Path: "files/abc-3.53.tgz",
94 | },
95 | ))
96 | })
97 | })
98 |
99 | Context("when the regexp does not match anything", func() {
100 | It("does not explode", func() {
101 | request.Source.Regexp = "no-files/missing-(.*).tgz"
102 | response, err := command.Run(request)
103 | Ω(err).ShouldNot(HaveOccurred())
104 |
105 | Ω(response).Should(HaveLen(0))
106 | })
107 |
108 | Context("when the initial version is set", func() {
109 | It("returns the initial version", func() {
110 | request.Version.Path = ""
111 | request.Source.InitialPath = "no-files/missing-0.0.tgz"
112 | request.Source.Regexp = "no-files/missing-(.*).tgz"
113 |
114 | response, err := command.Run(request)
115 | Ω(err).ShouldNot(HaveOccurred())
116 |
117 | Ω(response).Should(HaveLen(1))
118 | Ω(response).Should(ConsistOf(
119 | s3resource.Version{
120 | Path: "no-files/missing-0.0.tgz",
121 | },
122 | ))
123 | })
124 | })
125 | })
126 |
127 | Context("when the regex does not match the previous version", func() {
128 | It("returns the latest version that matches the regex", func() {
129 | request.Version.Path = "files/abc-0.0.1.tgz"
130 | request.Source.Regexp = `files/abc-(2\.33.*).tgz`
131 | response, err := command.Run(request)
132 | Ω(err).ShouldNot(HaveOccurred())
133 |
134 | Ω(response).Should(HaveLen(1))
135 | Expect(response).To(ConsistOf(s3resource.Version{Path: "files/abc-2.33.333.tgz"}))
136 | })
137 | })
138 |
139 | Context("when the regexp does not contain any magic regexp char", func() {
140 | It("does not explode", func() {
141 | request.Source.Regexp = "files/abc-3/no-magic"
142 | response, err := command.Run(request)
143 | Ω(err).ShouldNot(HaveOccurred())
144 |
145 | Ω(response).Should(HaveLen(0))
146 | })
147 | })
148 | })
149 |
150 | Context("when there is a previous version", func() {
151 | Context("when using regex that matches the provided version", func() {
152 | It("includes all versions from the previous one and the current one", func() {
153 | request.Version.Path = "files/abc-2.4.3.tgz"
154 | request.Source.Regexp = "files/abc-(.*).tgz"
155 |
156 | response, err := command.Run(request)
157 | Ω(err).ShouldNot(HaveOccurred())
158 |
159 | Ω(response).Should(HaveLen(3))
160 | Ω(response).Should(ConsistOf(
161 | s3resource.Version{
162 | Path: "files/abc-2.4.3.tgz",
163 | },
164 | s3resource.Version{
165 | Path: "files/abc-2.33.333.tgz",
166 | },
167 | s3resource.Version{
168 | Path: "files/abc-3.53.tgz",
169 | },
170 | ))
171 | })
172 | })
173 |
174 | Context("when using versioned file", func() {
175 | Context("when there are existing versions", func() {
176 | BeforeEach(func() {
177 | s3client.BucketFileVersionsReturns([]string{
178 | "file-version-3",
179 | "file-version-2",
180 | "file-version-1",
181 | }, nil)
182 | })
183 |
184 | It("includes all versions from the previous one and the current one", func() {
185 | request.Version.VersionID = "file-version-2"
186 | request.Source.VersionedFile = "files/versioned-file"
187 |
188 | response, err := command.Run(request)
189 | Ω(err).ShouldNot(HaveOccurred())
190 |
191 | Ω(response).Should(HaveLen(2))
192 | Ω(response).Should(ConsistOf(
193 | s3resource.Version{
194 | VersionID: "file-version-2",
195 | },
196 | s3resource.Version{
197 | VersionID: "file-version-3",
198 | },
199 | ))
200 | })
201 | })
202 |
203 | Context("when no version exists", func() {
204 | BeforeEach(func() {
205 | s3client.BucketFileVersionsReturns([]string{}, nil)
206 | })
207 |
208 | It("returns no versions", func() {
209 | request.Version.VersionID = ""
210 | request.Source.VersionedFile = "files/versioned-file"
211 |
212 | response, err := command.Run(request)
213 | Ω(err).ShouldNot(HaveOccurred())
214 |
215 | Ω(response).Should(HaveLen(0))
216 | })
217 |
218 | Context("when the initial version is set", func() {
219 | It("returns the initial version", func() {
220 | request.Version.VersionID = ""
221 | request.Source.VersionedFile = "files/versioned-file"
222 | request.Source.InitialVersion = "file-version-0"
223 |
224 | response, err := command.Run(request)
225 | Ω(err).ShouldNot(HaveOccurred())
226 |
227 | Ω(response).Should(HaveLen(1))
228 | Ω(response).Should(ConsistOf(
229 | s3resource.Version{
230 | VersionID: "file-version-0",
231 | },
232 | ))
233 | })
234 | })
235 | })
236 | })
237 | })
238 | })
239 | })
240 |
--------------------------------------------------------------------------------
/check/models.go:
--------------------------------------------------------------------------------
1 | package check
2 |
3 | import "github.com/concourse/s3-resource"
4 |
5 | type Request struct {
6 | Source s3resource.Source `json:"source"`
7 | Version s3resource.Version `json:"version"`
8 | }
9 |
10 | type Response []s3resource.Version
11 |
--------------------------------------------------------------------------------
/cmd/check/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "os"
6 |
7 | s3resource "github.com/concourse/s3-resource"
8 | "github.com/concourse/s3-resource/check"
9 | )
10 |
11 | func main() {
12 | var request check.Request
13 | inputRequest(&request)
14 |
15 | awsConfig, err := s3resource.NewAwsConfig(
16 | request.Source.AccessKeyID,
17 | request.Source.SecretAccessKey,
18 | request.Source.SessionToken,
19 | request.Source.AwsRoleARN,
20 | request.Source.RegionName,
21 | request.Source.SkipSSLVerification,
22 | request.Source.UseAwsCredsProvider,
23 | )
24 | if err != nil {
25 | s3resource.Fatal("error creating aws config", err)
26 | }
27 |
28 | client, err := s3resource.NewS3Client(
29 | os.Stderr,
30 | awsConfig,
31 | request.Source.Endpoint,
32 | request.Source.DisableSSL,
33 | request.Source.UsePathStyle,
34 | )
35 | if err != nil {
36 | s3resource.Fatal("error creating s3 client", err)
37 | }
38 |
39 | command := check.NewCommand(client)
40 | response, err := command.Run(request)
41 | if err != nil {
42 | s3resource.Fatal("running command", err)
43 | }
44 |
45 | outputResponse(response)
46 | }
47 |
48 | func inputRequest(request *check.Request) {
49 | if err := json.NewDecoder(os.Stdin).Decode(request); err != nil {
50 | s3resource.Fatal("reading request from stdin", err)
51 | }
52 | }
53 |
54 | func outputResponse(response check.Response) {
55 | if err := json.NewEncoder(os.Stdout).Encode(response); err != nil {
56 | s3resource.Fatal("writing response to stdout", err)
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/cmd/in/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "os"
6 |
7 | s3resource "github.com/concourse/s3-resource"
8 | "github.com/concourse/s3-resource/in"
9 | )
10 |
11 | func main() {
12 | if len(os.Args) < 2 {
13 | s3resource.Sayf("usage: %s \n", os.Args[0])
14 | os.Exit(1)
15 | }
16 |
17 | destinationDir := os.Args[1]
18 |
19 | var request in.Request
20 | inputRequest(&request)
21 |
22 | awsConfig, err := s3resource.NewAwsConfig(
23 | request.Source.AccessKeyID,
24 | request.Source.SecretAccessKey,
25 | request.Source.SessionToken,
26 | request.Source.AwsRoleARN,
27 | request.Source.RegionName,
28 | request.Source.SkipSSLVerification,
29 | request.Source.UseAwsCredsProvider,
30 | )
31 | if err != nil {
32 | s3resource.Fatal("error creating aws config", err)
33 | }
34 |
35 | endpoint := request.Source.Endpoint
36 | if len(request.Source.CloudfrontURL) != 0 {
37 | s3resource.Sayf("'cloudfront_url' is deprecated and no longer used. You only need to specify 'endpoint' now.")
38 | }
39 |
40 | client, err := s3resource.NewS3Client(
41 | os.Stderr,
42 | awsConfig,
43 | endpoint,
44 | request.Source.DisableSSL,
45 | request.Source.UsePathStyle,
46 | )
47 | if err != nil {
48 | s3resource.Fatal("error creating s3 client", err)
49 | }
50 |
51 | command := in.NewCommand(client)
52 |
53 | response, err := command.Run(destinationDir, request)
54 | if err != nil {
55 | s3resource.Fatal("running command", err)
56 | }
57 |
58 | outputResponse(response)
59 | }
60 |
61 | func inputRequest(request *in.Request) {
62 | if err := json.NewDecoder(os.Stdin).Decode(request); err != nil {
63 | s3resource.Fatal("reading request from stdin", err)
64 | }
65 | }
66 |
67 | func outputResponse(response in.Response) {
68 | if err := json.NewEncoder(os.Stdout).Encode(response); err != nil {
69 | s3resource.Fatal("writing response to stdout", err)
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/cmd/out/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "os"
6 |
7 | s3resource "github.com/concourse/s3-resource"
8 | "github.com/concourse/s3-resource/out"
9 | )
10 |
11 | func main() {
12 | if len(os.Args) < 2 {
13 | s3resource.Sayf("usage: %s \n", os.Args[0])
14 | os.Exit(1)
15 | }
16 |
17 | var request out.Request
18 | inputRequest(&request)
19 |
20 | sourceDir := os.Args[1]
21 |
22 | awsConfig, err := s3resource.NewAwsConfig(
23 | request.Source.AccessKeyID,
24 | request.Source.SecretAccessKey,
25 | request.Source.SessionToken,
26 | request.Source.AwsRoleARN,
27 | request.Source.RegionName,
28 | request.Source.SkipSSLVerification,
29 | request.Source.UseAwsCredsProvider,
30 | )
31 | if err != nil {
32 | s3resource.Fatal("error creating aws config", err)
33 | }
34 |
35 | client, err := s3resource.NewS3Client(
36 | os.Stderr,
37 | awsConfig,
38 | request.Source.Endpoint,
39 | request.Source.DisableSSL,
40 | request.Source.UsePathStyle,
41 | )
42 | if err != nil {
43 | s3resource.Fatal("error creating s3 client", err)
44 | }
45 |
46 | command := out.NewCommand(os.Stderr, client)
47 | response, err := command.Run(sourceDir, request)
48 | if err != nil {
49 | s3resource.Fatal("running command", err)
50 | }
51 |
52 | outputResponse(response)
53 | }
54 |
55 | func inputRequest(request *out.Request) {
56 | if err := json.NewDecoder(os.Stdin).Decode(request); err != nil {
57 | s3resource.Fatal("reading request from stdin", err)
58 | }
59 | }
60 |
61 | func outputResponse(response out.Response) {
62 | if err := json.NewEncoder(os.Stdout).Encode(response); err != nil {
63 | s3resource.Fatal("writing response to stdout", err)
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/concourse/s3-resource
2 |
3 | go 1.24.0
4 |
5 | toolchain go1.24.1
6 |
7 | require (
8 | github.com/aws/aws-sdk-go-v2 v1.36.3
9 | github.com/aws/aws-sdk-go-v2/config v1.29.14
10 | github.com/aws/aws-sdk-go-v2/credentials v1.17.67
11 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74
12 | github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3
13 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.19
14 | github.com/cppforlife/go-semi-semantic v0.0.0-20160921010311-576b6af77ae4
15 | github.com/fatih/color v1.18.0
16 | github.com/google/uuid v1.6.0
17 | github.com/h2non/filetype v1.1.3
18 | github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
19 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
20 | github.com/onsi/ginkgo/v2 v2.23.3
21 | github.com/onsi/gomega v1.36.2
22 | github.com/vbauerster/mpb/v8 v8.9.3
23 | )
24 |
25 | require (
26 | github.com/VividCortex/ewma v1.2.0 // indirect
27 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
28 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
29 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
30 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
31 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
32 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
33 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
34 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
35 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 // indirect
36 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
37 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
38 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
39 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
40 | github.com/aws/smithy-go v1.22.3 // indirect
41 | github.com/go-logr/logr v1.4.2 // indirect
42 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
43 | github.com/google/go-cmp v0.6.0 // indirect
44 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
45 | github.com/kr/pretty v0.3.1 // indirect
46 | github.com/mattn/go-colorable v0.1.14 // indirect
47 | github.com/mattn/go-isatty v0.0.20 // indirect
48 | github.com/mattn/go-runewidth v0.0.16 // indirect
49 | github.com/onsi/ginkgo v1.2.1-0.20170102031522-a23f924ce96d // indirect
50 | github.com/rivo/uniseg v0.4.7 // indirect
51 | golang.org/x/mod v0.24.0 // indirect
52 | golang.org/x/net v0.39.0 // indirect
53 | golang.org/x/sync v0.13.0 // indirect
54 | golang.org/x/sys v0.32.0 // indirect
55 | golang.org/x/text v0.24.0 // indirect
56 | golang.org/x/tools v0.32.0 // indirect
57 | gopkg.in/yaml.v3 v3.0.1 // indirect
58 | )
59 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
2 | github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
3 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
4 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
5 | github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
6 | github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
7 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
8 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
9 | github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
10 | github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
11 | github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
12 | github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
13 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
14 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
15 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74 h1:+1lc5oMFFHlVBclPXQf/POqlvdpBzjLaN2c3ujDCcZw=
16 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74/go.mod h1:EiskBoFr4SpYnFIbw8UM7DP7CacQXDHEmJqLI1xpRFI=
17 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
18 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
19 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
20 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
21 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
22 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
23 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
24 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
25 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
26 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
27 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 h1:4nm2G6A4pV9rdlWzGMPv4BNtQp22v1hg3yrtkYpeLl8=
28 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
29 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
30 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
31 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
32 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
33 | github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 h1:BRXS0U76Z8wfF+bnkilA2QwpIch6URlm++yPUt9QPmQ=
34 | github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3/go.mod h1:bNXKFFyaiVvWuR6O16h/I1724+aXe/tAkA9/QS01t5k=
35 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
36 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
37 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
38 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
39 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
40 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
41 | github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
42 | github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
43 | github.com/cppforlife/go-semi-semantic v0.0.0-20160921010311-576b6af77ae4 h1:J+ghqo7ZubTzelkjo9hntpTtP/9lUCWH9icEmAW+B+Q=
44 | github.com/cppforlife/go-semi-semantic v0.0.0-20160921010311-576b6af77ae4/go.mod h1:socxpf5+mELPbosI149vWpNlHK6mbfWFxSWOoSndXR8=
45 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
46 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
47 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
48 | github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
49 | github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
50 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
51 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
52 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
53 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
54 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
55 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
56 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
57 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
58 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
59 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
60 | github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
61 | github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
62 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
63 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
64 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
65 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
66 | github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
67 | github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
68 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
69 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
70 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
71 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
72 | github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU=
73 | github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
74 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
75 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
76 | github.com/onsi/ginkgo v1.2.1-0.20170102031522-a23f924ce96d h1:KFDjWk1pKKFlfIOu/D7DqWtZpoNdrNelD7Yktk5qsag=
77 | github.com/onsi/ginkgo v1.2.1-0.20170102031522-a23f924ce96d/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
78 | github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
79 | github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
80 | github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
81 | github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
82 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
83 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
84 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
85 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
86 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
87 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
88 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
89 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
90 | github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
91 | github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
92 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
93 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
94 | github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8=
95 | github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c=
96 | golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
97 | golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
98 | golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
99 | golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
100 | golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
101 | golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
102 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
103 | golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
104 | golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
105 | golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
106 | golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
107 | golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
108 | golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
109 | google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
110 | google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
111 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
112 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
113 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
114 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
115 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
116 |
--------------------------------------------------------------------------------
/in/archive.go:
--------------------------------------------------------------------------------
1 | package in
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "io"
7 | "os"
8 | "os/exec"
9 | "strings"
10 |
11 | "github.com/h2non/filetype"
12 | )
13 |
14 | var archiveMimetypes = []string{
15 | "application/x-gzip",
16 | "application/gzip",
17 | "application/x-tar",
18 | "application/zip",
19 | }
20 |
21 | func mimetype(r *bufio.Reader) (string, error) {
22 | bs, err := r.Peek(512)
23 | if err != nil && err != io.EOF {
24 | return "", err
25 | }
26 |
27 | kind, err := filetype.Match(bs)
28 | if err != nil {
29 | return "", err
30 | }
31 |
32 | return kind.MIME.Value, nil
33 | }
34 |
35 | func archiveMimetype(filename string) string {
36 | f, err := os.Open(filename)
37 | if err != nil {
38 | return ""
39 | }
40 | defer f.Close()
41 |
42 | mime, err := mimetype(bufio.NewReader(f))
43 | if err != nil {
44 | return ""
45 | }
46 |
47 | for i := range archiveMimetypes {
48 | if strings.HasPrefix(mime, archiveMimetypes[i]) {
49 | return archiveMimetypes[i]
50 | }
51 | }
52 |
53 | return ""
54 | }
55 |
56 | func inflate(mime, path, destination string) error {
57 | var cmd *exec.Cmd
58 |
59 | switch mime {
60 | case "application/zip":
61 | cmd = exec.Command("unzip", "-P", "", "-d", destination, path)
62 | defer os.Remove(path)
63 |
64 | case "application/x-tar":
65 | cmd = exec.Command("tar", "xf", path, "-C", destination)
66 | defer os.Remove(path)
67 |
68 | case "application/gzip", "application/x-gzip":
69 | cmd = exec.Command("gunzip", path)
70 |
71 | default:
72 | return fmt.Errorf("don't know how to extract %s", mime)
73 | }
74 |
75 | return cmd.Run()
76 | }
77 |
--------------------------------------------------------------------------------
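
A minimal, illustrative sketch of the detection logic in in/archive.go above (not part of the repository): the first bytes of the stream are peeked, exactly as mimetype does, and classified with github.com/h2non/filetype before inflate shells out to gunzip, tar or unzip. The in-memory gzip payload is an assumption made purely for the example.

package main

// Illustrative sketch only: classify an in-memory gzip payload the same way
// in/archive.go does, by peeking at the leading bytes and matching them with
// the filetype library.

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"

	"github.com/h2non/filetype"
)

func main() {
	// Build a small gzip payload in memory.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("some-contents")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Peek up to 512 bytes; io.EOF here just means the payload is short.
	head, _ := bufio.NewReader(&buf).Peek(512)

	kind, err := filetype.Match(head)
	if err != nil {
		panic(err)
	}
	fmt.Println(kind.MIME.Value) // "application/gzip" for a gzip stream
}
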
/in/command.go:
--------------------------------------------------------------------------------
1 | package in
2 |
3 | import (
4 | "encoding/base64"
5 | "errors"
6 | "fmt"
7 | "os"
8 | "path"
9 | "path/filepath"
10 | "strconv"
11 |
12 | s3resource "github.com/concourse/s3-resource"
13 | "github.com/concourse/s3-resource/versions"
14 | )
15 |
16 | var ErrMissingPath = errors.New("missing path in request")
17 |
18 | type Command struct {
19 | s3client s3resource.S3Client
20 | }
21 |
22 | func NewCommand(s3client s3resource.S3Client) *Command {
23 | return &Command{
24 | s3client: s3client,
25 | }
26 | }
27 |
28 | func (command *Command) Run(destinationDir string, request Request) (Response, error) {
29 | if ok, message := request.Source.IsValid(); !ok {
30 | return Response{}, errors.New(message)
31 | }
32 |
33 | err := os.MkdirAll(destinationDir, 0755)
34 | if err != nil {
35 | return Response{}, err
36 | }
37 |
38 | var remotePath string
39 | var versionNumber string
40 | var versionID string
41 | var url string
42 | var s3_uri string
43 | var isInitialVersion bool
44 | var skipDownload bool
45 |
46 | if request.Source.Regexp != "" {
47 | if request.Version.Path == "" {
48 | return Response{}, ErrMissingPath
49 | }
50 |
51 | remotePath = request.Version.Path
52 |
53 | extraction, ok := versions.Extract(remotePath, request.Source.Regexp)
54 | if !ok {
55 | return Response{}, fmt.Errorf("regex does not match provided version: %#v", request.Version)
56 | }
57 |
58 | versionNumber = extraction.VersionNumber
59 |
60 | isInitialVersion = request.Source.InitialPath != "" && request.Version.Path == request.Source.InitialPath
61 | } else {
62 | remotePath = request.Source.VersionedFile
63 | versionNumber = request.Version.VersionID
64 | versionID = request.Version.VersionID
65 |
66 | isInitialVersion = request.Source.InitialVersion != "" && request.Version.VersionID == request.Source.InitialVersion
67 | }
68 |
69 | if isInitialVersion {
70 | if request.Source.InitialContentText != "" || request.Source.InitialContentBinary == "" {
71 | err = command.createInitialFile(destinationDir, path.Base(remotePath), []byte(request.Source.InitialContentText))
72 | if err != nil {
73 | return Response{}, err
74 | }
75 | }
76 | if request.Source.InitialContentBinary != "" {
77 | b, err := base64.StdEncoding.DecodeString(request.Source.InitialContentBinary)
78 | if err != nil {
79 | return Response{}, errors.New("failed to decode initial_content_binary, make sure it's base64 encoded")
80 | }
81 | err = command.createInitialFile(destinationDir, path.Base(remotePath), b)
82 | if err != nil {
83 | return Response{}, err
84 | }
85 | }
86 | } else {
87 |
88 | if request.Params.SkipDownload != "" {
89 | skipDownload, err = strconv.ParseBool(request.Params.SkipDownload)
90 | if err != nil {
91 | return Response{}, fmt.Errorf("skip_download defined but invalid value: %s", request.Params.SkipDownload)
92 | }
93 | } else {
94 | skipDownload = request.Source.SkipDownload
95 | }
96 |
97 | if !skipDownload {
98 | err = command.downloadFile(
99 | request.Source.Bucket,
100 | remotePath,
101 | versionID,
102 | destinationDir,
103 | path.Base(remotePath),
104 | )
105 | if err != nil {
106 | return Response{}, err
107 | }
108 |
109 | if request.Params.Unpack {
110 | destinationPath := filepath.Join(destinationDir, path.Base(remotePath))
111 | mime := archiveMimetype(destinationPath)
112 | if mime == "" {
113 | return Response{}, fmt.Errorf("not an archive: %s", destinationPath)
114 | }
115 |
116 | err = extractArchive(mime, destinationPath)
117 | if err != nil {
118 | return Response{}, err
119 | }
120 | }
121 | }
122 |
123 | if request.Params.DownloadTags {
124 | err = command.downloadTags(
125 | request.Source.Bucket,
126 | remotePath,
127 | versionID,
128 | destinationDir,
129 | )
130 | if err != nil {
131 | return Response{}, err
132 | }
133 | }
134 |
135 | url, err = command.getURL(request, remotePath)
136 | if err != nil {
137 | return Response{}, err
138 | }
139 | err = command.writeURLFile(destinationDir, url)
140 | if err != nil {
141 | return Response{}, err
142 | }
143 | s3_uri = command.gets3URI(request, remotePath)
144 | if err = command.writeS3URIFile(destinationDir, s3_uri); err != nil {
145 | return Response{}, err
146 | }
147 | }
148 |
149 | err = command.writeVersionFile(destinationDir, versionNumber)
150 | if err != nil {
151 | return Response{}, err
152 | }
153 |
154 | metadata := command.metadata(remotePath, request.Source.Private, url)
155 |
156 | if versionID == "" {
157 | return Response{
158 | Version: s3resource.Version{
159 | Path: remotePath,
160 | },
161 | Metadata: metadata,
162 | }, nil
163 | }
164 |
165 | return Response{
166 | Version: s3resource.Version{
167 | VersionID: versionID,
168 | },
169 | Metadata: metadata,
170 | }, nil
171 | }
172 |
173 | func (command *Command) writeURLFile(destDir string, url string) error {
174 | return os.WriteFile(filepath.Join(destDir, "url"), []byte(url), 0644)
175 | }
176 |
177 | func (command *Command) writeS3URIFile(destDir string, s3_uri string) error {
178 | return os.WriteFile(filepath.Join(destDir, "s3_uri"), []byte(s3_uri), 0644)
179 | }
180 |
181 | func (command *Command) writeVersionFile(destDir string, versionNumber string) error {
182 | return os.WriteFile(filepath.Join(destDir, "version"), []byte(versionNumber), 0644)
183 | }
184 |
185 | func (command *Command) downloadFile(bucketName string, remotePath string, versionID string, destinationDir string, destinationFile string) error {
186 | localPath := filepath.Join(destinationDir, destinationFile)
187 |
188 | return command.s3client.DownloadFile(
189 | bucketName,
190 | remotePath,
191 | versionID,
192 | localPath,
193 | )
194 | }
195 |
196 | func (command *Command) downloadTags(bucketName string, remotePath string, versionID string, destinationDir string) error {
197 | localPath := filepath.Join(destinationDir, "tags.json")
198 |
199 | return command.s3client.DownloadTags(
200 | bucketName,
201 | remotePath,
202 | versionID,
203 | localPath,
204 | )
205 | }
206 |
207 | func (command *Command) createInitialFile(destDir string, destFile string, data []byte) error {
208 | return os.WriteFile(filepath.Join(destDir, destFile), data, 0644)
209 | }
210 |
211 | func (command *Command) metadata(remotePath string, private bool, url string) []s3resource.MetadataPair {
212 | remoteFilename := filepath.Base(remotePath)
213 |
214 | metadata := []s3resource.MetadataPair{
215 | {
216 | Name: "filename",
217 | Value: remoteFilename,
218 | },
219 | }
220 |
221 | if url != "" && !private {
222 | metadata = append(metadata, s3resource.MetadataPair{
223 | Name: "url",
224 | Value: url,
225 | })
226 | }
227 |
228 | return metadata
229 | }
230 |
231 | func (command *Command) getURL(request Request, remotePath string) (string, error) {
232 | return command.s3client.URL(request.Source.Bucket, remotePath, request.Source.Private, request.Version.VersionID)
233 | }
234 |
235 | func (command *Command) gets3URI(request Request, remotePath string) string {
236 | return "s3://" + request.Source.Bucket + "/" + remotePath
237 | }
238 |
239 | func extractArchive(mime, filename string) error {
240 | destDir := filepath.Dir(filename)
241 |
242 | err := inflate(mime, filename, destDir)
243 | if err != nil {
244 | return fmt.Errorf("failed to extract archive: %s", err)
245 | }
246 |
247 | if mime == "application/gzip" || mime == "application/x-gzip" {
248 | fileInfos, err := os.ReadDir(destDir)
249 | if err != nil {
250 | return fmt.Errorf("failed to read dir: %s", err)
251 | }
252 |
253 | if len(fileInfos) != 1 {
254 | return fmt.Errorf("%d files found after gunzip; expected 1", len(fileInfos))
255 | }
256 |
257 | filename = filepath.Join(destDir, fileInfos[0].Name())
258 | mime = archiveMimetype(filename)
259 | if mime == "application/x-tar" {
260 | err = inflate(mime, filename, destDir)
261 | if err != nil {
262 | return fmt.Errorf("failed to extract archive: %s", err)
263 | }
264 | }
265 | }
266 |
267 | return nil
268 | }
269 |
--------------------------------------------------------------------------------
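
A short sketch (not part of the repository) of driving Command.Run the way the unit tests below do: a fakes.FakeS3Client stands in for S3, and after a successful run the destination directory contains the url, s3_uri and version files written by the code above. The URL returned by the fake and the bucket/regexp values are assumptions for the example.

package main

// Illustrative sketch only: exercise in.Command against the fake S3 client,
// mirroring the setup used in in/command_test.go.

import (
	"fmt"
	"os"
	"path/filepath"

	s3resource "github.com/concourse/s3-resource"
	"github.com/concourse/s3-resource/fakes"
	"github.com/concourse/s3-resource/in"
)

func main() {
	destDir, err := os.MkdirTemp("", "in-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(destDir)

	fake := &fakes.FakeS3Client{}
	fake.URLReturns("https://example.com/files/a-file-1.3", nil) // assumed URL

	command := in.NewCommand(fake)
	response, err := command.Run(destDir, in.Request{
		Source: s3resource.Source{
			Bucket: "bucket-name",
			Regexp: "files/a-file-(.*)",
		},
		Version: s3resource.Version{Path: "files/a-file-1.3"},
	})
	if err != nil {
		panic(err)
	}

	// Run wrote the url, s3_uri and version files alongside the (fake) download.
	url, _ := os.ReadFile(filepath.Join(destDir, "url"))
	version, _ := os.ReadFile(filepath.Join(destDir, "version"))
	fmt.Println(response.Version.Path, string(url), string(version))
}
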
/in/command_test.go:
--------------------------------------------------------------------------------
1 | package in_test
2 |
3 | import (
4 | "archive/tar"
5 | "archive/zip"
6 | "compress/gzip"
7 | "io"
8 | "log"
9 | "os"
10 | "path"
11 | "path/filepath"
12 | "strings"
13 |
14 | . "github.com/onsi/ginkgo/v2"
15 | . "github.com/onsi/gomega"
16 |
17 | s3resource "github.com/concourse/s3-resource"
18 | . "github.com/concourse/s3-resource/in"
19 |
20 | "github.com/concourse/s3-resource/fakes"
21 | )
22 |
23 | var _ = Describe("In Command", func() {
24 | Describe("running the command", func() {
25 | var (
26 | tmpPath string
27 | destDir string
28 | request Request
29 |
30 | s3client *fakes.FakeS3Client
31 | command *Command
32 | )
33 |
34 | BeforeEach(func() {
35 | var err error
36 | tmpPath, err = os.MkdirTemp("", "in_command")
37 | Ω(err).ShouldNot(HaveOccurred())
38 |
39 | destDir = filepath.Join(tmpPath, "destination")
40 | request = Request{
41 | Source: s3resource.Source{
42 | Bucket: "bucket-name",
43 | Regexp: "files/a-file-(.*)",
44 | },
45 | Version: s3resource.Version{
46 | Path: "files/a-file-1.3",
47 | },
48 | }
49 |
50 | s3client = &fakes.FakeS3Client{}
51 | command = NewCommand(s3client)
52 |
53 | s3client.URLReturns("http://google.com", nil)
54 | })
55 |
56 | AfterEach(func() {
57 | err := os.RemoveAll(tmpPath)
58 | Ω(err).ShouldNot(HaveOccurred())
59 | })
60 |
61 | It("creates the destination directory", func() {
62 | Ω(destDir).ShouldNot(ExistOnFilesystem())
63 |
64 | _, err := command.Run(destDir, request)
65 | Ω(err).ShouldNot(HaveOccurred())
66 |
67 | Ω(destDir).Should(ExistOnFilesystem())
68 | })
69 |
70 | Context("when there is no path in the requested version", func() {
71 | BeforeEach(func() {
72 | request.Version.Path = ""
73 | })
74 |
75 | It("returns an error", func() {
76 | _, err := command.Run(destDir, request)
77 | Expect(err).To(MatchError(ErrMissingPath))
78 | })
79 | })
80 |
81 | Context("when configured globally to skip download", func() {
82 | BeforeEach(func() {
83 | request.Source.SkipDownload = true
84 | })
85 |
86 | It("doesn't download the file", func() {
87 | _, err := command.Run(destDir, request)
88 | Ω(err).ShouldNot(HaveOccurred())
89 | Ω(s3client.DownloadFileCallCount()).Should(Equal(0))
90 | })
91 | })
92 |
93 | Context("when configured locally to skip download", func() {
94 | BeforeEach(func() {
95 | request.Params.SkipDownload = "true"
96 | })
97 |
98 | It("doesn't download the file", func() {
99 | _, err := command.Run(destDir, request)
100 | Ω(err).ShouldNot(HaveOccurred())
101 | Ω(s3client.DownloadFileCallCount()).Should(Equal(0))
102 | })
103 | })
104 |
105 | Context("when override locally to not skip download", func() {
106 | BeforeEach(func() {
107 | request.Source.SkipDownload = true
108 | request.Params.SkipDownload = "false"
109 | })
110 |
111 | It("doesn't download the file", func() {
112 | _, err := command.Run(destDir, request)
113 | Ω(err).ShouldNot(HaveOccurred())
114 | Ω(s3client.DownloadFileCallCount()).Should(Equal(1))
115 | })
116 | })
117 |
118 | Context("when override using a wrong value for local skipdownload", func() {
119 | BeforeEach(func() {
120 | request.Params.SkipDownload = "foo"
121 | })
122 |
123 | It("doesn't download the file", func() {
124 | _, err := command.Run(destDir, request)
125 | Expect(err).To(HaveOccurred())
126 | Expect(err.Error()).To(ContainSubstring("skip_download defined but invalid value"))
127 | })
128 | })
129 |
130 | Context("when there is an existing version in the request", func() {
131 | BeforeEach(func() {
132 | request.Version.Path = "files/a-file-1.3"
133 | })
134 |
135 | It("downloads the existing version of the file", func() {
136 | _, err := command.Run(destDir, request)
137 | Ω(err).ShouldNot(HaveOccurred())
138 |
139 | Ω(s3client.DownloadFileCallCount()).Should(Equal(1))
140 | bucketName, remotePath, versionID, localPath := s3client.DownloadFileArgsForCall(0)
141 |
142 | Ω(bucketName).Should(Equal("bucket-name"))
143 | Ω(remotePath).Should(Equal("files/a-file-1.3"))
144 | Ω(versionID).Should(BeEmpty())
145 | Ω(localPath).Should(Equal(filepath.Join(destDir, "a-file-1.3")))
146 | })
147 |
148 | It("creates a 'url' file that contains the URL", func() {
149 | urlPath := filepath.Join(destDir, "url")
150 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
151 |
152 | _, err := command.Run(destDir, request)
153 | Ω(err).ShouldNot(HaveOccurred())
154 |
155 | Ω(urlPath).Should(ExistOnFilesystem())
156 | contents, err := os.ReadFile(urlPath)
157 | Ω(err).ShouldNot(HaveOccurred())
158 | Ω(string(contents)).Should(Equal("http://google.com"))
159 |
160 | bucketName, remotePath, private, versionID := s3client.URLArgsForCall(0)
161 | Ω(bucketName).Should(Equal("bucket-name"))
162 | Ω(remotePath).Should(Equal("files/a-file-1.3"))
163 | Ω(private).Should(Equal(false))
164 | Ω(versionID).Should(BeEmpty())
165 | })
166 |
167 | It("creates a 's3_uri' file that contains the S3 URI", func() {
168 | uriPath := filepath.Join(destDir, "s3_uri")
169 | Ω(uriPath).ShouldNot(ExistOnFilesystem())
170 |
171 | _, err := command.Run(destDir, request)
172 | Ω(err).ShouldNot(HaveOccurred())
173 |
174 | Ω(uriPath).Should(ExistOnFilesystem())
175 | contents, err := os.ReadFile(uriPath)
176 | Ω(err).ShouldNot(HaveOccurred())
177 | Ω(string(contents)).Should(Equal("s3://" + request.Source.Bucket + "/files/a-file-1.3"))
178 | })
179 |
180 | Context("when configured with private URLs", func() {
181 | BeforeEach(func() {
182 | request.Source.Private = true
183 | })
184 |
185 | It("creates a 'url' file that contains the private URL if told to do that", func() {
186 | urlPath := filepath.Join(destDir, "url")
187 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
188 |
189 | _, err := command.Run(destDir, request)
190 | Ω(err).ShouldNot(HaveOccurred())
191 |
192 | Ω(urlPath).Should(ExistOnFilesystem())
193 | contents, err := os.ReadFile(urlPath)
194 | Ω(err).ShouldNot(HaveOccurred())
195 | Ω(string(contents)).Should(Equal("http://google.com"))
196 |
197 | Ω(s3client.URLCallCount()).Should(Equal(1))
198 | bucketName, remotePath, private, versionID := s3client.URLArgsForCall(0)
199 | Ω(bucketName).Should(Equal("bucket-name"))
200 | Ω(remotePath).Should(Equal("files/a-file-1.3"))
201 | Ω(private).Should(Equal(true))
202 | Ω(versionID).Should(BeEmpty())
203 | })
204 | })
205 |
206 | It("creates a 'version' file that contains the matched version", func() {
207 | versionFile := filepath.Join(destDir, "version")
208 | Ω(versionFile).ShouldNot(ExistOnFilesystem())
209 |
210 | _, err := command.Run(destDir, request)
211 | Ω(err).ShouldNot(HaveOccurred())
212 |
213 | Ω(versionFile).Should(ExistOnFilesystem())
214 | contents, err := os.ReadFile(versionFile)
215 | Ω(err).ShouldNot(HaveOccurred())
216 | Ω(string(contents)).Should(Equal("1.3"))
217 | })
218 |
219 | Describe("the response", func() {
220 | It("has a version that is the remote file path", func() {
221 | response, err := command.Run(destDir, request)
222 | Ω(err).ShouldNot(HaveOccurred())
223 |
224 | Ω(response.Version.Path).Should(Equal("files/a-file-1.3"))
225 | })
226 |
227 | It("has metadata about the file", func() {
228 | response, err := command.Run(destDir, request)
229 | Ω(err).ShouldNot(HaveOccurred())
230 |
231 | Ω(response.Metadata[0].Name).Should(Equal("filename"))
232 | Ω(response.Metadata[0].Value).Should(Equal("a-file-1.3"))
233 |
234 | Ω(response.Metadata[1].Name).Should(Equal("url"))
235 | Ω(response.Metadata[1].Value).Should(Equal("http://google.com"))
236 | })
237 |
238 | Context("when the output is private", func() {
239 | BeforeEach(func() {
240 | request.Source.Private = true
241 | })
242 |
243 | It("doesn't include the URL in the metadata", func() {
244 | response, err := command.Run(destDir, request)
245 | Ω(err).ShouldNot(HaveOccurred())
246 |
247 | Ω(response.Metadata).Should(HaveLen(1))
248 | Ω(response.Metadata[0].Name).ShouldNot(Equal("url"))
249 | })
250 | })
251 | })
252 | })
253 |
254 | Context("when the Regexp does not match the provided version", func() {
255 | BeforeEach(func() {
256 | request.Source.Regexp = "not-matching-anything"
257 | })
258 |
259 | It("returns an error", func() {
260 | _, err := command.Run(destDir, request)
261 | Expect(err).To(HaveOccurred())
262 | Expect(err.Error()).To(ContainSubstring("regex does not match provided version"))
263 | Expect(err.Error()).To(ContainSubstring("files/a-file-1.3"))
264 | })
265 | })
266 |
267 | Context("when params is configured to unpack the file", func() {
268 | BeforeEach(func() {
269 | request.Params.Unpack = true
270 | })
271 |
272 | Context("when the file is a tarball", func() {
273 | BeforeEach(func() {
274 | s3client.DownloadFileStub = func(bucketName string, remotePath string, versionID string, localPath string) error {
275 | src := filepath.Join(tmpPath, "some-file")
276 |
277 | err := os.WriteFile(src, []byte("some-contents"), os.ModePerm)
278 | Expect(err).NotTo(HaveOccurred())
279 |
280 | err = createTarball([]string{src}, tmpPath, localPath)
281 | Expect(err).NotTo(HaveOccurred())
282 |
283 | _, err = os.Stat(localPath)
284 | Expect(err).NotTo(HaveOccurred())
285 |
286 | return nil
287 | }
288 | })
289 |
290 | It("extracts the tarball", func() {
291 | _, err := command.Run(destDir, request)
292 | Expect(err).NotTo(HaveOccurred())
293 |
294 | bs, err := os.ReadFile(filepath.Join(destDir, "some-file"))
295 | Expect(err).NotTo(HaveOccurred())
296 |
297 | Expect(bs).To(Equal([]byte("some-contents")))
298 | })
299 | })
300 |
301 | Context("when the file is a zip", func() {
302 | BeforeEach(func() {
303 | s3client.DownloadFileStub = func(bucketName string, remotePath string, versionID string, localPath string) error {
304 | inDir, err := os.MkdirTemp(tmpPath, "zip-dir")
305 | Expect(err).NotTo(HaveOccurred())
306 |
307 | err = os.WriteFile(path.Join(inDir, "some-file"), []byte("some-contents"), os.ModePerm)
308 | Expect(err).NotTo(HaveOccurred())
309 |
310 | err = zipit(path.Join(inDir, "/"), localPath, "")
311 | Expect(err).NotTo(HaveOccurred())
312 |
313 | return nil
314 | }
315 | })
316 |
317 | It("unzips the zip", func() {
318 | _, err := command.Run(destDir, request)
319 | Expect(err).NotTo(HaveOccurred())
320 |
321 | bs, err := os.ReadFile(filepath.Join(destDir, "some-file"))
322 | Expect(err).NotTo(HaveOccurred())
323 |
324 | Expect(bs).To(Equal([]byte("some-contents")))
325 | })
326 | })
327 |
328 | Context("when the file is gzipped", func() {
329 | BeforeEach(func() {
330 | request.Version.Path = "files/a-file-1.3.gz"
331 | request.Source.Regexp = "files/a-file-(.*).gz"
332 |
333 | s3client.DownloadFileStub = func(bucketName string, remotePath string, versionID string, localPath string) error {
334 | f, err := os.Create(localPath)
335 | Expect(err).NotTo(HaveOccurred())
336 |
337 | zw := gzip.NewWriter(f)
338 |
339 | _, err = zw.Write([]byte("some-contents"))
340 | Expect(err).NotTo(HaveOccurred())
341 |
342 | Expect(zw.Close()).NotTo(HaveOccurred())
343 | Expect(f.Close()).NotTo(HaveOccurred())
344 |
345 | return nil
346 | }
347 | })
348 |
349 | It("gunzips the gzip", func() {
350 | _, err := command.Run(destDir, request)
351 | Expect(err).NotTo(HaveOccurred())
352 |
353 | bs, err := os.ReadFile(filepath.Join(destDir, "a-file-1.3"))
354 | Expect(err).NotTo(HaveOccurred())
355 |
356 | Expect(string(bs)).To(Equal("some-contents"))
357 | })
358 | })
359 |
360 | Context("when the file is a gzipped tarball", func() {
361 | BeforeEach(func() {
362 | request.Version.Path = "files/a-file-1.3.tgz"
363 | request.Source.Regexp = "files/a-file-(.*).tgz"
364 |
365 | s3client.DownloadFileStub = func(bucketName string, remotePath string, versionID string, localPath string) error {
366 | err := os.MkdirAll(filepath.Join(tmpPath, "some-dir"), os.ModePerm)
367 | Expect(err).NotTo(HaveOccurred())
368 |
369 | someFile1 := filepath.Join(tmpPath, "some-dir", "some-file")
370 |
371 | err = os.WriteFile(someFile1, []byte("some-contents"), os.ModePerm)
372 | Expect(err).NotTo(HaveOccurred())
373 |
374 | someFile2 := filepath.Join(tmpPath, "some-file")
375 |
376 | err = os.WriteFile(someFile2, []byte("some-other-contents"), os.ModePerm)
377 | Expect(err).NotTo(HaveOccurred())
378 |
379 | tarPath := filepath.Join(tmpPath, "some-tar")
380 | err = createTarball([]string{someFile1, someFile2}, tmpPath, tarPath)
381 | Expect(err).NotTo(HaveOccurred())
382 |
383 | _, err = os.Stat(tarPath)
384 | Expect(err).NotTo(HaveOccurred())
385 |
386 | tarf, err := os.Open(tarPath)
387 | Expect(err).NotTo(HaveOccurred())
388 |
389 | f, err := os.Create(localPath)
390 | Expect(err).NotTo(HaveOccurred())
391 |
392 | zw := gzip.NewWriter(f)
393 |
394 | _, err = io.Copy(zw, tarf)
395 | Expect(err).NotTo(HaveOccurred())
396 |
397 | Expect(zw.Close()).NotTo(HaveOccurred())
398 | Expect(f.Close()).NotTo(HaveOccurred())
399 |
400 | return nil
401 | }
402 | })
403 |
404 | It("extracts the gzipped tarball", func() {
405 | _, err := command.Run(destDir, request)
406 | Expect(err).NotTo(HaveOccurred())
407 |
408 | Expect(filepath.Join(destDir, "some-dir", "some-file")).To(BeARegularFile())
409 |
410 | bs, err := os.ReadFile(filepath.Join(destDir, "some-dir", "some-file"))
411 | Expect(err).NotTo(HaveOccurred())
412 | Expect(bs).To(Equal([]byte("some-contents")))
413 |
414 | bs, err = os.ReadFile(filepath.Join(destDir, "some-file"))
415 | Expect(err).NotTo(HaveOccurred())
416 | Expect(bs).To(Equal([]byte("some-other-contents")))
417 | })
418 | })
419 |
420 | Context("when the file is not an archive", func() {
421 | BeforeEach(func() {
422 | s3client.DownloadFileStub = func(bucketName string, remotePath string, versionID string, localPath string) error {
423 | err := os.WriteFile(localPath, []byte("some-contents"), os.ModePerm)
424 | Expect(err).NotTo(HaveOccurred())
425 |
426 | return nil
427 | }
428 | })
429 |
430 | It("returns an error", func() {
431 | _, err := command.Run(destDir, request)
432 | Expect(err).To(HaveOccurred())
433 | })
434 | })
435 | })
436 |
437 | Context("when the requested path is the initial path", func() {
438 | var initialFilename string
439 |
440 | BeforeEach(func() {
441 | initialFilename = "a-file-0.0"
442 | request.Source.InitialPath = "files/a-file-0.0"
443 | request.Version.Path = request.Source.InitialPath
444 | request.Source.InitialContentText = "the hard questions are hard 🙈"
445 | })
446 |
447 | It("it creates a file containing the initial text content", func() {
448 | _, err := command.Run(destDir, request)
449 | Ω(err).ShouldNot(HaveOccurred())
450 |
451 | contentFile := filepath.Join(destDir, initialFilename)
452 | Ω(contentFile).Should(BeARegularFile())
453 | contents, err := os.ReadFile(contentFile)
454 | Ω(err).ShouldNot(HaveOccurred())
455 | Ω(string(contents)).Should(Equal(request.Source.InitialContentText))
456 | })
457 |
458 | Context("when the initial content is binary", func() {
459 | BeforeEach(func() {
460 | request.Source.InitialContentText = ""
461 | request.Source.InitialContentBinary = "dGhlIGhhcmQgcXVlc3Rpb25zIGFyZSBoYXJkIPCfmYg="
462 | })
463 | It("it creates a file containing the initial binary content", func() {
464 | _, err := command.Run(destDir, request)
465 | Ω(err).ShouldNot(HaveOccurred())
466 |
467 | contentFile := filepath.Join(destDir, initialFilename)
468 | Ω(contentFile).Should(BeARegularFile())
469 | contents, err := os.ReadFile(contentFile)
470 | Ω(err).ShouldNot(HaveOccurred())
471 | Ω(string(contents)).Should(Equal("the hard questions are hard 🙈"))
472 | })
473 |
474 | Context("when base64 decoding fails", func() {
475 | BeforeEach(func() {
476 | request.Source.InitialContentBinary = "not base64 data 🙈"
477 | })
478 | It("should return with an error", func() {
479 | _, err := command.Run(destDir, request)
480 | Ω(err).Should(HaveOccurred())
481 | })
482 | })
483 | })
484 |
485 | It("should not write the URL file", func() {
486 | urlPath := filepath.Join(destDir, "url")
487 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
488 |
489 | _, err := command.Run(destDir, request)
490 | Ω(err).ShouldNot(HaveOccurred())
491 |
492 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
493 | })
494 |
495 | It("should not write the s3_uri file", func() {
496 | uriPath := filepath.Join(destDir, "s3_uri")
497 | Ω(uriPath).ShouldNot(ExistOnFilesystem())
498 |
499 | _, err := command.Run(destDir, request)
500 | Ω(err).ShouldNot(HaveOccurred())
501 |
502 | Ω(uriPath).ShouldNot(ExistOnFilesystem())
503 | })
504 |
505 | It("should not include a URL in the metadata", func() {
506 | response, err := command.Run(destDir, request)
507 | Ω(err).ShouldNot(HaveOccurred())
508 |
509 | for _, metadatum := range response.Metadata {
510 | Ω(metadatum.Name).ShouldNot(Equal("url"))
511 | }
512 | })
513 |
514 | It("should not attempt to unpack the initial content", func() {
515 | request.Params.Unpack = true
516 | _, err := command.Run(destDir, request)
517 | Ω(err).ShouldNot(HaveOccurred())
518 |
519 | contentFile := filepath.Join(destDir, initialFilename)
520 | Ω(contentFile).Should(BeARegularFile())
521 | contents, err := os.ReadFile(contentFile)
522 | Ω(err).ShouldNot(HaveOccurred())
523 | Ω(string(contents)).Should(Equal(request.Source.InitialContentText))
524 | })
525 | })
526 |
527 | Context("when the requested version is the initial version", func() {
528 | var filename = "testfile"
529 |
530 | BeforeEach(func() {
531 | request.Source.Regexp = ""
532 | request.Source.VersionedFile = "file/testfile"
533 | request.Source.InitialVersion = "0.0.0"
534 | request.Version.VersionID = request.Source.InitialVersion
535 | request.Source.InitialContentText = "the hard questions are hard 🙈"
536 | })
537 |
538 | It("it creates a file containing the initial text content", func() {
539 | _, err := command.Run(destDir, request)
540 | Ω(err).ShouldNot(HaveOccurred())
541 |
542 | contentFile := filepath.Join(destDir, filename)
543 | Ω(contentFile).Should(BeARegularFile())
544 | contents, err := os.ReadFile(contentFile)
545 | Ω(err).ShouldNot(HaveOccurred())
546 | Ω(string(contents)).Should(Equal(request.Source.InitialContentText))
547 | })
548 |
549 | Context("when the initial content is binary", func() {
550 | BeforeEach(func() {
551 | request.Source.InitialContentText = ""
552 | request.Source.InitialContentBinary = "dGhlIGhhcmQgcXVlc3Rpb25zIGFyZSBoYXJkIPCfmYg="
553 | })
554 | It("it creates a file containing the initial binary content", func() {
555 | _, err := command.Run(destDir, request)
556 | Ω(err).ShouldNot(HaveOccurred())
557 |
558 | contentFile := filepath.Join(destDir, filename)
559 | Ω(contentFile).Should(BeARegularFile())
560 | contents, err := os.ReadFile(contentFile)
561 | Ω(err).ShouldNot(HaveOccurred())
562 | Ω(string(contents)).Should(Equal("the hard questions are hard 🙈"))
563 | })
564 |
565 | Context("when base64 decoding fails", func() {
566 | BeforeEach(func() {
567 | request.Source.InitialContentBinary = "not base64 data 🙈"
568 | })
569 | It("should return with an error", func() {
570 | _, err := command.Run(destDir, request)
571 | Ω(err).Should(HaveOccurred())
572 | })
573 | })
574 | })
575 |
576 | It("should not write the URL file", func() {
577 | urlPath := filepath.Join(destDir, "url")
578 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
579 |
580 | _, err := command.Run(destDir, request)
581 | Ω(err).ShouldNot(HaveOccurred())
582 |
583 | Ω(urlPath).ShouldNot(ExistOnFilesystem())
584 | })
585 |
586 | It("should not write the s3_uri file", func() {
587 | uriPath := filepath.Join(destDir, "s3_uri")
588 | Ω(uriPath).ShouldNot(ExistOnFilesystem())
589 |
590 | _, err := command.Run(destDir, request)
591 | Ω(err).ShouldNot(HaveOccurred())
592 |
593 | Ω(uriPath).ShouldNot(ExistOnFilesystem())
594 | })
595 |
596 | It("should not include a URL in the metadata", func() {
597 | response, err := command.Run(destDir, request)
598 | Ω(err).ShouldNot(HaveOccurred())
599 |
600 | for _, metadatum := range response.Metadata {
601 | Ω(metadatum.Name).ShouldNot(Equal("url"))
602 | }
603 | })
604 |
605 | It("should not attempt to unpack the initial content", func() {
606 | request.Params.Unpack = true
607 | _, err := command.Run(destDir, request)
608 | Ω(err).ShouldNot(HaveOccurred())
609 |
610 | contentFile := filepath.Join(destDir, filename)
611 | Ω(contentFile).Should(BeARegularFile())
612 | contents, err := os.ReadFile(contentFile)
613 | Ω(err).ShouldNot(HaveOccurred())
614 | Ω(string(contents)).Should(Equal(request.Source.InitialContentText))
615 | })
616 | })
617 | })
618 | })
619 |
620 | func addFileToTar(tw *tar.Writer, tarPath, path string) error {
621 | file, err := os.Open(path)
622 | if err != nil {
623 | return err
624 | }
625 |
626 | stat, err := file.Stat()
627 | if err != nil {
628 | return err
629 | }
630 |
631 | err = tw.WriteHeader(&tar.Header{
632 | Name: tarPath,
633 | Size: stat.Size(),
634 | Mode: int64(stat.Mode()),
635 | ModTime: stat.ModTime(),
636 | })
637 | if err != nil {
638 | return err
639 | }
640 |
641 | _, err = io.Copy(tw, file)
642 | if err != nil {
643 | return err
644 | }
645 |
646 | return file.Close()
647 | }
648 |
649 | func createTarball(paths []string, basePath string, destination string) error {
650 | file, err := os.Create(destination)
651 | if err != nil {
652 | log.Fatalln(err)
653 | }
654 |
655 | tw := tar.NewWriter(file)
656 |
657 | for _, path := range paths {
658 | tarPath, err := filepath.Rel(basePath, path)
659 | if err != nil {
660 | return err
661 | }
662 | err = addFileToTar(tw, tarPath, path)
663 | if err != nil {
664 | return err
665 | }
666 | }
667 |
668 | err = tw.Close()
669 | if err != nil {
670 | return err
671 | }
672 |
673 | return file.Close()
674 | }
675 |
676 | // Thanks to Svett Ralchev
677 | // http://blog.ralch.com/tutorial/golang-working-with-zip/
678 | func zipit(source, target, prefix string) error {
679 | zipfile, err := os.Create(target)
680 | if err != nil {
681 | return err
682 | }
683 |
684 | archive := zip.NewWriter(zipfile)
685 |
686 | err = filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
687 | if path == source {
688 | return nil
689 | }
690 |
691 | if err != nil {
692 | return err
693 | }
694 |
695 | header, err := zip.FileInfoHeader(info)
696 | if err != nil {
697 | return err
698 | }
699 |
700 | header.Name = strings.TrimPrefix(path, source+string(os.PathSeparator))
701 |
702 | if info.IsDir() {
703 | header.Name += string(os.PathSeparator)
704 | } else {
705 | header.Method = zip.Deflate
706 | }
707 |
708 | writer, err := archive.CreateHeader(header)
709 | if err != nil {
710 | return err
711 | }
712 |
713 | if info.IsDir() {
714 | return nil
715 | }
716 |
717 | file, err := os.Open(path)
718 | if err != nil {
719 | return err
720 | }
721 |
722 | if _, err = io.Copy(writer, file); err != nil {
723 | return err
724 | }
725 |
726 | return file.Close()
727 | })
728 |
729 | if err = archive.Close(); err != nil {
730 | return err
731 | }
732 |
733 | return zipfile.Close()
734 | }
735 |
--------------------------------------------------------------------------------
/in/in_suite_test.go:
--------------------------------------------------------------------------------
1 | package in_test
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 |
8 | . "github.com/onsi/ginkgo/v2"
9 | . "github.com/onsi/gomega"
10 |
11 | "github.com/onsi/gomega/gexec"
12 |
13 | "github.com/onsi/gomega/types"
14 | )
15 |
16 | var inPath string
17 |
18 | var _ = BeforeSuite(func() {
19 | var err error
20 |
21 | if _, err = os.Stat("/opt/resource/in"); err == nil {
22 | inPath = "/opt/resource/in"
23 | } else {
24 | inPath, err = gexec.Build("github.com/concourse/s3-resource/cmd/in")
25 | Ω(err).ShouldNot(HaveOccurred())
26 | }
27 |
28 | })
29 |
30 | var _ = AfterSuite(func() {
31 | gexec.CleanupBuildArtifacts()
32 | })
33 |
34 | func TestIn(t *testing.T) {
35 | RegisterFailHandler(Fail)
36 | RunSpecs(t, "In Suite")
37 | }
38 |
39 | func ExistOnFilesystem() types.GomegaMatcher {
40 | return &existOnFilesystemMatcher{}
41 | }
42 |
43 | type existOnFilesystemMatcher struct {
44 | expected any
45 | }
46 |
47 | func (matcher *existOnFilesystemMatcher) Match(actual any) (success bool, err error) {
48 | path, ok := actual.(string)
49 | if !ok {
50 | return false, fmt.Errorf("ExistOnFilesystem matcher expects a string")
51 | }
52 |
53 | if _, err := os.Stat(path); os.IsNotExist(err) {
54 | return false, nil
55 | } else if err != nil {
56 | return false, err
57 | }
58 |
59 | return true, nil
60 | }
61 |
62 | func (matcher *existOnFilesystemMatcher) FailureMessage(actual any) (message string) {
63 | return fmt.Sprintf("Expected\n\t%#v\nto exist on the filesystem", actual)
64 | }
65 |
66 | func (matcher *existOnFilesystemMatcher) NegatedFailureMessage(actual any) (message string) {
67 | return fmt.Sprintf("Expected\n\t%#v\nnot to exist on the filesystem", actual)
68 | }
69 |
--------------------------------------------------------------------------------
/in/models.go:
--------------------------------------------------------------------------------
1 | package in
2 |
3 | import "github.com/concourse/s3-resource"
4 |
5 | type Request struct {
6 | Source s3resource.Source `json:"source"`
7 | Version s3resource.Version `json:"version"`
8 | Params Params `json:"params"`
9 | }
10 |
11 | type Params struct {
12 | Unpack bool `json:"unpack"`
13 | DownloadTags bool `json:"download_tags"`
14 | SkipDownload string `json:"skip_download"`
15 | }
16 |
17 | type Response struct {
18 | Version s3resource.Version `json:"version"`
19 | Metadata []s3resource.MetadataPair `json:"metadata"`
20 | }
21 |
--------------------------------------------------------------------------------
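
A small sketch (not part of the repository) of the params half of the wire format: the json tags in in/models.go above mean an `in` step's params block decodes as shown. Note that skip_download is a string, which is why command.go parses it with strconv.ParseBool.

package main

// Illustrative sketch only: decode a params object using the json tags
// declared in in/models.go.

import (
	"encoding/json"
	"fmt"

	"github.com/concourse/s3-resource/in"
)

func main() {
	payload := []byte(`{"unpack": true, "download_tags": true, "skip_download": "false"}`)

	var params in.Params
	if err := json.Unmarshal(payload, &params); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", params) // {Unpack:true DownloadTags:true SkipDownload:false}
}
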
/integration/in_test.go:
--------------------------------------------------------------------------------
1 | package integration_test
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "os"
8 | "os/exec"
9 | "path/filepath"
10 |
11 | s3resource "github.com/concourse/s3-resource"
12 | "github.com/concourse/s3-resource/in"
13 | "github.com/onsi/gomega/gbytes"
14 | "github.com/onsi/gomega/gexec"
15 |
16 | . "github.com/onsi/ginkgo/v2"
17 | . "github.com/onsi/gomega"
18 | )
19 |
20 | var _ = Describe("in", func() {
21 | var (
22 | command *exec.Cmd
23 | inRequest in.Request
24 | stdin *bytes.Buffer
25 | session *gexec.Session
26 | destDir string
27 | expectedExitStatus int
28 | )
29 |
30 | BeforeEach(func() {
31 | var err error
32 | destDir, err = os.MkdirTemp("", "s3_in_integration_test")
33 | Ω(err).ShouldNot(HaveOccurred())
34 |
35 | stdin = &bytes.Buffer{}
36 | expectedExitStatus = 0
37 |
38 | command = exec.Command(inPath, destDir)
39 | command.Stdin = stdin
40 | })
41 |
42 | AfterEach(func() {
43 | err := os.RemoveAll(destDir)
44 | Ω(err).ShouldNot(HaveOccurred())
45 | })
46 |
47 | JustBeforeEach(func() {
48 | var err error
49 |
50 | err = json.NewEncoder(stdin).Encode(inRequest)
51 | Ω(err).ShouldNot(HaveOccurred())
52 |
53 | session, err = gexec.Start(command, GinkgoWriter, GinkgoWriter)
54 | Ω(err).ShouldNot(HaveOccurred())
55 |
56 | <-session.Exited
57 | Expect(session.ExitCode()).To(Equal(expectedExitStatus))
58 | })
59 |
60 | Context("with a versioned_file and a regex", func() {
61 | BeforeEach(func() {
62 | inRequest = in.Request{
63 | Source: s3resource.Source{
64 | AccessKeyID: accessKeyID,
65 | SecretAccessKey: secretAccessKey,
66 | SessionToken: sessionToken,
67 | AwsRoleARN: awsRoleARN,
68 | Bucket: versionedBucketName,
69 | RegionName: regionName,
70 | Endpoint: endpoint,
71 | Regexp: "some-regex",
72 | VersionedFile: "some-file",
73 | UsePathStyle: pathStyle,
74 | },
75 | Version: s3resource.Version{},
76 | }
77 |
78 | expectedExitStatus = 1
79 | })
80 |
81 | It("returns an error", func() {
82 | Ω(session.Err).Should(gbytes.Say("please specify either regexp or versioned_file"))
83 | })
84 | })
85 |
86 | Context("when the given version only has a path", func() {
87 | var directoryPrefix string
88 |
89 | BeforeEach(func() {
90 | directoryPrefix = "in-request-files"
91 | inRequest = in.Request{
92 | Source: s3resource.Source{
93 | AccessKeyID: accessKeyID,
94 | SecretAccessKey: secretAccessKey,
95 | SessionToken: sessionToken,
96 | AwsRoleARN: awsRoleARN,
97 | Bucket: bucketName,
98 | RegionName: regionName,
99 | Endpoint: endpoint,
100 | Regexp: filepath.Join(directoryPrefix, "some-file-(.*)"),
101 | UsePathStyle: pathStyle,
102 | },
103 | Version: s3resource.Version{
104 | Path: filepath.Join(directoryPrefix, "some-file-1"),
105 | },
106 | }
107 |
108 | tempFile, err := os.CreateTemp("", "file-to-upload")
109 | Ω(err).ShouldNot(HaveOccurred())
110 | tempFile.Close()
111 |
112 | for i := range 3 {
113 | err = os.WriteFile(tempFile.Name(), fmt.Appendf([]byte{}, "some-file-%d", i), 0755)
114 | Ω(err).ShouldNot(HaveOccurred())
115 |
116 | _, err = s3client.UploadFile(bucketName, filepath.Join(directoryPrefix, fmt.Sprintf("some-file-%d", i)), tempFile.Name(), s3resource.NewUploadFileOptions())
117 | Ω(err).ShouldNot(HaveOccurred())
118 | }
119 |
120 | err = os.Remove(tempFile.Name())
121 | Ω(err).ShouldNot(HaveOccurred())
122 | })
123 |
124 | AfterEach(func() {
125 | for i := range 3 {
126 | err := s3client.DeleteFile(bucketName, filepath.Join(directoryPrefix, fmt.Sprintf("some-file-%d", i)))
127 | Ω(err).ShouldNot(HaveOccurred())
128 | }
129 | })
130 |
131 | It("downloads the file", func() {
132 | reader := bytes.NewBuffer(session.Out.Contents())
133 |
134 | var response in.Response
135 | err := json.NewDecoder(reader).Decode(&response)
136 | Ω(err).ShouldNot(HaveOccurred())
137 |
138 | Ω(response).Should(Equal(in.Response{
139 | Version: s3resource.Version{
140 | Path: "in-request-files/some-file-2",
141 | },
142 | Metadata: []s3resource.MetadataPair{
143 | {
144 | Name: "filename",
145 | Value: "some-file-2",
146 | },
147 | {
148 | Name: "url",
149 | Value: buildEndpoint(bucketName, endpoint) + "/in-request-files/some-file-2",
150 | },
151 | },
152 | }))
153 |
154 | Ω(filepath.Join(destDir, "some-file-2")).Should(BeARegularFile())
155 | contents, err := os.ReadFile(filepath.Join(destDir, "some-file-2"))
156 | Ω(err).ShouldNot(HaveOccurred())
157 | Ω(contents).Should(Equal([]byte("some-file-2")))
158 |
159 | Ω(filepath.Join(destDir, "version")).Should(BeARegularFile())
160 | versionContents, err := os.ReadFile(filepath.Join(destDir, "version"))
161 | Ω(err).ShouldNot(HaveOccurred())
162 | Ω(versionContents).Should(Equal([]byte("2")))
163 |
164 | Ω(filepath.Join(destDir, "url")).Should(BeARegularFile())
165 | urlContents, err := os.ReadFile(filepath.Join(destDir, "url"))
166 | Ω(err).ShouldNot(HaveOccurred())
167 | Ω(urlContents).Should(Equal([]byte(buildEndpoint(bucketName, endpoint) + "/in-request-files/some-file-2")))
168 | })
169 |
170 | Context("when the path matches the initial path", func() {
171 | BeforeEach(func() {
172 | inRequest.Source.InitialPath = filepath.Join(directoryPrefix, "some-file-0.0.0")
173 | inRequest.Source.InitialContentText = "initial content"
174 | inRequest.Version.Path = inRequest.Source.InitialPath
175 | })
176 |
177 | It("uses the initial content", func() {
178 | reader := bytes.NewBuffer(session.Out.Contents())
179 |
180 | var response in.Response
181 | err := json.NewDecoder(reader).Decode(&response)
182 |
183 | Ω(response).Should(Equal(in.Response{
184 | Version: s3resource.Version{
185 | Path: inRequest.Source.InitialPath,
186 | },
187 | Metadata: []s3resource.MetadataPair{
188 | {
189 | Name: "filename",
190 | Value: "some-file-0.0.0",
191 | },
192 | },
193 | }))
194 |
195 | Ω(filepath.Join(destDir, "some-file-0.0.0")).Should(BeARegularFile())
196 | contents, err := os.ReadFile(filepath.Join(destDir, "some-file-0.0.0"))
197 | Ω(err).ShouldNot(HaveOccurred())
198 | Ω(contents).Should(Equal([]byte(inRequest.Source.InitialContentText)))
199 |
200 | Ω(filepath.Join(destDir, "version")).Should(BeARegularFile())
201 | versionContents, err := os.ReadFile(filepath.Join(destDir, "version"))
202 | Ω(err).ShouldNot(HaveOccurred())
203 | Ω(versionContents).Should(Equal([]byte("0.0.0")))
204 |
205 | Ω(filepath.Join(destDir, "url")).ShouldNot(BeARegularFile())
206 | })
207 | })
208 | })
209 |
210 | Context("when the given version has a versionID and path", func() {
211 | var directoryPrefix string
212 | var expectedVersion string
213 |
214 | BeforeEach(func() {
215 | directoryPrefix = "in-request-files-versioned"
216 | inRequest = in.Request{
217 | Source: s3resource.Source{
218 | AccessKeyID: accessKeyID,
219 | SecretAccessKey: secretAccessKey,
220 | SessionToken: sessionToken,
221 | AwsRoleARN: awsRoleARN,
222 | Bucket: versionedBucketName,
223 | RegionName: regionName,
224 | Endpoint: endpoint,
225 | VersionedFile: filepath.Join(directoryPrefix, "some-file"),
226 | UsePathStyle: pathStyle,
227 | },
228 | Version: s3resource.Version{},
229 | }
230 |
231 | tempFile, err := os.CreateTemp("", "file-to-upload")
232 | Ω(err).ShouldNot(HaveOccurred())
233 | tempFile.Close()
234 |
235 | for i := range 3 {
236 | err = os.WriteFile(tempFile.Name(), fmt.Appendf([]byte{}, "some-file-%d", i), 0755)
237 | Ω(err).ShouldNot(HaveOccurred())
238 |
239 | _, err = s3client.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, "some-file"), tempFile.Name(), s3resource.NewUploadFileOptions())
240 | Ω(err).ShouldNot(HaveOccurred())
241 | }
242 | err = os.Remove(tempFile.Name())
243 | Ω(err).ShouldNot(HaveOccurred())
244 |
245 | versions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "some-file"))
246 | Ω(err).ShouldNot(HaveOccurred())
247 | expectedVersion = versions[1]
248 | inRequest.Version.VersionID = expectedVersion
249 | })
250 |
251 | AfterEach(func() {
252 | fileVersions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "some-file"))
253 | Ω(err).ShouldNot(HaveOccurred())
254 |
255 | for _, fileVersion := range fileVersions {
256 | err := s3client.DeleteVersionedFile(versionedBucketName, filepath.Join(directoryPrefix, "some-file"), fileVersion)
257 | Ω(err).ShouldNot(HaveOccurred())
258 | }
259 | })
260 |
261 | It("downloads the file", func() {
262 | reader := bytes.NewBuffer(session.Out.Contents())
263 |
264 | var response in.Response
265 | err := json.NewDecoder(reader).Decode(&response)
266 | Ω(err).ShouldNot(HaveOccurred())
267 | Ω(response).Should(Equal(in.Response{
268 | Version: s3resource.Version{
269 | VersionID: expectedVersion,
270 | },
271 | Metadata: []s3resource.MetadataPair{
272 | {
273 | Name: "filename",
274 | Value: "some-file",
275 | },
276 | {
277 | Name: "url",
278 | Value: buildEndpoint(versionedBucketName, endpoint) + "/in-request-files-versioned/some-file?versionId=" + expectedVersion,
279 | },
280 | },
281 | }))
282 |
283 | Ω(filepath.Join(destDir, "some-file")).Should(BeARegularFile())
284 | contents, err := os.ReadFile(filepath.Join(destDir, "some-file"))
285 | Ω(err).ShouldNot(HaveOccurred())
286 | Ω(contents).Should(Equal([]byte("some-file-2")))
287 |
288 | Ω(filepath.Join(destDir, "version")).Should(BeARegularFile())
289 | versionContents, err := os.ReadFile(filepath.Join(destDir, "version"))
290 | Ω(err).ShouldNot(HaveOccurred())
291 | Ω(versionContents).Should(Equal([]byte(expectedVersion)))
292 |
293 | Ω(filepath.Join(destDir, "url")).Should(BeARegularFile())
294 | urlContents, err := os.ReadFile(filepath.Join(destDir, "url"))
295 | Ω(err).ShouldNot(HaveOccurred())
296 | Ω(urlContents).Should(Equal([]byte(buildEndpoint(versionedBucketName, endpoint) + "/in-request-files-versioned/some-file?versionId=" + expectedVersion)))
297 | })
298 |
299 | Context("when the version ID matches the InitialVersion", func() {
300 | BeforeEach(func() {
301 | inRequest.Source.InitialVersion = "0.0.0"
302 | inRequest.Source.InitialContentText = "initial content"
303 | inRequest.Version.VersionID = inRequest.Source.InitialVersion
304 | expectedVersion = inRequest.Source.InitialVersion
305 | })
306 |
307 | It("uses the initial content", func() {
308 | reader := bytes.NewBuffer(session.Out.Contents())
309 |
310 | var response in.Response
311 | err := json.NewDecoder(reader).Decode(&response)
312 | Ω(err).ShouldNot(HaveOccurred())
313 | Ω(response).Should(Equal(in.Response{
314 | Version: s3resource.Version{
315 | VersionID: inRequest.Source.InitialVersion,
316 | },
317 | Metadata: []s3resource.MetadataPair{
318 | {
319 | Name: "filename",
320 | Value: "some-file",
321 | },
322 | },
323 | }))
324 |
325 | Ω(filepath.Join(destDir, "some-file")).Should(BeARegularFile())
326 | contents, err := os.ReadFile(filepath.Join(destDir, "some-file"))
327 | Ω(err).ShouldNot(HaveOccurred())
328 | Ω(contents).Should(Equal([]byte(inRequest.Source.InitialContentText)))
329 |
330 | Ω(filepath.Join(destDir, "version")).Should(BeARegularFile())
331 | versionContents, err := os.ReadFile(filepath.Join(destDir, "version"))
332 | Ω(err).ShouldNot(HaveOccurred())
333 | Ω(versionContents).Should(Equal([]byte(expectedVersion)))
334 |
335 | Ω(filepath.Join(destDir, "url")).ShouldNot(BeARegularFile())
336 | })
337 | })
338 | })
339 |
340 | Context("when download_tags is true", func() {
341 | var (
342 | directoryPrefix string
343 | tags map[string]string
344 | )
345 |
346 | BeforeEach(func() {
347 | directoryPrefix = "in-request-download-tags"
348 | inRequest = in.Request{
349 | Source: s3resource.Source{
350 | AccessKeyID: accessKeyID,
351 | SecretAccessKey: secretAccessKey,
352 | SessionToken: sessionToken,
353 | AwsRoleARN: awsRoleARN,
354 | Bucket: bucketName,
355 | RegionName: regionName,
356 | Endpoint: endpoint,
357 | Regexp: filepath.Join(directoryPrefix, "some-file-(.*)"),
358 | UsePathStyle: pathStyle,
359 | },
360 | Version: s3resource.Version{
361 | Path: filepath.Join(directoryPrefix, "some-file-1"),
362 | },
363 | Params: in.Params{
364 | DownloadTags: true,
365 | },
366 | }
367 |
368 | err := json.NewEncoder(stdin).Encode(inRequest)
369 | Ω(err).ShouldNot(HaveOccurred())
370 |
371 | tempFile, err := os.CreateTemp("", "file-to-upload")
372 | Ω(err).ShouldNot(HaveOccurred())
373 | tempFile.Close()
374 |
375 | err = os.WriteFile(tempFile.Name(), []byte("some-file-1"), 0755)
376 | Ω(err).ShouldNot(HaveOccurred())
377 |
378 | _, err = s3client.UploadFile(bucketName, filepath.Join(directoryPrefix, "some-file-1"), tempFile.Name(), s3resource.NewUploadFileOptions())
379 | Ω(err).ShouldNot(HaveOccurred())
380 |
381 | err = os.Remove(tempFile.Name())
382 | Ω(err).ShouldNot(HaveOccurred())
383 |
384 | tags = map[string]string{"tag1": "value1", "tag2": "value2"}
385 | err = s3client.SetTags(bucketName, filepath.Join(directoryPrefix, "some-file-1"), "", tags)
386 | Ω(err).ShouldNot(HaveOccurred())
387 | })
388 |
389 | AfterEach(func() {
390 | err := s3client.DeleteFile(bucketName, filepath.Join(directoryPrefix, "some-file-1"))
391 | Ω(err).ShouldNot(HaveOccurred())
392 | })
393 |
394 | It("writes the tags to tags.json", func() {
395 | Ω(filepath.Join(destDir, "tags.json")).Should(BeARegularFile())
396 | actualTagsJSON, err := os.ReadFile(filepath.Join(destDir, "tags.json"))
397 | Ω(err).ShouldNot(HaveOccurred())
398 |
399 | expectedTagsJSON, err := json.Marshal(tags)
400 | Ω(err).ShouldNot(HaveOccurred())
401 |
402 | Ω(actualTagsJSON).Should(MatchJSON(expectedTagsJSON))
403 | })
404 | })
405 | })
406 |
--------------------------------------------------------------------------------
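
A sketch (not part of the repository) of the protocol the integration test above exercises: the `in` binary takes the destination directory as its only argument, reads a JSON Request on stdin, and writes a JSON Response on stdout. The bucket, regexp, destination directory and binary path below are placeholders; a real request also needs credentials in Source.

package main

// Illustrative sketch only: drive a compiled `in` binary the way Concourse
// (and the integration test above) does. Bucket, regexp, destination and the
// binary path are placeholders.

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"

	s3resource "github.com/concourse/s3-resource"
	"github.com/concourse/s3-resource/in"
)

func main() {
	request := in.Request{
		Source: s3resource.Source{
			Bucket: "some-bucket",       // placeholder
			Regexp: "files/a-file-(.*)", // placeholder
		},
		Version: s3resource.Version{Path: "files/a-file-1.3"},
	}

	stdin := &bytes.Buffer{}
	if err := json.NewEncoder(stdin).Encode(request); err != nil {
		panic(err)
	}

	cmd := exec.Command("/opt/resource/in", "./destination") // path used by the suites
	cmd.Stdin = stdin

	stdout, err := cmd.Output()
	if err != nil {
		panic(err)
	}

	var response in.Response
	if err := json.Unmarshal(stdout, &response); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", response)
}
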
/integration/integration_suite_test.go:
--------------------------------------------------------------------------------
1 | package integration_test
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "io"
7 | "os"
8 | "testing"
9 |
10 | "github.com/aws/aws-sdk-go-v2/aws"
11 | "github.com/aws/aws-sdk-go-v2/service/s3"
12 | "github.com/aws/aws-sdk-go-v2/service/sts"
13 | s3resource "github.com/concourse/s3-resource"
14 | "github.com/onsi/gomega/gexec"
15 |
16 | . "github.com/onsi/ginkgo/v2"
17 | . "github.com/onsi/gomega"
18 | )
19 |
20 | // The Ginkgo entry point for this suite, TestIntegration, is defined at the
21 | // bottom of this file.
22 | 
23 |
24 | var (
25 | accessKeyID = os.Getenv("S3_TESTING_ACCESS_KEY_ID")
26 | secretAccessKey = os.Getenv("S3_TESTING_SECRET_ACCESS_KEY")
27 | sessionToken = os.Getenv("S3_TESTING_SESSION_TOKEN")
28 | awsRoleARN = os.Getenv("S3_TESTING_AWS_ROLE_ARN")
29 | versionedBucketName = os.Getenv("S3_VERSIONED_TESTING_BUCKET")
30 | bucketName = os.Getenv("S3_TESTING_BUCKET")
31 | regionName = os.Getenv("S3_TESTING_REGION")
32 | endpoint = os.Getenv("S3_ENDPOINT")
33 | v2signing = os.Getenv("S3_V2_SIGNING")
34 | pathStyle = len(os.Getenv("S3_USE_PATH_STYLE")) > 0
35 | awsConfig *aws.Config
36 | s3client s3resource.S3Client
37 | s3Service *s3.Client
38 |
39 | checkPath string
40 | inPath string
41 | outPath string
42 | )
43 |
44 | type suiteData struct {
45 | CheckPath string
46 | InPath string
47 | OutPath string
48 | }
49 |
50 | func findOrCreate(binName string) string {
51 | resourcePath := "/opt/resource/" + binName
52 | if _, err := os.Stat(resourcePath); err == nil {
53 | return resourcePath
54 | } else {
55 | path, err := gexec.Build("github.com/concourse/s3-resource/cmd/" + binName)
56 | Ω(err).ShouldNot(HaveOccurred())
57 | return path
58 | }
59 | }
60 |
61 | func getSessionTokenS3Client(awsConfig *aws.Config) (*s3.Client, s3resource.S3Client) {
62 | stsClient := sts.NewFromConfig(*awsConfig)
63 |
64 | duration := int32(900)
65 | params := &sts.GetSessionTokenInput{
66 | DurationSeconds: &duration,
67 | }
68 |
69 | resp, err := stsClient.GetSessionToken(context.TODO(), params)
70 | Ω(err).ShouldNot(HaveOccurred())
71 |
72 | newAwsConfig, err := s3resource.NewAwsConfig(
73 | *resp.Credentials.AccessKeyId,
74 | *resp.Credentials.SecretAccessKey,
75 | *resp.Credentials.SessionToken,
76 | awsRoleARN,
77 | regionName,
78 | false,
79 | false,
80 | )
81 | Ω(err).ShouldNot(HaveOccurred())
82 | s3client, err := s3resource.NewS3Client(
83 | io.Discard,
84 | newAwsConfig,
85 | endpoint,
86 | false,
87 | pathStyle,
88 | )
89 | Ω(err).ShouldNot(HaveOccurred())
90 |
91 | return s3Service, s3client
92 | }
93 |
94 | var _ = SynchronizedBeforeSuite(func() []byte {
95 | cp := findOrCreate("check")
96 | ip := findOrCreate("in")
97 | op := findOrCreate("out")
98 |
99 | data, err := json.Marshal(suiteData{
100 | CheckPath: cp,
101 | InPath: ip,
102 | OutPath: op,
103 | })
104 |
105 | Ω(err).ShouldNot(HaveOccurred())
106 |
107 | return data
108 | }, func(data []byte) {
109 | var sd suiteData
110 | err := json.Unmarshal(data, &sd)
111 | Ω(err).ShouldNot(HaveOccurred())
112 |
113 | checkPath = sd.CheckPath
114 | inPath = sd.InPath
115 | outPath = sd.OutPath
116 |
117 | if accessKeyID != "" {
118 | Ω(accessKeyID).ShouldNot(BeEmpty(), "must specify $S3_TESTING_ACCESS_KEY_ID")
119 | Ω(secretAccessKey).ShouldNot(BeEmpty(), "must specify $S3_TESTING_SECRET_ACCESS_KEY")
120 | Ω(versionedBucketName).ShouldNot(BeEmpty(), "must specify $S3_VERSIONED_TESTING_BUCKET")
121 | Ω(bucketName).ShouldNot(BeEmpty(), "must specify $S3_TESTING_BUCKET")
122 | Ω(regionName).ShouldNot(BeEmpty(), "must specify $S3_TESTING_REGION")
123 | Ω(endpoint).ShouldNot(BeEmpty(), "must specify $S3_ENDPOINT")
124 |
125 | awsConfig, err = s3resource.NewAwsConfig(
126 | accessKeyID,
127 | secretAccessKey,
128 | sessionToken,
129 | awsRoleARN,
130 | regionName,
131 | false,
132 | false,
133 | )
134 | Ω(err).ShouldNot(HaveOccurred())
135 |
136 | s3Service = s3.NewFromConfig(*awsConfig)
137 | s3client, err = s3resource.NewS3Client(
138 | io.Discard,
139 | awsConfig,
140 | endpoint,
141 | false,
142 | pathStyle,
143 | )
144 | Ω(err).ShouldNot(HaveOccurred())
145 | }
146 | })
147 |
148 | var _ = BeforeEach(func() {
149 | if s3client == nil {
150 | Skip("Environment variables need to be set for AWS integration")
151 | }
152 | })
153 |
154 | var _ = SynchronizedAfterSuite(func() {}, func() {
155 | gexec.CleanupBuildArtifacts()
156 | })
157 |
158 | // buildEndpoint returns the base object URL the integration tests expect
159 | // in the resource's "url" metadata: the global S3 endpoint followed by
160 | // the bucket name when no custom endpoint is configured, otherwise the
161 | // configured endpoint followed by the bucket name, matching the values
162 | // asserted in the out tests.
163 | func buildEndpoint(bucket string, endpoint string) string {
164 | if endpoint == "" {
165 | return "https://s3.amazonaws.com/" + bucket
166 | } else {
167 | return endpoint + "/" + bucket
168 | }
169 | }
170 |
--------------------------------------------------------------------------------
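
Note: the suite above builds its clients from environment variables via `s3resource.NewAwsConfig` and `s3resource.NewS3Client`. The following is a minimal sketch (not a file in this repository) of the same wiring in a standalone program; the prefix and the assumption that only static credentials are used are illustrative.

```go
package main

import (
	"io"
	"log"
	"os"

	s3resource "github.com/concourse/s3-resource"
)

func main() {
	// The same environment variables the integration suite reads.
	accessKey := os.Getenv("S3_TESTING_ACCESS_KEY_ID")
	secretKey := os.Getenv("S3_TESTING_SECRET_ACCESS_KEY")
	region := os.Getenv("S3_TESTING_REGION")
	endpoint := os.Getenv("S3_ENDPOINT")
	bucket := os.Getenv("S3_TESTING_BUCKET")

	// NewAwsConfig(accessKey, secretKey, sessionToken, roleToAssume, regionName,
	// skipSSLVerification, useAwsCredsProvider), mirroring the suite's call.
	cfg, err := s3resource.NewAwsConfig(accessKey, secretKey, "", "", region, false, false)
	if err != nil {
		log.Fatal(err)
	}

	// NewS3Client(progressOutput, awsConfig, endpoint, disableSSL, usePathStyle).
	client, err := s3resource.NewS3Client(io.Discard, cfg, endpoint, false, false)
	if err != nil {
		log.Fatal(err)
	}

	// List files under an illustrative prefix.
	files, err := client.BucketFiles(bucket, "some-prefix")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(files)
}
```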
/integration/out_test.go:
--------------------------------------------------------------------------------
1 | package integration_test
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "os"
8 | "os/exec"
9 | "path/filepath"
10 |
11 | "github.com/aws/aws-sdk-go-v2/aws"
12 | "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
13 | "github.com/aws/aws-sdk-go-v2/service/s3"
14 | "github.com/aws/aws-sdk-go-v2/service/s3/types"
15 | s3resource "github.com/concourse/s3-resource"
16 | "github.com/concourse/s3-resource/out"
17 | "github.com/google/uuid"
18 | "github.com/onsi/gomega/gbytes"
19 | "github.com/onsi/gomega/gexec"
20 |
21 | . "github.com/onsi/ginkgo/v2"
22 | . "github.com/onsi/gomega"
23 | )
24 |
25 | var _ = Describe("out", func() {
26 | var (
27 | command *exec.Cmd
28 | stdin *bytes.Buffer
29 | session *gexec.Session
30 | sourceDir string
31 |
32 | expectedExitStatus int
33 | )
34 |
35 | BeforeEach(func() {
36 | var err error
37 | sourceDir, err = os.MkdirTemp("", "s3_out_integration_test")
38 | Ω(err).ShouldNot(HaveOccurred())
39 |
40 | stdin = &bytes.Buffer{}
41 | expectedExitStatus = 0
42 |
43 | command = exec.Command(outPath, sourceDir)
44 | command.Stdin = stdin
45 | })
46 |
47 | AfterEach(func() {
48 | err := os.RemoveAll(sourceDir)
49 | Ω(err).ShouldNot(HaveOccurred())
50 | })
51 |
52 | JustBeforeEach(func() {
53 | var err error
54 | session, err = gexec.Start(command, GinkgoWriter, GinkgoWriter)
55 | Ω(err).ShouldNot(HaveOccurred())
56 |
57 | <-session.Exited
58 | Expect(session.ExitCode()).To(Equal(expectedExitStatus))
59 | })
60 |
61 | Context("with a versioned_file and a regex", func() {
62 | var outRequest out.Request
63 |
64 | BeforeEach(func() {
65 | outRequest = out.Request{
66 | Source: s3resource.Source{
67 | AccessKeyID: accessKeyID,
68 | SecretAccessKey: secretAccessKey,
69 | SessionToken: sessionToken,
70 | AwsRoleARN: awsRoleARN,
71 | Bucket: versionedBucketName,
72 | RegionName: regionName,
73 | Endpoint: endpoint,
74 | Regexp: "some-regex",
75 | VersionedFile: "some-file",
76 | UsePathStyle: pathStyle,
77 | },
78 | }
79 |
80 | expectedExitStatus = 1
81 |
82 | err := json.NewEncoder(stdin).Encode(outRequest)
83 | Ω(err).ShouldNot(HaveOccurred())
84 | })
85 |
86 | It("returns an error", func() {
87 | Ω(session.Err).Should(gbytes.Say("please specify either regexp or versioned_file"))
88 | })
89 | })
90 |
91 | Context("with a content-type", func() {
92 | BeforeEach(func() {
93 | 			Ω(os.WriteFile(filepath.Join(sourceDir, "content-typed-file"), []byte("text only"), 0755)).Should(Succeed())
94 |
95 | outRequest := out.Request{
96 | Source: s3resource.Source{
97 | AccessKeyID: accessKeyID,
98 | SecretAccessKey: secretAccessKey,
99 | SessionToken: sessionToken,
100 | AwsRoleARN: awsRoleARN,
101 | Bucket: bucketName,
102 | RegionName: regionName,
103 | Endpoint: endpoint,
104 | UsePathStyle: pathStyle,
105 | },
106 | Params: out.Params{
107 | From: filepath.Join(sourceDir, "content-typed-file"),
108 | To: "",
109 | ContentType: "application/customtype",
110 | Acl: "public-read",
111 | },
112 | }
113 |
114 | err := json.NewEncoder(stdin).Encode(&outRequest)
115 | Ω(err).ShouldNot(HaveOccurred())
116 |
117 | expectedExitStatus = 0
118 | })
119 |
120 | AfterEach(func() {
121 | err := s3client.DeleteFile(bucketName, "content-typed-file")
122 | Ω(err).ShouldNot(HaveOccurred())
123 | })
124 |
125 | It("creates a file with the specified content-type", func() {
126 | response, err := s3Service.HeadObject(context.TODO(), &s3.HeadObjectInput{
127 | Bucket: aws.String(bucketName),
128 | Key: aws.String("content-typed-file"),
129 | })
130 | Ω(err).ShouldNot(HaveOccurred())
131 |
132 | Expect(response.ContentType).To(Equal(aws.String("application/customtype")))
133 | })
134 | })
135 |
136 | Context("without a content-type", func() {
137 | BeforeEach(func() {
138 | 			Ω(os.WriteFile(filepath.Join(sourceDir, "uncontent-typed-file"), []byte("text only"), 0755)).Should(Succeed())
139 |
140 | outRequest := out.Request{
141 | Source: s3resource.Source{
142 | AccessKeyID: accessKeyID,
143 | SecretAccessKey: secretAccessKey,
144 | SessionToken: sessionToken,
145 | AwsRoleARN: awsRoleARN,
146 | Bucket: bucketName,
147 | RegionName: regionName,
148 | Endpoint: endpoint,
149 | UsePathStyle: pathStyle,
150 | },
151 | Params: out.Params{
152 | From: filepath.Join(sourceDir, "uncontent-typed-file"),
153 | To: "",
154 | Acl: "public-read",
155 | },
156 | }
157 |
158 | err := json.NewEncoder(stdin).Encode(&outRequest)
159 | Ω(err).ShouldNot(HaveOccurred())
160 |
161 | expectedExitStatus = 0
162 | })
163 |
164 | AfterEach(func() {
165 | err := s3client.DeleteFile(bucketName, "uncontent-typed-file")
166 | Ω(err).ShouldNot(HaveOccurred())
167 | })
168 |
169 | // http://docs.aws.amazon.com/AWSImportExport/latest/DG/FileExtensiontoMimeTypes.html
170 | 		It("creates a file with the default S3 content-type for an unknown filename extension", func() {
171 | response, err := s3Service.HeadObject(context.TODO(), &s3.HeadObjectInput{
172 | Bucket: aws.String(bucketName),
173 | Key: aws.String("uncontent-typed-file"),
174 | })
175 | Ω(err).ShouldNot(HaveOccurred())
176 |
177 | Expect(response.ContentType).To(Equal(aws.String("binary/octet-stream")))
178 | })
179 | })
180 |
181 | Context("with a file glob and from", func() {
182 | BeforeEach(func() {
183 | outRequest := out.Request{
184 | Source: s3resource.Source{
185 | AccessKeyID: accessKeyID,
186 | SecretAccessKey: secretAccessKey,
187 | SessionToken: sessionToken,
188 | AwsRoleARN: awsRoleARN,
189 | Bucket: bucketName,
190 | RegionName: regionName,
191 | Endpoint: endpoint,
192 | UsePathStyle: pathStyle,
193 | },
194 | Params: out.Params{
195 | File: "glob-*",
196 | From: "file-to-upload-local",
197 | To: "/",
198 | },
199 | }
200 |
201 | err := json.NewEncoder(stdin).Encode(&outRequest)
202 | Ω(err).ShouldNot(HaveOccurred())
203 |
204 | expectedExitStatus = 1
205 | })
206 |
207 | It("returns an error", func() {
208 | Ω(session.Err).Should(gbytes.Say("contains both file and from"))
209 | })
210 | })
211 |
212 | Context("with a non-versioned bucket", func() {
213 | var directoryPrefix string
214 |
215 | BeforeEach(func() {
216 | guid, err := uuid.NewRandom()
217 | Expect(err).ToNot(HaveOccurred())
218 | directoryPrefix = "out-request-files-" + guid.String()
219 | })
220 |
221 | AfterEach(func() {
222 | err := s3client.DeleteFile(bucketName, filepath.Join(directoryPrefix, "file-to-upload"))
223 | Ω(err).ShouldNot(HaveOccurred())
224 | })
225 |
226 | Context("with a file glob and public read ACL specified", func() {
227 | BeforeEach(func() {
228 | err := os.WriteFile(filepath.Join(sourceDir, "glob-file-to-upload"), []byte("contents"), 0755)
229 | Ω(err).ShouldNot(HaveOccurred())
230 |
231 | outRequest := out.Request{
232 | Source: s3resource.Source{
233 | AccessKeyID: accessKeyID,
234 | SecretAccessKey: secretAccessKey,
235 | SessionToken: sessionToken,
236 | AwsRoleARN: awsRoleARN,
237 | Bucket: bucketName,
238 | RegionName: regionName,
239 | Endpoint: endpoint,
240 | UsePathStyle: pathStyle,
241 | },
242 | Params: out.Params{
243 | File: "glob-*",
244 | To: directoryPrefix + "/",
245 | Acl: "public-read",
246 | },
247 | }
248 |
249 | err = json.NewEncoder(stdin).Encode(&outRequest)
250 | Ω(err).ShouldNot(HaveOccurred())
251 | })
252 |
253 | AfterEach(func() {
254 | err := s3client.DeleteFile(bucketName, filepath.Join(directoryPrefix, "glob-file-to-upload"))
255 | Ω(err).ShouldNot(HaveOccurred())
256 | })
257 |
258 | It("uploads the file to the correct bucket and outputs the version", func() {
259 | s3files, err := s3client.BucketFiles(bucketName, directoryPrefix)
260 | Ω(err).ShouldNot(HaveOccurred())
261 |
262 | Ω(s3files).Should(ConsistOf(filepath.Join(directoryPrefix, "glob-file-to-upload")))
263 |
264 | 				reader := bytes.NewBuffer(session.Out.Contents())
265 |
266 | var response out.Response
267 | err = json.NewDecoder(reader).Decode(&response)
268 | Ω(err).ShouldNot(HaveOccurred())
269 |
270 | Ω(response).Should(Equal(out.Response{
271 | Version: s3resource.Version{
272 | Path: filepath.Join(directoryPrefix, "glob-file-to-upload"),
273 | },
274 | Metadata: []s3resource.MetadataPair{
275 | {
276 | Name: "filename",
277 | Value: "glob-file-to-upload",
278 | },
279 | {
280 | Name: "url",
281 | Value: buildEndpoint(bucketName, endpoint) + "/" + directoryPrefix + "/glob-file-to-upload",
282 | },
283 | },
284 | }))
285 | })
286 |
287 | It("allows everyone to have read access to the object", func() {
288 | anonURI := "http://acs.amazonaws.com/groups/global/AllUsers"
289 | 			permission := types.PermissionRead
290 | 			grantee := types.Grantee{URI: &anonURI, Type: "Group"}
291 | 			expectedGrant := types.Grant{
292 | 				Grantee:    &grantee,
293 | 				Permission: permission,
294 | }
295 |
296 | params := &s3.GetObjectAclInput{
297 | Bucket: aws.String(bucketName),
298 | Key: aws.String(filepath.Join(directoryPrefix, "glob-file-to-upload")),
299 | }
300 |
301 | resp, err := s3Service.GetObjectAcl(context.TODO(), params)
302 | Ω(err).ShouldNot(HaveOccurred())
303 | Ω(resp.Grants).Should(ContainElement(&expectedGrant))
304 | })
305 | })
306 |
307 | Context("with a large file that is multiple of MaxUploadParts", func() {
308 | BeforeEach(func() {
309 | if os.Getenv("S3_TESTING_NO_LARGE_UPLOAD") != "" {
310 | Skip("'S3_TESTING_NO_LARGE_UPLOAD' is set, skipping.")
311 | }
312 |
313 | path := filepath.Join(sourceDir, "large-file-to-upload")
314 |
315 | // touch the file
316 | file, err := os.Create(path)
317 | Ω(err).NotTo(HaveOccurred())
318 | Ω(file.Close()).To(Succeed())
319 |
320 | Ω(os.Truncate(path, manager.MinUploadPartSize*int64(manager.MaxUploadParts))).To(Succeed())
321 |
322 | outRequest := out.Request{
323 | Source: s3resource.Source{
324 | AccessKeyID: accessKeyID,
325 | SecretAccessKey: secretAccessKey,
326 | SessionToken: sessionToken,
327 | AwsRoleARN: awsRoleARN,
328 | Bucket: bucketName,
329 | RegionName: regionName,
330 | Endpoint: endpoint,
331 | UsePathStyle: pathStyle,
332 | },
333 | Params: out.Params{
334 | File: "large-file-to-upload",
335 | To: directoryPrefix + "/",
336 | },
337 | }
338 |
339 | err = json.NewEncoder(stdin).Encode(&outRequest)
340 | Ω(err).ShouldNot(HaveOccurred())
341 | })
342 |
343 | AfterEach(func() {
344 | err := s3client.DeleteFile(bucketName, filepath.Join(directoryPrefix, "large-file-to-upload"))
345 | Ω(err).ShouldNot(HaveOccurred())
346 | })
347 |
348 | It("uploads the file to the correct bucket and outputs the version", func() {
349 | s3files, err := s3client.BucketFiles(bucketName, directoryPrefix)
350 | Ω(err).ShouldNot(HaveOccurred())
351 |
352 | Ω(s3files).Should(ConsistOf(filepath.Join(directoryPrefix, "large-file-to-upload")))
353 | })
354 | })
355 |
356 | Context("with regexp", func() {
357 | BeforeEach(func() {
358 | err := os.WriteFile(filepath.Join(sourceDir, "file-to-upload"), []byte("contents"), 0755)
359 | Ω(err).ShouldNot(HaveOccurred())
360 |
361 | outRequest := out.Request{
362 | Source: s3resource.Source{
363 | AccessKeyID: accessKeyID,
364 | SecretAccessKey: secretAccessKey,
365 | SessionToken: sessionToken,
366 | AwsRoleARN: awsRoleARN,
367 | Bucket: bucketName,
368 | RegionName: regionName,
369 | Endpoint: endpoint,
370 | Regexp: filepath.Join(directoryPrefix, "some-file-pattern"),
371 | UsePathStyle: pathStyle,
372 | },
373 | Params: out.Params{
374 | File: "file-to-upload",
375 | },
376 | }
377 |
378 | err = json.NewEncoder(stdin).Encode(&outRequest)
379 | Ω(err).ShouldNot(HaveOccurred())
380 | })
381 |
382 | It("uploads the file to the correct bucket and outputs the version", func() {
383 | s3files, err := s3client.BucketFiles(bucketName, directoryPrefix)
384 | Ω(err).ShouldNot(HaveOccurred())
385 |
386 | Ω(s3files).Should(ConsistOf(filepath.Join(directoryPrefix, "file-to-upload")))
387 |
388 | reader := bytes.NewBuffer(session.Out.Contents())
389 |
390 | var response out.Response
391 | err = json.NewDecoder(reader).Decode(&response)
392 | Ω(err).ShouldNot(HaveOccurred())
393 |
394 | Ω(response).Should(Equal(out.Response{
395 | Version: s3resource.Version{
396 | Path: filepath.Join(directoryPrefix, "file-to-upload"),
397 | },
398 | Metadata: []s3resource.MetadataPair{
399 | {
400 | Name: "filename",
401 | Value: "file-to-upload",
402 | },
403 | {
404 | Name: "url",
405 | Value: buildEndpoint(bucketName, endpoint) + "/" + directoryPrefix + "/file-to-upload",
406 | },
407 | },
408 | }))
409 | })
410 | })
411 |
412 | Context("with versioned_file", func() {
413 | BeforeEach(func() {
414 | err := os.WriteFile(filepath.Join(sourceDir, "file-to-upload-local"), []byte("contents"), 0755)
415 | Ω(err).ShouldNot(HaveOccurred())
416 |
417 | outRequest := out.Request{
418 | Source: s3resource.Source{
419 | AccessKeyID: accessKeyID,
420 | SecretAccessKey: secretAccessKey,
421 | SessionToken: sessionToken,
422 | AwsRoleARN: awsRoleARN,
423 | Bucket: bucketName,
424 | RegionName: regionName,
425 | VersionedFile: filepath.Join(directoryPrefix, "file-to-upload"),
426 | Endpoint: endpoint,
427 | UsePathStyle: pathStyle,
428 | },
429 | Params: out.Params{
430 | From: "file-to-upload-local",
431 | To: "something-wrong/",
432 | },
433 | }
434 |
435 | expectedExitStatus = 1
436 |
437 | err = json.NewEncoder(stdin).Encode(&outRequest)
438 | Ω(err).ShouldNot(HaveOccurred())
439 | })
440 |
441 | It("reports that it failed to create a versioned object", func() {
442 | Ω(session.Err).Should(gbytes.Say("object versioning not enabled"))
443 | })
444 | })
445 | })
446 |
447 | Context("with a versioned bucket", func() {
448 | var directoryPrefix string
449 |
450 | BeforeEach(func() {
451 | directoryPrefix = "out-request-files-versioned"
452 | })
453 |
454 | AfterEach(func() {
455 | fileVersions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload"))
456 | Ω(err).ShouldNot(HaveOccurred())
457 |
458 | for _, fileVersion := range fileVersions {
459 | err := s3client.DeleteVersionedFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload"), fileVersion)
460 | Ω(err).ShouldNot(HaveOccurred())
461 | }
462 | })
463 |
464 | Context("with versioned_file", func() {
465 | BeforeEach(func() {
466 | err := os.WriteFile(filepath.Join(sourceDir, "file-to-upload-local"), []byte("contents"), 0755)
467 | Ω(err).ShouldNot(HaveOccurred())
468 |
469 | outRequest := out.Request{
470 | Source: s3resource.Source{
471 | AccessKeyID: accessKeyID,
472 | SecretAccessKey: secretAccessKey,
473 | SessionToken: sessionToken,
474 | AwsRoleARN: awsRoleARN,
475 | Bucket: versionedBucketName,
476 | RegionName: regionName,
477 | VersionedFile: filepath.Join(directoryPrefix, "file-to-upload"),
478 | Endpoint: endpoint,
479 | UsePathStyle: pathStyle,
480 | },
481 | Params: out.Params{
482 | From: "file-to-upload-local",
483 | To: "something-wrong/",
484 | },
485 | }
486 |
487 | err = json.NewEncoder(stdin).Encode(&outRequest)
488 | Ω(err).ShouldNot(HaveOccurred())
489 | })
490 |
491 | It("uploads the file to the correct bucket and outputs the version", func() {
492 | s3files, err := s3client.BucketFiles(versionedBucketName, directoryPrefix)
493 | Ω(err).ShouldNot(HaveOccurred())
494 |
495 | Ω(s3files).Should(ConsistOf(filepath.Join(directoryPrefix, "file-to-upload")))
496 |
497 | reader := bytes.NewBuffer(session.Out.Contents())
498 |
499 | var response out.Response
500 | err = json.NewDecoder(reader).Decode(&response)
501 | Ω(err).ShouldNot(HaveOccurred())
502 |
503 | versions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload"))
504 | Ω(err).ShouldNot(HaveOccurred())
505 |
506 | Ω(response).Should(Equal(out.Response{
507 | Version: s3resource.Version{
508 | VersionID: versions[0],
509 | },
510 | Metadata: []s3resource.MetadataPair{
511 | {
512 | Name: "filename",
513 | Value: "file-to-upload",
514 | },
515 | {
516 | Name: "url",
517 | Value: buildEndpoint(versionedBucketName, endpoint) + "/" + directoryPrefix + "/file-to-upload?versionId=" + versions[0],
518 | },
519 | },
520 | }))
521 | })
522 | })
523 |
524 | Context("with regexp", func() {
525 | BeforeEach(func() {
526 | err := os.WriteFile(filepath.Join(sourceDir, "file-to-upload"), []byte("contents"), 0755)
527 | Ω(err).ShouldNot(HaveOccurred())
528 |
529 | outRequest := out.Request{
530 | Source: s3resource.Source{
531 | AccessKeyID: accessKeyID,
532 | SecretAccessKey: secretAccessKey,
533 | SessionToken: sessionToken,
534 | AwsRoleARN: awsRoleARN,
535 | Bucket: versionedBucketName,
536 | RegionName: regionName,
537 | Endpoint: endpoint,
538 | UsePathStyle: pathStyle,
539 | },
540 | Params: out.Params{
541 | From: "file-to-upload",
542 | To: directoryPrefix + "/",
543 | },
544 | }
545 |
546 | err = json.NewEncoder(stdin).Encode(&outRequest)
547 | Ω(err).ShouldNot(HaveOccurred())
548 | })
549 |
550 | It("uploads the file to the correct bucket and outputs the version", func() {
551 | s3files, err := s3client.BucketFiles(versionedBucketName, directoryPrefix)
552 | Ω(err).ShouldNot(HaveOccurred())
553 |
554 | Ω(s3files).Should(ConsistOf(filepath.Join(directoryPrefix, "file-to-upload")))
555 |
556 | reader := bytes.NewBuffer(session.Out.Contents())
557 |
558 | var response out.Response
559 | err = json.NewDecoder(reader).Decode(&response)
560 | Ω(err).ShouldNot(HaveOccurred())
561 |
562 | versions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload"))
563 | Ω(err).ShouldNot(HaveOccurred())
564 |
565 | Ω(response).Should(Equal(out.Response{
566 | Version: s3resource.Version{
567 | Path: filepath.Join(directoryPrefix, "file-to-upload"),
568 | },
569 | Metadata: []s3resource.MetadataPair{
570 | {
571 | Name: "filename",
572 | Value: "file-to-upload",
573 | },
574 | {
575 | Name: "url",
576 | Value: buildEndpoint(versionedBucketName, endpoint) + "/" + directoryPrefix + "/file-to-upload?versionId=" + versions[0],
577 | },
578 | },
579 | }))
580 | })
581 | })
582 | })
583 | })
584 |
--------------------------------------------------------------------------------
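
The tests above drive the compiled `out` binary exactly the way Concourse does: the build directory as the sole argument and a JSON `out.Request` on stdin, with the `out.Response` coming back on stdout. A hedged sketch of doing the same by hand; the binary path, directory, bucket, and globs are placeholders, not values from this repository.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"

	s3resource "github.com/concourse/s3-resource"
	"github.com/concourse/s3-resource/out"
)

func main() {
	request := out.Request{
		Source: s3resource.Source{
			Bucket:     "example-bucket", // placeholder
			RegionName: "us-east-1",
			Regexp:     "builds/app-(.*).tgz",
		},
		Params: out.Params{
			File: "build-output/app-*.tgz", // glob relative to the source directory
		},
	}

	stdin := &bytes.Buffer{}
	if err := json.NewEncoder(stdin).Encode(request); err != nil {
		panic(err)
	}

	// Same invocation shape as exec.Command(outPath, sourceDir) in the tests.
	cmd := exec.Command("/opt/resource/out", "/tmp/build") // illustrative paths
	cmd.Stdin = stdin

	stdout, err := cmd.Output()
	if err != nil {
		panic(err)
	}

	var response out.Response
	if err := json.Unmarshal(stdout, &response); err != nil {
		panic(err)
	}
	fmt.Println("uploaded:", response.Version.Path)
}
```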
/integration/s3client_test.go:
--------------------------------------------------------------------------------
1 | package integration_test
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "os"
8 | "path/filepath"
9 | "time"
10 |
11 | "github.com/aws/aws-sdk-go-v2/aws"
12 | "github.com/aws/aws-sdk-go-v2/service/s3"
13 | s3resource "github.com/concourse/s3-resource"
14 |
15 | . "github.com/onsi/ginkgo/v2"
16 | . "github.com/onsi/gomega"
17 | )
18 |
19 | var _ = Describe("S3client", func() {
20 | var (
21 | tempDir string
22 | tempFile *os.File
23 | runtime string
24 | directoryPrefix string
25 | )
26 |
27 | BeforeEach(func() {
28 | var err error
29 | directoryPrefix = "s3client-tests"
30 | runtime = fmt.Sprintf("%d", time.Now().Unix())
31 |
32 | tempDir, err = os.MkdirTemp("", "s3-upload-dir")
33 | Ω(err).ShouldNot(HaveOccurred())
34 |
35 | tempFile, err = os.CreateTemp(tempDir, "file-to-upload")
36 | Ω(err).ShouldNot(HaveOccurred())
37 |
38 | tempFile.Write([]byte("hello-" + runtime))
39 | })
40 |
41 | AfterEach(func() {
42 | err := os.RemoveAll(tempDir)
43 | Ω(err).ShouldNot(HaveOccurred())
44 |
45 | fileOneVersions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"))
46 | Ω(err).ShouldNot(HaveOccurred())
47 |
48 | for _, fileOneVersion := range fileOneVersions {
49 | err := s3client.DeleteVersionedFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"), fileOneVersion)
50 | Ω(err).ShouldNot(HaveOccurred())
51 | }
52 |
53 | fileTwoVersions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-2"))
54 | Ω(err).ShouldNot(HaveOccurred())
55 |
56 | for _, fileTwoVersion := range fileTwoVersions {
57 | err := s3client.DeleteVersionedFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-2"), fileTwoVersion)
58 | Ω(err).ShouldNot(HaveOccurred())
59 | }
60 |
61 | fileThreeVersions, err := s3client.BucketFileVersions(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-3"))
62 | Ω(err).ShouldNot(HaveOccurred())
63 |
64 | for _, fileThreeVersion := range fileThreeVersions {
65 | err := s3client.DeleteVersionedFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-3"), fileThreeVersion)
66 | Ω(err).ShouldNot(HaveOccurred())
67 | }
68 | })
69 |
70 | It("can interact with buckets", func() {
71 | _, err := s3client.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"), tempFile.Name(), s3resource.NewUploadFileOptions())
72 | Ω(err).ShouldNot(HaveOccurred())
73 |
74 | _, err = s3client.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-2"), tempFile.Name(), s3resource.NewUploadFileOptions())
75 | Ω(err).ShouldNot(HaveOccurred())
76 |
77 | _, err = s3client.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-2"), tempFile.Name(), s3resource.NewUploadFileOptions())
78 | Ω(err).ShouldNot(HaveOccurred())
79 |
80 | tags := map[string]string{
81 | "tag1": "value1",
82 | "tag2": "value2",
83 | }
84 | err = s3client.SetTags(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"), "", tags)
85 | Ω(err).ShouldNot(HaveOccurred())
86 |
87 | options := s3resource.NewUploadFileOptions()
88 | options.ServerSideEncryption = "AES256"
89 | _, err = s3client.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-3"), tempFile.Name(), options)
90 | Ω(err).ShouldNot(HaveOccurred())
91 |
92 | files, err := s3client.BucketFiles(versionedBucketName, directoryPrefix)
93 | Ω(err).ShouldNot(HaveOccurred())
94 |
95 | Ω(files).Should(ConsistOf([]string{
96 | filepath.Join(directoryPrefix, "file-to-upload-1"),
97 | filepath.Join(directoryPrefix, "file-to-upload-2"),
98 | filepath.Join(directoryPrefix, "file-to-upload-3"),
99 | }))
100 |
101 | err = s3client.DownloadFile(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"), "", filepath.Join(tempDir, "downloaded-file"))
102 | Ω(err).ShouldNot(HaveOccurred())
103 |
104 | read, err := os.ReadFile(filepath.Join(tempDir, "downloaded-file"))
105 | Ω(err).ShouldNot(HaveOccurred())
106 | Ω(read).Should(Equal([]byte("hello-" + runtime)))
107 |
108 | err = s3client.DownloadTags(versionedBucketName, filepath.Join(directoryPrefix, "file-to-upload-1"), "", filepath.Join(tempDir, "tags.json"))
109 | Ω(err).ShouldNot(HaveOccurred())
110 |
111 | expectedTagsJSON, err := json.Marshal(tags)
112 | Ω(err).ShouldNot(HaveOccurred())
113 |
114 | actualTagsJSON, err := os.ReadFile(filepath.Join(tempDir, "tags.json"))
115 | Ω(err).ShouldNot(HaveOccurred())
116 | Ω(actualTagsJSON).Should(MatchJSON(expectedTagsJSON))
117 |
118 | resp, err := s3Service.HeadObject(context.TODO(), &s3.HeadObjectInput{
119 | Bucket: aws.String(versionedBucketName),
120 | Key: aws.String(filepath.Join(directoryPrefix, "file-to-upload-3")),
121 | })
122 |
123 | Ω(err).ShouldNot(HaveOccurred())
124 | 		Ω(resp.ServerSideEncryption).Should(BeEquivalentTo("AES256"))
125 | })
126 |
127 | Context("when using a sessionToken", func() {
128 | BeforeEach(func() {
129 | if len(os.Getenv("TEST_SESSION_TOKEN")) == 0 {
130 | Skip("'TEST_SESSION_TOKEN' is not set, skipping.")
131 | }
132 | s3Service, s3client = getSessionTokenS3Client(awsConfig)
133 | })
134 |
135 | It("can interact with buckets", func() {
136 | _, err := s3client.BucketFiles(versionedBucketName, directoryPrefix)
137 | Ω(err).ShouldNot(HaveOccurred())
138 | })
139 | })
140 | })
141 |
--------------------------------------------------------------------------------
/models.go:
--------------------------------------------------------------------------------
1 | package s3resource
2 |
3 | type Source struct {
4 | AccessKeyID string `json:"access_key_id"`
5 | SecretAccessKey string `json:"secret_access_key"`
6 | SessionToken string `json:"session_token"`
7 | AwsRoleARN string `json:"aws_role_arn"`
8 | Bucket string `json:"bucket"`
9 | Regexp string `json:"regexp"`
10 | VersionedFile string `json:"versioned_file"`
11 | Private bool `json:"private"`
12 | RegionName string `json:"region_name"`
13 | UseAwsCredsProvider bool `json:"enable_aws_creds_provider"`
14 | 	// Deprecated: Not needed since upgrading to the v2 AWS Go SDK
15 | CloudfrontURL string `json:"cloudfront_url"`
16 | Endpoint string `json:"endpoint"`
17 | DisableSSL bool `json:"disable_ssl"`
18 | ServerSideEncryption string `json:"server_side_encryption"`
19 | SSEKMSKeyId string `json:"sse_kms_key_id"`
20 | UseV2Signing bool `json:"use_v2_signing"`
21 | SkipSSLVerification bool `json:"skip_ssl_verification"`
22 | SkipDownload bool `json:"skip_download"`
23 | InitialVersion string `json:"initial_version"`
24 | InitialPath string `json:"initial_path"`
25 | InitialContentText string `json:"initial_content_text"`
26 | InitialContentBinary string `json:"initial_content_binary"`
27 | DisableMultipart bool `json:"disable_multipart"`
28 | UsePathStyle bool `json:"use_path_style"`
29 | }
30 |
31 | func (source Source) IsValid() (bool, string) {
32 | if source.Regexp != "" && source.VersionedFile != "" {
33 | return false, "please specify either regexp or versioned_file"
34 | }
35 |
36 | if source.Regexp != "" && source.InitialVersion != "" {
37 | return false, "please use initial_path when regexp is set"
38 | }
39 |
40 | if source.VersionedFile != "" && source.InitialPath != "" {
41 | return false, "please use initial_version when versioned_file is set"
42 | }
43 |
44 | if source.InitialContentText != "" && source.InitialContentBinary != "" {
45 | 		return false, "please use initial_content_text or initial_content_binary but not both"
46 | }
47 |
48 | hasInitialContent := source.InitialContentText != "" || source.InitialContentBinary != ""
49 | if hasInitialContent && source.InitialVersion == "" && source.InitialPath == "" {
50 | return false, "please specify initial_version or initial_path if initial content is set"
51 | }
52 |
53 | return true, ""
54 | }
55 |
56 | type Version struct {
57 | Path string `json:"path,omitempty"`
58 | VersionID string `json:"version_id,omitempty"`
59 | }
60 |
61 | type MetadataPair struct {
62 | Name string `json:"name"`
63 | Value string `json:"value"`
64 | }
65 |
--------------------------------------------------------------------------------
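
`Source.IsValid` above encodes the mutual-exclusion rules between `regexp`, `versioned_file`, and the `initial_*` fields. A small sketch (not part of the repository) exercising those rules; the bucket and key names are illustrative.

```go
package main

import (
	"fmt"

	s3resource "github.com/concourse/s3-resource"
)

func main() {
	// Valid: a regexp-based source with initial_path naming the pre-seeded version.
	ok, msg := s3resource.Source{
		Bucket:      "example-bucket", // placeholder
		Regexp:      "releases/app-(.*).tgz",
		InitialPath: "releases/app-0.0.0.tgz",
	}.IsValid()
	fmt.Println(ok, msg) // true ""

	// Invalid: regexp and versioned_file are mutually exclusive.
	ok, msg = s3resource.Source{
		Bucket:        "example-bucket",
		Regexp:        "releases/app-(.*).tgz",
		VersionedFile: "releases/app.tgz",
	}.IsValid()
	fmt.Println(ok, msg) // false "please specify either regexp or versioned_file"

	// Invalid: initial content needs an initial_version or initial_path to attach to.
	ok, msg = s3resource.Source{
		Bucket:             "example-bucket",
		InitialContentText: "bootstrap",
	}.IsValid()
	fmt.Println(ok, msg) // false "please specify initial_version or initial_path if initial content is set"
}
```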
/out/command.go:
--------------------------------------------------------------------------------
1 | package out
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "io"
7 | "os"
8 | "path/filepath"
9 | "regexp"
10 | "strings"
11 |
12 | s3resource "github.com/concourse/s3-resource"
13 | "github.com/concourse/s3-resource/versions"
14 | "github.com/fatih/color"
15 | )
16 |
17 | var ErrObjectVersioningNotEnabled = errors.New("object versioning not enabled")
18 | var ErrorColor = color.New(color.FgWhite, color.BgRed, color.Bold)
19 | var BlinkingErrorColor = color.New(color.BlinkSlow, color.FgWhite, color.BgRed, color.Bold)
20 |
21 | func init() {
22 | ErrorColor.EnableColor()
23 | }
24 |
25 | type Command struct {
26 | stderr io.Writer
27 | s3client s3resource.S3Client
28 | }
29 |
30 | func NewCommand(stderr io.Writer, s3client s3resource.S3Client) *Command {
31 | return &Command{
32 | stderr: stderr,
33 | s3client: s3client,
34 | }
35 | }
36 |
37 | func (command *Command) Run(sourceDir string, request Request) (Response, error) {
38 | if request.Params.From != "" || request.Params.To != "" || request.Source.UseV2Signing {
39 | command.printDeprecationWarning()
40 | }
41 |
42 | if ok, message := request.Source.IsValid(); !ok {
43 | return Response{}, errors.New(message)
44 | }
45 | if request.Params.File != "" && request.Params.From != "" {
46 | return Response{}, errors.New("contains both file and from")
47 | }
48 |
49 | localPath, err := command.match(request.Params, sourceDir)
50 | if err != nil {
51 | return Response{}, err
52 | }
53 |
54 | remotePath := command.remotePath(request, localPath, sourceDir)
55 |
56 | bucketName := request.Source.Bucket
57 |
58 | options := s3resource.NewUploadFileOptions()
59 |
60 | if request.Params.Acl != "" {
61 | options.Acl = request.Params.Acl
62 | }
63 |
64 | options.ContentType = request.Params.ContentType
65 | options.ServerSideEncryption = request.Source.ServerSideEncryption
66 | options.KmsKeyId = request.Source.SSEKMSKeyId
67 | options.DisableMultipart = request.Source.DisableMultipart
68 |
69 | versionID, err := command.s3client.UploadFile(
70 | bucketName,
71 | remotePath,
72 | localPath,
73 | options,
74 | )
75 | if err != nil {
76 | return Response{}, err
77 | }
78 |
79 | version := s3resource.Version{}
80 |
81 | if request.Source.VersionedFile != "" {
82 | if versionID == "" {
83 | return Response{}, ErrObjectVersioningNotEnabled
84 | }
85 |
86 | version.VersionID = versionID
87 | } else {
88 | version.Path = remotePath
89 | }
90 |
91 | url, err := command.s3client.URL(bucketName, remotePath, request.Source.Private, versionID)
92 | if err != nil {
93 | return Response{}, err
94 | }
95 |
96 | return Response{
97 | Version: version,
98 | Metadata: command.metadata(url, remotePath, request.Source.Private),
99 | }, nil
100 | }
101 |
102 | func (command *Command) remotePath(request Request, localPath string, sourceDir string) string {
103 | if request.Source.VersionedFile != "" {
104 | return request.Source.VersionedFile
105 | }
106 |
107 | if request.Params.To == "" && request.Params.From == "" && request.Source.Regexp != "" {
108 | return filepath.Join(parentDir(request.Source.Regexp), filepath.Base(localPath))
109 | }
110 |
111 | folderDestination := strings.HasSuffix(request.Params.To, "/")
112 | if folderDestination || request.Params.To == "" {
113 | return filepath.Join(request.Params.To, filepath.Base(localPath))
114 | }
115 |
116 | compiled := regexp.MustCompile(request.Params.From)
117 | fileName := strings.TrimPrefix(localPath, sourceDir+"/")
118 | return compiled.ReplaceAllString(fileName, request.Params.To)
119 | }
120 |
121 | func parentDir(regexp string) string {
122 | return regexp[:strings.LastIndex(regexp, "/")+1]
123 | }
124 |
125 | func (command *Command) match(params Params, sourceDir string) (string, error) {
126 | var matches []string
127 | var err error
128 | var pattern string
129 |
130 | if params.File != "" {
131 | pattern = params.File
132 | matches, err = filepath.Glob(filepath.Join(sourceDir, pattern))
133 | } else {
134 | paths := []string{}
135 | filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
136 | paths = append(paths, path)
137 | return nil
138 | })
139 | pattern = params.From
140 | matches, err = versions.MatchUnanchored(paths, pattern)
141 | }
142 |
143 | if err != nil {
144 | return "", err
145 | }
146 |
147 | if len(matches) == 0 {
148 | return "", fmt.Errorf("no matches found for pattern: %s", pattern)
149 | }
150 |
151 | if len(matches) > 1 {
152 | return "", fmt.Errorf("more than one match found for pattern: %s\n%v", pattern, matches)
153 | }
154 |
155 | return matches[0], nil
156 | }
157 |
158 | func (command *Command) metadata(url, remotePath string, private bool) []s3resource.MetadataPair {
159 | remoteFilename := filepath.Base(remotePath)
160 |
161 | metadata := []s3resource.MetadataPair{
162 | {
163 | Name: "filename",
164 | Value: remoteFilename,
165 | },
166 | }
167 |
168 | if !private {
169 | metadata = append(metadata, s3resource.MetadataPair{
170 | Name: "url",
171 | Value: url,
172 | })
173 | }
174 |
175 | return metadata
176 | }
177 |
178 | func (command *Command) printDeprecationWarning() {
179 | errorColor := ErrorColor.SprintFunc()
180 | blinkColor := BlinkingErrorColor.SprintFunc()
181 | command.stderr.Write([]byte(blinkColor("WARNING:")))
182 | command.stderr.Write([]byte("\n"))
183 | command.stderr.Write([]byte(errorColor("Parameters 'from/to' are deprecated, use 'file' instead")))
184 | command.stderr.Write([]byte("\n"))
185 | command.stderr.Write([]byte(errorColor("Source field 'use_v2_signing' has been removed. v4 signing happens by default now.")))
186 | command.stderr.Write([]byte("\n\n"))
187 | }
188 |
--------------------------------------------------------------------------------
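
`remotePath` above decides the object key from four inputs: `versioned_file` wins outright; an empty `to`/`from` combined with a `regexp` uploads next to the regexp's parent directory; a `to` that is empty or ends in `/` is treated as a folder; anything else treats `from` as a capture pattern and `to` as its replacement template. A standalone sketch of that last, templated branch (the pattern and paths are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// templatedRemotePath reproduces the final branch of out.Command.remotePath:
// the capture groups of `from` are substituted into the `to` template.
func templatedRemotePath(localPath, sourceDir, from, to string) string {
	compiled := regexp.MustCompile(from)
	fileName := strings.TrimPrefix(localPath, sourceDir+"/")
	return compiled.ReplaceAllString(fileName, to)
}

func main() {
	remote := templatedRemotePath(
		"/tmp/source/a/file-123.tgz", // localPath (illustrative)
		"/tmp/source",                // sourceDir
		`a/file-(\d*).tgz`,           // params.from
		"folder-${1}/file.tgz",       // params.to
	)
	fmt.Println(remote) // folder-123/file.tgz
}
```

This matches the "To param with templating" unit test below, where `a/file-123.tgz` with `to: folder-${1}/file.tgz` lands at `folder-123/file.tgz`.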
/out/command_test.go:
--------------------------------------------------------------------------------
1 | package out_test
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 |
7 | s3resource "github.com/concourse/s3-resource"
8 | "github.com/concourse/s3-resource/fakes"
9 | "github.com/concourse/s3-resource/out"
10 | "github.com/onsi/gomega/gbytes"
11 |
12 | . "github.com/onsi/ginkgo/v2"
13 | . "github.com/onsi/gomega"
14 | )
15 |
16 | var _ = Describe("Out Command", func() {
17 | Describe("running the command", func() {
18 | var (
19 | tmpPath string
20 | sourceDir string
21 | request out.Request
22 |
23 | stderr *gbytes.Buffer
24 | s3client *fakes.FakeS3Client
25 | command *out.Command
26 | )
27 |
28 | BeforeEach(func() {
29 | var err error
30 | tmpPath, err = os.MkdirTemp("", "out_command")
31 | Ω(err).ShouldNot(HaveOccurred())
32 |
33 | sourceDir = filepath.Join(tmpPath, "source")
34 | err = os.MkdirAll(sourceDir, 0755)
35 | Ω(err).ShouldNot(HaveOccurred())
36 |
37 | request = out.Request{
38 | Source: s3resource.Source{
39 | Bucket: "bucket-name",
40 | },
41 | }
42 |
43 | s3client = &fakes.FakeS3Client{}
44 | stderr = gbytes.NewBuffer()
45 | command = out.NewCommand(stderr, s3client)
46 | })
47 |
48 | AfterEach(func() {
49 | stderr.Close()
50 | err := os.RemoveAll(tmpPath)
51 | Ω(err).ShouldNot(HaveOccurred())
52 | })
53 |
54 | createFile := func(path string) {
55 | fullPath := filepath.Join(sourceDir, path)
56 | err := os.MkdirAll(filepath.Dir(fullPath), 0755)
57 | Ω(err).ShouldNot(HaveOccurred())
58 |
59 | file, err := os.Create(fullPath)
60 | Ω(err).ShouldNot(HaveOccurred())
61 | file.Close()
62 | }
63 |
64 | Describe("finding files to upload with From param", func() {
65 | It("prints the deprecation warning", func() {
66 | request.Params.From = "foo.tgz"
67 | createFile("foo.tgz")
68 |
69 | _, err := command.Run(sourceDir, request)
70 | Ω(err).ShouldNot(HaveOccurred())
71 |
72 | Expect(stderr.Contents()).To(ContainSubstring("WARNING:"))
73 | Expect(stderr.Contents()).To(ContainSubstring("Parameters 'from/to' are deprecated, use 'file' instead"))
74 | })
75 |
76 | It("does not error if there is a single match", func() {
77 | request.Params.From = "a/(.*).tgz"
78 | createFile("a/file.tgz")
79 |
80 | _, err := command.Run(sourceDir, request)
81 | Ω(err).ShouldNot(HaveOccurred())
82 | })
83 |
84 | It("errors if there are no matches", func() {
85 | request.Params.From = "b/(.*).tgz"
86 | createFile("a/file1.tgz")
87 | createFile("a/file2.tgz")
88 |
89 | _, err := command.Run(sourceDir, request)
90 | Ω(err).Should(HaveOccurred())
91 | })
92 |
93 | It("errors if there are more than one match", func() {
94 | request.Params.From = "a/(.*).tgz"
95 | createFile("a/file1.tgz")
96 | createFile("a/file2.tgz")
97 |
98 | _, err := command.Run(sourceDir, request)
99 | Ω(err).Should(HaveOccurred())
100 | })
101 | })
102 |
103 | Describe("finding files to upload with File param", func() {
104 | It("does not print the deprecation warning", func() {
105 | request.Params.File = "a/*.tgz"
106 | createFile("a/file.tgz")
107 |
108 | _, err := command.Run(sourceDir, request)
109 | Ω(err).ShouldNot(HaveOccurred())
110 |
111 | Expect(stderr.Contents()).NotTo(ContainSubstring("WARNING:"))
112 | })
113 |
114 | It("does not error if there is a single match", func() {
115 | request.Params.File = "a/*.tgz"
116 | createFile("a/file.tgz")
117 |
118 | _, err := command.Run(sourceDir, request)
119 | Ω(err).ShouldNot(HaveOccurred())
120 | })
121 |
122 | It("errors if there are no matches", func() {
123 | request.Params.File = "b/*.tgz"
124 | createFile("a/file1.tgz")
125 | createFile("a/file2.tgz")
126 |
127 | _, err := command.Run(sourceDir, request)
128 | Ω(err).Should(HaveOccurred())
129 | })
130 |
131 | It("errors if there are more than one match", func() {
132 | request.Params.File = "a/*.tgz"
133 | createFile("a/file1.tgz")
134 | createFile("a/file2.tgz")
135 |
136 | _, err := command.Run(sourceDir, request)
137 | Ω(err).Should(HaveOccurred())
138 | })
139 |
140 | It("defaults the ACL to 'private'", func() {
141 | request.Params.File = "a/*.tgz"
142 | createFile("a/file.tgz")
143 |
144 | _, err := command.Run(sourceDir, request)
145 | Ω(err).ShouldNot(HaveOccurred())
146 |
147 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
148 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
149 |
150 | Ω(bucketName).Should(Equal("bucket-name"))
151 | Ω(remotePath).Should(Equal("file.tgz"))
152 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, "a/file.tgz")))
153 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
154 | })
155 | })
156 |
157 | Context("when specifying an ACL for the uploaded file", func() {
158 | BeforeEach(func() {
159 | request.Params.File = "a/*.tgz"
160 | request.Params.Acl = "public-read"
161 | createFile("a/file.tgz")
162 | })
163 |
164 | 			It("applies the specified acl", func() {
165 | _, err := command.Run(sourceDir, request)
166 | Ω(err).ShouldNot(HaveOccurred())
167 |
168 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
169 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
170 |
171 | Ω(bucketName).Should(Equal("bucket-name"))
172 | Ω(remotePath).Should(Equal("file.tgz"))
173 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, "a/file.tgz")))
174 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "public-read"}))
175 |
176 | })
177 | })
178 |
179 | Context("when uploading the file with a To param", func() {
180 | BeforeEach(func() {
181 | request.Params.From = "a/(.*).tgz"
182 | request.Params.To = "a-folder/"
183 | createFile("a/file.tgz")
184 | })
185 |
186 | It("prints the deprecation warning", func() {
187 | _, err := command.Run(sourceDir, request)
188 | Ω(err).ShouldNot(HaveOccurred())
189 |
190 | Expect(stderr.Contents()).To(ContainSubstring("WARNING:"))
191 | Expect(stderr.Contents()).To(ContainSubstring("Parameters 'from/to' are deprecated, use 'file' instead"))
192 | })
193 |
194 | It("uploads the file", func() {
195 | _, err := command.Run(sourceDir, request)
196 | Ω(err).ShouldNot(HaveOccurred())
197 |
198 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
199 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
200 |
201 | Ω(bucketName).Should(Equal("bucket-name"))
202 | Ω(remotePath).Should(Equal("a-folder/file.tgz"))
203 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, "a/file.tgz")))
204 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
205 | })
206 | })
207 |
208 | Context("when uploading the file with an empty To param", func() {
209 | BeforeEach(func() {
210 | request.Params.To = ""
211 | request.Params.File = "a/*.tgz"
212 | createFile("a/file.tgz")
213 | })
214 |
215 | It("uploads the file to the root", func() {
216 | _, err := command.Run(sourceDir, request)
217 | Ω(err).ShouldNot(HaveOccurred())
218 |
219 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
220 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
221 |
222 | Ω(bucketName).Should(Equal("bucket-name"))
223 | Ω(remotePath).Should(Equal("file.tgz"))
224 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, "a/file.tgz")))
225 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
226 | })
227 | })
228 |
229 | Context("when uploading the file with a To param with templating", func() {
230 | BeforeEach(func() {
231 | request.Params.From = "a/file-(\\d*).tgz"
232 | request.Params.To = "folder-${1}/file.tgz"
233 | createFile("a/file-123.tgz")
234 | })
235 |
236 | It("uploads the file to the correct location", func() {
237 | response, err := command.Run(sourceDir, request)
238 | Ω(err).ShouldNot(HaveOccurred())
239 |
240 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
241 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
242 |
243 | Ω(bucketName).Should(Equal("bucket-name"))
244 | Ω(remotePath).Should(Equal("folder-123/file.tgz"))
245 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, "a/file-123.tgz")))
246 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
247 |
248 | Ω(response.Version.Path).Should(Equal("folder-123/file.tgz"))
249 |
250 | Ω(response.Metadata[0].Name).Should(Equal("filename"))
251 | Ω(response.Metadata[0].Value).Should(Equal("file.tgz"))
252 | })
253 | })
254 |
255 | Context("when using versioned buckets", func() {
256 | BeforeEach(func() {
257 | s3client.UploadFileReturns("123", nil)
258 | })
259 |
260 | It("renames the local file to match the name of the versioned file", func() {
261 | localFileName := "not-the-same-name-as-versioned-file.tgz"
262 | remoteFileName := "versioned-file.tgz"
263 |
264 | request.Params.File = localFileName
265 | request.Source.VersionedFile = remoteFileName
266 | createFile(localFileName)
267 |
268 | response, err := command.Run(sourceDir, request)
269 |
270 | Ω(err).ShouldNot(HaveOccurred())
271 |
272 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
273 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
274 |
275 | Ω(bucketName).Should(Equal("bucket-name"))
276 | Ω(remotePath).Should(Equal(remoteFileName))
277 | Ω(localPath).Should(Equal(filepath.Join(sourceDir, localFileName)))
278 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
279 |
280 | Ω(response.Version.VersionID).Should(Equal("123"))
281 |
282 | Ω(response.Metadata[0].Name).Should(Equal("filename"))
283 | Ω(response.Metadata[0].Value).Should(Equal(remoteFileName))
284 | })
285 | })
286 |
287 | Context("when using regexp", func() {
288 | It("uploads to the parent directory", func() {
289 | request.Params.File = "my/special-file.tgz"
290 | request.Source.Regexp = "a-folder/some-file-(.*).tgz"
291 | createFile("my/special-file.tgz")
292 |
293 | response, err := command.Run(sourceDir, request)
294 | Expect(err).ToNot(HaveOccurred())
295 |
296 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
297 |
298 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
299 | Expect(bucketName).To(Equal("bucket-name"))
300 | Expect(remotePath).To(Equal("a-folder/special-file.tgz"))
301 | Expect(localPath).To(Equal(filepath.Join(sourceDir, "my/special-file.tgz")))
302 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private"}))
303 |
304 | Expect(response.Metadata[0].Name).To(Equal("filename"))
305 | Expect(response.Metadata[0].Value).To(Equal("special-file.tgz"))
306 | })
307 | })
308 |
309 | Describe("output metadata", func() {
310 | BeforeEach(func() {
311 | s3client.URLStub = func(bucketName string, remotePath string, private bool, versionID string) (string, error) {
312 | return "http://example.com/" + filepath.Join(bucketName, remotePath), nil
313 | }
314 | })
315 |
316 | It("returns a response", func() {
317 | request.Params.From = "a/(.*).tgz"
318 | request.Params.To = "a-folder/"
319 | createFile("a/file.tgz")
320 |
321 | response, err := command.Run(sourceDir, request)
322 | Ω(err).ShouldNot(HaveOccurred())
323 |
324 | Ω(s3client.URLCallCount()).Should(Equal(1))
325 | bucketName, remotePath, private, versionID := s3client.URLArgsForCall(0)
326 | Ω(bucketName).Should(Equal("bucket-name"))
327 | Ω(remotePath).Should(Equal("a-folder/file.tgz"))
328 | Ω(private).Should(Equal(false))
329 | Ω(versionID).Should(BeEmpty())
330 |
331 | Ω(response.Version.Path).Should(Equal("a-folder/file.tgz"))
332 |
333 | Ω(response.Metadata[0].Name).Should(Equal("filename"))
334 | Ω(response.Metadata[0].Value).Should(Equal("file.tgz"))
335 |
336 | Ω(response.Metadata[1].Name).Should(Equal("url"))
337 | Ω(response.Metadata[1].Value).Should(Equal("http://example.com/bucket-name/a-folder/file.tgz"))
338 | })
339 |
340 | It("doesn't include the URL if the output is private", func() {
341 | request.Source.Private = true
342 | request.Params.From = "a/(.*).tgz"
343 | request.Params.To = "a-folder/"
344 | createFile("a/file.tgz")
345 |
346 | response, err := command.Run(sourceDir, request)
347 | Ω(err).ShouldNot(HaveOccurred())
348 |
349 | Ω(response.Metadata).Should(HaveLen(1))
350 | Ω(response.Metadata[0].Name).ShouldNot(Equal("url"))
351 | })
352 | })
353 |
354 | Context("when specifying a content-type for the uploaded file", func() {
355 | BeforeEach(func() {
356 | request.Params.File = "a/*.tgz"
357 | request.Params.ContentType = "application/customtype"
358 | createFile("a/file.tgz")
359 | })
360 |
361 | 			It("applies the specified content-type", func() {
362 | _, err := command.Run(sourceDir, request)
363 | Ω(err).ShouldNot(HaveOccurred())
364 |
365 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
366 | _, _, _, options := s3client.UploadFileArgsForCall(0)
367 |
368 | Ω(options.ContentType).Should(Equal("application/customtype"))
369 | })
370 | })
371 |
372 | Context("content-type is not specified for the uploaded file", func() {
373 | BeforeEach(func() {
374 | request.Params.File = "a/*.tgz"
375 | createFile("a/file.tgz")
376 | })
377 |
378 | 			It("leaves the content-type empty when none is specified", func() {
379 | _, err := command.Run(sourceDir, request)
380 | Ω(err).ShouldNot(HaveOccurred())
381 |
382 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
383 | _, _, _, options := s3client.UploadFileArgsForCall(0)
384 |
385 | Ω(options.ContentType).Should(Equal(""))
386 | })
387 | })
388 |
389 | Context("when specifying disable multipart upload", func() {
390 | It("uploads to the parent directory without multipart upload", func() {
391 | request.Params.File = "my/special-file.tgz"
392 | request.Source.Regexp = "a-folder/some-file-(.*).tgz"
393 | request.Source.DisableMultipart = true
394 | createFile("my/special-file.tgz")
395 |
396 | response, err := command.Run(sourceDir, request)
397 | Expect(err).ToNot(HaveOccurred())
398 | Ω(s3client.UploadFileCallCount()).Should(Equal(1))
399 |
400 | bucketName, remotePath, localPath, options := s3client.UploadFileArgsForCall(0)
401 | Expect(bucketName).To(Equal("bucket-name"))
402 | Expect(remotePath).To(Equal("a-folder/special-file.tgz"))
403 | Expect(localPath).To(Equal(filepath.Join(sourceDir, "my/special-file.tgz")))
404 | Ω(options).Should(Equal(s3resource.UploadFileOptions{Acl: "private", DisableMultipart: true}))
405 |
406 | Expect(response.Metadata[0].Name).To(Equal("filename"))
407 | Expect(response.Metadata[0].Value).To(Equal("special-file.tgz"))
408 | })
409 | })
410 |
411 | })
412 | })
413 |
--------------------------------------------------------------------------------
/out/models.go:
--------------------------------------------------------------------------------
1 | package out
2 |
3 | import (
4 | 	s3resource "github.com/concourse/s3-resource"
5 | )
6 |
7 | type Request struct {
8 | Source s3resource.Source `json:"source"`
9 | Params Params `json:"params"`
10 | }
11 |
12 | type Params struct {
13 | From string `json:"from"`
14 | File string `json:"file"`
15 | To string `json:"to"`
16 | Acl string `json:"acl"`
17 | ContentType string `json:"content_type"`
18 | }
19 |
20 | type Response struct {
21 | Version s3resource.Version `json:"version"`
22 | Metadata []s3resource.MetadataPair `json:"metadata"`
23 | }
24 |
--------------------------------------------------------------------------------
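
Because `Version` in the root package tags both of its fields with `omitempty`, a regexp-style put and a versioned_file-style put serialize to different single-key version objects, which is what the integration and unit tests assert. A short sketch of the wire format; the paths and version ID are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"

	s3resource "github.com/concourse/s3-resource"
	"github.com/concourse/s3-resource/out"
)

func main() {
	// regexp-style put: the version is identified by its object path.
	byPath := out.Response{
		Version:  s3resource.Version{Path: "builds/app-1.2.3.tgz"},
		Metadata: []s3resource.MetadataPair{{Name: "filename", Value: "app-1.2.3.tgz"}},
	}
	b, _ := json.Marshal(byPath)
	fmt.Println(string(b))
	// {"version":{"path":"builds/app-1.2.3.tgz"},"metadata":[{"name":"filename","value":"app-1.2.3.tgz"}]}

	// versioned_file-style put: the version is the S3 object version ID.
	byVersionID := out.Response{
		Version: s3resource.Version{VersionID: "example-version-id"},
	}
	b, _ = json.Marshal(byVersionID)
	fmt.Println(string(b))
	// {"version":{"version_id":"example-version-id"},"metadata":null}
}
```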
/out/out_suite_test.go:
--------------------------------------------------------------------------------
1 | package out_test
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "testing"
7 |
8 | . "github.com/onsi/ginkgo/v2"
9 | . "github.com/onsi/gomega"
10 |
11 | "github.com/onsi/gomega/gexec"
12 | )
13 |
14 | var outPath string
15 |
16 | var _ = BeforeSuite(func() {
17 | var err error
18 |
19 | if _, err = os.Stat("/opt/resource/out"); err == nil {
20 | outPath = "/opt/resource/out"
21 | } else {
22 | outPath, err = gexec.Build("github.com/concourse/s3-resource/cmd/out")
23 | Ω(err).ShouldNot(HaveOccurred())
24 | }
25 |
26 | })
27 |
28 | var _ = AfterSuite(func() {
29 | gexec.CleanupBuildArtifacts()
30 | })
31 |
32 | func TestOut(t *testing.T) {
33 | RegisterFailHandler(Fail)
34 | RunSpecs(t, "Out Suite")
35 | }
36 |
37 | func Fixture(filename string) string {
38 | path := filepath.Join("fixtures", filename)
39 | contents, err := os.ReadFile(path)
40 | if err != nil {
41 | panic(err)
42 | }
43 |
44 | return string(contents)
45 | }
46 |
--------------------------------------------------------------------------------
/progress_writer_at.go:
--------------------------------------------------------------------------------
1 | package s3resource
2 |
3 | import (
4 | "io"
5 | )
6 |
7 | type progressWriterAt struct {
8 | io.WriterAt
9 | io.Writer
10 | }
11 |
12 | func (pwa progressWriterAt) WriteAt(p []byte, off int64) (int, error) {
13 | n, err := pwa.WriterAt.WriteAt(p, off)
14 | if err != nil {
15 | return n, err
16 | }
17 |
18 | 	pwa.Write(p) // mirror the bytes to the progress writer; its errors are intentionally ignored
19 | return n, err
20 | }
21 |
--------------------------------------------------------------------------------
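
`progressWriterAt` tees every successful `WriteAt` to a plain `io.Writer`, which lets a progress bar count bytes while the S3 transfer manager writes the object at arbitrary offsets. The download path that uses it sits outside this excerpt, so the wiring below is an illustrative, self-contained analogue rather than the resource's own code.

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// teeWriterAt mirrors the shape of progressWriterAt: every successful WriteAt
// is also copied to a plain io.Writer so a byte counter (or progress bar) can
// observe the data.
type teeWriterAt struct {
	io.WriterAt
	io.Writer
}

func (t teeWriterAt) WriteAt(p []byte, off int64) (int, error) {
	n, err := t.WriterAt.WriteAt(p, off)
	if err != nil {
		return n, err
	}
	t.Write(p) // progress side; its errors are deliberately ignored, as above
	return n, err
}

// writerFunc adapts a closure to io.Writer.
type writerFunc func([]byte) (int, error)

func (fn writerFunc) Write(p []byte) (int, error) { return fn(p) }

func main() {
	f, err := os.CreateTemp("", "tee-writer-at")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	var written int64
	counter := writerFunc(func(p []byte) (int, error) {
		written += int64(len(p))
		return len(p), nil
	})

	w := teeWriterAt{WriterAt: f, Writer: counter}
	if _, err := w.WriteAt([]byte("hello"), 0); err != nil {
		panic(err)
	}
	fmt.Println("bytes reported:", written) // bytes reported: 5
}
```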
/s3client.go:
--------------------------------------------------------------------------------
1 | package s3resource
2 |
3 | import (
4 | "context"
5 | "crypto/tls"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "io"
10 | "net/http"
11 | "net/url"
12 | "os"
13 | "strings"
14 | "time"
15 |
16 | "github.com/aws/aws-sdk-go-v2/aws"
17 | "github.com/aws/aws-sdk-go-v2/config"
18 | "github.com/aws/aws-sdk-go-v2/credentials"
19 | "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
20 | "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
21 | "github.com/aws/aws-sdk-go-v2/service/s3"
22 | "github.com/aws/aws-sdk-go-v2/service/s3/types"
23 | "github.com/aws/aws-sdk-go-v2/service/sts"
24 | "github.com/vbauerster/mpb/v8"
25 | "github.com/vbauerster/mpb/v8/decor"
26 | )
27 |
28 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
29 | //counterfeiter:generate -o fakes . S3Client
30 | type S3Client interface {
31 | BucketFiles(bucketName string, prefixHint string) ([]string, error)
32 | BucketFileVersions(bucketName string, remotePath string) ([]string, error)
33 |
34 | ChunkedBucketList(bucketName string, prefix string, continuationToken *string) (BucketListChunk, error)
35 |
36 | UploadFile(bucketName string, remotePath string, localPath string, options UploadFileOptions) (string, error)
37 | DownloadFile(bucketName string, remotePath string, versionID string, localPath string) error
38 |
39 | SetTags(bucketName string, remotePath string, versionID string, tags map[string]string) error
40 | DownloadTags(bucketName string, remotePath string, versionID string, localPath string) error
41 |
42 | DeleteFile(bucketName string, remotePath string) error
43 | DeleteVersionedFile(bucketName string, remotePath string, versionID string) error
44 |
45 | URL(bucketName string, remotePath string, private bool, versionID string) (string, error)
46 | }
47 |
48 | // 12 retries works out to ~5 mins of total backoff time, though AWS randomizes
49 | // the backoff to some extent so it may be as low as 4 or as high as 8 minutes
50 | const MaxRetries = 12
51 |
52 | type s3client struct {
53 | client *s3.Client
54 | progressOutput io.Writer
55 | }
56 |
57 | type UploadFileOptions struct {
58 | Acl string
59 | ServerSideEncryption string
60 | KmsKeyId string
61 | ContentType string
62 | DisableMultipart bool
63 | }
64 |
65 | func NewUploadFileOptions() UploadFileOptions {
66 | return UploadFileOptions{
67 | Acl: "private",
68 | }
69 | }
70 |
71 | func NewS3Client(
72 | progressOutput io.Writer,
73 | awsConfig *aws.Config,
74 | endpoint string,
75 | disableSSL, usePathStyle bool,
76 | ) (S3Client, error) {
77 | s3Opts := []func(*s3.Options){}
78 |
79 | if endpoint != "" {
80 | u, err := url.Parse(endpoint)
81 | if err != nil {
82 | return nil, fmt.Errorf("error parsing given endpoint: %w", err)
83 | }
84 | if u.Scheme == "" {
85 | // source.Endpoint is a hostname with no Scheme
86 | scheme := "https://"
87 | if disableSSL {
88 | scheme = "http://"
89 | }
90 | endpoint = scheme + endpoint
91 | }
92 |
93 | s3Opts = append(s3Opts, func(o *s3.Options) {
94 | o.BaseEndpoint = &endpoint
95 | o.UsePathStyle = usePathStyle
96 | o.DisableLogOutputChecksumValidationSkipped = true
97 | })
98 | }
99 |
100 | client := s3.NewFromConfig(*awsConfig, s3Opts...)
101 |
102 | return &s3client{
103 | client: client,
104 | progressOutput: progressOutput,
105 | }, nil
106 | }
107 |
108 | func NewAwsConfig(
109 | accessKey string,
110 | secretKey string,
111 | sessionToken string,
112 | roleToAssume string,
113 | regionName string,
114 | skipSSLVerification bool,
115 | useAwsCredsProvider bool,
116 | ) (*aws.Config, error) {
117 | var creds aws.CredentialsProvider
118 |
119 | if roleToAssume == "" && !useAwsCredsProvider {
120 | creds = aws.AnonymousCredentials{}
121 | }
122 |
123 | if accessKey != "" && secretKey != "" {
124 | creds = aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(accessKey, secretKey, sessionToken))
125 | _, err := creds.Retrieve(context.Background())
126 | if err != nil {
127 | return nil, err
128 | }
129 | }
130 |
131 | if len(regionName) == 0 {
132 | regionName = "us-east-1"
133 | }
134 |
135 | var httpClient *http.Client
136 | if skipSSLVerification {
137 | httpClient = &http.Client{Transport: &http.Transport{
138 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
139 | }}
140 | } else {
141 | httpClient = http.DefaultClient
142 | }
143 |
144 | cfg, err := config.LoadDefaultConfig(context.Background(),
145 | config.WithRegion(regionName),
146 | config.WithHTTPClient(httpClient),
147 | config.WithRetryMaxAttempts(MaxRetries),
148 | config.WithCredentialsProvider(creds),
149 | )
150 | if err != nil {
151 | return nil, fmt.Errorf("error loading default AWS config: %w", err)
152 | }
153 |
154 | if roleToAssume != "" {
155 | stsClient := sts.NewFromConfig(cfg)
156 | stsCreds := stscreds.NewAssumeRoleProvider(stsClient, roleToAssume)
157 | roleCreds, err := stsCreds.Retrieve(context.TODO())
158 | if err != nil {
159 | return nil, fmt.Errorf("error assuming role: %w", err)
160 | }
161 |
162 | cfg.Credentials = aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(
163 | roleCreds.AccessKeyID,
164 | roleCreds.SecretAccessKey,
165 | roleCreds.SessionToken,
166 | ))
167 | }
168 |
169 | return &cfg, nil
170 | }
171 |
172 | // BucketFiles returns all the files in bucketName immediately under directoryPrefix
173 | func (client *s3client) BucketFiles(bucketName string, directoryPrefix string) ([]string, error) {
174 | if !strings.HasSuffix(directoryPrefix, "/") {
175 | directoryPrefix = directoryPrefix + "/"
176 | }
177 | var (
178 | continuationToken *string
179 | truncated bool
180 | paths []string
181 | )
182 | for continuationToken, truncated = nil, true; truncated; {
183 | s3ListChunk, err := client.ChunkedBucketList(bucketName, directoryPrefix, continuationToken)
184 | if err != nil {
185 | return []string{}, err
186 | }
187 | truncated = s3ListChunk.Truncated
188 | continuationToken = s3ListChunk.ContinuationToken
189 | paths = append(paths, s3ListChunk.Paths...)
190 | }
191 | return paths, nil
192 | }
193 |
194 | func (client *s3client) BucketFileVersions(bucketName string, remotePath string) ([]string, error) {
195 | isBucketVersioned, err := client.getBucketVersioning(bucketName)
196 | if err != nil {
197 | return []string{}, err
198 | }
199 |
200 | if !isBucketVersioned {
201 | return []string{}, errors.New("bucket is not versioned")
202 | }
203 |
204 | bucketFiles, err := client.getVersionedBucketContents(bucketName, remotePath)
205 |
206 | if err != nil {
207 | return []string{}, err
208 | }
209 |
210 | versions := make([]string, 0, len(bucketFiles))
211 |
212 | for _, objectVersion := range bucketFiles[remotePath] {
213 | versions = append(versions, *objectVersion.VersionId)
214 | }
215 |
216 | return versions, nil
217 | }
218 |
219 | type BucketListChunk struct {
220 | Truncated bool
221 | ContinuationToken *string
222 | CommonPrefixes []string
223 | Paths []string
224 | }
225 |
226 | // ChunkedBucketList lists the S3 bucket `bucketName` contents under `prefix` one chunk at a time
227 | //
228 | // The returned `BucketListChunk` contains part of the files and subdirectories
229 | // present in `bucketName` under `prefix`. The files are listed in `Paths` and
230 | // the subdirectories in `CommonPrefixes`. If the returned chunk does not
231 | // include all the files and subdirectories, the `Truncated` flag will be set
232 | // to `true` and the `ContinuationToken` can be used to retrieve the next chunk.
233 | func (client *s3client) ChunkedBucketList(bucketName string, prefix string, continuationToken *string) (BucketListChunk, error) {
234 | params := &s3.ListObjectsV2Input{
235 | Bucket: aws.String(bucketName),
236 | ContinuationToken: continuationToken,
237 | Delimiter: aws.String("/"),
238 | Prefix: aws.String(prefix),
239 | }
240 | response, err := client.client.ListObjectsV2(context.TODO(), params)
241 | if err != nil {
242 | return BucketListChunk{}, err
243 | }
244 | commonPrefixes := make([]string, 0, len(response.CommonPrefixes))
245 | paths := make([]string, 0, len(response.Contents))
246 |
247 | for _, commonPrefix := range response.CommonPrefixes {
248 | commonPrefixes = append(commonPrefixes, *commonPrefix.Prefix)
249 | }
250 |
251 | for _, path := range response.Contents {
252 | paths = append(paths, *path.Key)
253 | }
254 |
255 | return BucketListChunk{
256 | Truncated: *response.IsTruncated,
257 | ContinuationToken: response.NextContinuationToken,
258 | CommonPrefixes: commonPrefixes,
259 | Paths: paths,
260 | }, nil
261 | }
262 |
263 | func (client *s3client) UploadFile(bucketName string, remotePath string, localPath string, options UploadFileOptions) (string, error) {
264 | uploader := manager.NewUploader(client.client)
265 |
266 | if client.isGCSHost() {
267 | // GCS returns `InvalidArgument` on multipart uploads
268 | uploader.MaxUploadParts = 1
269 | }
270 |
271 | stat, err := os.Stat(localPath)
272 | if err != nil {
273 | return "", err
274 | }
275 |
276 | localFile, err := os.Open(localPath)
277 | if err != nil {
278 | return "", err
279 | }
280 |
281 | defer localFile.Close()
282 |
283 | // Automatically adjust the part size for larger files so the upload fits within MaxUploadParts.
284 | fSize := stat.Size()
285 | if !options.DisableMultipart {
286 | if fSize > int64(uploader.MaxUploadParts)*uploader.PartSize {
287 | partSize := fSize / int64(uploader.MaxUploadParts)
288 | if fSize%int64(uploader.MaxUploadParts) != 0 {
289 | partSize++
290 | }
291 | uploader.PartSize = partSize
292 | }
293 | } else {
294 | uploader.MaxUploadParts = 1
295 | uploader.Concurrency = 1
296 | uploader.PartSize = fSize + 1
297 | if fSize <= manager.MinUploadPartSize {
298 | uploader.PartSize = manager.MinUploadPartSize
299 | }
300 | }
301 |
302 | progress := client.newProgressBar(fSize)
303 | defer progress.Wait()
304 |
305 | uploadInput := &s3.PutObjectInput{
306 | Bucket: aws.String(bucketName),
307 | Key: aws.String(remotePath),
308 | Body: progress.ProxyReader(localFile),
309 | ACL: types.ObjectCannedACL(options.Acl),
310 | }
311 | if options.ServerSideEncryption != "" {
312 | uploadInput.ServerSideEncryption = types.ServerSideEncryption(options.ServerSideEncryption)
313 | }
314 | if options.KmsKeyId != "" {
315 | uploadInput.SSEKMSKeyId = aws.String(options.KmsKeyId)
316 | }
317 | if options.ContentType != "" {
318 | uploadInput.ContentType = aws.String(options.ContentType)
319 | }
320 |
321 | uploadOutput, err := uploader.Upload(context.TODO(), uploadInput)
322 | if err != nil {
323 | return "", err
324 | }
325 |
326 | if uploadOutput.VersionID != nil {
327 | return *uploadOutput.VersionID, nil
328 | }
329 |
330 | return "", nil
331 | }
332 |
333 | func (client *s3client) DownloadFile(bucketName string, remotePath string, versionID string, localPath string) error {
334 | headObject := &s3.HeadObjectInput{
335 | Bucket: aws.String(bucketName),
336 | Key: aws.String(remotePath),
337 | }
338 |
339 | if versionID != "" {
340 | headObject.VersionId = aws.String(versionID)
341 | }
342 |
343 | object, err := client.client.HeadObject(context.TODO(), headObject)
344 | if err != nil {
345 | return err
346 | }
347 |
348 | progress := client.newProgressBar(*object.ContentLength)
349 | defer progress.Wait()
350 |
351 | downloader := manager.NewDownloader(client.client)
352 |
353 | localFile, err := os.Create(localPath)
354 | if err != nil {
355 | return err
356 | }
357 | defer localFile.Close()
358 |
359 | getObject := &s3.GetObjectInput{
360 | Bucket: aws.String(bucketName),
361 | Key: aws.String(remotePath),
362 | }
363 |
364 | if versionID != "" {
365 | getObject.VersionId = aws.String(versionID)
366 | }
367 |
368 | _, err = downloader.Download(context.TODO(), progressWriterAt{localFile, progress.ProxyWriter(io.Discard)}, getObject)
369 | if err != nil {
370 | return err
371 | }
372 |
373 | return nil
374 | }
375 |
376 | func (client *s3client) SetTags(bucketName string, remotePath string, versionID string, tags map[string]string) error {
377 | var tagSet []types.Tag
378 | for key, value := range tags {
379 | tagSet = append(tagSet, types.Tag{
380 | Key: aws.String(key),
381 | Value: aws.String(value),
382 | })
383 | }
384 |
385 | putObjectTagging := &s3.PutObjectTaggingInput{
386 | Bucket: aws.String(bucketName),
387 | Key: aws.String(remotePath),
388 | Tagging: &types.Tagging{TagSet: tagSet},
389 | }
390 | if versionID != "" {
391 | putObjectTagging.VersionId = aws.String(versionID)
392 | }
393 |
394 | _, err := client.client.PutObjectTagging(context.TODO(), putObjectTagging)
395 | return err
396 | }
397 |
398 | func (client *s3client) DownloadTags(bucketName string, remotePath string, versionID string, localPath string) error {
399 | getObjectTagging := &s3.GetObjectTaggingInput{
400 | Bucket: aws.String(bucketName),
401 | Key: aws.String(remotePath),
402 | }
403 | if versionID != "" {
404 | getObjectTagging.VersionId = aws.String(versionID)
405 | }
406 |
407 | objectTagging, err := client.client.GetObjectTagging(context.TODO(), getObjectTagging)
408 | if err != nil {
409 | return err
410 | }
411 |
412 | tags := map[string]string{}
413 | for _, tag := range objectTagging.TagSet {
414 | tags[*tag.Key] = *tag.Value
415 | }
416 |
417 | tagsJSON, err := json.Marshal(tags)
418 | if err != nil {
419 | return err
420 | }
421 |
422 | return os.WriteFile(localPath, tagsJSON, 0644)
423 | }
424 |
425 | func (client *s3client) URL(bucketName string, remotePath string, private bool, versionID string) (string, error) {
426 | if !private {
427 | var endpoint *string
428 | clientOptions := client.client.Options()
429 |
430 | if clientOptions.BaseEndpoint != nil {
431 | endpoint = clientOptions.BaseEndpoint
432 | }
433 |
434 | if endpoint == nil {
435 | endpoint = aws.String(fmt.Sprintf("https://s3.%s.amazonaws.com", clientOptions.Region))
436 | }
437 |
438 | // ResolveEndpoint() will return a URL with only the scheme and host
439 | // (e.g. https://bucket-name.s3.us-west-2.amazonaws.com). It will not
440 | // include the key/remotePath if you provide it.
441 | url, err := client.client.Options().EndpointResolverV2.ResolveEndpoint(
442 | context.Background(),
443 | s3.EndpointParameters{
444 | Endpoint: endpoint,
445 | Bucket: &bucketName,
146 | Region: &clientOptions.Region, // not used in the final URL string, but required by the resolver
447 | })
448 |
449 | if err != nil {
450 | return "", fmt.Errorf("error resolving endpoint: %w", err)
451 | }
452 |
453 | return fmt.Sprintf("%s/%s", url.URI.String(), remotePath), nil
454 | }
455 |
456 | getObjectInput := &s3.GetObjectInput{
457 | Bucket: aws.String(bucketName),
458 | Key: aws.String(remotePath),
459 | }
460 |
461 | if versionID != "" {
462 | getObjectInput.VersionId = aws.String(versionID)
463 | }
464 |
465 | presign := s3.NewPresignClient(client.client)
466 | request, err := presign.PresignGetObject(context.TODO(), getObjectInput, func(po *s3.PresignOptions) {
467 | po.Expires = 24 * time.Hour
468 | })
469 |
470 | if err != nil {
471 | return "", err
472 | }
473 |
474 | return request.URL, nil
475 | }
476 |
477 | func (client *s3client) DeleteVersionedFile(bucketName string, remotePath string, versionID string) error {
478 | _, err := client.client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
479 | Bucket: aws.String(bucketName),
480 | Key: aws.String(remotePath),
481 | VersionId: aws.String(versionID),
482 | })
483 |
484 | return err
485 | }
486 |
487 | func (client *s3client) DeleteFile(bucketName string, remotePath string) error {
488 | _, err := client.client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
489 | Bucket: aws.String(bucketName),
490 | Key: aws.String(remotePath),
491 | })
492 |
493 | return err
494 | }
495 |
496 | func (client *s3client) getBucketVersioning(bucketName string) (bool, error) {
497 | params := &s3.GetBucketVersioningInput{
498 | Bucket: aws.String(bucketName),
499 | }
500 |
501 | resp, err := client.client.GetBucketVersioning(context.TODO(), params)
502 | if err != nil {
503 | return false, err
504 | }
505 |
506 | return resp.Status == types.BucketVersioningStatusEnabled, nil
507 | }
508 |
509 | func (client *s3client) getVersionedBucketContents(bucketName string, prefix string) (map[string][]types.ObjectVersion, error) {
510 | versionedBucketContents := map[string][]types.ObjectVersion{}
511 | keyMarker := ""
512 | versionMarker := ""
513 | for {
514 |
515 | params := &s3.ListObjectVersionsInput{
516 | Bucket: aws.String(bucketName),
517 | Prefix: aws.String(prefix),
518 | }
519 |
520 | if keyMarker != "" {
521 | params.KeyMarker = aws.String(keyMarker)
522 | }
523 | if versionMarker != "" {
524 | params.VersionIdMarker = aws.String(versionMarker)
525 | }
526 |
527 | listObjectVersionsResponse, err := client.client.ListObjectVersions(context.TODO(), params)
528 | if err != nil {
529 | return versionedBucketContents, err
530 | }
531 |
532 | lastKey := ""
533 | lastVersionKey := ""
534 |
535 | for _, objectVersion := range listObjectVersionsResponse.Versions {
536 | versionedBucketContents[*objectVersion.Key] = append(versionedBucketContents[*objectVersion.Key], objectVersion)
537 |
538 | lastKey = *objectVersion.Key
539 | lastVersionKey = *objectVersion.VersionId
540 | }
541 |
542 | if *listObjectVersionsResponse.IsTruncated {
543 | keyMarker = *listObjectVersionsResponse.NextKeyMarker
544 | versionMarker = *listObjectVersionsResponse.NextVersionIdMarker
545 | if keyMarker == "" {
546 | // From the s3 docs: If response does not include the
547 | // NextMarker and it is truncated, you can use the value of the
548 | // last Key in the response as the marker in the subsequent
549 | // request to get the next set of object keys.
550 | keyMarker = lastKey
551 | }
552 |
553 | if versionMarker == "" {
554 | versionMarker = lastVersionKey
555 | }
556 | } else {
557 | break
558 | }
559 |
560 | }
561 |
562 | return versionedBucketContents, nil
563 | }
564 |
565 | func (client *s3client) newProgressBar(total int64) *mpb.Bar {
566 | pg := mpb.New(mpb.WithWidth(80), mpb.WithOutput(client.progressOutput), mpb.WithAutoRefresh())
567 | bar := pg.New(total, mpb.BarStyle(),
568 | mpb.PrependDecorators(
569 | decor.Counters(decor.SizeB1024(0), "% .2f / % .2f"),
570 | ),
571 | mpb.AppendDecorators(
572 | decor.NewPercentage("%d - "),
573 | decor.AverageSpeed(decor.SizeB1024(0), "% .2f"),
574 | ),
575 | )
576 | return bar
577 | }
578 |
579 | func (client *s3client) isGCSHost() bool {
580 | return (client.client.Options().BaseEndpoint != nil && strings.Contains(*client.client.Options().BaseEndpoint, "storage.googleapis.com"))
581 | }
582 |
--------------------------------------------------------------------------------
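The listing helpers above (`BucketFiles` and `ChunkedBucketList`) follow the standard ListObjectsV2 continuation-token pattern. The sketch below shows roughly how `NewAwsConfig` and that pagination loop fit together when driving the SDK directly; it is illustrative only, and the credentials, bucket, and prefix are placeholders rather than anything from this repository.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3resource "github.com/concourse/s3-resource"
)

func main() {
	// Placeholder static credentials and region (NewAwsConfig defaults the
	// region to us-east-1 when it is left empty).
	cfg, err := s3resource.NewAwsConfig("ACCESS_KEY", "SECRET_KEY", "", "", "us-east-1", false, false)
	if err != nil {
		panic(err)
	}

	client := s3.NewFromConfig(*cfg)

	// Same pagination shape as BucketFiles/ChunkedBucketList: keep feeding
	// NextContinuationToken back until IsTruncated is false.
	var token *string
	for {
		resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
			Bucket:            aws.String("example-bucket"), // placeholder
			Prefix:            aws.String("path/to/dir/"),   // placeholder
			Delimiter:         aws.String("/"),
			ContinuationToken: token,
		})
		if err != nil {
			panic(err)
		}
		for _, obj := range resp.Contents {
			fmt.Println(*obj.Key)
		}
		if !aws.ToBool(resp.IsTruncated) {
			break
		}
		token = resp.NextContinuationToken
	}
}
```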
/s3client_test.go:
--------------------------------------------------------------------------------
1 | package s3resource_test
2 |
3 | import (
4 | "context"
5 | "net/http"
6 |
7 | "github.com/aws/aws-sdk-go-v2/aws"
8 | s3resource "github.com/concourse/s3-resource"
9 | . "github.com/onsi/ginkgo/v2"
10 | . "github.com/onsi/gomega"
11 | )
12 |
13 | var _ = Describe("AWSConfig", func() {
14 | Context("There are static credentials", func() {
15 | It("uses the static credentials", func() {
16 | accessKey := "access-key"
17 | secretKey := "secret-key"
18 | sessionToken := "session-token"
19 | cfg, err := s3resource.NewAwsConfig(accessKey, secretKey, sessionToken, "", "", false, false)
20 | Expect(err).ToNot(HaveOccurred())
21 | Expect(cfg).ToNot(BeNil())
22 |
23 | creds, err := cfg.Credentials.Retrieve(context.TODO())
24 | Expect(err).ToNot(HaveOccurred())
25 | Expect(creds).ToNot(BeNil())
26 | Expect(creds.AccessKeyID).To(Equal(accessKey))
27 | Expect(creds.SecretAccessKey).To(Equal(secretKey))
28 | Expect(creds.SessionToken).To(Equal(sessionToken))
29 | })
30 | })
31 |
32 | Context("There are no static credentials or role to assume", func() {
33 | It("uses the anonymous credentials", func() {
34 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", false, false)
35 | Expect(err).ToNot(HaveOccurred())
36 | Expect(cfg).ToNot(BeNil())
37 | Expect(cfg.Credentials).ToNot(BeNil())
38 | Expect(cfg.Credentials).To(Equal(aws.NewCredentialsCache(aws.AnonymousCredentials{})))
39 | })
40 | })
41 |
42 | Context("Set to use the Aws Default Credential Provider", func() {
43 | It("uses the Aws Default Credential Provider", func() {
44 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", false, true)
45 | Expect(err).ToNot(HaveOccurred())
46 | Expect(cfg).ToNot(BeNil())
47 | Expect(cfg.Credentials).ToNot(BeNil())
48 | Expect(cfg.Credentials).ToNot(Equal(aws.NewCredentialsCache(aws.AnonymousCredentials{})))
49 | })
50 | })
51 |
52 | Context("default values", func() {
53 | It("sets RetryMaxAttempts", func() {
54 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", false, false)
55 | Expect(err).ToNot(HaveOccurred())
56 | Expect(cfg).ToNot(BeNil())
57 | Expect(cfg.RetryMaxAttempts).To(Equal(s3resource.MaxRetries))
58 | })
59 |
60 | It("sets region to us-east-1", func() {
61 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", false, false)
62 | Expect(err).ToNot(HaveOccurred())
63 | Expect(cfg).ToNot(BeNil())
64 | Expect(cfg.Region).To(Equal("us-east-1"))
65 | })
66 |
67 | It("uses the default http client", func() {
68 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", false, false)
69 | Expect(err).ToNot(HaveOccurred())
70 | Expect(cfg).ToNot(BeNil())
71 | Expect(cfg.HTTPClient).To(Equal(http.DefaultClient))
72 | })
73 | })
74 |
75 | Context("Region is specified", func() {
76 | It("sets the region", func() {
77 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "ca-central-1", false, false)
78 | Expect(err).ToNot(HaveOccurred())
79 | Expect(cfg).ToNot(BeNil())
80 | Expect(cfg.Region).To(Equal("ca-central-1"))
81 | })
82 | })
83 |
84 | Context("SSL verification is skipped", func() {
85 | It("creates a http client that skips SSL verification", func() {
86 | cfg, err := s3resource.NewAwsConfig("", "", "", "", "", true, false)
87 | Expect(err).ToNot(HaveOccurred())
88 | Expect(cfg).ToNot(BeNil())
89 |
90 | client, ok := cfg.HTTPClient.(*http.Client)
91 | Expect(ok).To(BeTrue())
92 | transport, ok := client.Transport.(*http.Transport)
93 | Expect(ok).To(BeTrue())
94 | Expect(transport.TLSClientConfig.InsecureSkipVerify).To(BeTrue())
95 | })
96 | })
97 | })
98 |
--------------------------------------------------------------------------------
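The AWSConfig specs above exercise `NewAwsConfig` on its own; the private branch of `URL` in s3client.go combines such a config with the SDK presign client. A minimal standalone sketch of that presign call, with a hypothetical bucket and key standing in for real values:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3resource "github.com/concourse/s3-resource"
)

func main() {
	// Placeholder credentials; replace with real values.
	cfg, err := s3resource.NewAwsConfig("ACCESS_KEY", "SECRET_KEY", "", "", "us-east-1", false, false)
	if err != nil {
		panic(err)
	}

	client := s3.NewFromConfig(*cfg)
	presigner := s3.NewPresignClient(client)

	// Same call shape as the private branch of URL(): a GET presigned for 24h.
	req, err := presigner.PresignGetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),   // placeholder
		Key:    aws.String("path/to/file.tgz"), // placeholder
	}, func(po *s3.PresignOptions) {
		po.Expires = 24 * time.Hour
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(req.URL)
}
```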
/scripts/test:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | not_installed() {
6 | ! command -v $1 > /dev/null 2>&1
7 | }
8 |
9 | s3_resource_dir=$(cd $(dirname $0)/.. && pwd)
10 |
11 | if not_installed ginkgo; then
12 | echo "# ginkgo is not installed! run the following command:"
13 | echo " go install github.com/onsi/ginkgo/ginkgo"
14 | exit 1
15 | fi
16 |
17 | cd $s3_resource_dir
18 |
19 | ginkgo -r -p --skip-package integration
20 |
--------------------------------------------------------------------------------
/suite_test.go:
--------------------------------------------------------------------------------
1 | package s3resource_test
2 |
3 | import (
4 | "testing"
5 |
6 | . "github.com/onsi/ginkgo/v2"
7 | . "github.com/onsi/gomega"
8 | )
9 |
10 | func TestResource(t *testing.T) {
11 | RegisterFailHandler(Fail)
12 | RunSpecs(t, "Resource Suite")
13 | }
14 |
--------------------------------------------------------------------------------
/utils.go:
--------------------------------------------------------------------------------
1 | package s3resource
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/mitchellh/colorstring"
8 | )
9 |
10 | func Fatal(doing string, err error) {
11 | Sayf(colorstring.Color("[red]error %s: %s\n"), doing, err)
12 | os.Exit(1)
13 | }
14 |
15 | func Sayf(message string, args ...any) {
16 | fmt.Fprintf(os.Stderr, message, args...)
17 | }
18 |
--------------------------------------------------------------------------------
/versions/versions.go:
--------------------------------------------------------------------------------
1 | package versions
2 |
3 | import (
4 | "regexp"
5 | "sort"
6 | "strings"
7 |
8 | s3resource "github.com/concourse/s3-resource"
9 | "github.com/cppforlife/go-semi-semantic/version"
10 | )
11 |
12 | func MatchUnanchored(paths []string, pattern string) ([]string, error) {
13 | matched := []string{}
14 |
15 | regex, err := regexp.Compile(pattern)
16 | if err != nil {
17 | return matched, err
18 | }
19 |
20 | for _, path := range paths {
21 | match := regex.MatchString(path)
22 |
23 | if match {
24 | matched = append(matched, path)
25 | }
26 | }
27 |
28 | return matched, nil
29 | }
30 |
31 | func Extract(path string, pattern string) (Extraction, bool) {
32 | compiled := regexp.MustCompile(pattern)
33 | matches := compiled.FindStringSubmatch(path)
34 |
35 | var match string
36 | if len(matches) < 2 { // whole string and match
37 | return Extraction{}, false
38 | } else if len(matches) == 2 {
39 | match = matches[1]
40 | } else if len(matches) > 2 { // many matches
41 | names := compiled.SubexpNames()
42 | index := sliceIndex(names, "version")
43 |
44 | if index > 0 {
45 | match = matches[index]
46 | } else {
47 | match = matches[1]
48 | }
49 | }
50 |
51 | ver, err := version.NewVersionFromString(match)
52 | if err != nil {
53 | panic("version number was not valid: " + err.Error())
54 | }
55 |
56 | extraction := Extraction{
57 | Path: path,
58 | Version: ver,
59 | VersionNumber: match,
60 | }
61 |
62 | return extraction, true
63 | }
64 |
65 | func sliceIndex(haystack []string, needle string) int {
66 | for i, element := range haystack {
67 | if element == needle {
68 | return i
69 | }
70 | }
71 |
72 | return -1
73 | }
74 |
75 | type Extractions []Extraction
76 |
77 | func (e Extractions) Len() int {
78 | return len(e)
79 | }
80 |
81 | func (e Extractions) Less(i int, j int) bool {
82 | return e[i].Version.IsLt(e[j].Version)
83 | }
84 |
85 | func (e Extractions) Swap(i int, j int) {
86 | e[i], e[j] = e[j], e[i]
87 | }
88 |
89 | type Extraction struct {
90 | // path to s3 object in bucket
91 | Path string
92 |
93 | // parsed version
94 | Version version.Version
95 |
96 | // the raw version match
97 | VersionNumber string
98 | }
99 |
100 | // GetMatchingPathsFromBucket gets all the paths in the S3 bucket `bucketName` which match all the sections of `regex`
101 | //
102 | // `regex` is a forward-slash (`/`) delimited list of regular expressions; each
103 | // section must match the corresponding sub-directory or file name for the path
104 | // to be retained.
105 | //
106 | // The function walks the file tree stored in the S3 bucket `bucketName` and
107 | // collects the full paths that match `regex` along the way. It takes care of
108 | // following only the branches (prefixes in S3 terms) that match the
109 | // corresponding section of `regex`.
110 | func GetMatchingPathsFromBucket(client s3resource.S3Client, bucketName string, regex string) ([]string, error) {
111 | type work struct {
112 | prefix string
113 | remains []string
114 | }
115 |
116 | specialCharsRE := regexp.MustCompile(`[\\\*\.\[\]\(\)\{\}\?\|\^\$\+]`)
117 |
118 | if strings.HasPrefix(regex, "^") {
119 | regex = regex[1:]
120 | }
121 | if strings.HasSuffix(regex, "$") {
122 | regex = regex[:len(regex)-1]
123 | }
124 |
125 | matchingPaths := []string{}
126 | queue := []work{{prefix: "", remains: strings.Split(regex, "/")}}
127 | for len(queue) != 0 {
128 | prefix := queue[0].prefix
129 | remains := queue[0].remains
130 | section := remains[0]
131 | remains = remains[1:]
132 | queue = queue[1:]
133 | if !specialCharsRE.MatchString(section) && len(remains) != 0 {
134 | // No special char, so the section can only match one literal string and we can just extend the prefix,
135 | // but only if something remains, i.e. the section is not a leaf.
136 | prefix += section + "/"
137 | queue = append(queue, work{prefix: prefix, remains: remains})
138 | continue
139 | }
140 | // Let's list what's under the current prefix and see if that matches with the section
141 | var prefixRE *regexp.Regexp
142 | if len(remains) != 0 {
143 | // We need to look deeper so full prefix will end with a /
144 | prefixRE = regexp.MustCompile("^" + prefix + section + "/$")
145 | } else {
146 | prefixRE = regexp.MustCompile("^" + prefix + section + "$")
147 | }
148 | var (
149 | continuationToken *string
150 | truncated bool
151 | )
152 | for continuationToken, truncated = nil, true; truncated; {
153 | s3ListChunk, err := client.ChunkedBucketList(bucketName, prefix, continuationToken)
154 | if err != nil {
155 | return []string{}, err
156 | }
157 | truncated = s3ListChunk.Truncated
158 | continuationToken = s3ListChunk.ContinuationToken
159 |
160 | if len(remains) != 0 {
161 | // We need to look deeper so full prefix will end with a /
162 | for _, commonPrefix := range s3ListChunk.CommonPrefixes {
163 | if prefixRE.MatchString(commonPrefix) {
164 | queue = append(queue, work{prefix: commonPrefix, remains: remains})
165 | }
166 | }
167 | } else {
168 | // We're looking for a leaf
169 | for _, path := range s3ListChunk.Paths {
170 | if prefixRE.MatchString(path) {
171 | matchingPaths = append(matchingPaths, path)
172 | }
173 | }
174 | }
175 | }
176 | }
177 | return matchingPaths, nil
178 | }
179 |
180 | func GetBucketFileVersions(client s3resource.S3Client, source s3resource.Source) Extractions {
181 | regex := source.Regexp
182 |
183 | matchingPaths, err := GetMatchingPathsFromBucket(client, source.Bucket, regex)
184 | if err != nil {
185 | s3resource.Fatal("listing files", err)
186 | }
187 |
188 | var extractions = make(Extractions, 0, len(matchingPaths))
189 | for _, path := range matchingPaths {
190 | extraction, ok := Extract(path, regex)
191 |
192 | if ok {
193 | extractions = append(extractions, extraction)
194 | }
195 | }
196 |
197 | sort.Sort(extractions)
198 |
199 | return extractions
200 | }
201 |
--------------------------------------------------------------------------------
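A small sketch of how `Extract` and the `Extractions` sort order are meant to be used together, with made-up object keys and a pattern that exercises the named `version` group (none of these names come from the repository):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/concourse/s3-resource/versions"
)

func main() {
	// Hypothetical object keys; the pattern uses a named "version" group, which
	// Extract prefers over the first capture group.
	paths := []string{
		"releases/app-1.2.0-linux.tgz",
		"releases/app-1.10.0-linux.tgz",
		"releases/app-1.3.0-linux.tgz",
	}
	pattern := `releases/app-(?P<version>.*)-linux\.tgz`

	var extractions versions.Extractions
	for _, path := range paths {
		if extraction, ok := versions.Extract(path, pattern); ok {
			extractions = append(extractions, extraction)
		}
	}

	// Extractions sorts by the parsed version, so 1.10.0 lands after 1.3.0
	// instead of being compared lexically.
	sort.Sort(extractions)

	for _, e := range extractions {
		fmt.Println(e.Path, e.VersionNumber)
	}
}
```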
/versions/versions_suite_test.go:
--------------------------------------------------------------------------------
1 | package versions_test
2 |
3 | import (
4 | . "github.com/onsi/ginkgo/v2"
5 | . "github.com/onsi/gomega"
6 |
7 | "testing"
8 | )
9 |
10 | func TestVersions(t *testing.T) {
11 | RegisterFailHandler(Fail)
12 | RunSpecs(t, "Versions Suite")
13 | }
14 |
--------------------------------------------------------------------------------
/versions/versions_test.go:
--------------------------------------------------------------------------------
1 | package versions_test
2 |
3 | import (
4 | "errors"
5 |
6 | s3resource "github.com/concourse/s3-resource"
7 | "github.com/concourse/s3-resource/fakes"
8 | "github.com/concourse/s3-resource/versions"
9 | . "github.com/onsi/ginkgo/v2"
10 | . "github.com/onsi/gomega"
11 | )
12 |
13 | type MatchFunc func(paths []string, pattern string) ([]string, error)
14 |
15 | var ItMatchesPaths = func(matchFunc MatchFunc) {
16 | Describe("checking if paths in the bucket should be searched", func() {
17 | Context("when given an empty list of paths", func() {
18 | It("returns an empty list of matches", func() {
19 | result, err := matchFunc([]string{}, "regex")
20 |
21 | Ω(err).ShouldNot(HaveOccurred())
22 | Ω(result).Should(BeEmpty())
23 | })
24 | })
25 |
26 | Context("when given a single path", func() {
27 | It("returns it in a singleton list if it matches the regex", func() {
28 | paths := []string{"abc"}
29 | regex := "abc"
30 |
31 | result, err := matchFunc(paths, regex)
32 | Ω(err).ShouldNot(HaveOccurred())
33 | Ω(result).Should(ConsistOf("abc"))
34 | })
35 |
36 | It("returns an empty list if it does not match the regexp", func() {
37 | paths := []string{"abc"}
38 | regex := "ad"
39 |
40 | result, err := matchFunc(paths, regex)
41 | Ω(err).ShouldNot(HaveOccurred())
42 | Ω(result).Should(BeEmpty())
43 | })
44 |
45 | It("accepts full regexes", func() {
46 | paths := []string{"abc"}
47 | regex := "a.*c"
48 |
49 | result, err := matchFunc(paths, regex)
50 | Ω(err).ShouldNot(HaveOccurred())
51 | Ω(result).Should(ConsistOf("abc"))
52 | })
53 |
54 | It("errors when the regex is bad", func() {
55 | paths := []string{"abc"}
56 | regex := "a(c"
57 |
58 | _, err := matchFunc(paths, regex)
59 | Ω(err).Should(HaveOccurred())
60 | })
61 | })
62 |
63 | Context("when given a multiple paths", func() {
64 | It("returns the matches", func() {
65 | paths := []string{"abc", "bcd"}
66 | regex := ".*bc.*"
67 |
68 | result, err := matchFunc(paths, regex)
69 | Ω(err).ShouldNot(HaveOccurred())
70 | Ω(result).Should(ConsistOf("abc", "bcd"))
71 | })
72 |
73 | It("returns an empty list if none match the regexp", func() {
74 | paths := []string{"abc", "def"}
75 | regex := "ge.*h"
76 |
77 | result, err := matchFunc(paths, regex)
78 | Ω(err).ShouldNot(HaveOccurred())
79 | Ω(result).Should(BeEmpty())
80 | })
81 | })
82 | })
83 | }
84 |
85 | var _ = Describe("MatchUnanchored", func() {
86 | ItMatchesPaths(versions.MatchUnanchored)
87 | })
88 |
89 | var _ = Describe("Extract", func() {
90 | Context("when the path does not contain extractable information", func() {
91 | It("doesn't extract it", func() {
92 | result, ok := versions.Extract("abc.tgz", "abc-(.*).tgz")
93 | Ω(ok).Should(BeFalse())
94 | Ω(result).Should(BeZero())
95 | })
96 | })
97 |
98 | Context("when the path contains extractable information", func() {
99 | It("extracts it", func() {
100 | result, ok := versions.Extract("abc-105.tgz", "abc-(.*).tgz")
101 | Ω(ok).Should(BeTrue())
102 |
103 | Ω(result.Path).Should(Equal("abc-105.tgz"))
104 | Ω(result.Version.String()).Should(Equal("105"))
105 | Ω(result.VersionNumber).Should(Equal("105"))
106 | })
107 |
108 | It("extracts semantic version numbers", func() {
109 | result, ok := versions.Extract("abc-1.0.5.tgz", "abc-(.*).tgz")
110 | Ω(ok).Should(BeTrue())
111 |
112 | Ω(result.Path).Should(Equal("abc-1.0.5.tgz"))
113 | Ω(result.Version.String()).Should(Equal("1.0.5"))
114 | Ω(result.VersionNumber).Should(Equal("1.0.5"))
115 | })
116 |
117 | It("extracts versions with more than 3 segments", func() {
118 | result, ok := versions.Extract("abc-1.0.6.1-rc7.tgz", "abc-(.*).tgz")
119 | Ω(ok).Should(BeTrue())
120 |
121 | Ω(result.VersionNumber).Should(Equal("1.0.6.1-rc7"))
122 | Ω(result.Version.String()).Should(Equal("1.0.6.1-rc7"))
123 | })
124 |
125 | It("takes the first match if there are many", func() {
126 | result, ok := versions.Extract("abc-1.0.5-def-2.3.4.tgz", "abc-(.*)-def-(.*).tgz")
127 | Ω(ok).Should(BeTrue())
128 |
129 | Ω(result.Path).Should(Equal("abc-1.0.5-def-2.3.4.tgz"))
130 | Ω(result.Version.String()).Should(Equal("1.0.5"))
131 | Ω(result.VersionNumber).Should(Equal("1.0.5"))
132 | })
133 |
134 | It("extracts a named group called 'version' above all others", func() {
135 | result, ok := versions.Extract("abc-1.0.5-def-2.3.4.tgz", "abc-(.*)-def-(?P<version>.*).tgz")
136 | Ω(ok).Should(BeTrue())
137 |
138 | Ω(result.Path).Should(Equal("abc-1.0.5-def-2.3.4.tgz"))
139 | Ω(result.Version.String()).Should(Equal("2.3.4"))
140 | Ω(result.VersionNumber).Should(Equal("2.3.4"))
141 | })
142 | })
143 | })
144 |
145 | var _ = Describe("GetMatchingPathsFromBucket", func() {
146 | var s3client *fakes.FakeS3Client
147 |
148 | BeforeEach(func() {
149 | s3client = &fakes.FakeS3Client{}
150 | })
151 |
152 | Context("When the regexp has no '/'", func() {
153 | Context("when the regexp has no special char", func() {
154 | It("uses only the empty string as prefix", func() {
155 | versions.GetMatchingPathsFromBucket(s3client, "bucket", "regexp")
156 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
157 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(0)
158 | Ω(prefix).Should(Equal(""))
159 | })
160 | })
161 | Context("when the regexp has a special char", func() {
162 | It("uses only the empty string as prefix", func() {
163 | versions.GetMatchingPathsFromBucket(s3client, "bucket", "reg.xp")
164 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
165 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(0)
166 | Ω(prefix).Should(Equal(""))
167 | })
168 | })
169 | })
170 |
171 | Context("When regexp special char appears close to the leaves", func() {
172 | It("starts directly with the longest prefix", func() {
173 | versions.GetMatchingPathsFromBucket(
174 | s3client, "bucket", "regexp/will/appear/only/close/tw?o+/leaves",
175 | )
176 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
177 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(0)
178 | Ω(prefix).Should(Equal("regexp/will/appear/only/close/"))
179 | })
180 |
181 | It("follows only the matching prefixes", func() {
182 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
183 | CommonPrefixes: []string{
184 | "regexp/will/appear/only/close/from/",
185 | "regexp/will/appear/only/close/to/",
186 | "regexp/will/appear/only/close/too/",
187 | "regexp/will/appear/only/close/two/",
188 | },
189 | }, nil)
190 | s3client.ChunkedBucketListReturnsOnCall(1, s3resource.BucketListChunk{
191 | Paths: []string{
192 | "regexp/will/appear/only/close/to/the-end",
193 | "regexp/will/appear/only/close/to/leaves",
194 | },
195 | }, nil)
196 | s3client.ChunkedBucketListReturnsOnCall(2, s3resource.BucketListChunk{
197 | CommonPrefixes: []string{"regexp/will/appear/only/close/too/late/"},
198 | Paths: []string{"regexp/will/appear/only/close/too/soon"},
199 | }, nil)
200 | s3client.ChunkedBucketListReturnsOnCall(3, s3resource.BucketListChunk{
201 | Paths: []string{
202 | "regexp/will/appear/only/close/two/three",
203 | },
204 | }, nil)
205 |
206 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
207 | s3client, "bucket", "regexp/will/appear/only/close/tw?o+/leaves",
208 | )
209 | Ω(err).ShouldNot(HaveOccurred())
210 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(4))
211 | for idx, expectedPrefix := range []string{
212 | "regexp/will/appear/only/close/",
213 | "regexp/will/appear/only/close/to/",
214 | "regexp/will/appear/only/close/too/",
215 | "regexp/will/appear/only/close/two/",
216 | } {
217 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(idx)
218 | Ω(prefix).Should(Equal(expectedPrefix))
219 | }
220 | Ω(matchingPaths).Should(ConsistOf("regexp/will/appear/only/close/to/leaves"))
221 | })
222 | })
223 |
224 | Context("When there are too many leaves for a single request", func() {
225 | It("continues requesting more", func() {
226 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
227 | Truncated: true,
228 | Paths: []string{
229 | "prefix/leaf-0",
230 | "prefix/leaf-1",
231 | },
232 | }, nil)
233 | s3client.ChunkedBucketListReturnsOnCall(1, s3resource.BucketListChunk{
234 | Truncated: false,
235 | Paths: []string{
236 | "prefix/leaf-2",
237 | "prefix/leaf-3",
238 | },
239 | }, nil)
240 |
241 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
242 | s3client, "bucket", "prefix/leaf-(.*)",
243 | )
244 | Ω(err).ShouldNot(HaveOccurred())
245 | Ω(matchingPaths).Should(ConsistOf(
246 | "prefix/leaf-0",
247 | "prefix/leaf-1",
248 | "prefix/leaf-2",
249 | "prefix/leaf-3",
250 | ))
251 | })
252 | })
253 |
254 | Context("When there are too many prefixes for a single request", func() {
255 | It("continues requesting more", func() {
256 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
257 | Truncated: true,
258 | CommonPrefixes: []string{
259 | "prefix-0/",
260 | "prefix-1/",
261 | },
262 | }, nil)
263 | s3client.ChunkedBucketListReturnsOnCall(1, s3resource.BucketListChunk{
264 | Truncated: false,
265 | CommonPrefixes: []string{
266 | "prefix-2/",
267 | "prefix-3/",
268 | },
269 | }, nil)
270 | s3client.ChunkedBucketListReturnsOnCall(2, s3resource.BucketListChunk{
271 | Paths: []string{"prefix-0/leaf-0"},
272 | }, nil)
273 | s3client.ChunkedBucketListReturnsOnCall(3, s3resource.BucketListChunk{
274 | Paths: []string{"prefix-1/leaf-1"},
275 | }, nil)
276 | s3client.ChunkedBucketListReturnsOnCall(4, s3resource.BucketListChunk{
277 | Paths: []string{"prefix-2/leaf-2"},
278 | }, nil)
279 | s3client.ChunkedBucketListReturnsOnCall(5, s3resource.BucketListChunk{
280 | Paths: []string{"prefix-3/leaf-3"},
281 | }, nil)
282 |
283 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
284 | s3client, "bucket", "prefix-\\d+/leaf-(.*)",
285 | )
286 | Ω(err).ShouldNot(HaveOccurred())
287 | Ω(matchingPaths).Should(ConsistOf(
288 | "prefix-0/leaf-0",
289 | "prefix-1/leaf-1",
290 | "prefix-2/leaf-2",
291 | "prefix-3/leaf-3",
292 | ))
293 | })
294 | })
295 |
296 | Context("When regexp is not anchored explicitly and has no prefix", func() {
297 | It("will behave as if anchored at both ends", func() {
298 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
299 | Paths: []string{
300 | "substring",
301 | "also-substring",
302 | "subscribing",
303 | "substring.suffix",
304 | },
305 | }, nil)
306 |
307 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
308 | s3client, "bucket", "sub(.*)ing",
309 | )
310 | Ω(err).ShouldNot(HaveOccurred())
311 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
312 | Ω(matchingPaths).Should(ConsistOf("substring", "subscribing"))
313 | })
314 | })
315 |
316 | Context("When regexp is not anchored explicitly and has prefix", func() {
317 | It("will behave as if anchored at both ends", func() {
318 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
319 | Paths: []string{
320 | "pre/ssing",
321 | "pre/singer",
322 | },
323 | }, nil)
324 |
325 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
326 | s3client, "bucket", "pre/(.*)ing",
327 | )
328 | Ω(err).ShouldNot(HaveOccurred())
329 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
330 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(0)
331 | Ω(prefix).Should(Equal("pre/"))
332 | Ω(matchingPaths).Should(ConsistOf("pre/ssing"))
333 | })
334 | })
335 |
336 | Context("When regexp is anchored explicitly and has not prefix", func() {
337 | It("still works", func() {
338 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
339 | Paths: []string{
340 | "substring",
341 | "also-substring",
342 | "subscribing",
343 | "substring.suffix",
344 | },
345 | }, nil)
346 |
347 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
348 | s3client, "bucket", "^sub(.*)ing$",
349 | )
350 | Ω(err).ShouldNot(HaveOccurred())
351 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
352 | Ω(matchingPaths).Should(ConsistOf("substring", "subscribing"))
353 | })
354 | })
355 |
356 | Context("When regexp is anchored explicitly and has prefix", func() {
357 | It("still works", func() {
358 | s3client.ChunkedBucketListReturnsOnCall(0, s3resource.BucketListChunk{
359 | Paths: []string{
360 | "pre/ssing",
361 | "pre/singer",
362 | },
363 | }, nil)
364 |
365 | matchingPaths, err := versions.GetMatchingPathsFromBucket(
366 | s3client, "bucket", "^pre/(.*)ing$",
367 | )
368 | Ω(err).ShouldNot(HaveOccurred())
369 | Ω(s3client.ChunkedBucketListCallCount()).Should(Equal(1))
370 | _, prefix, _ := s3client.ChunkedBucketListArgsForCall(0)
371 | Ω(prefix).Should(Equal("pre/"))
372 | Ω(matchingPaths).Should(ConsistOf("pre/ssing"))
373 | })
374 | })
375 |
376 | Context("When S3 returns an error", func() {
377 | BeforeEach(func() {
378 | s3client.ChunkedBucketListReturns(
379 | s3resource.BucketListChunk{},
380 | errors.New("S3 failure"),
381 | )
382 | })
383 | It("fails", func() {
384 | _, err := versions.GetMatchingPathsFromBucket(s3client, "bucket", "dummy")
385 | Ω(err).Should(HaveOccurred())
386 | })
387 | })
388 | })
389 |
--------------------------------------------------------------------------------