├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── awsS3.nimble ├── config.nims ├── src ├── awsS3.nim └── awsS3 │ ├── api.nim │ ├── multipart.nim │ ├── multipart │ ├── api │ │ ├── abortMultipartUpload.nim │ │ ├── api.nim │ │ ├── completeMultipartUpload.nim │ │ ├── createMultipartUpload.nim │ │ ├── listMultipartUploads.nim │ │ ├── listParts.nim │ │ ├── uploadPart.nim │ │ ├── utils.nim │ │ └── xml2Json.nim │ ├── models │ │ ├── abortMultipartUpload.nim │ │ ├── common.nim │ │ ├── completeMultipartUpload.nim │ │ ├── createMultipartUpload.nim │ │ ├── listMultipartUploads.nim │ │ ├── listParts.nim │ │ ├── models.nim │ │ ├── multipartUpload.nim │ │ ├── part.nim │ │ ├── response.nim │ │ └── uploadPart.nim │ └── signedv2.nim │ ├── signed.nim │ ├── utils_async.nim │ └── utils_sync.nim └── tests ├── config.nims ├── multipart ├── example.env ├── test_multipartupload_utils.nim └── test_multipartupload_xml2json.nim └── tests1.nim /.gitignore: -------------------------------------------------------------------------------- 1 | # test files 2 | testFile.bin 3 | tester* 4 | tester_*.nim 5 | test3.jpg -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # v3.2.0 2 | 3 | ## Changes 4 | 5 | * Started deprecation of s3Presigned. Move to s3SignedUrl. 6 | * Implemented own `makeDateTime()` instead of AwsSigV4's `makeDateTime()` due 7 | valgrind memory leak. (optional, you can still use AwsSigV4's `makeDateTime()`) 8 | 9 | **Valgrind work around:** 10 | ```nim 11 | var datetime: string 12 | block: 13 | let now = getTime() 14 | let date = now.utc.format(basicISO8601) 15 | datetime = $date 16 | ``` 17 | 18 | ## Breaking changes 19 | 20 | * Upgraded lib dep for awsSTS to `>= 2.0.3` 21 | 22 | 23 | 24 | # v3.0.2 25 | 26 | ## Changes 27 | 28 | * Implementation of synchronous procedures 29 | 30 | ## Breaking changes 31 | 32 | * Refactor folder structure. 
Split multipart upload into separate area. 33 | * To use multipart upload `import awsS3/multipart` 34 | * `s3CopyObjectIs2xx` has wrong formatting - required client-param but also 35 | initialized a client. 36 | * All suger procedures, e.g. `is2xx`, has been splitted into separate files 37 | for async and sync. To use them `import awsS3/utils_async` or 38 | `import awsS3/utils_sync` 39 | 40 | 41 | 42 | # v3.0.0 43 | 44 | ## Changes 45 | 46 | * Implement support for multipart upload and friends. 47 | 48 | 49 | # v2.0.1 50 | 51 | ## Changes 52 | 53 | * `try-except` within `moveObjects` on `copyObjects` to prevent error. This is temporary until the `copyObject` proc is fixed according to inline comment. 54 | 55 | 56 | # v2.0.0 57 | 58 | ## Breaking changes 59 | 60 | API for s3Presigned* and s3SignedUrl* is changed. If you are using the param 61 | `contentName: string` in the s3Presigned* and s3SignedUrl* functions, you need 62 | to update your code to use the new API. 63 | 64 | * `contentName` is now `contentDispositionName` 65 | * Content-Disposition is not automatically set to `attachment` if Content- 66 | Disposition-Name is set. It has to be set manually with `contentDisposition`. 
67 | * You can set Constent-Disposition type and name independently with 68 | `contentDisposition` and `contentDispositionName` 69 | 70 | **Old**: 71 | ```nim 72 | echo s3Presigned(creds, bucketHost = bucketHostPer, key = "12/files/s3RDRB6II4i9pbswsVppmAreU24nmP1n.pdf", contentName="Filename XX", setContentType=true, fileExt=".pdf", expireInSec="432000") 73 | ``` 74 | 75 | **New**: 76 | ```nim 77 | echo s3Presigned(awsCreds, bucketHost = bucketHostPer, key = "12/files/s3RDRB6II4i9pbswsVppmAreU24nmP1n.pdf", contentDisposition = CDTattachment, contentDispositionName="Filename XX", setContentType=true, fileExt=".pdf", expireInSec="432000") 78 | 79 | # OR 80 | 81 | echo s3Presigned(awsKey, awsSecret, awsRegion, bucketHost = bucketHostPer, key = "12/files/s3RDRB6II4i9pbswsVppmAreU24nmP1n.pdf", contentDisposition = CDTattachment, contentDispositionName="Filename XX", setContentType=true, fileExt=".pdf", expireInSec="432000", accessToken = awsToken) 82 | ``` 83 | 84 | 85 | ## Changes 86 | 87 | * Private s3SignedUrl is not exposed directly. 88 | * Content-Disposition type is set by `contentDisposition* = enum`. 89 | * Content-Disposition name is now included in quotes to allow special characters. 90 | * Both s3Presigned and s3SignedUrl can be called with credentials as string 91 | instead of using AwsCreds. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Thomas T. 
Jarløv 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # awsS3 2 | Amazon Simple Storage Service (AWS S3) basic API support. 3 | 4 | If you need more API's then take a look at [atoz](https://github.com/disruptek/atoz). 5 | 6 | 7 | ## Procedures 8 | 9 | The core AWS commands has two procedures - one is the raw request returning 10 | the response, the other one is a sugar returning a `assert [is is2xx] == true`. 11 | 12 | The raw request commands can be chained where the `client` can be reused, 13 | e.g. the `move to trash`, which consists of a `copyObject` and a `deleteObject`. 14 | 15 | All requests are performed async. 16 | 17 | 18 | Limitations: 19 | Spaces in `keys` is not supported. 
20 | 21 | 22 | ## TODO: 23 | - all `bucketHost` should be `bucketName`, and when needed as a host, the 24 | region (host) should be appended within here. In that way we would only 25 | need to pass `bucketName` (shortform) around. 26 | 27 | 28 | # Example 29 | 30 | ```nim 31 | import 32 | std/asyncdispatch, 33 | std/httpclient, 34 | std/os 35 | 36 | import 37 | awsS3, 38 | awsS3/utils_async, 39 | awsSTS 40 | 41 | const 42 | bucketHost = "my-bucket.s3-eu-west-1.amazonaws.com" 43 | bucketName = "my-bucket" 44 | serverRegion = "eu-west-1" 45 | myAccessKey = "AKIAEXAMPLE" 46 | mySecretKey = "J98765RFGBNYT4567EXAMPLE" 47 | role = "arn:aws:iam::2345676543:role/Role-S3-yeah" 48 | s3File = "test/test.jpg" 49 | s3MoveTo = "test2/test.jpg" 50 | localTestFile = "/home/username/download/myimage.jpg" 51 | downloadTo = "/home/username/git/nim_awsS3/test3.jpg" 52 | 53 | 54 | ## Get creds with awsSTS package 55 | let creds = awsSTScreate(myAccessKey, mySecretKey, serverRegion, role) 56 | 57 | ## 1) Create test file 58 | writeFile(localTestFile, "blabla") 59 | 60 | ## 2) Put object 61 | echo waitFor s3PutObjectIs2xx(creds, bucketHost, s3File, localTestFile) 62 | 63 | ## 3) Move object 64 | waitFor s3MoveObject(creds, bucketHost, s3MoveTo, bucketHost, bucketName, s3File) 65 | 66 | ## 4) Get content-length 67 | var client = newAsyncHttpClient() 68 | let m1 = waitFor s3HeadObject(client, creds, bucketHost, s3MoveTo) 69 | echo m1.headers["content-length"] 70 | 71 | ## 5) Get object 72 | echo waitFor s3GetObjectIs2xx(creds, bucketHost, s3MoveTo, downloadTo) 73 | echo fileExists(downloadTo) 74 | 75 | ## 6) Delete object 76 | echo waitFor s3DeleteObjectIs2xx(creds, bucketHost, s3MoveTo) 77 | ``` 78 | 79 | 80 | 81 | # Procs 82 | 83 | ## s3Creds* 84 | 85 | ```nim 86 | proc s3Creds*(accessKey, secretKey, tokenKey, region: string): AwsCreds = 87 | ``` 88 | 89 | This uses the nimble package `awsSTS` to store the credentials. 
90 | 91 | 92 | ____ 93 | 94 | ## S3 Signed URL 95 | 96 | Generate S3 presigned URL's. 97 | 98 | ### API {.deprecated.} 99 | 100 | ~~This is the standard public API.~~ 101 | 102 | Use the `s3SignedUrl` instead. 103 | 104 | ```nim 105 | proc s3Presigned*(accessKey, secretKey, region: string, bucketHost, key: string, 106 | httpMethod = HttpGet, 107 | contentDisposition = CDTattachment, contentDispositionName = "", 108 | setContentType = true, fileExt = "", expireInSec = "65", accessToken = "" 109 | ): string {.deprecated.} = 110 | ``` 111 | 112 | ```nim 113 | proc s3Presigned*(creds: AwsCreds, bucketHost, key: string, 114 | contentDisposition = CDTattachment, contentDispositionName = "", 115 | setContentType = true, fileExt = "", expireInSec = "65" 116 | ): string {.deprecated.} = 117 | ``` 118 | 119 | ### Raw 120 | 121 | This exposes the internal API. It has been made public for users to skip the `s3Presigned*`. 122 | 123 | ```nim 124 | proc s3SignedUrl*( 125 | credsAccessKey, credsSecretKey, credsRegion: string, 126 | bucketHost, key: string, 127 | httpMethod = HttpGet, 128 | contentDisposition = CDTignore, contentDispositionName = "", 129 | setContentType = true, 130 | fileExt = "", customQuery = "", copyObject = "", expireInSec = "65", 131 | accessToken = "" 132 | ): string = 133 | 134 | ## customQuery: 135 | ## This is a custom defined header query. The string needs to include the format 136 | ## "head1:value,head2:value" - a comma separated string with header and 137 | ## value diveded by colon. 138 | ## 139 | ## copyObject: 140 | ## Attach copyObject to headers 141 | ``` 142 | 143 | ### Details 144 | Generates a S3 presigned url for sharing. 
145 | 146 | ``` 147 | contentDisposition => sets "Content-Disposition" type (inline/attachment) 148 | contentDispositionName => sets "Content-Disposition" name 149 | setContentType => sets "response-content-type" 150 | fileExt => only if setContentType=true 151 | if `fileExt = ""` then mimetype is automated 152 | needs to be ".jpg" (dot before) like splitFile(f).ext 153 | ``` 154 | 155 | 156 | ### Content-Disposition type 157 | 158 | ```nim 159 | type 160 | contentDisposition* = enum 161 | CDTinline # Content-Disposition: inline 162 | CDTattachment # Content-Disposition: attachment 163 | CDTignore 164 | ``` 165 | 166 | 167 | ____ 168 | 169 | ## parseReponse* 170 | 171 | ```nim 172 | proc parseReponse*(response: AsyncResponse): (bool, HttpHeaders) = 173 | ``` 174 | 175 | Helper-Procedure that can be used to return true on success and the response headers. 176 | 177 | 178 | ____ 179 | 180 | ## isSuccess2xx* 181 | 182 | **[utils package - async & sync]** 183 | 184 | ```nim 185 | proc isSuccess2xx*(response: AsyncResponse): (bool) = 186 | ``` 187 | 188 | Helper-Procedure that can be used with the raw call for parsing the response. 
189 | 190 | 191 | ____ 192 | 193 | ## s3DeleteObject 194 | 195 | ```nim 196 | proc s3DeleteObject(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key: string): Future[AsyncResponse] {.async.} = 197 | ``` 198 | 199 | AWS S3 API - DeleteObject 200 | 201 | 202 | ____ 203 | 204 | ## s3DeleteObjectIs2xx* 205 | 206 | **[utils package - async & sync]** 207 | 208 | ```nim 209 | proc s3DeleteObjectIs2xx*(creds: AwsCreds, bucketHost, key: string): Future[bool] {.async.} = 210 | ``` 211 | 212 | AWS S3 API - DeleteObject bool 213 | 214 | 215 | ____ 216 | 217 | ## s3HeadObject* 218 | 219 | ```nim 220 | proc s3HeadObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key: string): Future[AsyncResponse] {.async.} = 221 | ``` 222 | 223 | AWS S3 API - HeadObject 224 | 225 | Response: - result.headers["content-length"] 226 | 227 | 228 | ____ 229 | 230 | ## s3HeadObjectIs2xx* 231 | 232 | **[utils package - async & sync]** 233 | 234 | ```nim 235 | proc s3HeadObjectIs2xx*(creds: AwsCreds, bucketHost, key: string): Future[bool] {.async.} = 236 | ``` 237 | 238 | AWS S3 API - HeadObject bool 239 | 240 | AWS S3 API - HeadObject is2xx is only checking the existing of the file. If the data is needed, then use the raw `s3HeadObject` procedure and parse the response. 241 | 242 | 243 | ____ 244 | 245 | ## s3GetObject* 246 | 247 | ```nim 248 | proc s3GetObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, downloadPath: string) {.async.} = 249 | ``` 250 | 251 | AWS S3 API - GetObject 252 | 253 | `downloadPath` needs to full local path. 254 | 255 | 256 | ____ 257 | 258 | ## s3GetObjectIs2xx* 259 | 260 | **[utils package - async & sync]** 261 | 262 | ```nim 263 | proc s3GetObjectIs2xx*(creds: AwsCreds, bucketHost, key, downloadPath: string): Future[bool] {.async.} = 264 | ``` 265 | 266 | AWS S3 API - GetObject bool 267 | 268 | AWS S3 API - GetObject is2xx returns true on downloaded file. 269 | 270 | `downloadPath` needs to full local path. 
271 | 272 | 273 | ____ 274 | 275 | ## s3PutObject* 276 | 277 | ```nim 278 | proc s3PutObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, localPath: string): Future[AsyncResponse] {.async.} = 279 | ``` 280 | 281 | AWS S3 API - PutObject 282 | 283 | The PutObject reads the file to memory and uploads it. 284 | 285 | 286 | ____ 287 | 288 | ## s3PutObjectIs2xx* 289 | 290 | **[utils package - async & sync]** 291 | 292 | ```nim 293 | proc s3PutObjectIs2xx*(creds: AwsCreds, bucketHost, key, localPath: string, deleteLocalFileAfter=true): Future[bool] {.async.} = 294 | ``` 295 | 296 | AWS S3 API - PutObject bool 297 | 298 | This performs a PUT and uploads the file. The `localPath` param needs to be the full path. 299 | 300 | The PutObject reads the file to memory and uploads it. 301 | 302 | ____ 303 | 304 | ## s3CopyObject* 305 | 306 | ```nim 307 | proc s3CopyObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, copyObject: string): Future[AsyncResponse] {.async.} = 308 | ``` 309 | 310 | AWS S3 API - CopyObject 311 | 312 | The copyObject param is the full path to the copy source, this means both the bucket and file, e.g.: 313 | ``` 314 | - /bucket-name/folder1/folder2/s3C3FiLXRsPXeE9TUjZGEP3RYvczCFYg.jpg 315 | - /[BUCKET]/[KEY] 316 | ``` 317 | 318 | **TODO:** 319 | Implement error checker. An error occured during `copyObject` can return a 200-response. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. 
(https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) 320 | 321 | 322 | ____ 323 | 324 | ## s3CopyObjectIs2xx* 325 | 326 | **[utils package - async & sync]** 327 | 328 | ```nim 329 | proc s3CopyObjectIs2xx*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, copyObject: string): Future[bool] {.async.} = 330 | ``` 331 | 332 | AWS S3 API - CopyObject bool 333 | 334 | 335 | ____ 336 | 337 | ## s3MoveObject* 338 | 339 | ```nim 340 | proc s3MoveObject*(creds: AwsCreds, bucketToHost, keyTo, bucketFromHost, bucketFromName, keyFrom: string) {.async.} = 341 | ``` 342 | 343 | This does a pseudo move of an object. We copy the object to the destination and then we delete the object from the original location. 344 | 345 | ``` 346 | bucketToHost => Destination bucket host 347 | keyTo => 12/files/file.jpg 348 | bucketFromHost => Origin bucket host 349 | bucketFromName => Origin bucket name 350 | keyFrom => 24/files/old.jpg 351 | ``` 352 | 353 | 354 | ____ 355 | 356 | ## s3MoveObjects* 357 | 358 | **[utils package - async & sync]** 359 | 360 | ```nim 361 | proc s3MoveObjects*(creds: AwsCreds, bucketHost, bucketFromHost, bucketFromName: string, keys: seq[string], waitValidate = 0, waitDelete = 0) {.async.} = 362 | ``` 363 | 364 | In this (plural) multiple moves are performed. The keys are identical in "from" and "to", so origin and destination are the same. 365 | 366 | The `waitValidate` and `waitDelete` are used to wait between the validation if the file exists and delete operation. 367 | 368 | 369 | ____ 370 | 371 | ## s3TrashObject* 372 | 373 | **[utils package - async & sync]** 374 | 375 | ```nim 376 | proc s3TrashObject*(creds: AwsCreds, bucketTrashHost, bucketFromHost, bucketFromName, keyFrom: string) {.async.} = 377 | ``` 378 | 379 | This does a pseudo move of an object. We copy the object to the destination and then we delete the object from the original location. The destination in this particular situation - is our trash. 
380 | 381 | ____ 382 | 383 | ## s3TrashObjects* 384 | 385 | **[utils package - async & sync]** 386 | 387 | ```nim 388 | proc s3TrashObjects*(creds: AwsCreds, bucketTrashHost, bucketFromHost, bucketFromName, keyFrom: seq[string], waitValidate = 0, waitDelete = 0) {.async.} = 389 | ``` 390 | 391 | This does a pseudo move of an object. We copy the object to the destination and then we delete the object from the original location. The destination in this particular situation - is our trash. 392 | 393 | 394 | 395 | 396 | 397 | ______ 398 | 399 | 400 | # S3 Multipart uploads 401 | 402 | To use multipart import it directly and compile with `-d:s3multipart`: 403 | 404 | ```nim 405 | import awsS3/multipart 406 | ``` 407 | 408 | The upload part in ```src/multipart/api/uploadPart.nim``` contains a full example of 409 | - abortMultipartUpload 410 | - listMultipartUpload 411 | - listParts 412 | - completeMultipartUpload 413 | - createMultipartUpload 414 | 415 | ## Quick test 416 | 417 | The multipart files contains `when isMainModule` which can be used to test the upload 418 | procedures. 
419 | 420 | To test the full upload procedure: Create a file called testFile.bin with 421 | +10MB of data, copy `example.env` to `.env`, run `nimble install dotenv` 422 | and then run the following command: 423 | 424 | ```nim 425 | nim c -d:dev -r src/multipart/api/uploadPart.nim 426 | ``` 427 | ____ 428 | 429 | ## abordMultipartUpload 430 | 431 | ```nim 432 | let abortMultipartUploadRequest = AbortMultipartUploadRequest( 433 | bucket: bucket, 434 | key: upload.key, 435 | uploadId: upload.uploadId.get() 436 | ) 437 | 438 | try: 439 | var abortClient = newAsyncHttpClient() 440 | let abortMultipartUploadResult = await abortClient.abortMultipartUpload(credentials=credentials, bucket=bucket, region=region, args=abortMultipartUploadRequest) 441 | echo abortMultipartUploadResult.toJson().parseJson().pretty() 442 | except: 443 | echo getCurrentExceptionMsg() 444 | 445 | ``` 446 | 447 | ____ 448 | 449 | ## createMultipartUpload 450 | 451 | ```nim 452 | # initiate the multipart upload 453 | let createMultiPartUploadRequest = CreateMultipartUploadRequest( 454 | bucket: bucket, 455 | key: key, 456 | ) 457 | 458 | let createMultiPartUploadResult = await client.createMultipartUpload( 459 | credentials = credentials, 460 | bucket = bucket, 461 | region = region, 462 | args = createMultiPartUploadRequest 463 | ) 464 | ``` 465 | 466 | ____ 467 | 468 | ## completeMultipartUpload 469 | 470 | ```nim 471 | let args = CompleteMultipartUploadRequest( 472 | bucket: bucket, 473 | key: key, 474 | uploadId: uploadId 475 | ) 476 | 477 | let res = await client.completeMultipartUpload(credentials=credentials, bucket=bucket, region=region, args=args) 478 | echo res.toJson().parseJson().pretty() 479 | 480 | ``` 481 | 482 | ____ 483 | 484 | ## uploadPart 485 | 486 | ```nim 487 | let uploadPartCommandRequest = UploadPartCommandRequest( 488 | bucket: bucket, 489 | key: key, 490 | body: body, 491 | partNumber: partNumber, 492 | uploadId: createMultiPartUploadResult.uploadId 493 | ) 494 | let res = 
await client.uploadPart( 495 | credentials = credentials, 496 | bucket = bucket, 497 | region = region, 498 | args = uploadPartCommandRequest 499 | ) 500 | echo "\n> uploadPart" 501 | echo res.toJson().parseJson().pretty() 502 | ``` 503 | ____ 504 | 505 | ## listMultipartUploads 506 | 507 | ```nim 508 | let listMultipartUploadsRequest = ListMultipartUploadsRequest( 509 | bucket: bucket, 510 | prefix: some("test") 511 | ) 512 | let listMultipartUploadsRes = await client.listMultipartUploads(credentials=credentials, bucket=bucket, region=region, args=listMultipartUploadsRequest) 513 | 514 | ``` 515 | 516 | ____ 517 | 518 | ## listParts 519 | 520 | ```nim 521 | let args = ListPartsRequest( 522 | bucket: bucket, 523 | key: some(key), 524 | uploadId: some(uploadId) 525 | ) 526 | let result = await client.listParts(credentials=credentials, bucket=bucket, region=region, args=args) 527 | # echo result 528 | echo result.toJson().parseJson().pretty() 529 | ``` 530 | 531 | ____ 532 | -------------------------------------------------------------------------------- /awsS3.nimble: -------------------------------------------------------------------------------- 1 | version = "3.2.2" 2 | author = "Thomas T. 
Jarløv (https://github.com/ThomasTJdev)" 3 | description = "Amazon S3 REST API (basic)" 4 | license = "MIT" 5 | srcDir = "src" 6 | 7 | requires "nim >= 1.4.2" 8 | requires "awsSigV4 >= 0.0.2" 9 | requires "awsSTS >= 2.0.3" 10 | requires "jsony == 1.1.5" 11 | 12 | when defined(s3multipart): 13 | requires "nimSHA2" 14 | when NimMajor >= 2: 15 | requires "hmac == 0.3.2" 16 | else: 17 | requires "hmac == 0.2.0" -------------------------------------------------------------------------------- /config.nims: -------------------------------------------------------------------------------- 1 | switch("d", "ssl") 2 | # switch("d", "dev") -------------------------------------------------------------------------------- /src/awsS3.nim: -------------------------------------------------------------------------------- 1 | # 2 | # Default 3 | # 4 | import awsS3/api 5 | export api 6 | 7 | import awsS3/signed 8 | export signed 9 | -------------------------------------------------------------------------------- /src/awsS3/api.nim: -------------------------------------------------------------------------------- 1 | # Copyright CxPlanner @ Thomas T. Jarløv (TTJ) 2 | # 3 | ## The core AWS commands has two procedures - one is the raw request returning 4 | ## the response, the other one is a sugar returning a `assert success (is2xx) == true`. 5 | ## 6 | ## The raw request commands can be chained where the `client` can be reused, 7 | ## e.g. the `move to trash`, which consists of a `copyObject` and a `deleteObject`. 8 | ## 9 | ## All requests are performed async. 10 | ## 11 | ## To get data on e.g. `headObject` just parse the headers: 12 | ## - response.headers["content-length"] 13 | ## 14 | ## 15 | ## Limitations: 16 | ## Spaces in `keys` is not supported. 17 | ## 18 | ## 19 | ## TODO: 20 | ## - all `bucketHost` should be `bucketName`, and when needed as a host, the 21 | ## region (host) should be appended within here. 
In that way we would only 22 | ## need to pass `bucketName` (shortform) around. 23 | 24 | 25 | import 26 | std/[ 27 | asyncdispatch, 28 | httpclient, 29 | httpcore, 30 | logging, 31 | os, 32 | strutils, 33 | times, 34 | uri 35 | ] 36 | 37 | import 38 | awsSTS 39 | 40 | import 41 | ./signed 42 | 43 | 44 | 45 | 46 | # 47 | # Credentials 48 | # 49 | proc s3Creds*(accessKey, secretKey, tokenKey, region: string): AwsCreds = 50 | ## Don't like the `awsSTS` package? Fine, just create the creds here. 51 | result = AwsCreds( 52 | AWS_REGION: region, 53 | AWS_ACCESS_KEY_ID: accessKey, 54 | AWS_SECRET_ACCESS_KEY: secretKey, 55 | AWS_SESSION_TOKEN: tokenKey 56 | ) 57 | 58 | 59 | 60 | # 61 | # Delete object 62 | # 63 | proc s3DeleteObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key: string): Future[AsyncResponse] {.async.} = 64 | ## AWS S3 API - DeleteObject 65 | result = await client.request(s3SignedUrl(creds, bucketHost, key, httpMethod=HttpDelete, setContentType=false), httpMethod=HttpDelete) 66 | 67 | 68 | proc s3DeleteObject*(client: HttpClient, creds: AwsCreds, bucketHost, key: string): Response = 69 | ## AWS S3 API - DeleteObject 70 | var s3Link: string 71 | block: 72 | let datetime = getTime().utc.format(basicISO8601) 73 | s3Link = s3SignedUrl( 74 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 75 | bucketHost, key, 76 | httpMethod = HttpDelete, 77 | setContentType = false, 78 | expireInSec = "65", 79 | accessToken = creds.AWS_SESSION_TOKEN, 80 | makeDateTime = datetime 81 | ) 82 | result = client.request(s3Link, httpMethod=HttpDelete) 83 | 84 | 85 | 86 | 87 | # 88 | # Head object 89 | # 90 | proc s3HeadObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key: string): Future[AsyncResponse] {.async.} = 91 | ## AWS S3 API - HeadObject 92 | ## 93 | ## Response: 94 | ## - result.headers["content-length"] 95 | result = await client.request(s3SignedUrl(creds, bucketHost, key, httpMethod=HttpHead, setContentType=false), 
httpMethod=HttpHead) 96 | 97 | 98 | proc s3HeadObject*(client: HttpClient, creds: AwsCreds, bucketHost, key: string): Response = 99 | ## AWS S3 API - HeadObject 100 | ## 101 | ## Response: 102 | ## - result.headers["content-length"] 103 | var s3Link: string 104 | block: 105 | let datetime = getTime().utc.format(basicISO8601) 106 | s3Link = s3SignedUrl( 107 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 108 | bucketHost, key, 109 | httpMethod = HttpHead, 110 | setContentType = false, 111 | expireInSec = "65", 112 | accessToken = creds.AWS_SESSION_TOKEN, 113 | makeDateTime = datetime 114 | ) 115 | result = client.request(s3Link, httpMethod=HttpHead) 116 | 117 | 118 | 119 | 120 | # 121 | # Get object 122 | # 123 | proc s3GetObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, downloadPath: string) {.async.} = 124 | ## AWS S3 API - GetObject 125 | ## 126 | ## `downloadPath` needs to full local path. 127 | await client.downloadFile(s3SignedUrl(creds, bucketHost, key, httpMethod=HttpGet, setContentType=false), downloadPath) 128 | 129 | 130 | proc s3GetObject*(client: HttpClient, creds: AwsCreds, bucketHost, key, downloadPath: string) = 131 | ## AWS S3 API - GetObject 132 | ## 133 | ## `downloadPath` needs to full local path. 
134 | var s3Link: string 135 | block: 136 | let datetime = getTime().utc.format(basicISO8601) 137 | s3Link = s3SignedUrl( 138 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 139 | bucketHost, key, 140 | httpMethod = HttpGet, 141 | setContentType = false, 142 | expireInSec = "65", 143 | accessToken = creds.AWS_SESSION_TOKEN, 144 | makeDateTime = datetime 145 | ) 146 | client.downloadFile(s3Link, downloadPath) 147 | 148 | 149 | 150 | 151 | # 152 | # Put object 153 | # 154 | proc s3PutObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, localPath: string): Future[AsyncResponse] {.async.} = 155 | ## AWS S3 API - PutObject 156 | ## 157 | ## The PutObject reads the file to memory and uploads it. 158 | result = await client.put(s3SignedUrl(creds, bucketHost, key, httpMethod=HttpPut), body = readFile(localPath)) 159 | 160 | 161 | proc s3PutObject*(client: HttpClient, creds: AwsCreds, bucketHost, key, localPath: string): Response = 162 | ## AWS S3 API - PutObject 163 | ## 164 | ## The PutObject reads the file to memory and uploads it. 165 | var s3Link: string 166 | block: 167 | let datetime = getTime().utc.format(basicISO8601) 168 | s3Link = s3SignedUrl( 169 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 170 | bucketHost, key, 171 | httpMethod = HttpPut, 172 | setContentType = false, 173 | expireInSec = "65", 174 | accessToken = creds.AWS_SESSION_TOKEN, 175 | makeDateTime = datetime 176 | ) 177 | result = client.put(s3Link, body = readFile(localPath)) 178 | 179 | 180 | 181 | # 182 | # Copy object 183 | # 184 | proc s3CopyObject*(client: AsyncHttpClient, creds: AwsCreds, bucketHost, key, copyObject: string): Future[AsyncResponse] {.async.} = 185 | ## AWS S3 API - CopyObject 186 | ## 187 | ## The copyObject param is the full path to the copy source, this means both 188 | ## the bucket and file, e.g. 
189 | ## - "/bucket-name/folder1/folder2/s3C3FiLXRsPXeE9TUjZGEP3RYvczCFYg.jpg" 190 | ## - "/[BUCKET]/[KEY] 191 | ## 192 | ## TODO: Implement error checker. An error occured during `copyObject` can 193 | ## return a 200-response. 194 | ## If the error occurs during the copy operation, the error response is 195 | ## embedded in the 200 OK response. This means that a 200 OK response 196 | ## can contain either a success or an error. 197 | ## (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) 198 | 199 | let 200 | copyObjectEncoded = copyObject.encodeUrl() 201 | headers = newHttpHeaders(@[ 202 | ("host", bucketHost), 203 | ("x-amz-copy-source", copyObjectEncoded), 204 | ]) 205 | 206 | result = await client.request(s3SignedUrl(creds, bucketHost, key, httpMethod=HttpPut, copyObject=copyObjectEncoded, setContentType=false), httpMethod=HttpPut, headers=headers) 207 | 208 | 209 | proc s3CopyObject*(client: HttpClient, creds: AwsCreds, bucketHost, key, copyObject: string): Response = 210 | ## AWS S3 API - CopyObject 211 | ## 212 | ## The copyObject param is the full path to the copy source, this means both 213 | ## the bucket and file, e.g. 214 | ## - "/bucket-name/folder1/folder2/s3C3FiLXRsPXeE9TUjZGEP3RYvczCFYg.jpg" 215 | ## - "/[BUCKET]/[KEY] 216 | ## 217 | ## TODO: Implement error checker. An error occured during `copyObject` can 218 | ## return a 200-response. 219 | ## If the error occurs during the copy operation, the error response is 220 | ## embedded in the 200 OK response. This means that a 200 OK response 221 | ## can contain either a success or an error. 
222 | ## (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) 223 | 224 | var s3Link: string 225 | block: 226 | let datetime = getTime().utc.format(basicISO8601) 227 | let copyObjectEncoded = copyObject.encodeUrl() 228 | s3Link = s3SignedUrl( 229 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 230 | bucketHost, key, 231 | httpMethod = HttpPut, 232 | setContentType = false, 233 | copyObject = copyObjectEncoded, 234 | expireInSec = "65", 235 | accessToken = creds.AWS_SESSION_TOKEN, 236 | makeDateTime = datetime 237 | ) 238 | 239 | let copyObjectEncoded = copyObject.encodeUrl() 240 | let headers = newHttpHeaders(@[ 241 | ("host", bucketHost), 242 | ("x-amz-copy-source", copyObjectEncoded), 243 | ]) 244 | result = client.request(s3Link, httpMethod=HttpPut, headers=headers) 245 | 246 | -------------------------------------------------------------------------------- /src/awsS3/multipart.nim: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. 
Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | # 4 | # Multipart 5 | # 6 | import 7 | multipart/[ 8 | signedv2, 9 | api/utils, 10 | api/api, 11 | models/models 12 | ] 13 | export 14 | signedv2, 15 | utils, 16 | api, 17 | models 18 | -------------------------------------------------------------------------------- /src/awsS3/multipart/api/abortMultipartUpload.nim: -------------------------------------------------------------------------------- 1 | # std 2 | import 3 | os, 4 | httpclient, 5 | asyncdispatch, 6 | strutils, 7 | strformat, 8 | options, 9 | xmlparser, 10 | xmltree 11 | 12 | # other 13 | import 14 | ../models/models, 15 | ../signedv2, 16 | xml2Json, 17 | json, 18 | jsony, 19 | utils, 20 | listMultipartUploads 21 | 22 | from awsSTS import AwsCreds 23 | 24 | proc abortMultipartUpload*( 25 | client: AsyncHttpClient, 26 | credentials: AwsCreds, 27 | headers: HttpHeaders = newHttpHeaders(), 28 | bucket: string, 29 | region: string, 30 | service="s3", 31 | args: AbortMultipartUploadRequest 32 | ): Future[AbortMultipartUploadResult] {.async.} = 33 | ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html 34 | 35 | # request 36 | 37 | # DELETE /Key+?uploadId=UploadId HTTP/1.1 38 | # Host: Bucket.s3.amazonaws.com 39 | # x-amz-request-payer: RequestPayer 40 | # x-amz-expected-bucket-owner: ExpectedBucketOwner 41 | 42 | # response 43 | 44 | # HTTP/1.1 204 45 | # x-amz-request-charged: RequestCharged 46 | 47 | # example request 48 | 49 | # DELETE /example-object? 
50 | # uploadId=VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZ HTTP/1.1 51 | # Host: example-bucket.s3..amazonaws.com 52 | # Date: Mon, 1 Nov 2010 20:34:56 GMT 53 | # Authorization: authorization string 54 | 55 | # example response 56 | 57 | # HTTP/1.1 204 OK 58 | # x-amz-id-2: Weag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg== 59 | # x-amz-request-id: 996c76696e6727732072657175657374 60 | # Date: Mon, 1 Nov 2010 20:34:56 GMT 61 | # Content-Length: 0 62 | # Connection: keep-alive 63 | # Server: AmazonS3 64 | 65 | if args.requestPayer.isSome(): 66 | client.headers["x-amz-request-payer"] = args.requestPayer.get() 67 | if args.expectedBucketOwner.isSome(): 68 | client.headers["x-amz-expected-bucket-owner"] = args.expectedBucketOwner.get() 69 | 70 | let httpMethod = HttpDelete 71 | let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com" 72 | let url = &"{endpoint}/{args.key}?uploadId={args.uploadId}" 73 | 74 | let res = await client.request(credentials=credentials, headers=headers, httpMethod=httpMethod, url=url, region=region, service=service, payload="") 75 | # let body = await res.body() 76 | 77 | when defined(dev): 78 | echo "" 26 | if part.checksumCRC32.isSome(): 27 | result = result & &"{part.checksumCRC32.get()}" 28 | if part.checksumCRC32C.isSome(): 29 | result = result & &"{part.checksumCRC32C.get()}" 30 | if part.checksumSHA1.isSome(): 31 | result = result & &"{part.checksumSHA1.get()}" 32 | if part.checksumSHA256.isSome(): 33 | result = result & &"{part.checksumSHA256.get()}" 34 | if part.eTag.isSome(): 35 | result = result & &"{part.eTag.get()}" 36 | if part.partNumber.isSome(): 37 | result = result & &"{part.partNumber.get()}" 38 | result = result & "" 39 | 40 | proc completeMultipartUpload*( 41 | client: AsyncHttpClient, 42 | credentials: AwsCreds, 43 | headers: HttpHeaders = newHttpHeaders(), 44 | bucket: string, 45 | region: string, 46 | service="s3", 47 | args: CompleteMultipartUploadRequest 48 | ): 
proc completeMultipartUpload*(
  client: AsyncHttpClient,
  credentials: AwsCreds,
  headers: HttpHeaders = newHttpHeaders(),
  bucket: string,
  region: string,
  service = "s3",
  args: CompleteMultipartUploadRequest
): Future[CompleteMultipartUploadResult] {.async.} =
  ## Completes a multipart upload by assembling the previously uploaded parts.
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
  ##
  ## Sends `POST /{key}?uploadId={uploadId}` with an XML payload listing the
  ## parts, parses the XML response into a `CompleteMultipartUploadResult`,
  ## and merges the result-relevant `x-amz-*` response headers into it.
  ##
  ## Raises `HttpRequestError` when S3 answers with anything but 200.

  let httpMethod = HttpPost
  let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com"
  # The URL is fixed once built; no further query parameters are appended,
  # so `let` (the original used a never-reassigned `var`).
  let url = &"{endpoint}/{args.key}?uploadId={args.uploadId}"

  # Optional request headers - only sent when the caller supplied a value.
  if args.checksumCRC32.isSome():
    headers["x-amz-checksum-crc32"] = args.checksumCRC32.get()
  if args.checksumCRC32C.isSome():
    headers["x-amz-checksum-crc32c"] = args.checksumCRC32C.get()
  if args.checksumSHA1.isSome():
    headers["x-amz-checksum-sha1"] = args.checksumSHA1.get()
  if args.checksumSHA256.isSome():
    headers["x-amz-checksum-sha256"] = args.checksumSHA256.get()
  if args.requestPayer.isSome():
    headers["x-amz-request-payer"] = args.requestPayer.get()
  if args.expectedBucketOwner.isSome():
    headers["x-amz-expected-bucket-owner"] = args.expectedBucketOwner.get()
  if args.sseCustomerAlgorithm.isSome():
    headers["x-amz-server-side-encryption-customer-algorithm"] = args.sseCustomerAlgorithm.get()
  if args.sseCustomerKey.isSome():
    headers["x-amz-server-side-encryption-customer-key"] = args.sseCustomerKey.get()
  if args.sseCustomerKeyMD5.isSome():
    headers["x-amz-server-side-encryption-customer-key-MD5"] = args.sseCustomerKeyMD5.get()

  # Collect the <Part> elements for the request body, if any were supplied.
  var partsXml = ""
  if args.multipartUpload.isSome():
    if args.multipartUpload.get().parts.isSome():
      partsXml = args.multipartUpload.get().parts.get().makePartsXml()

  # NOTE(review): the XML tags around {partsXml} were lost in extraction of
  # this file; per the S3 API the payload root element is
  # <CompleteMultipartUpload> - confirm against the repository original.
  let payload = &"""<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{partsXml}</CompleteMultipartUpload>"""

  when defined(dev):
    echo "\n> completeMultiPart.payload: "
    echo payload

  let res = await client.request(credentials=credentials, headers=headers,
      httpMethod=httpMethod, url=url, region=region, service=service,
      payload=payload)
  let body = await res.body

  when defined(dev):
    echo "\n< completeMultipartUpload.code"
    echo res.code
    echo "\n< completeMultipartUpload.headers"
    echo res.headers
    echo "\n< completeMultipartUpload.body"
    echo body

  if res.code != Http200:
    raise newException(HttpRequestError, "Error: " & $res.code & " " & body)

  # Response body -> XML -> JSON -> typed result object.
  # NOTE(review): this parse section was garbled in extraction; reconstructed
  # to match the identical pattern in listMultipartUploads/listParts.
  let xml = body.parseXML()
  let json = xml.xml2Json()
  let jsonStr = json["CompleteMultipartUploadResult"].toJson()
  let obj = jsonStr.fromJson(CompleteMultipartUploadResult)

  when defined(dev):
    echo "\n> xml: ", xml
    echo "\n> jsonStr: ", jsonStr
  result = obj

  # Fields delivered only via response headers, not the XML body.
  if res.headers.hasKey("x-amz-expiration"):
    result.expiration = some($res.headers["x-amz-expiration"])
  if res.headers.hasKey("x-amz-server-side-encryption"):
    result.serverSideEncryption = some(parseEnum[ServerSideEncryption]($res.headers["x-amz-server-side-encryption"]))
  if res.headers.hasKey("x-amz-version-id"):
    result.versionId = some($res.headers["x-amz-version-id"])
  if res.headers.hasKey("x-amz-server-side-encryption-aws-kms-key-id"):
    result.ssekmsKeyId = some($res.headers["x-amz-server-side-encryption-aws-kms-key-id"])
  if res.headers.hasKey("x-amz-server-side-encryption-bucket-key-enabled"):
    result.bucketKeyEnabled = some(parseBool($res.headers["x-amz-server-side-encryption-bucket-key-enabled"]))
  if res.headers.hasKey("x-amz-request-charged"):
    result.requestCharged = some($res.headers["x-amz-request-charged"])
proc main() {.async.} =
  ## Scoped manual smoke test: completes a multipart upload against the
  ## bucket configured via environment variables.
  let
    accessKey = os.getEnv("AWS_ACCESS_KEY_ID")
    secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY")
    region = os.getEnv("AWS_REGION")
    bucket = os.getEnv("AWS_BUCKET")
    key = "testFile.bin"

  let credentials = AwsCreds(AWS_ACCESS_KEY_ID: accessKey,
                             AWS_SECRET_ACCESS_KEY: secretKey)
  var client = newAsyncHttpClient()

  # Fill in a real upload id when running this by hand.
  let uploadId = ""
  let args = CompleteMultipartUploadRequest(
    bucket: bucket,
    key: key,
    uploadId: uploadId
  )

  let res = await client.completeMultipartUpload(
    credentials = credentials, bucket = bucket, region = region, args = args)
  echo res.toJson().parseJson().pretty()


when isMainModule:
  import dotenv
  load()

  try:
    waitFor main()
  except:
    ## treeform async message fix
    ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160
    let msg = getCurrentExceptionMsg()
    for rawLine in msg.split("\n"):
      var cleaned = rawLine.replace("\\", "/")
      if "/lib/pure/async" in cleaned:
        continue
      if "#[" in cleaned:
        break
      cleaned.removeSuffix("Iter")
      echo cleaned
proc createMultipartUpload*(
  client: AsyncHttpClient,
  credentials: AwsCreds,
  headers: HttpHeaders = newHttpHeaders(),
  bucket: string,
  region: string,
  service = "s3",
  args: CreateMultipartUploadRequest
): Future[InitiateMultipartUploadResult] {.async.} =
  ## Initiates a multipart upload and returns the upload id used by the
  ## subsequent `uploadPart`/`completeMultipartUpload` calls.
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
  ##
  ## Sends `POST /{key}?uploads=` with an empty body; all options travel as
  ## request headers. Raises `HttpRequestError` on a non-200 answer.

  let httpMethod = HttpPost
  let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com"
  # Fixed once built; the original used a never-reassigned `var`.
  let url = &"{endpoint}/{args.key}?uploads="

  # Optional request headers - only sent when the caller supplied a value.
  # BUGFIX: the original appended '=' to every header NAME (e.g.
  # headers["x-amz-acl="]) which produces invalid header names that S3
  # does not recognize; the '=' belongs to query strings, not headers.
  if args.acl.isSome():
    headers["x-amz-acl"] = $args.acl.get()
  if args.cacheControl.isSome():
    headers["Cache-Control"] = args.cacheControl.get()
  if args.contentDisposition.isSome():
    headers["Content-Disposition"] = args.contentDisposition.get()
  if args.contentEncoding.isSome():
    headers["Content-Encoding"] = args.contentEncoding.get()
  if args.contentLanguage.isSome():
    headers["Content-Language"] = args.contentLanguage.get()
  if args.contentType.isSome():
    headers["Content-Type"] = args.contentType.get()
  if args.expires.isSome():
    headers["Expires"] = $args.expires.get()
  if args.grantFullControl.isSome():
    headers["x-amz-grant-full-control"] = args.grantFullControl.get()
  if args.grantRead.isSome():
    headers["x-amz-grant-read"] = args.grantRead.get()
  if args.grantReadACP.isSome():
    headers["x-amz-grant-read-acp"] = args.grantReadACP.get()
  if args.grantWriteACP.isSome():
    headers["x-amz-grant-write-acp"] = args.grantWriteACP.get()
  if args.serverSideEncryption.isSome():
    headers["x-amz-server-side-encryption"] = $args.serverSideEncryption.get()
  if args.storageClass.isSome():
    headers["x-amz-storage-class"] = $args.storageClass.get()
  if args.websiteRedirectLocation.isSome():
    headers["x-amz-website-redirect-location"] = args.websiteRedirectLocation.get()
  if args.sseCustomerAlgorithm.isSome():
    headers["x-amz-server-side-encryption-customer-algorithm"] = args.sseCustomerAlgorithm.get()
  if args.sseCustomerKey.isSome():
    headers["x-amz-server-side-encryption-customer-key"] = args.sseCustomerKey.get()
  if args.sseCustomerKeyMD5.isSome():
    headers["x-amz-server-side-encryption-customer-key-MD5"] = args.sseCustomerKeyMD5.get()
  if args.sseKMSKeyId.isSome():
    headers["x-amz-server-side-encryption-aws-kms-key-id"] = args.sseKMSKeyId.get()
  if args.sseKMSEncryptionContext.isSome():
    headers["x-amz-server-side-encryption-context"] = args.sseKMSEncryptionContext.get()
  if args.requestPayer.isSome():
    headers["x-amz-request-payer"] = args.requestPayer.get()
  if args.tagging.isSome():
    headers["x-amz-tagging"] = args.tagging.get()
  if args.objectLockMode.isSome():
    headers["x-amz-object-lock-mode"] = $args.objectLockMode.get()
  if args.objectLockRetainUntilDate.isSome():
    headers["x-amz-object-lock-retain-until-date"] = $args.objectLockRetainUntilDate.get()
  if args.objectLockLegalHoldStatus.isSome():
    headers["x-amz-object-lock-legal-hold"] = $args.objectLockLegalHoldStatus.get()
  if args.expectedBucketOwner.isSome():
    headers["x-amz-expected-bucket-owner"] = args.expectedBucketOwner.get()
  if args.checksumAlgorithm.isSome():
    headers["x-amz-checksum-algorithm"] = $args.checksumAlgorithm.get()

  let res = await client.request(credentials=credentials, headers=headers,
      httpMethod=httpMethod, url=url, region=region, service=service,
      payload="")
  let body = await res.body

  when defined(dev):
    echo "\n< createMultipartUpload.url"
    echo url
    echo "\n< createMultipartUpload.code"
    echo res.code
    echo "\n< createMultipartUpload.headers"
    echo res.headers
    echo "\n< createMultipartUpload.body"
    echo body

  if res.code != Http200:
    raise newException(HttpRequestError, "Error: " & $res.code & " " & body)

  # Response body -> XML -> JSON -> typed result object.
  # NOTE(review): this parse section was garbled in extraction; reconstructed
  # to match the identical pattern in listMultipartUploads/listParts. The S3
  # response root element is <InitiateMultipartUploadResult>.
  let xml = body.parseXML()
  let json = xml.xml2Json()
  let jsonStr = json["InitiateMultipartUploadResult"].toJson()
  let obj = jsonStr.fromJson(InitiateMultipartUploadResult)

  when defined(dev):
    echo "\n> xml: ", xml
    echo "\n> jsonStr: ", jsonStr
  result = obj

  # Fields delivered only via response headers, not the XML body.
  if res.headers.hasKey("x-amz-abort-date"):
    result.abortDate = some(parse($res.headers["x-amz-abort-date"], "ddd',' dd MMM yyyy HH:mm:ss 'GMT'"))
  if res.headers.hasKey("x-amz-abort-rule-id"):
    result.abortRuleId = some($res.headers["x-amz-abort-rule-id"])
  if res.headers.hasKey("x-amz-server-side-encryption"):
    result.serverSideEncryption = some(parseEnum[ServerSideEncryption]($res.headers["x-amz-server-side-encryption"]))
  if res.headers.hasKey("x-amz-server-side-encryption-customer-algorithm"):
    result.sseCustomerAlgorithm = some($res.headers["x-amz-server-side-encryption-customer-algorithm"])
  if res.headers.hasKey("x-amz-server-side-encryption-customer-key-MD5"):
    result.sseCustomerKeyMD5 = some($res.headers["x-amz-server-side-encryption-customer-key-MD5"])
  if res.headers.hasKey("x-amz-server-side-encryption-aws-kms-key-id"):
    result.sseKMSKeyId = some($res.headers["x-amz-server-side-encryption-aws-kms-key-id"])
  if res.headers.hasKey("x-amz-server-side-encryption-context"):
    result.sseKMSEncryptionContext = some($res.headers["x-amz-server-side-encryption-context"])
  if res.headers.hasKey("x-amz-server-side-encryption-bucket-key-enabled"):
    result.bucketKeyEnabled = some(parseBool($res.headers["x-amz-server-side-encryption-bucket-key-enabled"]))
  if res.headers.hasKey("x-amz-request-charged"):
    result.requestCharged = some($res.headers["x-amz-request-charged"])
  if res.headers.hasKey("x-amz-checksum-algorithm"):
    result.checksumAlgorithm = some(parseEnum[CheckSumAlgorithm]($res.headers["x-amz-checksum-algorithm"]))
proc main() {.async.} =
  ## Scoped manual smoke test: initiates a multipart upload against the
  ## bucket configured via environment variables.
  let
    accessKey = os.getEnv("AWS_ACCESS_KEY_ID")
    secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY")
    region = os.getEnv("AWS_REGION")
    bucket = os.getEnv("AWS_BUCKET")
    key = "testFile.bin"

  let credentials = AwsCreds(AWS_ACCESS_KEY_ID: accessKey,
                             AWS_SECRET_ACCESS_KEY: secretKey)
  var client = newAsyncHttpClient()

  let createMultipartUploadRequest = CreateMultipartUploadRequest(
    bucket: bucket,
    key: key,
  )

  let res = await client.createMultipartUpload(
    credentials = credentials, bucket = bucket, region = region,
    args = createMultipartUploadRequest)
  echo res.toJson().parseJson().pretty()


when isMainModule:
  import dotenv
  load()

  try:
    waitFor main()
  except:
    ## treeform async message fix
    ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160
    let msg = getCurrentExceptionMsg()
    for rawLine in msg.split("\n"):
      var cleaned = rawLine.replace("\\", "/")
      if "/lib/pure/async" in cleaned:
        continue
      if "#[" in cleaned:
        break
      cleaned.removeSuffix("Iter")
      echo cleaned
proc listMultipartUploads*(
  client: AsyncHttpClient,
  credentials: AwsCreds,
  headers: HttpHeaders = newHttpHeaders(),
  bucket: string,
  region: string,
  service = "s3",
  args: ListMultipartUploadsRequest
): Future[ListMultipartUploadsResult] {.async.} =
  ## Lists in-progress multipart uploads, i.e. uploads that have been
  ## initiated but not yet completed or aborted.
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
  ##
  ## Sends `GET /?uploads=` with the optional filters as query parameters
  ## and parses the XML answer into a `ListMultipartUploadsResult`.
  ## Raises `HttpRequestError` on a non-200 answer.

  # BUGFIX: the original wrote this into `client.headers`, but the request
  # below is signed and sent with the `headers` object - every sibling proc
  # in this package sets `headers`, so the value was being dropped here.
  if args.expectedBucketOwner.isSome():
    headers["x-amz-expected-bucket-owner"] = args.expectedBucketOwner.get()

  let httpMethod = HttpGet
  let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com"
  var url = &"{endpoint}/?uploads="

  # Optional query parameters - appended only when supplied.
  # NOTE(review): values are not URL-encoded; callers must pass safe values.
  if args.delimiter.isSome():
    url = url & "&delimiter=" & args.delimiter.get()
  if args.encodingType.isSome():
    url = url & "&encoding-type=" & args.encodingType.get()
  if args.keyMarker.isSome():
    url = url & "&key-marker=" & args.keyMarker.get()
  if args.maxUploads.isSome():
    url = url & "&max-uploads=" & $args.maxUploads.get()
  if args.prefix.isSome():
    url = url & "&prefix=" & args.prefix.get()
  if args.uploadIdMarker.isSome():
    url = url & "&upload-id-marker=" & args.uploadIdMarker.get()

  let res = await client.request(credentials=credentials, headers=headers,
      httpMethod=httpMethod, url=url, region=region, service=service,
      payload="")
  let body = await res.body

  when defined(dev):
    echo "\n< listMultipartUploads.url"
    echo url
    echo "\n< listMultipartUploads.method"
    echo httpMethod
    echo "\n< listMultipartUploads.code"
    echo res.code
    echo "\n< listMultipartUploads.headers"
    echo res.headers
    echo "\n< listMultipartUploads.body"
    echo body

  if res.code != Http200:
    raise newException(HttpRequestError, "Error: " & $res.code & " " & await res.body)

  # Response body -> XML -> JSON -> typed result object.
  let xml = body.parseXML()
  let json = xml.xml2Json()
  let jsonStr = json["ListMultipartUploadsResult"].toJson()
  when defined(dev):
    echo "\n> jsonStr: "
    echo jsonStr
  let obj = jsonStr.fromJson(ListMultipartUploadsResult)

  when defined(dev):
    echo "\n> xml: ", xml
    echo "\n> jsonStr: ", jsonStr
  result = obj
proc main() {.async.} =
  ## Scoped manual smoke test: lists in-progress multipart uploads with a
  ## "test" prefix in the bucket configured via environment variables.
  let
    accessKey = os.getEnv("AWS_ACCESS_KEY_ID")
    secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY")
    region = os.getEnv("AWS_REGION")
    bucket = os.getEnv("AWS_BUCKET")

  let credentials = AwsCreds(AWS_ACCESS_KEY_ID: accessKey,
                             AWS_SECRET_ACCESS_KEY: secretKey)
  var client = newAsyncHttpClient()

  let args = ListMultipartUploadsRequest(
    bucket: bucket,
    prefix: some("test")
  )
  let uploads = await client.listMultipartUploads(
    credentials = credentials, bucket = bucket, region = region, args = args)
  echo uploads.toJson().parseJson().pretty()


when isMainModule:
  import dotenv
  load()

  try:
    waitFor main()
  except:
    ## treeform async message fix
    ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160
    let msg = getCurrentExceptionMsg()
    for rawLine in msg.split("\n"):
      var cleaned = rawLine.replace("\\", "/")
      if "/lib/pure/async" in cleaned:
        continue
      if "#[" in cleaned:
        break
      cleaned.removeSuffix("Iter")
      echo cleaned
proc listParts*(
  client: AsyncHttpClient,
  credentials: AwsCreds,
  headers: HttpHeaders = newHttpHeaders(),
  bucket: string,
  region: string,
  service = "s3",
  args: ListPartsRequest
): Future[ListPartsResult] {.async.} =
  ## Lists the parts that have been uploaded for a specific multipart upload.
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
  ##
  ## Sends `GET /{key}?uploadId=...&max-parts=...&part-number-marker=...`
  ## and parses the XML answer into a `ListPartsResult`, merging the
  ## abort-date/rule-id/request-charged response headers into the result.
  ## Raises `HttpRequestError` on a non-200 answer.

  let httpMethod = HttpGet
  let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com"

  # Optional request headers - only sent when the caller supplied a value.
  if args.requestPayer.isSome():
    headers["x-amz-request-payer"] = $args.requestPayer.get()
  if args.expectedBucketOwner.isSome():
    headers["x-amz-expected-bucket-owner"] = $args.expectedBucketOwner.get()
  if args.sseCustomerAlgorithm.isSome():
    headers["x-amz-server-side-encryption-customer-algorithm"] = $args.sseCustomerAlgorithm.get()
  if args.sseCustomerKey.isSome():
    headers["x-amz-server-side-encryption-customer-key"] = $args.sseCustomerKey.get()
  if args.sseCustomerKeyMD5.isSome():
    headers["x-amz-server-side-encryption-customer-key-MD5"] = $args.sseCustomerKeyMD5.get()

  # Optional query parameters. BUGFIX: the original emitted a bare "?"
  # followed by "&"-prefixed parameters (".../?&uploadId=..."); collect the
  # parameters first and join them so the query string is well-formed.
  var params: seq[string]
  if args.uploadId.isSome():
    params.add "uploadId=" & args.uploadId.get()
  if args.maxParts.isSome():
    params.add "max-parts=" & $args.maxParts.get()
  if args.partNumberMarker.isSome():
    params.add "part-number-marker=" & $args.partNumberMarker.get()

  var url =
    if args.key.isSome(): &"{endpoint}/" & args.key.get()
    else: &"{endpoint}/"
  if params.len > 0:
    url = url & "?" & params.join("&")

  let res = await client.request(credentials=credentials, headers=headers,
      httpMethod=httpMethod, url=url, region=region, service=service,
      payload="")
  let body = await res.body

  # BUGFIX: the debug labels said "listMultipartUploads" (copy-paste).
  when defined(dev):
    echo "\n< listParts.url"
    echo url
    echo "\n< listParts.method"
    echo httpMethod
    echo "\n< listParts.code"
    echo res.code
    echo "\n< listParts.headers"
    echo res.headers
    echo "\n< listParts.body"
    echo body

  if res.code != Http200:
    raise newException(HttpRequestError, "Error: " & $res.code & " " & await res.body)

  # Response body -> XML -> JSON -> typed result object.
  let xml = body.parseXML()
  let json = xml.xml2Json()
  let jsonStr = json["ListPartsResult"].toJson()
  when defined(dev):
    echo jsonStr
  let obj = jsonStr.fromJson(ListPartsResult)

  when defined(dev):
    echo "\n> xml: ", xml
    echo "\n> jsonStr: ", jsonStr
  result = obj

  # Fields delivered only via response headers, not the XML body.
  if res.headers.hasKey("x-amz-abort-date"):
    result.abortDate = some(parse($res.headers["x-amz-abort-date"], "ddd',' dd MMM yyyy HH:mm:ss 'GMT'"))
  if res.headers.hasKey("x-amz-abort-rule-id"):
    result.abortRuleId = some($res.headers["x-amz-abort-rule-id"])
  if res.headers.hasKey("x-amz-request-charged"):
    result.requestCharged = some($res.headers["x-amz-request-charged"])
proc main() {.async.} =
  ## Scoped manual smoke test: lists parts of a known in-progress upload.
  ## Region, bucket, key and upload id are hard-coded test fixtures.
  let
    accessKey = os.getEnv("AWS_ACCESS_KEY_ID")
    secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY")
    region = "eu-west-2"
    bucket = "nim-aws-s3-multipart-upload"
    key = "testFile1.bin"
    uploadId = "G9St9EQShiehKsLSdNz.KcnTQiNYoQzx91OfcEi.PpAr6U3KKTCzCvuGFLAlVMbuDIAovCEUKqM55qTLl73TYoBELhzFjo.aAtbvGLy2z6ClnkKmm4dQNGx_14p.Ztho"

  let credentials = AwsCreds(AWS_ACCESS_KEY_ID: accessKey,
                             AWS_SECRET_ACCESS_KEY: secretKey)
  var client = newAsyncHttpClient()

  let args = ListPartsRequest(
    bucket: bucket,
    key: some(key),
    uploadId: some(uploadId)
  )
  let parts = await client.listParts(
    credentials = credentials, bucket = bucket, region = region, args = args)
  echo parts.toJson().parseJson().pretty()


when isMainModule:
  import dotenv
  load()

  try:
    waitFor main()
  except:
    ## treeform async message fix
    ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160
    let msg = getCurrentExceptionMsg()
    for rawLine in msg.split("\n"):
      var cleaned = rawLine.replace("\\", "/")
      if "/lib/pure/async" in cleaned:
        continue
      if "#[" in cleaned:
        break
      cleaned.removeSuffix("Iter")
      echo cleaned
proc uploadPart*(
  client: AsyncHttpClient,
  credentials: AwsCreds,
  headers: HttpHeaders = newHttpHeaders(),
  bucket: string,
  region: string,
  service = "s3",
  args: UploadPartCommandRequest
): Future[UploadPartResult] {.async.} =
  ## Uploads a single part in a multipart upload.
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
  ##
  ## Sends `PUT /{key}?partNumber={partNumber}&uploadId={uploadId}` with
  ## `args.body` as payload and maps the response headers (ETag, checksums,
  ## SSE fields, ...) onto `UploadPartResult`.
  ## Raises `HttpRequestError` on a non-200 answer.

  let httpMethod = HttpPut
  let endpoint = &"https://{bucket}.{service}.{region}.amazonaws.com"
  # Fixed once built; the original used a never-reassigned `var`.
  let url = &"{endpoint}/{args.key}?partNumber={args.partNumber}&uploadId={args.uploadId}"

  # Optional request headers - only sent when the caller supplied a value.
  if args.contentLength.isSome():
    headers["Content-Length"] = $args.contentLength.get()
  if args.contentMD5.isSome():
    headers["Content-MD5"] = args.contentMD5.get()
  if args.checksumAlgorithm.isSome():
    headers["x-amz-sdk-checksum-algorithm"] = $args.checksumAlgorithm.get()
  if args.checksumCRC32.isSome():
    headers["x-amz-checksum-crc32"] = args.checksumCRC32.get()
  if args.checksumCRC32C.isSome():
    headers["x-amz-checksum-crc32c"] = args.checksumCRC32C.get()
  if args.checksumSHA1.isSome():
    headers["x-amz-checksum-sha1"] = args.checksumSHA1.get()
  if args.checksumSHA256.isSome():
    headers["x-amz-checksum-sha256"] = args.checksumSHA256.get()
  if args.sseCustomerAlgorithm.isSome():
    headers["x-amz-server-side-encryption-customer-algorithm"] = args.sseCustomerAlgorithm.get()
  if args.sseCustomerKey.isSome():
    headers["x-amz-server-side-encryption-customer-key"] = args.sseCustomerKey.get()
  if args.sseCustomerKeyMD5.isSome():
    headers["x-amz-server-side-encryption-customer-key-MD5"] = args.sseCustomerKeyMD5.get()
  if args.requestPayer.isSome():
    headers["x-amz-request-payer"] = args.requestPayer.get()
  if args.expectedBucketOwner.isSome():
    headers["x-amz-expected-bucket-owner"] = args.expectedBucketOwner.get()

  let res = await client.request(credentials = credentials,
      headers = headers, httpMethod = httpMethod, url = url,
      region = region, service = service, payload = args.body)
  let body = await res.body

  when defined(dev):
    echo "\n< uploadPart.url"
    echo url
    echo "\n< uploadPart.method"
    echo httpMethod
    echo "\n< uploadPart.code"
    echo res.code
    echo "\n< uploadPart.headers"
    echo res.headers
    echo "\n< uploadPart.body"
    echo body

  if res.code != Http200:
    raise newException(HttpRequestError, "Error: " & $res.code &
        " " & await res.body)

  # All result fields come from response headers; there is no XML body.
  # BUGFIX: the original handled the customer-algorithm header twice
  # (once before the ETag block and again after the checksum block);
  # the duplicate assignment is removed.
  # NOTE(review): the documented response header
  # x-amz-server-side-encryption is not mapped here - confirm whether
  # UploadPartResult carries a field for it before adding.
  if res.headers.hasKey("ETag"):
    # S3 returns the ETag wrapped in double quotes; strip them so the
    # value can be echoed back verbatim in completeMultipartUpload.
    result.eTag = some(($res.headers["ETag"]).strip(chars = {'"'}))
  if res.headers.hasKey("x-amz-checksum-crc32"):
    result.checksumCRC32 = some($res.headers["x-amz-checksum-crc32"])
  if res.headers.hasKey("x-amz-checksum-crc32c"):
    result.checksumCRC32C = some($res.headers["x-amz-checksum-crc32c"])
  if res.headers.hasKey("x-amz-checksum-sha1"):
    result.checksumSHA1 = some($res.headers["x-amz-checksum-sha1"])
  if res.headers.hasKey("x-amz-checksum-sha256"):
    result.checksumSHA256 = some($res.headers["x-amz-checksum-sha256"])
  if res.headers.hasKey("x-amz-server-side-encryption-customer-algorithm"):
    result.sseCustomerAlgorithm = some($res.headers["x-amz-server-side-encryption-customer-algorithm"])
  if res.headers.hasKey("x-amz-server-side-encryption-customer-key-MD5"):
    result.sseCustomerKeyMD5 = some($res.headers["x-amz-server-side-encryption-customer-key-MD5"])
  if res.headers.hasKey("x-amz-server-side-encryption-aws-kms-key-id"):
    result.sseKMSKeyId = some($res.headers["x-amz-server-side-encryption-aws-kms-key-id"])
  if res.headers.hasKey("x-amz-server-side-encryption-bucket-key-enabled"):
    result.bucketKeyEnabled = some(parseBool($res.headers[
        "x-amz-server-side-encryption-bucket-key-enabled"]))
  if res.headers.hasKey("x-amz-request-charged"):
    result.requestCharged = some($res.headers["x-amz-request-charged"])
138 | # so quotes need to be stripped 139 | result.eTag = some(($res.headers["ETag"]).strip(chars = {'"'})) 140 | if res.headers.hasKey("x-amz-checksum-crc32"): 141 | result.checksumCRC32 = some($res.headers["x-amz-checksum-crc32"]) 142 | if res.headers.hasKey("x-amz-checksum-crc32c"): 143 | result.checksumCRC32C = some($res.headers["x-amz-checksum-crc32c"]) 144 | if res.headers.hasKey("x-amz-checksum-sha1"): 145 | result.checksumSHA1 = some($res.headers["x-amz-checksum-sha1"]) 146 | if res.headers.hasKey("x-amz-checksum-sha256"): 147 | result.checksumSHA256 = some($res.headers["x-amz-checksum-sha256"]) 148 | if res.headers.hasKey("x-amz-server-side-encryption-customer-algorithm"): 149 | result.sseCustomerAlgorithm = some($res.headers["x-amz-server-side-encryption-customer-algorithm"]) 150 | if res.headers.hasKey("x-amz-server-side-encryption-customer-key-MD5"): 151 | result.sseCustomerKeyMD5 = some($res.headers["x-amz-server-side-encryption-customer-key-MD5"]) 152 | if res.headers.hasKey("x-amz-server-side-encryption-aws-kms-key-id"): 153 | result.sseKMSKeyId = some($res.headers["x-amz-server-side-encryption-aws-kms-key-id"]) 154 | if res.headers.hasKey("x-amz-server-side-encryption-bucket-key-enabled"): 155 | result.bucketKeyEnabled = some(parseBool(res.headers[ 156 | "x-amz-server-side-encryption-bucket-key-enabled"])) 157 | if res.headers.hasKey("x-amz-request-charged"): 158 | result.requestCharged = some($res.headers["x-amz-request-charged"]) 159 | 160 | proc main() {.async.} = 161 | # this is just a scoped testing function 162 | let 163 | accessKey = os.getEnv("AWS_ACCESS_KEY_ID") 164 | secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY") 165 | region = os.getEnv("AWS_REGION") 166 | bucket = os.getEnv("AWS_BUCKET") 167 | file = "testFile.bin" 168 | key = "testFile.bin" 169 | 170 | let credentials = AwsCreds(AWS_ACCESS_KEY_ID: accessKey, AWS_SECRET_ACCESS_KEY: secretKey) 171 | var client = newAsyncHttpClient() 172 | 173 | 174 | let listMultipartUploadsRequest = 
ListMultipartUploadsRequest( 175 | bucket: bucket, 176 | prefix: some("test") 177 | ) 178 | let listMultipartUploadsRes = await client.listMultipartUploads(credentials=credentials, bucket=bucket, region=region, args=listMultipartUploadsRequest) 179 | 180 | if listMultipartUploadsRes.uploads.isSome(): 181 | var uploads = listMultipartUploadsRes.uploads.get() 182 | echo uploads.len() 183 | 184 | for upload in uploads: 185 | let abortMultipartUploadRequest = AbortMultipartUploadRequest( 186 | bucket: bucket, 187 | key: upload.key, 188 | uploadId: upload.uploadId.get() 189 | ) 190 | 191 | try: 192 | var abortClient = newAsyncHttpClient() 193 | let abortMultipartUploadResult = await abortClient.abortMultipartUpload(credentials=credentials, bucket=bucket, region=region, args=abortMultipartUploadRequest) 194 | echo abortMultipartUploadResult.toJson().parseJson().pretty() 195 | except: 196 | echo getCurrentExceptionMsg() 197 | 198 | # read the file 199 | # split the files bigger then 5MB 200 | # add the remainder to the last chunk 201 | let fileBuffer = file.readFile() 202 | let minChunkSize = 1024*1024*5 203 | let chunkCount = fileBuffer.len div minChunkSize 204 | var chunkSizes: seq[int] = @[] 205 | for i in 0.. 
uploadPart" 246 | echo res.toJson().parseJson().pretty() 247 | 248 | if completedMultipartUpload.parts.isNone: 249 | raise newException(ValueError, "parts is None, please initialize it") 250 | 251 | # list the parts before completing 252 | let listPartsResquest = ListPartsRequest( 253 | bucket: bucket, 254 | key: some(key), 255 | uploadId: some(createMultiPartUploadResult.uploadId) 256 | ) 257 | let listPartsResult = await client.listParts(credentials=credentials, bucket=bucket, region=region, args=listPartsResquest) 258 | echo "\n> listPartsResult" 259 | echo listPartsResult.toJson().parseJson().pretty() 260 | 261 | 262 | let completedPart = CompletedPart( 263 | eTag: res.eTag, 264 | partNumber: some(partNumber) 265 | ) 266 | echo "\n> completedPart" 267 | echo completedPart.toJson().parseJson().pretty() 268 | 269 | var parts = completedMultipartUpload.parts.get() 270 | parts.add(completedPart) 271 | completedMultipartUpload.parts = some(parts) 272 | 273 | 274 | let completeMultipartUploadRequest = CompleteMultipartUploadRequest( 275 | bucket: bucket, 276 | key: key, 277 | uploadId: createMultiPartUploadResult.uploadId, 278 | multipartUpload: some(completedMultipartUpload) 279 | ) 280 | echo completeMultipartUploadRequest.toJson().parseJson().pretty() 281 | 282 | let completeMultipartUploadResult = await client.completeMultipartUpload( 283 | credentials = credentials, bucket = bucket, 284 | region = region, args = completeMultipartUploadRequest) 285 | echo "\n> completeMultipartUpload" 286 | echo completeMultipartUploadResult.toJson().parseJson().pretty() 287 | 288 | when isMainModule: 289 | import dotenv 290 | load() 291 | 292 | try: 293 | waitFor main() 294 | except: 295 | ## treeform async message fix 296 | ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160 297 | let msg = getCurrentExceptionMsg() 298 | for line in msg.split("\n"): 299 | var line = line.replace("\\", "/") 300 | if "/lib/pure/async" in line: 301 | continue 302 | if "#[" in 
proc dumpHook*(s: var string, v: Option[DateTime]) =
  ## jsony serializer for Option[DateTime]: writes `null` when absent,
  ## otherwise a quoted ISO8601-style timestamp.
  ## BUGFIX: uses `HH` (24-hour clock). The previous `hh` pattern is
  ## 12-hour in std/times and, with no AM/PM token, any afternoon
  ## timestamp was written ambiguously and could not round-trip.
  if v.isNone:
    s.add("null")
  else:
    s.add("\"" & v.get().format("yyyy-MM-dd'T'HH:mm:ss'.'fffzzz") & "\"" )

proc dumpHook*(s: var string, v: DateTime) =
  ## jsony serializer for DateTime (24-hour `HH`, see note above).
  s.add("\"" & v.format("yyyy-MM-dd'T'HH:mm:ss'.'fffzzz") & "\"" )

proc parseHook*(s: string, i: var int, v: var DateTime) =
  ## jsony time convert
  ## Reads a JSON string and tries a list of common timestamp layouts
  ## until one parses; raises ValueError when none match.
  var str: string
  parseHook(s, i, str)
  # BUGFIX: 24-hour `HH` patterns — the previous `hh` (12-hour) patterns
  # rejected any timestamp with an hour component above 12.
  const timeFormats = [
    "yyyy-MM-dd",
    "yyyy-MM-dd HH:mm:ss",
    "yyyy-MM-dd HH:mm:ssz",
    "yyyy-MM-dd HH:mm:sszz",
    "yyyy-MM-dd HH:mm:sszzzz",
    "yyyy-MM-dd'T'HH:mm:ss'.'fff",
    "yyyy-MM-dd'T'HH:mm:ss'.'fffz",
    "yyyy-MM-dd'T'HH:mm:ss'.'fffzz",
    "yyyy-MM-dd'T'HH:mm:ss'.'fffzzz",
    "yyyy-MM-dd'T'HH:mm:ss'.'fff'Z'",
    "yyyy-MM-dd'T'HH:mm:ss'.'fff'Z'zz",
    "yyyy-MM-dd'T'HH:mm:ss'.'fff'Z'zzz",
  ]
  for fmt in timeFormats:
    try:
      v = parse(str, fmt, utc())
      return
    except CatchableError:
      # wrong layout — try the next one
      continue
  raise newException(ValueError, "Invalid date format: " & str)
proc parseHook*(s: string, i: var int, v: var int) =
  ## attempt to parse Ints
  ## S3's XML-derived JSON carries scalars as strings (e.g. "1000"), so
  ## read a JSON string first and convert it with parseInt.
  ## NOTE(review): assumes the int always arrives quoted — verify against
  ## the xml2Json output this is fed from.
  var str: string
  parseHook(s, i, str)
  v = parseInt(str)

proc parseHook*(s: string, i: var int, v: var float) =
  ## attempt to parse Floats
  ## Same string-first strategy as the int hook above.
  var str: string
  parseHook(s, i, str)
  v = parseFloat(str)

proc parseHook*(s: string, i: var int, v: var Option[bool]) =
  # attempt to parse Bools
  # reads a JSON string ("true"/"false") and wraps the parsed value;
  # parseBool raises ValueError on anything it does not recognize.
  var str: string
  parseHook(s, i, str)
  v = some(parseBool(str))

proc renameHook*(v: object, fieldName: var string) =
  # loosely match field to names
  # MyField -> myfield
  # myField -> myFields
  # Maps S3's PascalCase XML tags onto the camelCase (and sometimes
  # pluralized) Nim object fields while jsony deserializes.
  runnableExamples:
    type
      MyTest = object
        id: string
        myFancyField: string

    var myJson = """
    {
      "Id": "someId",
      "MyFancyField": "foo"
    }
    """
    let myTest = myJson.fromJson(MyTest)
    echo myTest

  # MyField -> myField
  # first try the straight camelCase spelling
  var tempFieldName = fieldName
  tempFieldName[0] = tempFieldName[0].toLowerAscii()
  for x , _ in v.fieldPairs():
    if tempFieldName == x:
      fieldName = tempFieldName
      return
  # try to match with an upload-> uploads
  # fall back to the pluralized field name (e.g. "Upload" -> "uploads")
  tempFieldName &= "s"
  for x , _ in v.fieldPairs():
    if tempFieldName == x:
      fieldName = tempFieldName
      return
proc getUnescapedChar(str: string): string =
  ## Maps an escaped XML entity string back to its literal character
  ## (e.g. the entity for '<' yields "<"). Returns "" when `str` is not a
  ## known entity. (Renamed from the misspelled, file-private
  ## `getUnescaptedChar`.)
  for (c, cs) in escapedChars:
    if str == cs:
      # BUGFIX: return the literal character, not `cs` — returning the
      # escaped entity string left the text escaped, defeating the proc.
      return $c

proc getUnescapedString(xmlNode: XmlNode): string =
  ## Re-assembles the text of a node whose escaped characters were split
  ## into separate text children by the XML parser.
  for child in xmlNode.items():
    if child.kind() == xnText:
      let txt = child.text()
      if txt in escapedCharStrings:
        result.add txt.getUnescapedChar()
      else:
        result.add txt

proc xml2Json*(xmlNode: XmlNode, splitAttr: bool=false, isFrist: bool=true): JsonNode =
  ## Convert an XML node to a JSON node.
  ## An empty element becomes JNull; element text is always emitted as
  ## JString (so numeric text like 1000 stays a string, never a JInt).
  ## Repeated child tags collapse into a JSON array; unique tags become
  ## object fields. When `splitAttr` is true, attributes are grouped under
  ## an "attributes" object instead of being merged with the child keys.
  ## (`isFrist` [sic] is part of the exported signature and is kept for
  ## backward compatibility; callers should leave it true.)

  # deal with root node: wrap it so its tag becomes the outermost key
  if isFrist:
    result = newJObject()
    result[xmlNode.tag()] = xmlNode.xml2Json(splitAttr, false)
    return result
  case xmlNode.kind():
  of xnVerbatimText, xnText:
    result = newJString(xmlNode.text)
  of xnElement:
    # if element has no children return
    let children = xmlNode.items().toSeq()
    if children.len == 0:
      return newJNull()
    # for some reason XML treats escaped characters as their own nodes;
    # this collapses them back into a single string
    if xmlNode.hasEscapedChar():
      # BUGFIX: removed a dead `result = newJObject()` that was
      # immediately overwritten by this return.
      return newJString(xmlNode.getUnescapedString())

    result = newJObject()
    # if element has attributes
    if xmlNode.attrsLen() > 0:
      if splitAttr:
        # BUGFIX: the "attributes" object used to be re-created inside the
        # loop, so only the last attribute survived when splitAttr was set.
        result["attributes"] = newJObject()
      for key, val in xmlNode.attrs().pairs():
        if splitAttr:
          result["attributes"][key] = newJString(val)
        else:
          result[key] = newJString(val)

    # if it has children
    # children need to be added as either an array or object
    # if node has multiple children with the same tag
    # it is assumed to be an array otherwise treat it as an object
    for child in children:
      if child.kind() in {xnText, xnVerbatimText}:
        result = newJString(child.text)
      elif child.kind() == xnElement:
        if child.hasEscapedChar():
          result[child.tag()] = newJString(child.getUnescapedString())
        elif result.hasKey(child.tag()):
          # tag seen before -> promote to / extend an array
          if result[child.tag()].kind != JArray:
            let tempArray = newJArray()
            tempArray.add(result[child.tag()])
            result[child.tag()] = tempArray
          result[child.tag()].add(child.xml2Json(splitAttr, false))
        else:
          # first occurrence -> plain object field
          result[child.tag()] = child.xml2Json(splitAttr, false)
      else:
        raise newException(ValueError, "kind not implemented: " & $child.kind())
  of xnComment:
    result = newJObject()
    result["comment"] = newJString(xmlNode.text)
  of xnCData:
    result = newJObject()
    result["cdata"] = newJString(xmlNode.text)
  of xnEntity:
    result = newJObject()
    result["entity"] = newJString(xmlNode.text)
# enums
type
  ## Shared enums and small helper objects for the S3 multipart API models.
  ## Enum string values match the exact tokens the S3 REST API expects.
  ObjectCannedACL* = enum
    authenticated_read = "authenticated-read",
    aws_exec_read = "aws-exec-read",
    bucket_owner_full_control = "bucket-owner-full-control",
    bucket_owner_read = "bucket-owner-read",
    private = "private",
    public_read = "public-read",
    public_read_write = "public-read-write"

  CheckSumAlgorithm* = enum
    CRC32 = "CRC32",
    CRC32C = "CRC32C",
    SHA1 = "SHA1",
    SHA256 = "SHA256"

  CopyReplace* = enum
    COPY = "COPY",
    REPLACE = "REPLACE"

  OnOff = enum
    OFF = "OFF",
    ON = "ON"

  # kept (misspelled) for backward compatibility with existing callers
  ObjectLockLeagalHoldStatus* = OnOff
  # correctly-spelled alias; prefer this in new code
  ObjectLockLegalHoldStatus* = OnOff

  ObjectLockMode* = enum
    COMPLIANCE = "COMPLIANCE",
    GOVERNANCE = "GOVERNANCE"

  ServerSideEncryption* = enum
    AES256 = "AES256",
    awsKms = "aws:kms"

  StorageClass* = enum
    DEEP_ARCHIVE = "DEEP_ARCHIVE",
    GLACIER = "GLACIER",
    GLACIER_IR = "GLACIER_IR",
    INTELLIGENT_TIERING = "INTELLIGENT_TIERING",
    ONEZONE_IA = "ONEZONE_IA",
    OUTPOSTS = "OUTPOSTS",
    REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY",
    STANDARD = "STANDARD",
    STANDARD_IA = "STANDARD_IA"

  TaggingDirective* = CopyReplace
  MetadataDirective* = CopyReplace

  Request = enum
    requester = "requester"

  RequestPayer* = Request
  RequestCharged* = Request

  EncodingType* = enum
    url = "url"

  CommonPrefix* = object
    # BUGFIX: field exported — the type is public and appears in
    # ListMultipartUploadsResult, but its field was private, so library
    # consumers could not read the prefix at all.
    Prefix*: Option[string]

  # private base for the two exported aliases below
  DisplayAccount = object
    # BUGFIX: fields exported for the same reason as CommonPrefix.Prefix
    DisplayName*: Option[string]
    ID*: Option[string]

  Owner* = DisplayAccount
  Initiator* = DisplayAccount

# models
20 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 21 | checksumCRC32C*: Option[string] 22 | 23 | ## A base64-encoded, 32-bit SHA1 checksum of the uploaded part. 24 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 25 | checksumSHA1*: Option[string] 26 | 27 | ## A base64-encoded, 32-bit SHA256 checksum of the uploaded part. 28 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 29 | checksumSHA256*: Option[string] 30 | 31 | ## The part number of the uploaded part, restricted to 1-10000 32 | partNumber*: Option[int] 33 | 34 | 35 | CompletedMultipartUpload* = object 36 | # can result in a 400 error when not provided by the request. 37 | parts*: Option[seq[CompletedPart]] 38 | 39 | 40 | CompleteMultipartUploadRequest* = object 41 | 42 | ## The bucket name of the uploaded the part. 43 | bucket*: string 44 | 45 | ## Key of the object to upload. AKA the filepath/filename. 46 | key*: string 47 | 48 | ## The ID that identifies the multipart upload. 49 | uploadId*: string 50 | 51 | # Multipart upload request body 52 | multipartUpload*: Option[CompletedMultipartUpload] 53 | 54 | ## A base64-encoded, 32-bit CRC32 checksum of the uploaded part. 55 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 56 | checksumCRC32*: Option[string] 57 | 58 | ## A base64-encoded, 32-bit CRC32C checksum of the uploaded part. 59 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 60 | checksumCRC32C*: Option[string] 61 | 62 | ## A base64-encoded, 32-bit SHA1 checksum of the uploaded part. 63 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 64 | checksumSHA1*: Option[string] 65 | 66 | ## A base64-encoded, 32-bit SHA256 checksum of the uploaded part. 
67 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 68 | checksumSHA256*: Option[string] 69 | 70 | ## Tag to specify if the Requester Pays Buckets 71 | ## https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html" 72 | # requestPayer*: Option[RequestPayer | string] 73 | requestPayer*: Option[string] 74 | 75 | ## ID of the expected bucket owner. If the bucket is owned by a different account the request will fail with error code 403. 76 | expectedBucketOwner*: Option[string] 77 | 78 | # Server-side encryption (SSE) algorithm used to encrypt the upload. 79 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 80 | sseCustomerAlgorithm*: Option[string] 81 | 82 | # Server-side encryption (SSE) Key used to encrypt the upload. 83 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 84 | sseCustomerKey*: Option[string] 85 | 86 | # Server-side encryption (SSE) MD5 checksum. 
87 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 88 | sseCustomerKeyMD5*: Option[string] 89 | 90 | 91 | CompleteMultipartUploadResult* = object 92 | 93 | location*: Option[string] 94 | 95 | bucket*: Option[string] 96 | 97 | key*: Option[string] 98 | 99 | expiration*: Option[string] 100 | 101 | eTag*: Option[string] 102 | 103 | checksumCRC32*: Option[string] 104 | 105 | checksumCRC32C*: Option[string] 106 | 107 | checksumSHA1*: Option[string] 108 | 109 | checksumSHA256*: Option[string] 110 | 111 | serverSideEncryption*: Option[ServerSideEncryption] 112 | 113 | versionId*: Option[string] 114 | 115 | sseKMSKeyId*: Option[string] 116 | 117 | bucketKeyEnabled*: Option[bool] 118 | 119 | requestCharged*: Option[string] -------------------------------------------------------------------------------- /src/awsS3/multipart/models/createMultipartUpload.nim: -------------------------------------------------------------------------------- 1 | 2 | import 3 | common, 4 | options, 5 | times, 6 | tables 7 | 8 | 9 | 10 | type 11 | ## this file is the type definition for the s3 api was created from the aws docs 12 | ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html 13 | CreateMultipartUploadRequest* = object 14 | # acl*: Option[ObjectCannedACL | string] 15 | acl*: Option[ObjectCannedACL] 16 | 17 | ## The bucket name upload the part to. 18 | bucket*: string 19 | 20 | cacheControl*: Option[string] 21 | 22 | contentDisposition*: Option[string] 23 | 24 | contentEncoding*: Option[string] 25 | 26 | contentLanguage*: Option[string] 27 | 28 | ## MIME type 29 | contentType*: Option[string] 30 | 31 | # The date that the multipart upload is to expire. 32 | expires*: Option[DateTime] 33 | 34 | ## Grant READ, READ_ACP, and WRITE_ACP permissions on the upload. 35 | grantFullControl*: Option[string] 36 | 37 | ## Grant READ permissions on the upload. 
38 | grantRead*: Option[string] 39 | 40 | ## Grant READACP permissions on the upload. 41 | grantReadACP*: Option[string] 42 | 43 | ## Grant WRITEACP permissions on the upload. 44 | grantWriteACP*: Option[string] 45 | 46 | ## Key of the file to upload. AKA the filepath/filename. 47 | key*: string 48 | 49 | ## A map of metadata to store with the file in S3 50 | # metadata*: Option[Table[string, string]] 51 | 52 | # Server-side encryption (SSE) algorithm used to encrypt the upload. 53 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 54 | serverSideEncryption*: Option[ServerSideEncryption] 55 | 56 | ## Storage class to be used 57 | ## https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html 58 | # storageClass*: Option[StorageClass | string] 59 | storageClass*: Option[StorageClass] 60 | 61 | ## Specifies the redirect url if the bucket is being used as a website. 62 | websiteRedirectLocation*: Option[string] 63 | 64 | ## The algorithm used to encrypt the upload. 65 | sseCustomerAlgorithm*: Option[string] 66 | 67 | ## specifies the customer encryption key. Must match "x-amz-server-side-encryption-customer-algorithm" in headers 68 | sseCustomerKey*: Option[string] 69 | 70 | ## The MD5 Hash of the customer key to be used for encryption. To verify the integrity of the customer key. 71 | sseCustomerKeyMD5*: Option[string] 72 | 73 | ## Specify the SSEKM Key id to be used from AWS:KMS to encrypt the upload. 74 | sseKMSKeyId*: Option[string] 75 | 76 | ## Specify the SSEKM Encryption Context to be used from AWS:KMS to encrypt the upload. 77 | sseKMSEncryptionContext*: Option[string] 78 | 79 | ## Specify to use S3 Bucket Key for server-side encryption AWS KMS (SSE-KMS) 80 | bucketKeyEnabled*: Option[bool] 81 | 82 | ## Requester Payer for the specified upload. 83 | ## https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html 84 | requestPayer*: Option[string] 85 | 86 | ## Tag set of the upload. 
must be URL encoded. 87 | tagging*: Option[string] 88 | 89 | ## Specifies the object lock mode that you want to apply to the uploaded object. 90 | objectLockMode*: Option[ObjectLockMode] 91 | 92 | ## Specifies the date and time when you want the object lock to expire. 93 | objectLockRetainUntilDate*: Option[DateTime] 94 | 95 | ## Specifies whether you want to apply a Legal Hold to the uploaded object. 96 | objectLockLegalHoldStatus*: Option[ObjectLockLeagalHoldStatus] 97 | 98 | ## ID of the expected bucket owner. If the bucket is owned by a different account the request will fail with error code 403. 99 | expectedBucketOwner*: Option[string] 100 | 101 | ## The algorithm used check the integrity of the object during the transfer. 102 | checksumAlgorithm*: Option[ChecksumAlgorithm] 103 | 104 | InitiateMultipartUploadResult* = object 105 | 106 | ## specified abort date for incomplete multipart uploads 107 | ## https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config 108 | abortDate*: Option[DateTime] 109 | 110 | ## specified abort rule id for incomplete multipart uploads. "x-amz-abort-date" 111 | abortRuleId*: Option[string] 112 | 113 | ## The bucket name upload the part to. 114 | bucket*: string 115 | 116 | ## Key of the object to upload. AKA the filepath/filename. 117 | key*: string 118 | 119 | ## The ID that identifies the multipart upload 120 | uploadId*: string 121 | 122 | # Server-side encryption (SSE) algorithm used to encrypt the upload. 123 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 124 | # serverSideEncryption*: Option[string] 125 | serverSideEncryption*: Option[ServerSideEncryption] 126 | 127 | ## The algorithm used to encrypt the upload. 128 | sseCustomerAlgorithm*: Option[string] 129 | 130 | # Server-side encryption (SSE) Key used to encrypt the upload. 
131 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 132 | sseCustomerKey*: Option[string] 133 | 134 | # Server-side encryption (SSE) MD5 checksum. 135 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 136 | # sseCustomerKeyMD5*: Option[string] 137 | sseCustomerKeyMD5*: Option[string] 138 | 139 | ## AWS Key Management Service (AWS KMS) 140 | sseKMSKeyId*: Option[string] 141 | 142 | ## AWS KMS Encryption Context 143 | sseKMSEncryptionContext*: Option[string] 144 | 145 | ## S3 Bucket Key for server-side encryption AWS KMS (SSE-KMS) 146 | bucketKeyEnabled*: Option[bool] 147 | 148 | # Requester Pays status for the specified bucket. 149 | requestCharged*: Option[string] 150 | 151 | ## The algorithm used check the integrity of the object during the transfer. 152 | checksumAlgorithm*: Option[ChecksumAlgorithm] -------------------------------------------------------------------------------- /src/awsS3/multipart/models/listMultipartUploads.nim: -------------------------------------------------------------------------------- 1 | import 2 | options, 3 | common, 4 | multipartUpload 5 | 6 | 7 | type 8 | ## this file is the type definition for the s3 api was created from the aws docs 9 | ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html 10 | ListMultipartUploadsRequest* = object 11 | 12 | ## The bucket name of the uploaded the part. 13 | ## bucket 14 | bucket*: string 15 | 16 | ## The character you want to use to group the keys. 17 | ## delimiter 18 | delimiter*: Option[string] 19 | 20 | ## Request AWS S3 to encode the object keys in the response and specifies the encoding 21 | ## encoding-type 22 | encodingType*: Option[string] 23 | 24 | ## Specifies the upload part with the upload-id-marker 25 | ## key-marker 26 | keyMarker*: Option[string] 27 | 28 | ## sets the maximum number of uploads to return. 
range 1-1000 29 | ## max-uploads 30 | maxUploads*: Option[int] 31 | 32 | ## List in-progress uploads only for those keys that begin with the specified prefix. 33 | ## prefix 34 | prefix*: Option[string] 35 | 36 | ## Together with the key-marker, specifies the upload after which listing should begin with. 37 | ## upload-id-marker 38 | uploadIdMarker*: Option[string] 39 | 40 | ## List the expected bucket owner for this request. If the bucket is owned by a different owner, the server will return an HTTP 403 (Access Denied) error. 41 | ## x-amz-expected-bucket-owner 42 | expectedBucketOwner*: Option[string] 43 | 44 | 45 | ListMultipartUploadsResult* = object 46 | ## The bucket name of the uploaded the part. 47 | ## bucket 48 | bucket*: string 49 | 50 | ## If you specify a delimiter in your request, then the response includes a CommonPrefixes. 51 | commonPrefixes*: Option[seq[CommonPrefix]] 52 | 53 | ## The character specified to use to group the keys. 54 | ## delimiter 55 | delimiter*: Option[string] 56 | 57 | ## Request AWS S3 to encode the object keys in the response and specifies the encoding 58 | ## encoding-type 59 | encodingType*: Option[string] 60 | 61 | 62 | ## Indicates whether the returned list of multipart uploads is truncated. 63 | isTruncated*: Option[bool] 64 | 65 | ## Specifies the upload part with the upload-id-marker 66 | ## key-marker 67 | keyMarker*: Option[string] 68 | 69 | ## Together with the key-marker, specifies the upload after which listing should begin with. 70 | ## upload-id-marker 71 | uploadIdMarker*: Option[string] 72 | 73 | ## Maximum number of multipart uploads that could have been included in the response. 74 | maxUploads*: Option[int] 75 | 76 | ## When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request. 77 | nextKeyMarker*: Option[string] 78 | 79 | ## List in-progress uploads only for those keys that begin with the specified prefix. 
type
  ## Request/response models for the S3 ListParts operation, generated
  ## from the AWS docs:
  ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html

  ListPartsRequest* = object
    ## The bucket name of the uploaded the part.
    ## bucket
    bucket*: string

    ## Key of the object the multipart upload belongs to.
    key*: Option[string]

    ## Maximum number of parts to return (sent as a query parameter,
    ## hence string-typed).
    maxParts*: Option[string]

    ## Part number after which listing begins (query parameter).
    partNumberMarker*: Option[string]

    ## The ID that identifies the multipart upload.
    uploadId*: Option[string]

    ## List the expected bucket owner for this request. If the bucket is owned by a different owner, the server will return an HTTP 403 (Access Denied) error.
    ## x-amz-expected-bucket-owner
    expectedBucketOwner*: Option[string]

    ## Requester Pays marker, see
    ## https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    requestPayer*: Option[string]

    ## SSE-C algorithm / key / key-MD5 headers.
    sseCustomerAlgorithm*: Option[string]

    sseCustomerKey*: Option[string]

    sseCustomerKeyMD5*: Option[string]


  ListPartsResult* = object
    ## Abort date/rule for incomplete uploads (when a lifecycle rule applies).
    abortDate*: Option[DateTime]

    abortRuleId*: Option[string]

    requestCharged*: Option[string]

    # BUGFIX: exported — was private, so the raw payload was unreachable
    # from outside this module.
    listPartsResult*: string

    bucket*: Option[string]

    # BUGFIX: exported — was private.
    checkSumAlgorithm*: Option[CheckSumAlgorithm]

    initiator*: Initiator

    ## Whether the listing was truncated (use nextPartNumberMarker to page).
    isTruncated*: Option[bool]

    key*: Option[string]

    maxParts*: Option[int]

    nextPartNumberMarker*: Option[int]

    owner*: Owner

    # BUGFIX: exported — was private, making the listed parts (the entire
    # point of ListParts) unreadable by library consumers.
    parts*: Option[seq[Part]]

    partNumberMarker*: Option[int]

    storageClass*: Option[StorageClass]

    uploadId*: Option[string]
12 | key*: string 13 | 14 | ## Date and time at which the multipart upload was initiated 15 | initiated*: Option[DateTime] 16 | 17 | ## The class of storage used to store the object 18 | storageClass*: Option[StorageClass] 19 | 20 | ## Specifies the owner of the object that is part of the multipart upload. 21 | owner*: Option[Owner] 22 | 23 | ## Identifies who initiated the multipart upload 24 | initiator*: Option[Initiator] 25 | 26 | ## The algorithm that was used to create a checksum of the object 27 | checksumAlgorithm*: Option[ChecksumAlgorithm] -------------------------------------------------------------------------------- /src/awsS3/multipart/models/part.nim: -------------------------------------------------------------------------------- 1 | import 2 | options, 3 | times 4 | 5 | type 6 | ## this file is the type definition for the s3 api was created from the aws docs 7 | ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html 8 | Part* = object 9 | 10 | ## A base64-encoded, 32-bit CRC32 checksum of the uploaded part. 11 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 12 | checksumCRC32*: Option[string] 13 | 14 | ## A base64-encoded, 32-bit CRC32C checksum of the uploaded part. 15 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 16 | checksumCRC32C*: Option[string] 17 | 18 | ## A base64-encoded, 32-bit SHA1 checksum of the uploaded part. 19 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 20 | checksumSHA1*: Option[string] 21 | 22 | ## A base64-encoded, 32-bit SHA256 checksum of the uploaded part. 
  ResponseMetadata* = object
    ## Metadata attached to a parsed S3 response: HTTP status, the various
    ## request identifiers S3 returns, and retry accounting.

    ## The status code of the last HTTP response received for this operation.
    httpStatusCode*: Option[HttpCode]

    ## A unique identifier for the last request sent for this operation
    ## (useful for debugging with AWS support).
    requestId*: Option[string]

    ## An identifier for the last request sent.
    ## (useful for debugging with AWS support)
    extendedRequestId*: Option[string]

    ## An identifier of the last request sent.
    ## (useful for debugging with AWS support)
    cfId*: Option[string]

    ## The number of times this operation was attempted.
    attempts*: Option[int]

    ## Total time spent waiting between retries in milliseconds.
    totalRetryDelay*: Option[int]
34 | `$metadata`*: ResponseMetadata 35 | -------------------------------------------------------------------------------- /src/awsS3/multipart/models/uploadPart.nim: -------------------------------------------------------------------------------- 1 | import 2 | common, 3 | options 4 | 5 | # type 6 | # Body = openArray[byte] or openArray[char] or string 7 | 8 | 9 | 10 | 11 | type 12 | ## this file is the type definition for the s3 api was created from the aws docs 13 | ## https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html 14 | # UploadPartCommandRequest*[T: seq[byte] or seq[char] or string] = object 15 | UploadPartCommandRequest* = object 16 | 17 | ## The body of the request. 18 | ## favor using bytes over strings as hashing could cause issues 19 | # body*: T 20 | body*: string 21 | 22 | ## The bucket name of the uploaded the part. 23 | bucket*: string 24 | 25 | ## Specify the content length if it can not be determined automatically. 26 | contentLength*: Option[int] 27 | 28 | ## Base64-encoded 128-bit MD5 digest of the part data. Used to verify the integrity of the 29 | contentMD5*: Option[string] 30 | 31 | ## The algorithm used to verify the integrity of the part data. 32 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 33 | checksumAlgorithm*: Option[ChecksumAlgorithm] 34 | 35 | ## A base64-encoded, 32-bit CRC32 checksum of the uploaded part. 36 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 37 | checksumCRC32*: Option[string] 38 | 39 | ## A base64-encoded, 32-bit CRC32C checksum of the uploaded part. 40 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 41 | checksumCRC32C*: Option[string] 42 | 43 | ## A base64-encoded, 32-bit SHA1 checksum of the uploaded part. 
44 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 45 | checksumSHA1*: Option[string] 46 | 47 | ## A base64-encoded, 32-bit SHA256 checksum of the uploaded part. 48 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 49 | checksumSHA256*: Option[string] 50 | 51 | ## Key of the object to upload. AKA the filepath/filename. 52 | key*: string 53 | 54 | ## The ID that identifies the multipart upload. 55 | uploadId*: string 56 | 57 | ## The part number of the part being uploaded. range 1-10000 58 | partNumber*: int 59 | 60 | # Server-side encryption (SSE) algorithm used to encrypt the upload. 61 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 62 | sseCustomerAlgorithm*: Option[string] 63 | 64 | # Server-side encryption (SSE) Key used to encrypt the upload. 65 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 66 | sseCustomerKey*: Option[string] 67 | 68 | # Server-side encryption (SSE) MD5 checksum. 69 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 70 | sseCustomerKeyMD5*: Option[string] 71 | 72 | 73 | ## Tag to specify if the Requester Pays Buckets 74 | ## https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html" 75 | requestPayer*: Option[string] 76 | 77 | ## List the expected bucket owner for this request. If the bucket is owned by a different owner, the server will return an HTTP 403 (Access Denied) error. 78 | ## x-amz-expected-bucket-owner 79 | expectedBucketOwner*: Option[string] 80 | 81 | UploadPartResult* = object 82 | 83 | ## The server side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms). 84 | serverSideEncryption: Option[ServerSideEncryption] 85 | 86 | ##
## Entity tag for the uploaded object.
87 | eTag*: Option[string] 88 | 89 | ## A base64-encoded, 32-bit CRC32 checksum of the uploaded part. 90 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 91 | checksumCRC32*: Option[string] 92 | 93 | ## A base64-encoded, 32-bit CRC32C checksum of the uploaded part. 94 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 95 | checksumCRC32C*: Option[string] 96 | 97 | ## A base64-encoded, 32-bit SHA1 checksum of the uploaded part. 98 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 99 | checksumSHA1*: Option[string] 100 | 101 | ## A base64-encoded, 32-bit SHA256 checksum of the uploaded part. 102 | ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html 103 | checksumSHA256*: Option[string] 104 | 105 | # Server-side encryption (SSE) algorithm used to encrypt the upload. 106 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 107 | sseCustomerAlgorithm*: Option[string] 108 | 109 | # Server-side encryption (SSE) MD5 checksum. 110 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html 111 | sseCustomerKeyMD5*: Option[string] 112 | 113 | 114 | ## AWS Key Management Service (AWS KMS) 115 | sseKMSKeyId*: Option[string] 116 | 117 | ## S3 Bucket Key for server-side encryption AWS KMS (SSE-KMS) 118 | bucketKeyEnabled*: Option[bool] 119 | 120 | # Requester Pays status for the specified bucket. 
# canonical headers, path, qs, encodeUri from
# https://github.com/Gooseus/nimaws
proc uriEncode(
  s: string,
  notEncode: set[char]
): string =
  ## Percent-encodes `s` for AWS SigV4 canonicalization: unreserved
  ## characters (alphanumerics plus '-', '.', '_', '~') and anything in
  ## `notEncode` pass through unchanged; every other byte becomes "%XX"
  ## with uppercase hex digits.
  # Hoisted out of the loop: the original rebuilt this set union for
  # every character of the input.
  let passThrough = notEncode + {'a'..'z', 'A'..'Z', '0'..'9', '-', '.', '_', '~'}
  result = newStringOfCap(s.len)
  for c in s:
    if c in passThrough:
      result.add(c)
    else:
      result.add('%')
      result.add(toHex(ord(c), 2).toUpperAscii())
proc createSigningString*(
    scope: AwsScope,
    request: string,
    algorithm: string,
    termination: string
): string =
  ## Builds the SigV4 "string to sign":
  ## algorithm \n timestamp \n date/region/service/termination \n SHA256(request).
  ## `request` is the canonical request; its hash is lowercase hex.
  # Fix: these bindings were declared `var` although never reassigned.
  let
    requestSHA256 = computeSHA256(request).hex().toLowerAscii()
    date = scope.date.format("yyyyMMdd")
    fullDate = scope.date.format(basicISO8601_1)
    region = scope.region
    service = scope.service
    scopeString = &"{date}/{region}/{service}"

  result = &"{algorithm}\n{fullDate}\n{scopeString}/{termination}\n{requestSHA256}"
proc signingKey*(
    secret: string,
    scope: AwsScope,
    termination: string
): string =
  ## Derives the SigV4 signing key by chaining HMAC-SHA256 over the
  ## scope components: kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret,
  ## date), region), service), termination).
  # Fix: these bindings were declared `var` although never reassigned.
  let
    date = scope.date.format("yyyyMMdd")
    kDate = $hmac_sha256(&"AWS4{secret}", date)
    kRegion = $hmac_sha256(kDate, scope.region)
    kService = $hmac_sha256(kRegion, scope.service)
  result = $hmac_sha256(kService, termination)
proc createAuthorizedCanonicalRequest*(
    credentials: AwsCreds,
    httpMethod: HttpMethod,
    url: string,
    payload: seq[byte] | seq[char] | string,
    headers: HttpHeaders,
    scope: AwsScope,
    algorithm: string,
    termination: string
): CanonicalRequestResult =
  ## Runs the full SigV4 signing pipeline and returns the canonical
  ## request bundle with its `authorization` header value filled in.
  ## `headers` is copied locally; the caller's HttpHeaders is not mutated
  ## through this binding.
  # https://docs.aws.amazon.com/general/latest/gr/create-signed-request.html
  # Step 1: Create a canonical request
  # Step 2: Create a hash of the canonical request
  # Step 3: Create a string to sign
  # Step 4: Calculate the signature
  # Step 5: Add the signature to the request
  var headers = headers
  headers["x-amz-date"] = scope.date.format(basicISO8601_1)
  # aws hash is sensitive to string | seq[char|byte]
  # create canonical request
  var canonicalRequestResult = createCanonicalRequest(
    headers,
    httpMethod,
    url,
    payload,
    computeHash=true
  )

  # create string to sign
  let to_sign = createSigningString(
    scope,
    canonicalRequestResult.canonicalRequest,
    algorithm,
    termination
  )
  # create signature (key derived from the secret + scope, then applied
  # to the string-to-sign)
  let
    signingKey = signingKey(credentials.AWS_SECRET_ACCESS_KEY, scope, termination)
    sig = createSignature(signingKey, to_sign)

  # create authorization header
  let authorization = createAuthorizationHeader(
    credentials.AWS_ACCESS_KEY_ID,
    scope,
    canonicalRequestResult.canonicalHeaders.signedHeaders,
    sig,
    algorithm,
    termination
  )
  canonicalRequestResult.authorization = authorization
  return canonicalRequestResult
region: string, 274 | service: string, 275 | payload: seq[byte] | seq[char] | string, 276 | algorithm = "AWS4-HMAC-SHA256", 277 | termination = "aws4_request" 278 | ): Future[AsyncResponse] = 279 | 280 | let 281 | date = getTime().utc() 282 | scope = AwsScope(date: date, region: region, service: service) 283 | 284 | var authorizedCanonicalRequest = createAuthorizedCanonicalRequest( 285 | credentials, 286 | httpMethod, 287 | url, 288 | payload, 289 | headers, 290 | scope, 291 | algorithm, 292 | termination 293 | ) 294 | 295 | authorizedCanonicalRequest.canonicalHeaders.headers["authorization"] = authorizedCanonicalRequest.authorization 296 | var canonicalURL = authorizedCanonicalRequest.endpoint & authorizedCanonicalRequest.canonicalPath & "?" & authorizedCanonicalRequest.canonicalQuery 297 | 298 | 299 | when defined(dev): 300 | echo "\n> request.httpMethod" 301 | echo httpMethod 302 | echo "\n> request.url" 303 | echo canonicalURL 304 | echo "\n> request.client.httpClient.headers" 305 | echo authorizedCanonicalRequest.canonicalHeaders.headers 306 | 307 | return client.request(url = canonicalURL, httpMethod = httpMethod, headers=authorizedCanonicalRequest.canonicalHeaders.headers, body = $payload) 308 | 309 | 310 | 311 | 312 | proc main() {.async.} = 313 | # this is just a scoped testing function 314 | proc listMultipartUpload( 315 | client: AsyncHttpClient, 316 | credentials: AwsCreds, 317 | bucket: string, 318 | region: string 319 | ): Future[string] {.async.} = 320 | let 321 | url = &"https://{bucket}.s3.{region}.amazonaws.com/?uploads=" 322 | service = "s3" 323 | payload = "" 324 | res = await client.request(credentials=credentials, httpMethod=HttpGet, url=url, region=region, service=service, payload=payload) 325 | body = await res.body 326 | 327 | if res.code != Http200: 328 | raise newException(HttpRequestError, "Failed to list multipart upload: " & 329 | $res.code & " " & body) 330 | return body 331 | 332 | let 333 | accessKey = os.getEnv("AWS_ACCESS_KEY_ID") 
334 | secretKey = os.getEnv("AWS_SECRET_ACCESS_KEY") 335 | region = os.getEnv("AWS_REGION") 336 | bucket = os.getEnv("AWS_BUCKET") 337 | 338 | let creds = AwsCreds(AWS_ACCESS_KEY_ID: accessKey, AWS_SECRET_ACCESS_KEY: secretKey) 339 | 340 | var client = newAsyncHttpClient() 341 | echo await client.listMultipartUpload(creds, bucket, region) 342 | 343 | 344 | when isMainModule: 345 | import dotenv 346 | load() 347 | 348 | try: 349 | waitFor main() 350 | except: 351 | ## treeform async message fix 352 | ## https://github.com/nim-lang/Nim/issues/19931#issuecomment-1167658160 353 | let msg = getCurrentExceptionMsg() 354 | for line in msg.split("\n"): 355 | var line = line.replace("\\", "/") 356 | if "/lib/pure/async" in line: 357 | continue 358 | if "#[" in line: 359 | break 360 | line.removeSuffix("Iter") 361 | echo line -------------------------------------------------------------------------------- /src/awsS3/signed.nim: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | 4 | import 5 | std/[ 6 | httpclient, 7 | httpcore, 8 | json, 9 | mimetypes, 10 | os, 11 | strutils, 12 | tables, 13 | times 14 | ] 15 | 16 | import 17 | awsSigV4, 18 | awsSTS 19 | 20 | type 21 | S3ContentDisposition* = enum 22 | CDTinline 23 | CDTattachment 24 | CDTignore 25 | 26 | const 27 | mimetypeDB = mimes.toTable 28 | 29 | const 30 | #dateISO8601* = initTimeFormat "yyyyMMdd" 31 | basicISO8601* = initTimeFormat "yyyyMMdd\'T\'HHmmss\'Z\'" 32 | 33 | 34 | proc s3SignedUrl*( 35 | credsAccessKey, credsSecretKey, credsRegion: string, 36 | bucketHost, key: string, 37 | httpMethod = HttpGet, 38 | contentDisposition: S3ContentDisposition = CDTignore, contentDispositionName = "", 39 | setContentType = true, 40 | fileExt = "", customQuery = "", copyObject = "", expireInSec = "65", 41 | accessToken = "", 42 | makeDateTime = "" 43 | ): string = 44 | ## Generate a S3 signed URL. 
45 | ## 46 | ## customQuery: 47 | ## This is a custom defined header query. The string needs to include the format 48 | ## "head1:value,head2:value" - a comma separated string with header and 49 | ## value diveded by colon. 50 | ## 51 | ## fileExt => ".jpg", ".ifc" 52 | 53 | let 54 | url = "https://" & bucketHost & "/" & key 55 | region = credsRegion 56 | service = "s3" 57 | payload = "" 58 | digest = SHA256 59 | 60 | # datetime: 61 | # 62 | # Why this? In a complex threaded system Valgrind kept bugging over the 63 | # times library and not bein able to free the memory. The original 64 | # makeDateTime() comes from the library awsSigV4, and even with destroy and 65 | # defer nothing helped on Valgrind. 66 | # 67 | # You might never experience this, but if you do, the fix is to create the 68 | # datetime string outside the procedure within a scoped block and just pass 69 | # the string. 70 | var datetime: string = makeDateTime 71 | if datetime.len == 0: 72 | datetime = getTime().utc.format(basicISO8601) 73 | 74 | let scope = credentialScope(region=region, service=service, date=datetime) 75 | 76 | var headers = newHttpHeaders() 77 | headers.add("Host", bucketHost) 78 | if copyObject.len > 0: 79 | headers.add("x-amz-copy-source", copyObject) 80 | 81 | # Create the initial JSON query with known fields using %* 82 | var query = %* { 83 | "X-Amz-Algorithm": $SHA256, 84 | "X-Amz-Credential": credsAccessKey & "/" & scope, 85 | "X-Amz-Date": datetime, 86 | "X-Amz-Expires": expireInSec 87 | } 88 | 89 | if accessToken.len > 0: 90 | query["X-Amz-Security-Token"] = %* accessToken 91 | 92 | if contentDisposition != CDTignore or contentDispositionName.len > 0: 93 | let dispType = case contentDisposition 94 | of CDTinline: "inline;" 95 | of CDTattachment: "attachment;" 96 | else: "" 97 | 98 | if contentDispositionName.len > 0: 99 | let filename = if dispType.len == 0: 100 | "filename=\"" & contentDispositionName & "\"" 101 | else: 102 | " filename=\"" & contentDispositionName & 
"\"" 103 | query["response-content-disposition"] = %* (dispType & filename) 104 | 105 | if setContentType: 106 | let extension = if fileExt.len > 0: fileExt[1..^1] 107 | elif splitFile(key).ext.len > 0: splitFile(key).ext[1..^1] 108 | else: "" 109 | query["response-content-type"] = %* mimetypeDB.getOrDefault(extension, "binary/octet-stream") 110 | 111 | if customQuery.len > 0: 112 | for c in customQuery.split(","): 113 | let q = c.split(":") 114 | if q.len == 2: 115 | query[q[0]] = %* q[1] 116 | 117 | query["X-Amz-SignedHeaders"] = %* (if copyObject.len > 0: "host;x-amz-copy-source" else: "host") 118 | 119 | let 120 | request = canonicalRequest(httpMethod, url, query, headers, payload, digest = UnsignedPayload) 121 | sts = stringToSign(request, scope, datetime, digest) 122 | signature = calculateSignature( 123 | secret = credsSecretKey, 124 | date = datetime, 125 | region = region, 126 | service = service, 127 | tosign = sts, 128 | digest = digest 129 | ) 130 | 131 | result = url & "?" & request.split("\n")[2] & "&X-Amz-Signature=" & signature 132 | 133 | when defined(verboseS3): 134 | echo result 135 | 136 | 137 | proc s3SignedUrl*(awsCreds: AwsCreds, bucketHost, key: string, 138 | httpMethod = HttpGet, 139 | contentDisposition: S3ContentDisposition = CDTignore, contentDispositionName = "", 140 | setContentType = true, fileExt = "", customQuery = "", copyObject = "", 141 | expireInSec = "65" 142 | ): string {.deprecated.} = 143 | 144 | return s3SignedUrl( 145 | awsCreds.AWS_ACCESS_KEY_ID, awsCreds.AWS_SECRET_ACCESS_KEY, awsCreds.AWS_REGION, 146 | bucketHost, key, 147 | httpMethod = httpMethod, 148 | contentDisposition = contentDisposition, contentDispositionName = contentDispositionName, 149 | setContentType = setContentType, 150 | fileExt = fileExt, customQuery = customQuery, copyObject = copyObject, expireInSec = expireInSec, 151 | accessToken = awsCreds.AWS_SESSION_TOKEN 152 | ) 153 | 154 | 155 | 156 | # 157 | # S3 presigned GET 158 | # 159 | proc 
proc s3Presigned*(accessKey, secretKey, region: string, bucketHost, key: string,
    httpMethod = HttpGet,
    contentDisposition: S3ContentDisposition = CDTattachment, contentDispositionName = "",
    setContentType = true, fileExt = "", expireInSec = "65", accessToken = ""
  ): string {.deprecated.} =
  ## Generates a S3 presigned url for sharing.
  ##
  ## Deprecated: thin wrapper around `s3SignedUrl` kept for backward
  ## compatibility — new code should call `s3SignedUrl` directly.
  ##
  ## contentDisposition => sets "Content-Disposition" type (inline/attachment)
  ## contentDispositionName => sets "Content-Disposition" name
  ## setContentType => sets "Content-Type"
  ## fileExt => only if setContentType=true
  ##    if `fileExt = ""` then mimetype is automated
  ##    needs to be ".jpg" (dot before) like splitFile(f).ext
  ## expireInSec => URL validity in seconds (string, default "65")
  ## accessToken => optional session token forwarded as X-Amz-Security-Token
  return s3SignedUrl(accessKey, secretKey, region, bucketHost, key,
    httpMethod = httpMethod,
    contentDisposition = contentDisposition, contentDispositionName = contentDispositionName,
    setContentType = setContentType,
    fileExt = fileExt, expireInSec = expireInSec, accessToken = accessToken
  )
proc isSuccess2xx*(response: AsyncResponse): (bool) =
  ## Helper-Procedure that can be used with the raw call for parsing the
  ## response. Returns true when the HTTP status code is in the 2xx range.
  # Idiom fix: both branches only differed in the echoed prefix and the
  # returned literal, so compute the flag once instead of duplicating
  # the branch bodies.
  result = response.code.is2xx
  when defined(verboseS3):
    echo (if result: "success: " else: "failure: ") & $response.code
proc s3GetObjectIs2xx*(creds: AwsCreds, bucketHost, key, downloadPath: string): Future[bool] {.async.} =
  ## AWS S3 API - GetObject bool
  ##
  ## AWS S3 API - GetObject is2xx returns true on downloaded file.
  ##
  ## `downloadPath` needs to be the full local path.
  ##
  ## Note: unlike the other *Is2xx helpers this one does not inspect the
  ## HTTP status — success is determined by the downloaded file existing
  ## on disk afterwards.
  if key.contains(" "):
    # Keys with spaces are rejected up front instead of producing a
    # mis-signed request.
    warn("s3GetObjectIs2xx(): Skipping due spaces = " & key)
    return false
  else:
    # let client = newHttpClient()
    let client = newAsyncHttpClient()
    await s3GetObject(client, creds, bucketHost, key, downloadPath)
    client.close()
    result = fileExists(downloadPath)
proc s3MoveObject*(creds: AwsCreds, bucketToHost, keyTo, bucketFromHost, bucketFromName, keyFrom: string) {.async.} =
  ## This does a pseudo move of an object. We copy the object to the destination
  ## and then we delete the object from the original location.
  ##
  ## bucketToHost   => Destination bucket host
  ## keyTo          => 12/files/file.jpg
  ## bucketFromHost => Origin bucket host
  ## bucketFromName => Origin bucket name
  ## keyFrom        => 24/files/old.jpg
  ##
  ## The delete is only attempted after a successful (2xx) copy; a failed
  ## delete is logged but not raised, so the object may then exist in both
  ## locations.
  let client = newAsyncHttpClient()

  if (await s3CopyObject(client, creds, bucketToHost, keyTo, "/" & bucketFromName & "/" & keyFrom)).isSuccess2xx():
    if not (await (s3DeleteObject(client, creds, bucketFromHost, keyFrom))).isSuccess2xx():
      warn("s3MoveObject(): Failed on delete - " & bucketFromHost & keyFrom)

  # NOTE(review): if s3CopyObject/s3DeleteObject raises, this close is
  # skipped — consider try/finally. TODO confirm intended behavior.
  client.close()
proc s3TrashObject*(creds: AwsCreds, bucketTrashHost, bucketFromHost, bucketFromName, keyFrom: string) {.async.} =
  ## This does a pseudo move of an object. We copy the object to the destination
  ## and then we delete the object from the original location.
  ## The destination in this particular situation - is our trash.
  ##
  ## The key is reused unchanged, so the object keeps its original path
  ## inside the trash bucket.
  await s3MoveObject(creds, bucketTrashHost, keyFrom, bucketFromHost, bucketFromName, keyFrom)
193 | ## 194 | ## The `waitValidate` is the time to wait between validating the existence of 195 | ## the file. The `waitDelete` is the time to wait between deleting the files. 196 | await s3MoveObjects(creds, bucketTrashHost, bucketFromHost, bucketFromName, keys, waitValidate, waitDelete) 197 | 198 | 199 | -------------------------------------------------------------------------------- /src/awsS3/utils_sync.nim: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | import 4 | std/httpclient, 5 | std/os, 6 | std/logging, 7 | std/strutils 8 | 9 | import 10 | awsSTS 11 | 12 | import 13 | ./api 14 | 15 | 16 | # 17 | # Helper procedures 18 | # 19 | proc parseReponse*(response: Response): (bool, HttpHeaders) = 20 | ## Helper-Procedure that can be used to return true on success and the response 21 | ## headers. 22 | if response.code.is2xx: 23 | when defined(verboseS3): echo "success: " & $response.code 24 | return (true, response.headers) 25 | 26 | else: 27 | when defined(verboseS3): echo "failure: " & $response.code 28 | return (false, response.headers) 29 | 30 | 31 | proc isSuccess2xx*(response: Response): (bool) = 32 | ## Helper-Procedure that can be used with the raw call for parsing the response. 
33 | if response.code.is2xx: 34 | when defined(verboseS3): echo "success: " & $response.code 35 | return (true) 36 | 37 | else: 38 | when defined(verboseS3): echo "failure: " & $response.code 39 | return (false) 40 | 41 | 42 | proc s3DeleteObjectIs2xx*(creds: AwsCreds, bucketHost, key: string): bool = 43 | ## AWS S3 API - DeleteObject bool 44 | if key.contains(" "): 45 | warn("s3DeleteObjectIs2xx(): Skipping due spaces = " & key) 46 | return false 47 | else: 48 | let client = newHttpClient() 49 | result = (s3DeleteObject(client, creds, bucketHost, key)).isSuccess2xx() 50 | client.close() 51 | 52 | 53 | proc s3HeadObjectIs2xx*(creds: AwsCreds, bucketHost, key: string): bool = 54 | ## AWS S3 API - HeadObject bool 55 | ## 56 | ## AWS S3 API - HeadObject is2xx is only checking the existing of the file. 57 | ## If the data is needed, then use the raw `s3HeadObject` procedure and 58 | ## parse the response. 59 | if key.contains(" "): 60 | warn("s3HeadObjectIs2xx(): Skipping due spaces = " & key) 61 | return false 62 | else: 63 | let client = newHttpClient() 64 | result = (s3HeadObject(client, creds, bucketHost, key)).isSuccess2xx() 65 | client.close() 66 | 67 | 68 | proc s3GetObjectIs2xx*(creds: AwsCreds, bucketHost, key, downloadPath: string): bool = 69 | ## AWS S3 API - GetObject bool 70 | ## 71 | ## AWS S3 API - GetObject is2xx returns true on downloaded file. 72 | ## 73 | ## `downloadPath` needs to full local path. 74 | if key.contains(" "): 75 | warn("s3GetObjectIs2xx(): Skipping due spaces = " & key) 76 | return false 77 | else: 78 | 79 | let client = newHttpClient() 80 | s3GetObject(client, creds, bucketHost, key, downloadPath) 81 | client.close() 82 | result = fileExists(downloadPath) 83 | 84 | 85 | 86 | proc s3PutObjectIs2xx*(creds: AwsCreds, bucketHost, key, localPath: string, deleteLocalFileAfter=true): bool = 87 | ## AWS S3 API - PutObject bool 88 | ## 89 | ## This performs a PUT and uploads the file.
The `localPath` param needs to 90 | ## be the full path. 91 | ## 92 | ## The PutObject reads the file to memory and uploads it. 93 | if not fileExists(localPath): 94 | return false 95 | 96 | if key.contains(" "): 97 | warn("s3PutObjectIs2xx(): Skipping due spaces = " & key) 98 | return false 99 | else: 100 | let client = newHttpClient() 101 | result = (s3PutObject(client, creds, bucketHost, key, localPath)).isSuccess2xx() 102 | client.close() 103 | if deleteLocalFileAfter: 104 | removeFile(localPath) 105 | 106 | 107 | proc s3CopyObjectIs2xx*(creds: AwsCreds, bucketHost, key, copyObject: string): bool = 108 | ## AWS S3 API - CopyObject bool 109 | if key.contains(" "): 110 | warn("s3CopyObjectIs2xx(): Skipping due spaces = " & key) 111 | return false 112 | else: 113 | let client = newHttpClient() 114 | result = s3CopyObject(client, creds, bucketHost, key, copyObject).isSuccess2xx() 115 | client.close() 116 | 117 | 118 | proc s3MoveObject*(creds: AwsCreds, bucketToHost, keyTo, bucketFromHost, bucketFromName, keyFrom: string) = 119 | ## This does a pseudo move of an object. We copy the object to the destination 120 | ## and then we delete the object from the original location. 
121 | ## 122 | ## bucketToHost => Destination bucket host 123 | ## keyTo => 12/files/file.jpg 124 | ## bucketFromHost => Origin bucket host 125 | ## bucketFromName => Origin bucket name 126 | ## keyFrom => 24/files/old.jpg 127 | ## 128 | let client = newHttpClient() 129 | 130 | if s3CopyObject(client, creds, bucketToHost, keyTo, "/" & bucketFromName & "/" & keyFrom).isSuccess2xx(): 131 | if not (s3DeleteObject(client, creds, bucketFromHost, keyFrom)).isSuccess2xx(): 132 | warn("s3MoveObject(): Failed on delete - " & bucketFromHost & keyFrom) 133 | 134 | client.close() 135 | 136 | 137 | proc s3MoveObjects*( 138 | creds: AwsCreds, 139 | bucketHost, bucketFromHost, bucketFromName: string, 140 | keys: seq[string], 141 | waitValidate = 0, 142 | waitDelete = 0 143 | ) = 144 | ## In this (plural) multiple moves are performed. The keys are identical in 145 | ## "from" and "to", so origin and destination are the same. 146 | ## 147 | ## The `waitValidate` and `waitDelete` are used to wait between the validation 148 | ## if the file exists and delete operation. 
149 | let client = newHttpClient() 150 | 151 | var keysSuccess: seq[string] 152 | 153 | for key in keys: 154 | try: 155 | if s3CopyObject(client, creds, bucketHost, key, "/" & bucketFromName & "/" & key).isSuccess2xx(): 156 | keysSuccess.add(key) 157 | except CatchableError: 158 | error("s3MoveObjects(): Failed on copy - " & bucketHost & " - " & key) 159 | 160 | if waitValidate > 0: 161 | sleep(waitValidate) 162 | 163 | for key in keysSuccess: 164 | try: 165 | if not (s3DeleteObject(client, creds, bucketFromHost, key)).isSuccess2xx(): 166 | warn("s3MoveObjects(): Could not delete - " & bucketFromHost & " - " & key) 167 | except CatchableError: 168 | error("s3MoveObjects(): Failed on delete - " & bucketFromHost & " - " & key) 169 | 170 | if waitDelete > 0: 171 | sleep(waitDelete) 172 | 173 | client.close() 174 | 175 | 176 | proc s3TrashObject*(creds: AwsCreds, bucketTrashHost, bucketFromHost, bucketFromName, keyFrom: string) = 177 | ## This does a pseudo move of an object. We copy the object to the destination 178 | ## and then we delete the object from the original location. 179 | ## The destination in this particular situation - is our trash. 180 | s3MoveObject(creds, bucketTrashHost, keyFrom, bucketFromHost, bucketFromName, keyFrom) 181 | 182 | 183 | proc s3TrashObjects*( 184 | creds: AwsCreds, 185 | bucketTrashHost, bucketFromHost, bucketFromName: string, 186 | keys: seq[string], 187 | waitValidate = 0, 188 | waitDelete = 0 189 | ) = 190 | ## This does a pseudo move of an object. We copy the object to the destination 191 | ## and then we delete the object from the original location. 192 | ## The destination in this particular situation - is our trash. 193 | ## 194 | ## The `waitValidate` is the time to wait between validating the existence of 195 | ## the file. The `waitDelete` is the time to wait between deleting the files.
196 | s3MoveObjects(creds, bucketTrashHost, bucketFromHost, bucketFromName, keys, waitValidate, waitDelete) 197 | 198 | -------------------------------------------------------------------------------- /tests/config.nims: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | switch("path", "..") -------------------------------------------------------------------------------- /tests/multipart/example.env: -------------------------------------------------------------------------------- 1 | # This file is only used for testing the AWS Multipart Upload 2 | 3 | AWS_ACCESS_KEY_ID= 4 | AWS_SECRET_ACCESS_KEY= 5 | 6 | # AWS_ROLE_ARN (optional) 7 | AWS_ROLE_ARN= 8 | 9 | # AWS_SESSION_TOKEN (optional) 10 | # AWS_SESSION_TOKEN= 11 | 12 | AWS_REGION=eu-west-1 13 | 14 | AWS_BUCKET=nim-s3-bucket -------------------------------------------------------------------------------- /tests/multipart/test_multipartupload_utils.nim: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. 
Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | import 4 | unittest, 5 | strutils, 6 | options, 7 | times 8 | 9 | import 10 | jsony 11 | 12 | import 13 | src/api/utils 14 | 15 | 16 | suite "utility functions for multipart upload": 17 | test "check amazon time format time": 18 | let time = parse("2023-02-09T08:24:35.000Z", "yyyy-MM-dd\'T\'HH:mm:ss\'.\'fffzzz", utc()) 19 | let expectedTime = fromUnix(1675931075).utc() # 2023-02-09T08:24:35.000Z 20 | check: 21 | time == expectedTime 22 | 23 | test "jsony time convert - parse": 24 | type 25 | MyTimeObject = object 26 | time: DateTime 27 | let json = """[ 28 | {"time":"2023-02-09T08:24:35.000Z"}, 29 | {"time":"2023-02-09T08:24:35.000Z+00:00"}, 30 | {"time":"2023-02-09T08:24:35.000Z+01:00"}, 31 | ]""" 32 | 33 | let timesArr = json.fromJson(seq[MyTimeObject]) 34 | 35 | check: 36 | timesArr[0].time == dateTime(2023, mFeb, 9, 8, 24, 35, 0, utc()) # 2023-02-09T08:24:35.000Z 37 | timesArr[1].time == dateTime(2023, mFeb, 9, 8, 24, 35, 0, utc()) # 2023-02-09T08:24:35.000Z 38 | timesArr[2].time == dateTime(2023, mFeb, 9, 7, 24, 35, 0, utc()) # 2023-02-09T07:24:35.000Z 39 | 40 | test "jsony time convert - dump": 41 | type 42 | MyTimeObject = object 43 | time: DateTime 44 | let time = dateTime(2023, mFeb, 9, 8, 24, 35, 0, utc()) # 2023-02-09T08:24:35.000Z 45 | let myTimeObject = MyTimeObject(time: time) 46 | let json = myTimeObject.toJson() 47 | check: 48 | json == """{"time":"2023-02-09T08:24:35.000Z"}""" 49 | 50 | test "jsony loose first char": 51 | type 52 | MyObject = object 53 | id: string 54 | myFancyField: string 55 | 56 | var myJson = """ 57 | { 58 | "Id": "someId", 59 | "MyFancyField": "foo" 60 | } 61 | """ 62 | let myObject = myJson.fromJson(MyObject) 63 | let expectedObject = MyObject(id: "someId", myFancyField: "foo") 64 | check: 65 | myObject == expectedObject 66 | 67 | test "jsony loose object/arr": 68 | type 69 | Cat = object 70 | name: string 71 | MyType = object 72 | cat: Option[seq[Cat]] 73 | 74 | let 75 | d1 = 
"""{"cat":[{"name":"sparky"}]}""" 76 | d2 = """{"cat":{"name":"sparky"}}""" 77 | d3 = """{"cat":null}""" 78 | 79 | check: 80 | d1.fromJson(MyType) == MyType(cat: some(@[Cat(name: "sparky")])) 81 | d2.fromJson(MyType) == MyType(cat: some(@[Cat(name: "sparky")])) 82 | d3.fromJson(MyType) == MyType() 83 | 84 | 85 | -------------------------------------------------------------------------------- /tests/multipart/test_multipartupload_xml2json.nim: -------------------------------------------------------------------------------- 1 | # Copyright Thomas T. Jarløv (TTJ) - ttj@ttj.dk 2 | 3 | import 4 | xmlparser, 5 | xmltree, 6 | json, 7 | tables, 8 | strtabs, 9 | unittest, 10 | sequtils 11 | 12 | import 13 | jsony 14 | 15 | import 16 | src/api/utils, 17 | src/api/xml2Json 18 | 19 | 20 | 21 | suite "xml2Json for multipart": 22 | type 23 | Xml2JsonTestRoot = object 24 | id: string 25 | child1: string 26 | child2: seq[string] 27 | child3: Table[string, string] 28 | child5: string 29 | Xml2JsonTest = object 30 | root: Xml2JsonTestRoot 31 | 32 | let xmlString = """ 33 | 34 | value1 35 | value2 36 | value3 37 | 38 | value4 39 | 40 | value5 41 | """ 42 | 43 | let expectedJson = """{"root":{"id":"123","child1":"value1","child2":["value2","value3"],"child3":{"child4":"value4"},"Child5":"value5"}}""" 44 | let expectedJsonSplitAttr = """{"root":{"attributes":{"id":"123"},"child1":"value1","child2":["value2","value3"],"child3":{"child4":"value4"},"Child5":"value5"}}""" 45 | test "xml->jsonString": 46 | let xml = xmlString.parseXml() 47 | check: 48 | $xml.xml2Json() == expectedJson 49 | $xml.xml2Json(true) == expectedJsonSplitAttr 50 | 51 | test "xml->json->obj": 52 | 53 | let xml = xmlString.parseXml() 54 | let json = xml.xml2Json() 55 | let jsonString = json.toJson() 56 | let obj = jsonString.fromJson(Xml2JsonTest) 57 | let expectedObject = Xml2JsonTest( 58 | root: Xml2JsonTestRoot( 59 | id: "123", 60 | child1: "value1", 61 | child2: @["value2", "value3"], 62 | child3: {"child4": 
"value4"}.toTable(), 63 | child5: "value5" 64 | ) 65 | ) 66 | check: 67 | 68 | obj == expectedObject 69 | 70 | test "xml quotes": 71 | let xmlString = """ "48ad599540f59071982d4a00c6c5928d-4"""" 72 | let expectedJson = """{"ETag":"48ad599540f59071982d4a00c6c5928d-4"}""" 73 | let xmlString1 = """ "48ad599540f59071982d4a00c6c5928d-4"""" 74 | let expectedJson1 = """{"root":{"ETag":"48ad599540f59071982d4a00c6c5928d-4"}}""" 75 | var n0 = newElement("ETag") 76 | let n1 = newText("\"") 77 | let n2 = newText("48ad599540f59071982d4a00c6c5928d-4") 78 | n0.add(n1) 79 | n0.add(n2) 80 | n0.add(n1) 81 | 82 | let root = newElement("root") 83 | root.add(n0) 84 | 85 | check: 86 | xmlString.parseXml().xml2Json().toJson() == expectedJson 87 | n0.xml2Json().toJson() == expectedJson 88 | root.xml2Json().toJson() == expectedJson1 89 | xmlString1.parseXml().xml2Json().toJson() == expectedJson1 90 | 91 | test "jsony object": 92 | let json = """{"Xml2JsonTestRoot":{"id":"123","child1":"value1","child2":["value2","value3"],"child3":{"child4":"value4"},"Child5":"value5"}}""" 93 | let obj = json.parseJson()["Xml2JsonTestRoot"].toJson().fromJson(Xml2JsonTestRoot) 94 | 95 | check: 96 | obj.id == "123" 97 | obj.child1 == "value1" 98 | obj.child2 == @["value2", "value3"] 99 | obj.child3 == {"child4": "value4"}.toTable() 100 | obj.child5 == "value5" -------------------------------------------------------------------------------- /tests/tests1.nim: -------------------------------------------------------------------------------- 1 | import 2 | std/[ 3 | httpclient, 4 | os, 5 | times 6 | ] 7 | 8 | import 9 | src/awsS3/signed, 10 | src/awsS3/api, 11 | src/awsS3/utils_sync, 12 | awsSTS 13 | 14 | const 15 | bucketHost = ".s3-eu-west-1.amazonaws.com" 16 | bucketName = "" 17 | serverRegion = "eu-west-1" 18 | myAccessKey = "" 19 | mySecretKey = "" 20 | role = "arn:aws:iam:::role/" 21 | s3File1 = "test/test1.jpg" 22 | s3File2 = "test/test2.jpg" 23 | s3MoveTo = "test2/test.jpg" 24 | localTestFile1 = 
"/home/user/downloads/myimage1.jpg" 25 | localTestFile2 = "/home/user/downloads/myimage2.jpg" 26 | # downloadTo = "/home/username/git/nim_awsS3/test3.jpg" 27 | 28 | ## Get creds with awsSTS package 29 | let creds = awsSTScreate(myAccessKey, mySecretKey, serverRegion, role) 30 | 31 | # Tests 32 | # ## Move object 33 | # waitFor s3MoveObject(creds, bucketHost, s3MoveTo, bucketHost, bucketName, s3File) 34 | 35 | # ## Get content-length 36 | # var client = newAsyncHttpClient() 37 | # let m1 = waitFor s3HeadObject(client, creds, bucketHost, s3MoveTo) 38 | # echo m1.headers["content-length"] 39 | 40 | # ## Get object 41 | # echo waitFor s3GetObjectIs2xx(creds, bucketHost, s3MoveTo, downloadTo) 42 | # echo fileExists(downloadTo) 43 | 44 | # ## Delete object 45 | # echo waitFor s3DeleteObjectIs2xx(creds, bucketHost, s3MoveTo) 46 | 47 | 48 | 49 | proc upload() = 50 | 51 | ## 1) Create test file 52 | writeFile(localTestFile1, "blabla") 53 | writeFile(localTestFile2, "lkjhgfdedrtyuio") 54 | 55 | ## 2) Put object 56 | echo s3PutObjectIs2xx(creds, bucketHost, s3File1, localTestFile1) 57 | echo s3PutObjectIs2xx(creds, bucketHost, s3File2, localTestFile2) 58 | 59 | proc delete() = 60 | s3TrashObjects( 61 | creds, 62 | ".s3-eu-west-1.amazonaws.com", 63 | bucketHost, 64 | bucketName, 65 | @[s3File1, s3File2], 66 | waitValidate = 2000, 67 | waitDelete = 2000 68 | ) 69 | 70 | # upload() 71 | # delete() 72 | 73 | let 74 | tBucket = ".s3-eu-west-1.amazonaws.com" 75 | tKey = "/" 76 | 77 | echo s3SignedUrl( 78 | creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, creds.AWS_REGION, 79 | tBucket, tKey, 80 | httpMethod = HttpGet, 81 | contentDisposition = CDTattachment, contentDispositionName = "", 82 | setContentType = true, fileExt = "", expireInSec = "6500", 83 | accessToken = creds.AWS_SESSION_TOKEN, 84 | makeDateTime = $(getTime().utc.format(basicISO8601)) 85 | ) 86 | 87 | 88 | 89 | 90 | proc move() = 91 | s3MoveObject( 92 | creds, 93 | ".s3-eu-west-1.amazonaws.com", 94 | 
"ulla/dulle", 95 | ".s3-eu-west-1.amazonaws.com", "", 96 | "test/sub") 97 | 98 | # move() 99 | 100 | 101 | 102 | # Delete folder 103 | proc deleteFolder() = 104 | let client = newHttpClient() 105 | let result = (s3DeleteObject(client, creds, ".s3-eu-west-1.amazonaws.com", "test/sub/")) 106 | echo result.body 107 | echo result.status 108 | client.close() 109 | 110 | #deleteFolder() --------------------------------------------------------------------------------