├── .cfconfig.json ├── .cfformat.json ├── .cflintrc ├── .editorconfig ├── .env.example ├── .gitattributes ├── .github ├── FUNDING.YML └── workflows │ ├── ci.yml │ ├── cron.yml │ ├── gh-release.yml │ ├── pr.yml │ └── tests.yml ├── .gitignore ├── .markdownlint copy.json ├── .markdownlint.json ├── .vscode └── settings.json ├── APACHE_LICENSE.TXT ├── CONTRIBUTING.md ├── ModuleConfig.cfc ├── box.json ├── build ├── .travis.yml ├── Build.cfc └── release.boxr ├── changelog.md ├── models ├── AmazonS3.cfc ├── MiniLogBox.cfc ├── Sv2Util.cfc └── Sv4Util.cfc ├── readme.md ├── server-adobe@2018.json ├── server-adobe@2021.json ├── server-adobe@2023.json ├── server-boxlang-cfml@1.json ├── server-lucee@5.json └── test-harness ├── .cflintrc ├── Application.cfc ├── box.json ├── config ├── Application.cfc ├── Coldbox.cfc ├── Router.cfc └── WireBox.cfc ├── handlers └── Main.cfc ├── index.cfm ├── layouts └── Main.cfm └── tests ├── Application.cfc ├── fixtures ├── get-presigned-url │ ├── get-presigned-url.authz │ ├── get-presigned-url.creq │ ├── get-presigned-url.req │ └── get-presigned-url.sts ├── get-vanilla-query-unreserved-s3 │ ├── get-vanilla-query-unreserved-s3.authz │ ├── get-vanilla-query-unreserved-s3.creq │ ├── get-vanilla-query-unreserved-s3.req │ └── get-vanilla-query-unreserved-s3.sts ├── post-header-key-sort-s3 │ ├── post-header-key-sort-s3.authz │ ├── post-header-key-sort-s3.creq │ ├── post-header-key-sort-s3.req │ └── post-header-key-sort-s3.sts └── post-vanilla-query-s3 │ ├── post-vanilla-query-s3.authz │ ├── post-vanilla-query-s3.creq │ ├── post-vanilla-query-s3.req │ └── post-vanilla-query-s3.sts ├── index.cfm ├── runner.cfm ├── specs ├── AmazonS3Spec.cfc ├── Sv4UtilSpec.cfc └── models │ └── AmazonS3 │ ├── buildKeyName.cfc │ ├── buildUrlEndpoint.cfc │ ├── createSignatureUtil.cfc │ ├── getBucketLocation.cfc │ ├── init.cfc │ ├── requireBucketName.cfc │ ├── setAuth.cfc │ ├── setAwsDomain.cfc │ ├── setAwsRegion.cfc │ └── setSSL.cfc └── tmp └── .gitkeep /.cfconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "adminPassword" : "coldbox", 3 | "componentCacheEnabled":false, 4 | "postParametersLimit" : 200, 5 | "robustExceptionEnabled":true, 6 | "saveClassFiles":false, 7 | "systemErr":"System", 8 | "systemOut":"System", 9 | "thistimezone":"UTC", 10 | "whitespaceManagement":"white-space-pref", 11 | "debuggingEnabled":true, 12 | "debuggingReportExecutionTimes":false, 13 | "disableInternalCFJavaComponents":false, 14 | "inspectTemplate":"always", 15 | "requestTimeout":"0,0,0,90", 16 | "datasources":{ 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /.cfformat.json: -------------------------------------------------------------------------------- 1 | { 2 | "array.empty_padding": false, 3 | "array.padding": true, 4 | "array.multiline.min_length": 50, 5 | "array.multiline.element_count": 2, 6 | "array.multiline.leading_comma.padding": true, 7 | "array.multiline.leading_comma": false, 8 | "alignment.consecutive.assignments": true, 9 | "alignment.consecutive.properties": true, 10 | "alignment.consecutive.params": true, 11 | "alignment.doc_comments" : true, 12 | "brackets.padding": true, 13 | "comment.asterisks": "align", 14 | "binary_operators.padding": true, 15 | "for_loop_semicolons.padding": true, 16 | "function_call.empty_padding": false, 17 | "function_call.padding": true, 18 | "function_call.multiline.leading_comma.padding": true, 19 | "function_call.casing.builtin": "cfdocs", 20 | 
"function_call.casing.userdefined": "camel", 21 | "function_call.multiline.element_count": 3, 22 | "function_call.multiline.leading_comma": false, 23 | "function_call.multiline.min_length": 50, 24 | "function_declaration.padding": true, 25 | "function_declaration.empty_padding": false, 26 | "function_declaration.multiline.leading_comma": false, 27 | "function_declaration.multiline.leading_comma.padding": true, 28 | "function_declaration.multiline.element_count": 3, 29 | "function_declaration.multiline.min_length": 50, 30 | "function_declaration.group_to_block_spacing": "compact", 31 | "function_anonymous.empty_padding": false, 32 | "function_anonymous.group_to_block_spacing": "compact", 33 | "function_anonymous.multiline.element_count": 3, 34 | "function_anonymous.multiline.leading_comma": false, 35 | "function_anonymous.multiline.leading_comma.padding": true, 36 | "function_anonymous.multiline.min_length": 50, 37 | "function_anonymous.padding": true, 38 | "indent_size": 4, 39 | "keywords.block_to_keyword_spacing": "spaced", 40 | "keywords.group_to_block_spacing": "spaced", 41 | "keywords.padding_inside_group": true, 42 | "keywords.spacing_to_block": "spaced", 43 | "keywords.spacing_to_group": true, 44 | "keywords.empty_group_spacing": false, 45 | "max_columns": 115, 46 | "metadata.multiline.element_count": 3, 47 | "metadata.multiline.min_length": 50, 48 | "method_call.chain.multiline" : 3, 49 | "newline":"\n", 50 | "property.multiline.element_count": 3, 51 | "property.multiline.min_length": 30, 52 | "parentheses.padding": true, 53 | "strings.quote": "double", 54 | "strings.attributes.quote": "double", 55 | "struct.separator": " : ", 56 | "struct.padding": true, 57 | "struct.empty_padding": false, 58 | "struct.multiline.leading_comma": false, 59 | "struct.multiline.leading_comma.padding": true, 60 | "struct.multiline.element_count": 2, 61 | "struct.multiline.min_length": 60, 62 | "tab_indent": true 63 | } 64 | -------------------------------------------------------------------------------- /.cflintrc: -------------------------------------------------------------------------------- 1 | { 2 | "rule": [], 3 | "includes": [ 4 | { "code": "AVOID_USING_CFINCLUDE_TAG" }, 5 | { "code": "AVOID_USING_CFABORT_TAG" }, 6 | { "code": "AVOID_USING_CFEXECUTE_TAG" }, 7 | { "code": "AVOID_USING_DEBUG_ATTR" }, 8 | { "code": "AVOID_USING_ABORT" }, 9 | { "code": "AVOID_USING_ISDATE" }, 10 | { "code": "AVOID_USING_ISDEBUGMODE" }, 11 | { "code": "AVOID_USING_CFINSERT_TAG" }, 12 | { "code": "AVOID_USING_CFUPDATE_TAG" }, 13 | { "code": "ARG_VAR_CONFLICT" }, 14 | { "code": "ARG_VAR_MIXED" }, 15 | { "code": "ARG_HINT_MISSING" }, 16 | { "code": "ARG_HINT_MISSING_SCRIPT" }, 17 | { "code" : "ARGUMENT_INVALID_NAME" }, 18 | { "code" : "ARGUMENT_ALLCAPS_NAME" }, 19 | { "code" : "ARGUMENT_TOO_WORDY" }, 20 | { "code" : "ARGUMENT_IS_TEMPORARY" }, 21 | { "code": "CFQUERYPARAM_REQ" }, 22 | { "code": "COMPARE_INSTEAD_OF_ASSIGN" }, 23 | { "code": "COMPONENT_HINT_MISSING" }, 24 | { "code" : "COMPONENT_INVALID_NAME" }, 25 | { "code" : "COMPONENT_ALLCAPS_NAME" }, 26 | { "code" : "COMPONENT_TOO_SHORT" }, 27 | { "code" : "COMPONENT_TOO_LONG" }, 28 | { "code" : "COMPONENT_TOO_WORDY" }, 29 | { "code" : "COMPONENT_IS_TEMPORARY" }, 30 | { "code" : "COMPONENT_HAS_PREFIX_OR_POSTFIX" }, 31 | { "code": "COMPLEX_BOOLEAN_CHECK" }, 32 | { "code": "EXCESSIVE_FUNCTION_LENGTH" }, 33 | { "code": "EXCESSIVE_COMPONENT_LENGTH" }, 34 | { "code": "EXCESSIVE_ARGUMENTS" }, 35 | { "code": "EXCESSIVE_FUNCTIONS" }, 36 | { "code": "EXPLICIT_BOOLEAN_CHECK" }, 
37 | { "code": "FUNCTION_TOO_COMPLEX" }, 38 | { "code": "FUNCTION_HINT_MISSING" }, 39 | { "code": "FILE_SHOULD_START_WITH_LOWERCASE" }, 40 | { "code": "LOCAL_LITERAL_VALUE_USED_TOO_OFTEN" }, 41 | { "code": "GLOBAL_LITERAL_VALUE_USED_TOO_OFTEN" }, 42 | { "code": "MISSING_VAR" }, 43 | { "code" : "METHOD_INVALID_NAME" }, 44 | { "code" : "METHOD_ALLCAPS_NAME" }, 45 | { "code" : "METHOD_IS_TEMPORARY" }, 46 | { "code": "NESTED_CFOUTPUT" }, 47 | { "code": "NEVER_USE_QUERY_IN_CFM" }, 48 | { "code": "OUTPUT_ATTR" }, 49 | { "code" : "QUERYPARAM_REQ" }, 50 | { "code": "UNUSED_LOCAL_VARIABLE" }, 51 | { "code": "UNUSED_METHOD_ARGUMENT" }, 52 | { "code": "SQL_SELECT_STAR" }, 53 | { "code": "SCOPE_ALLCAPS_NAME" }, 54 | { "code": "VAR_ALLCAPS_NAME" }, 55 | { "code": "VAR_INVALID_NAME" }, 56 | { "code": "VAR_TOO_WORDY" } 57 | ], 58 | "inheritParent": false, 59 | "parameters": { 60 | "TooManyFunctionsChecker.maximum" : 20 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | end_of_line = lf 7 | charset = utf-8 8 | trim_trailing_whitespace = true 9 | insert_final_newline = false 10 | indent_style = tab 11 | indent_size = 4 12 | tab_width = 4 13 | 14 | [*.yml] 15 | indent_style = space 16 | indent_size = 2 17 | 18 | [*.{md,markdown}] 19 | trim_trailing_whitespace = false 20 | insert_final_newline = false -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | AWS_ACCESS_KEY= 2 | AWS_ACCESS_SECRET= 3 | AWS_REGION=us-east-1 4 | AWS_DOMAIN=amazonaws.com 5 | AWS_DEFAULT_BUCKET_NAME= 6 | # This env var will be used in a bucket name for the test suite. It's not 7 | # required for local development. In CI, the ENGINE var will be defined by 8 | # the CI setup, so that we can test on several engines simultaneously without 9 | # name collisions. 10 | #ENGINE=localhost 11 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | *.sln merge=union 7 | *.csproj merge=union 8 | *.vbproj merge=union 9 | *.fsproj merge=union 10 | *.dbproj merge=union 11 | 12 | # Standard to msysgit 13 | *.doc diff=astextplain 14 | *.DOC diff=astextplain 15 | *.docx diff=astextplain 16 | *.DOCX diff=astextplain 17 | *.dot diff=astextplain 18 | *.DOT diff=astextplain 19 | *.pdf diff=astextplain 20 | *.PDF diff=astextplain 21 | *.rtf diff=astextplain 22 | *.RTF diff=astextplain 23 | -------------------------------------------------------------------------------- /.github/FUNDING.YML: -------------------------------------------------------------------------------- 1 | patreon: ortussolutions 2 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: AWS S3 SDK CI 2 | 3 | # Only on Development we build snapshots 4 | on: 5 | push: 6 | branches: 7 | - development 8 | - master 9 | workflow_dispatch: 10 | 11 | env: 12 | MODULE_ID: s3sdk 13 | jobs: 14 | ############################################# 15 | # Tests First baby! 
We fail, no build :( 16 | ############################################# 17 | tests: 18 | uses: ./.github/workflows/tests.yml 19 | secrets: 20 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 21 | S3SDK_AWS_ACCESS_KEY: ${{ secrets.S3SDK_AWS_ACCESS_KEY }} 22 | S3SDK_AWS_ACCESS_SECRET: ${{ secrets.S3SDK_AWS_ACCESS_SECRET }} 23 | 24 | ############################################# 25 | # Build Module 26 | ############################################# 27 | build: 28 | name: Build & Publish 29 | needs: tests 30 | runs-on: ubuntu-24.04 31 | steps: 32 | - name: Checkout Repository 33 | uses: actions/checkout@v2 34 | with: 35 | fetch-depth: 0 36 | 37 | - name: Setup Java 38 | uses: actions/setup-java@v2 39 | with: 40 | distribution: "adopt" 41 | java-version: "11" 42 | 43 | - name: Setup CommandBox 44 | uses: Ortus-Solutions/setup-commandbox@v2.0.1 45 | with: 46 | forgeboxAPIKey: ${{ secrets.FORGEBOX_TOKEN }} 47 | 48 | - name: Setup Environment Variables For Build Process 49 | id: current_version 50 | run: | 51 | echo "VERSION=`cat box.json | jq '.version' -r`" >> $GITHUB_ENV 52 | box package set version=@build.version@+@build.number@ 53 | # master or snapshot 54 | echo "Github Ref is $GITHUB_REF" 55 | echo "BRANCH=master" >> $GITHUB_ENV 56 | if [ $GITHUB_REF == 'refs/heads/development' ] 57 | then 58 | echo "BRANCH=development" >> $GITHUB_ENV 59 | fi 60 | 61 | - name: Build ${{ env.MODULE_ID }} 62 | run: | 63 | box install commandbox-docbox 64 | box task run taskfile=build/Build target=run :version=${{ env.VERSION }} :projectName=${{ env.MODULE_ID }} :buildID=${{ github.run_number }} :branch=${{ env.BRANCH }} 65 | 66 | - name: Upload Build Artifacts 67 | if: success() 68 | uses: actions/upload-artifact@v4 69 | with: 70 | name: ${{ env.MODULE_ID }} 71 | path: | 72 | .artifacts/**/* 73 | 74 | - name: Upload Binaries to S3 75 | uses: jakejarvis/s3-sync-action@master 76 | with: 77 | args: --acl public-read 78 | env: 79 | AWS_S3_BUCKET: "downloads.ortussolutions.com" 80 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} 81 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_SECRET }} 82 | SOURCE_DIR: ".artifacts/${{ env.MODULE_ID }}" 83 | DEST_DIR: "ortussolutions/coldbox-modules/${{ env.MODULE_ID }}" 84 | 85 | - name: Upload API Docs to S3 86 | uses: jakejarvis/s3-sync-action@master 87 | with: 88 | args: --acl public-read 89 | env: 90 | AWS_S3_BUCKET: "apidocs.ortussolutions.com" 91 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} 92 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_ACCESS_SECRET }} 93 | SOURCE_DIR: ".tmp/apidocs" 94 | DEST_DIR: "coldbox-modules/${{ env.MODULE_ID }}/${{ env.VERSION }}" 95 | 96 | - name: Publish To ForgeBox 97 | run: | 98 | cd .tmp/${{ env.MODULE_ID }} 99 | cat box.json 100 | box forgebox publish 101 | 102 | - name: Inform Slack 103 | if: ${{ always() }} 104 | uses: rtCamp/action-slack-notify@v2 105 | env: 106 | SLACK_CHANNEL: coding 107 | SLACK_COLOR: ${{ job.status }} # or a specific color like 'green' or '#ff00ff' 108 | SLACK_ICON_EMOJI: ":bell:" 109 | SLACK_MESSAGE: '${{ env.MODULE_ID }} Built with ${{ job.status }}!' 
110 | SLACK_TITLE: "${{ env.MODULE_ID }} Build" 111 | SLACK_USERNAME: CI 112 | SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} 113 | -------------------------------------------------------------------------------- /.github/workflows/cron.yml: -------------------------------------------------------------------------------- 1 | name: Daily Tests 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' # Runs at 00:00 UTC every day 6 | 7 | jobs: 8 | tests: 9 | uses: ./.github/workflows/tests.yml 10 | secrets: 11 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 12 | S3SDK_AWS_ACCESS_KEY: ${{ secrets.S3SDK_AWS_ACCESS_KEY }} 13 | S3SDK_AWS_ACCESS_SECRET: ${{ secrets.S3SDK_AWS_ACCESS_SECRET }} 14 | -------------------------------------------------------------------------------- /.github/workflows/gh-release.yml: -------------------------------------------------------------------------------- 1 | # Publish Github Release 2 | name: Github Release 3 | 4 | on: 5 | push: 6 | tags: 7 | - v[0-9]+.* 8 | 9 | jobs: 10 | create-release: 11 | runs-on: ubuntu-24.04 12 | steps: 13 | - uses: actions/checkout@v2 14 | - uses: taiki-e/create-gh-release-action@v1.5.0 15 | with: 16 | # Produced by the build/Build.cfc 17 | changelog: changelog.md 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: Pull Requests 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - development 7 | 8 | jobs: 9 | tests: 10 | uses: ./.github/workflows/tests.yml 11 | secrets: 12 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 13 | S3SDK_AWS_ACCESS_KEY: ${{ secrets.S3SDK_AWS_ACCESS_KEY }} 14 | S3SDK_AWS_ACCESS_SECRET: ${{ secrets.S3SDK_AWS_ACCESS_SECRET }} 15 | 16 | # Format PR 17 | format: 18 | name: Format 19 | runs-on: ubuntu-24.04 20 | steps: 21 | - name: Checkout Repository 22 | uses: actions/checkout@v2 23 | 24 | - uses: Ortus-Solutions/commandbox-action@v1.0.2 25 | with: 26 | cmd: run-script format 27 | 28 | - name: Commit Format Changes 29 | uses: stefanzweifel/git-auto-commit-action@v4 30 | with: 31 | commit_message: Apply cfformat changes 32 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Test Suites 2 | 3 | # We are a reusable Workflow only 4 | on: 5 | workflow_call: 6 | secrets: 7 | SLACK_WEBHOOK_URL: 8 | required: false 9 | S3SDK_AWS_ACCESS_KEY: 10 | required: true 11 | S3SDK_AWS_ACCESS_SECRET: 12 | required: true 13 | 14 | jobs: 15 | tests: 16 | name: Tests 17 | runs-on: ubuntu-24.04 18 | env: 19 | DB_USER: root 20 | DB_PASSWORD: root 21 | AWS_DOMAIN: amazonaws.com 22 | AWS_REGION: us-east-1 23 | continue-on-error: ${{ matrix.experimental }} 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | cfengine: [ "lucee@5", "adobe@2021", "boxlang-cfml@1" ] 28 | coldboxVersion: [ "^7" ] 29 | experimental: [ false ] 30 | include: 31 | - cfengine: "adobe@2023" 32 | coldboxVersion: "^7" 33 | experimental: true 34 | - cfengine: "adobe@2018" 35 | coldboxVersion: "^7" 36 | experimental: true 37 | - coldboxVersion: "be" 38 | cfengine: "lucee@5" 39 | experimental: true 40 | - coldboxVersion: "be" 41 | cfengine: "adobe@2018" 42 | experimental: true 43 | - coldboxVersion: "be" 44 | cfengine: "adobe@2021" 45 | experimental: true 46 | - coldboxVersion: "be" 47 | cfengine: 
"boxlang-cfml@1" 48 | experimental: true 49 | steps: 50 | - name: Checkout Repository 51 | uses: actions/checkout@v2 52 | 53 | - name: Setup Java 54 | uses: actions/setup-java@v2 55 | with: 56 | distribution: "adopt" 57 | java-version: "11" 58 | 59 | # - name: Setup Database and Fixtures 60 | # run: | 61 | # #sudo systemctl start mysql.service 62 | # ## Create Database 63 | # #mysql -u${{ env.DB_USER }} -p${{ env.DB_PASSWORD }} -e 'CREATE DATABASE cbsecurity;' 64 | # ## Import Database 65 | # #mysql -u${{ env.DB_USER }} -p${{ env.DB_PASSWORD }} < test-harness/tests/resources/cbsecurity.sql 66 | 67 | - name: Setup Environment For Testing Process 68 | run: | 69 | # Setup .env 70 | touch .env 71 | # ENV 72 | printf "ENVIRONMENT=development\n" >> .env 73 | printf "AWS_ACCESS_KEY=${{ secrets.S3SDK_AWS_ACCESS_KEY }}\n" >> .env 74 | printf "AWS_ACCESS_SECRET=${{ secrets.S3SDK_AWS_ACCESS_SECRET }}\n" >> .env 75 | printf "AWS_REGION=${{ env.AWS_REGION }}\n" >> .env 76 | printf "AWS_DOMAIN=${{ env.AWS_DOMAIN }}\n" >> .env 77 | printf "ENGINE=${{ matrix.cfengine }}\n" >> .env 78 | printf "COLDBOX_VERSION=${{ matrix.coldboxVersion }}\n" >> .env 79 | 80 | 81 | - name: Setup CommandBox CLI 82 | uses: Ortus-Solutions/setup-commandbox@v2.0.1 83 | with: 84 | version: 6.1.0 85 | 86 | # This needs to happen until v6.2 of commandbox is released 87 | - name: Update CommandBox-BoxLang 88 | if : ${{ matrix.cfengine == 'boxlang-cfml@1' }} 89 | run: | 90 | box install --force commandbox-boxlang 91 | 92 | - name: Install Test Harness with ColdBox ${{ matrix.coldboxVersion }} 93 | run: | 94 | box install 95 | cd test-harness 96 | box package set dependencies.coldbox=${{ matrix.coldboxVersion }} 97 | box install 98 | 99 | - name: Start ${{ matrix.cfengine }} Server 100 | run: | 101 | box server start serverConfigFile="server-${{ matrix.cfengine }}.json" --noSaveSettings --debug 102 | curl http://127.0.0.1:60299 103 | 104 | - name: Run Tests 105 | run: | 106 | mkdir -p test-harness/tests/results 107 | box testbox run --verbose outputFile=test-harness/tests/results/test-results outputFormats=json,antjunit 108 | ls -lR test-harness/tests 109 | 110 | - name: Publish Test Results 111 | uses: EnricoMi/publish-unit-test-result-action@v2 112 | if: always() 113 | with: 114 | files: test-harness/tests/results/**/*.xml 115 | check_name: "${{ matrix.cfengine }} Test Results - Coldbox ${{matrix.coldboxVersion}}" 116 | 117 | - name: Upload Test Results to Artifacts 118 | if: always() 119 | uses: actions/upload-artifact@v4 120 | with: 121 | name: ${{ matrix.cfengine }}-test-results-${{matrix.coldboxVersion}}-${{ matrix.experimental }} 122 | path: | 123 | test-harness/tests/results/**/* 124 | 125 | - name: Failure Debugging Log 126 | if: ${{ failure() }} 127 | run: | 128 | box server log serverConfigFile="server-${{ matrix.cfengine }}.json" 129 | 130 | - name: Upload Debugging Log To Artifacts 131 | if: ${{ failure() }} 132 | uses: actions/upload-artifact@v4 133 | with: 134 | name: Failure Debugging Info - ${{ matrix.cfengine }} 135 | path: | 136 | .engine/**/logs/* 137 | .engine/**/WEB-INF/cfusion/logs/* 138 | 139 | - name: Slack Notifications 140 | # Only on failures and NOT in pull requests 141 | if: ${{ failure() && !startsWith( 'pull_request', github.event_name ) }} 142 | uses: rtCamp/action-slack-notify@v2 143 | env: 144 | SLACK_CHANNEL: coding 145 | SLACK_COLOR: ${{ job.status }} # or a specific color like 'green' or '#ff00ff' 146 | SLACK_ICON_EMOJI: ":bell:" 147 | SLACK_MESSAGE: '${{ github.repository }} tests failed :cry:' 
148 | SLACK_TITLE: ${{ github.repository }} Tests For ${{ matrix.cfengine }} failed 149 | SLACK_USERNAME: CI 150 | SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} 151 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Artifacts and temp folders 2 | .artifacts/** 3 | .tmp/** 4 | 5 | # Engine + Secrets + databases 6 | .env 7 | .engine/** 8 | test-harness/.engine 9 | .db/** 10 | 11 | # Dependencies 12 | test-harness/coldbox/** 13 | test-harness/docbox/** 14 | test-harness/testbox/** 15 | test-harness/logs/** 16 | test-harness/modules/** 17 | test-harness/tests/tmp/** 18 | !test-harness/tests/tmp/.gitkeep 19 | 20 | # modules 21 | modules/** 22 | 23 | # log files 24 | logs/** 25 | .idea/ 26 | -------------------------------------------------------------------------------- /.markdownlint copy.json: -------------------------------------------------------------------------------- 1 | { 2 | "line-length": false, 3 | "single-h1": false, 4 | "no-hard-tabs" : false, 5 | "fenced-code-language" : false, 6 | "no-bare-urls" : false, 7 | "first-line-h1": false, 8 | "no-multiple-blanks": { 9 | "maximum": 2 10 | }, 11 | "no-duplicate-header" : { 12 | "siblings_only" : true 13 | }, 14 | "no-duplicate-heading" : false, 15 | "no-inline-html" : false 16 | } 17 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "line-length": false, 3 | "single-h1": false, 4 | "no-hard-tabs" : false, 5 | "fenced-code-language" : false, 6 | "no-bare-urls" : false, 7 | "first-line-h1": false, 8 | "no-multiple-blanks": { 9 | "maximum": 2 10 | }, 11 | "no-duplicate-header" : { 12 | "siblings_only" : true 13 | }, 14 | "no-inline-html" : false 15 | } 16 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cfml.mappings": [ 3 | { 4 | "logicalPath": "/coldbox", 5 | "directoryPath": "./test-harness/coldbox", 6 | "isPhysicalDirectoryPath": false 7 | }, 8 | { 9 | "logicalPath": "/testbox", 10 | "directoryPath": "./test-harness/testbox", 11 | "isPhysicalDirectoryPath": false 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /APACHE_LICENSE.TXT: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 10 | 11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 12 | 13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
14 | 15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 16 | 17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 18 | 19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 20 | 21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 22 | 23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 24 | 25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 26 | 27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 28 | 29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 30 | 31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 32 | 33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 34 | 35 | 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and 36 | 37 | 2. You must cause any modified files to carry prominent notices stating that You changed the files; and 38 | 39 | 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 40 | 41 | 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 42 | 43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 44 | 45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 46 | 47 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 48 | 49 | 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 50 | 51 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 52 | 53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 54 | 55 | END OF TERMS AND CONDITIONS 56 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Setting up your development environment 2 | 3 | - Clone the repository 4 | - `cd test-harness` 5 | - `cp .env.example .env` 6 | - Edit .env file appropriately 7 | - `box install` 8 | - `box run-script start` 9 | This will use Lucee 5 server config file by default. See `start:[version]` 10 | options in test-harness/box.json for other engines. 
11 | - Run tests: 12 | - In browser, visit `http://localhost:60299/tests/runner.cfm` 13 | - On command line: `box testbox run` 14 | -------------------------------------------------------------------------------- /ModuleConfig.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Ortus Solutions, Corp 3 | * www.ortussolutions.com 4 | * --- 5 | * This module connects your application to Amazon S3 6 | **/ 7 | component { 8 | 9 | // Module Properties 10 | this.title = "Amazon S3 SDK"; 11 | this.author = "Ortus Solutions, Corp"; 12 | this.webURL = "https://www.ortussolutions.com"; 13 | this.description = "This SDK will provide you with Amazon S3 connectivity for any ColdFusion (CFML) application."; 14 | 15 | // Module Entry Point 16 | this.entryPoint = "s3sdk"; 17 | // Model Namespace 18 | this.modelNamespace = "s3sdk"; 19 | // CF Mapping 20 | this.cfmapping = "s3sdk"; 21 | // Auto-map models 22 | this.autoMapModels = false; 23 | 24 | /** 25 | * Configure 26 | */ 27 | function configure(){ 28 | // Settings 29 | variables.settings = { 30 | accessKey : "", 31 | autoContentType : false, 32 | autoMD5 : false, 33 | awsDomain : "amazonaws.com", 34 | awsRegion : "us-east-1", 35 | debug : false, 36 | defaultACL : "public-read", 37 | defaultBucketName : "", 38 | defaultCacheControl : "no-store, no-cache, must-revalidate", 39 | defaultDelimiter : "/", 40 | defaultStorageClass : "STANDARD", 41 | defaultTimeOut : 300, 42 | encryptionCharset : "utf-8", 43 | retriesOnError : 3, 44 | secretKey : "", 45 | serviceName : "s3", 46 | signatureType : "V4", 47 | ssl : true, 48 | throwOnRequestError : true, 49 | defaultEncryptionAlgorithm : "", 50 | defaultEncryptionKey : "", 51 | defaultObjectOwnership : "ObjectWriter", 52 | defaultBlockPublicAcls : false, 53 | defaultIgnorePublicAcls : false, 54 | defaultBlockPublicPolicy : false, 55 | defaultRestrictPublicBuckets : false, 56 | urlStyle : "path" 57 | }; 58 | } 59 | 60 | /** 61 | * Fired when the module is registered and activated. 
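 * It registers the AmazonS3 client, plus the Sv2Util and Sv4Util signature utilities, in WireBox using the module settings above.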
62 | */ 63 | function onLoad(){ 64 | binder 65 | .map( "AmazonS3@s3sdk" ) 66 | .to( "#moduleMapping#.models.AmazonS3" ) 67 | .initArg( name = "accessKey", value = variables.settings.accessKey ) 68 | .initArg( name = "secretKey", value = variables.settings.secretKey ) 69 | .initArg( name = "awsDomain", value = variables.settings.awsDomain ) 70 | .initArg( name = "awsRegion", value = variables.settings.awsRegion ) 71 | .initArg( name = "encryptionCharset", value = variables.settings.encryptionCharset ) 72 | .initArg( name = "signatureType", value = variables.settings.signatureType ) 73 | .initArg( name = "ssl", value = variables.settings.ssl ) 74 | .initArg( name = "defaultTimeOut", value = variables.settings.defaultTimeOut ) 75 | .initArg( name = "defaultDelimiter", value = variables.settings.defaultDelimiter ) 76 | .initArg( name = "defaultBucketName", value = variables.settings.defaultBucketName ) 77 | .initArg( name = "defaultCacheControl", value = variables.settings.defaultCacheControl ) 78 | .initArg( name = "defaultStorageClass", value = variables.settings.defaultStorageClass ) 79 | .initArg( name = "defaultACL", value = variables.settings.defaultACL ) 80 | .initArg( name = "throwOnRequestError", value = variables.settings.throwOnRequestError ) 81 | .initArg( name = "autoContentType", value = variables.settings.autoContentType ) 82 | .initArg( name = "autoMD5", value = variables.settings.autoMD5 ) 83 | .initArg( name = "serviceName", value = variables.settings.serviceName ) 84 | .initArg( name = "debug", value = variables.settings.debug ) 85 | .initArg( name = "defaultEncryptionAlgorithm", value = variables.settings.defaultEncryptionAlgorithm ) 86 | .initArg( name = "defaultEncryptionKey", value = variables.settings.defaultEncryptionKey ) 87 | .initArg( name = "defaultObjectOwnership", value = variables.settings.defaultObjectOwnership ) 88 | .initArg( name = "defaultBlockPublicAcls", value = variables.settings.defaultBlockPublicAcls ) 89 | .initArg( name = "defaultIgnorePublicAcls", value = variables.settings.defaultIgnorePublicAcls ) 90 | .initArg( name = "defaultBlockPublicPolicy", value = variables.settings.defaultBlockPublicPolicy ) 91 | .initArg( 92 | name = "defaultRestrictPublicBuckets", 93 | value = variables.settings.defaultRestrictPublicBuckets 94 | ).initArg( 95 | name = "urlStyle", 96 | value = variables.settings.urlStyle 97 | ); 98 | binder.map( "Sv4Util@s3sdk" ).to( "#moduleMapping#.models.Sv4Util" ); 99 | 100 | binder.map( "Sv2Util@s3sdk" ).to( "#moduleMapping#.models.Sv2Util" ); 101 | } 102 | 103 | 104 | 105 | 106 | /** 107 | * Fired when the module is unregistered and unloaded 108 | */ 109 | function onUnload(){ 110 | } 111 | 112 | } 113 | -------------------------------------------------------------------------------- /box.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"Amazon S3 SDK", 3 | "version":"5.8.1", 4 | "slug":"s3sdk", 5 | "location":"https://downloads.ortussolutions.com/ortussolutions/coldbox-modules/s3sdk/@build.version@/s3sdk-@build.version@.zip", 6 | "author":"Ortus Solutions, Corp", 7 | "homepage":"https://github.com/coldbox-modules/s3sdk", 8 | "documentation":"https://github.com/coldbox-modules/s3sdk", 9 | "repository":{ 10 | "type":"git", 11 | "URL":"https://github.com/coldbox-modules/s3sdk" 12 | }, 13 | "bugs":"https://github.com/coldbox-modules/s3sdk/issues", 14 | "shortDescription":"This SDK will provide you with Amazon S3, Digital Ocean Spaces connectivity for any ColdFusion (CFML)
application.", 15 | "type":"modules", 16 | "license":[ 17 | { 18 | "type":"Apache2", 19 | "URL":"https://www.apache.org/licenses/LICENSE-2.0" 20 | } 21 | ], 22 | "contributors":[ 23 | "Andrew Davis" 24 | ], 25 | "dependencies":{}, 26 | "devDependencies":{ 27 | "commandbox-cfformat":"*", 28 | "commandbox-docbox":"*", 29 | "commandbox-dotenv":"*", 30 | "commandbox-cfconfig":"*" 31 | }, 32 | "installPaths":{}, 33 | "ignore":[ 34 | "**/.*", 35 | "build", 36 | "test-harness", 37 | "/server*.json" 38 | ], 39 | "testbox":{ 40 | "runner":"http://localhost:60299/tests/runner.cfm" 41 | }, 42 | "scripts":{ 43 | "build:module":"task run taskFile=build/Build.cfc :projectName=`package show slug` :version=`package show version`", 44 | "build:docs":"task run taskFile=build/Build.cfc target=docs :projectName=`package show slug` :version=`package show version`", 45 | "release":"recipe build/release.boxr", 46 | "format":"cfformat run models,test-harness/tests/**/*.cfc,*.cfc --overwrite", 47 | "format:watch":"cfformat watch models,test-harness/tests/**/*.cfc,*.cfc ./.cfformat.json", 48 | "format:check":"cfformat check models,test-harness/tests/**/*.cfc,*.cfc", 49 | "install:dependencies":"install --force && cd test-harness && install --force" 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /build/.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | 3 | notifications: 4 | slack: 5 | secure: FIHlTn/YO7Wgumm1uIqmoEsqjQA7fV0AE94Rjc5yKzM3AquQa8HicgDVVk0d2GrKRnl0xt3j4ZJV//VJyIjlCd/QVKuj48R2ChjEY2im3+99HFPafCUI5/S2uyowKU6mJTFonH9v6p41eqxdbiAxJdDGOT0V2Gpt3UBSNuHz8ED9/aIHqv+P7M+VD6Xd2XYwctPniWlaSWx57sWcnG/VkFG45qFQAyha64uxOOe4M3ZmG/n5FfauZ8cBVLiRKEIr+CyNhh1ujfzi7+4uzMlSNL5t/BbZamAQuZzqGzGQ9RVvIlyPgUGNJtDEE/hWS09aagXF5T6EMj00szizErh4J1/x4qZwml5+TcBN31E0QmAhCtZe85sr3tYgic+hEz9XX1yymQzf/C7n4to2yNvq0r4g51xDk8IuP95WEh7zaqLlvFZvBFgxpHZBMYlRvhytjOYDeIFRMcGwHZcXosaG2ejqDwcGq/LC4oeG4sSwmg9sdRrtcmcanrNqrBka86WYO6LntI3JdZ86/1ACEUHzhCCwvrKELc9Ji1xxGAgS7QKH+s2/hnJuiMyv73gOVLKYC+wPMLt+fvOmPLSEl+PJiAIlToBq1KUBg03RSQLfPOLD7OrJ8VvDZsEPwejqlGDyc4wRglS9OTi7SnN5LYHSDNDdGdREegWqq9qDHEYEVLI= 6 | 7 | env: 8 | # Fill out these global variables for build process 9 | global: 10 | - MODULE_ID=s3sdk 11 | matrix: 12 | - ENGINE=lucee@5 13 | - ENGINE=adobe@2016 14 | - ENGINE=adobe@2018 15 | - ENGINE=adobe@2021 16 | 17 | branches: 18 | only: 19 | - development 20 | - master 21 | 22 | dist: focal 23 | sudo: required 24 | 25 | before_install: 26 | # CommandBox Keys 27 | - curl -fsSl https://downloads.ortussolutions.com/debs/gpg | sudo apt-key add - 28 | - sudo echo "deb https://downloads.ortussolutions.com/debs/noarch /" | sudo tee -a 29 | /etc/apt/sources.list.d/commandbox.list 30 | 31 | install: 32 | # Install Commandbox 33 | - sudo apt-get update && sudo apt-get --assume-yes install jq commandbox 34 | # Install CommandBox Supporting Librarires 35 | - box install commandbox-cfconfig,commandbox-dotenv,commandbox-docbox 36 | # If using auto-publish, you will need to provide your API token with this line: 37 | - box config set endpoints.forgebox.APIToken=$FORGEBOX_API_TOKEN > /dev/null 38 | 39 | script: 40 | # Set Current Version 41 | - TARGET_VERSION=`cat $TRAVIS_BUILD_DIR/box.json | jq '.version' -r` 42 | - TRAVIS_TAG=${TARGET_VERSION} 43 | - echo "Starting build for ${MODULE_ID} v${TARGET_VERSION}" 44 | # Replace version so builder can issue it 45 | - box package set version=@build.version@+@build.number@ 46 | # Startup the harness 47 
| - cd test-harness 48 | # Seed our env 49 | - touch .env 50 | - printf "AWS_ACCESS_KEY=${AWS_ACCESS_KEY}\n" >> .env 51 | - printf "AWS_ACCESS_SECRET=${AWS_ACCESS_SECRET}\n" >> .env 52 | - printf "AWS_REGION=${AWS_REGION}\n" >> .env 53 | - printf "AWS_DOMAIN=${AWS_DOMAIN}\n" >> .env 54 | - printf "ENGINE=${ENGINE}\n" >> .env 55 | # run our dependency install to ensure the workbench is in place 56 | - box install 57 | # run our matrix server 58 | - box server start serverConfigFile="server-${ENGINE}.json" 59 | # Startup the app 60 | - curl http://localhost:60299 61 | # Debugging of tests 62 | #- curl http://localhost:60299/tests/runner.cfm?reporter=json -o testresults.json && cat testresults.json 63 | # move back to build dir to build it 64 | - cd $TRAVIS_BUILD_DIR 65 | # Build Project 66 | - box task run taskfile=build/Build target=run :version=${TARGET_VERSION} :projectName=${MODULE_ID} :buildID=${TRAVIS_BUILD_NUMBER} :branch=${TRAVIS_BRANCH} 67 | # Cat results for debugging 68 | #- cat build/results.json 69 | 70 | after_failure: 71 | - cd $TRAVIS_BUILD_DIR/test-harness 72 | # Display the contents of our root directory 73 | # Spit out our Commandbox log in case we need to debug 74 | - box server log server-${ENGINE}.json 75 | - cat `box system-log` 76 | 77 | deploy: 78 | # Module Deployment 79 | - provider: s3 80 | on: 81 | branch: 82 | - master 83 | - development 84 | condition: "$ENGINE = lucee@5" 85 | skip_cleanup: true 86 | #AWS Credentials need to be set in Travis 87 | access_key_id: $AWS_ACCESS_KEY 88 | secret_access_key: $AWS_ACCESS_SECRET 89 | # Destination 90 | bucket: "downloads.ortussolutions.com" 91 | local-dir: $TRAVIS_BUILD_DIR/.artifacts/$MODULE_ID 92 | upload-dir: ortussolutions/coldbox-modules/$MODULE_ID 93 | acl: public_read 94 | 95 | # API Docs Deployment 96 | - provider: s3 97 | on: 98 | branch: 99 | - master 100 | - development 101 | condition: "$ENGINE = lucee@5" 102 | skip_cleanup: true 103 | #AWS Credentials need to be set in Travis 104 | access_key_id: $AWS_ACCESS_KEY 105 | secret_access_key: $AWS_ACCESS_SECRET 106 | bucket: "apidocs.ortussolutions.com" 107 | local-dir: $TRAVIS_BUILD_DIR/.tmp/apidocs 108 | upload-dir: coldbox-modules/$MODULE_ID/$TARGET_VERSION 109 | acl: public_read 110 | 111 | # Github Release only on Master 112 | - provider: releases 113 | api_key: ${GITHUB_TOKEN} 114 | on: 115 | branch: 116 | - master 117 | condition: "$ENGINE = lucee@5" 118 | skip_cleanup: true 119 | edge: true 120 | file_glob: true 121 | file: $TRAVIS_BUILD_DIR/.artifacts/$MODULE_ID/**/* 122 | release_notes_file: $TRAVIS_BUILD_DIR/changelog-latest.md 123 | name: v${TRAVIS_TAG} 124 | tag_name: v${TRAVIS_TAG} 125 | overwrite: true 126 | 127 | after_deploy: 128 | # Move to build out artifact 129 | - cd ${TRAVIS_BUILD_DIR}/.tmp/${MODULE_ID} 130 | - cat box.json 131 | # Only publish once 132 | - if [ ${ENGINE} = 'lucee@5' ]; then box forgebox publish; fi 133 | -------------------------------------------------------------------------------- /build/Build.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * Build process for ColdBox Modules 3 | * Adapt to your needs. 
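 * Targets: run (default), runTests, buildSource, docs, and latestChangelog.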
4 | */ 5 | component { 6 | 7 | /** 8 | * Constructor 9 | */ 10 | function init(){ 11 | // Setup Pathing 12 | variables.cwd = getCWD().reReplace( "\.$", "" ); 13 | variables.artifactsDir = cwd & "/.artifacts"; 14 | variables.buildDir = cwd & "/.tmp"; 15 | variables.apiDocsURL = "http://localhost:60299/apidocs/"; 16 | variables.testRunner = "http://localhost:60299/tests/runner.cfm"; 17 | 18 | // Source Excludes Not Added to final binary 19 | variables.excludes = [ 20 | "build", 21 | "node-modules", 22 | "resources", 23 | "test-harness", 24 | "(package|package-lock).json", 25 | "webpack.config.js", 26 | "server-.*\.json", 27 | "docker-compose.yml", 28 | "^\..*" 29 | ]; 30 | 31 | // Cleanup + Init Build Directories 32 | [ 33 | variables.buildDir, 34 | variables.artifactsDir 35 | ].each( function( item ){ 36 | if ( directoryExists( item ) ) { 37 | directoryDelete( item, true ); 38 | } 39 | // Create directories 40 | directoryCreate( item, true, true ); 41 | } ); 42 | 43 | // Create Mappings 44 | fileSystemUtil.createMapping( 45 | "coldbox", 46 | variables.cwd & "test-harness/coldbox" 47 | ); 48 | 49 | return this; 50 | } 51 | 52 | /** 53 | * Run the build process: test, build source, docs, checksums 54 | * 55 | * @projectName The project name used for resources and slugs 56 | * @version The version you are building 57 | * @buldID The build identifier 58 | * @branch The branch you are building 59 | */ 60 | function run( 61 | required projectName, 62 | version = "1.0.0", 63 | buildID = createUUID(), 64 | branch = "development" 65 | ){ 66 | // Create project mapping 67 | fileSystemUtil.createMapping( arguments.projectName, variables.cwd ); 68 | 69 | // Build the source 70 | buildSource( argumentCollection = arguments ); 71 | 72 | // Build Docs 73 | arguments.outputDir = variables.buildDir & "/apidocs"; 74 | docs( argumentCollection = arguments ); 75 | 76 | // checksums 77 | buildChecksums(); 78 | 79 | // Build latest changelog 80 | latestChangelog(); 81 | 82 | // Finalize Message 83 | print 84 | .line() 85 | .boldMagentaLine( "Build Process is done! Enjoy your build!" ) 86 | .toConsole(); 87 | } 88 | 89 | /** 90 | * Run the test suites 91 | */ 92 | function runTests(){ 93 | // Tests First, if they fail then exit 94 | print.blueLine( "Testing the package, please wait..." ).toConsole(); 95 | 96 | command( "testbox run" ) 97 | .params( 98 | runner = variables.testRunner, 99 | verbose = true, 100 | outputFile = "#variables.cwd#/test-harness/results/test-results", 101 | outputFormats="json,antjunit" 102 | ) 103 | .run(); 104 | 105 | // Check Exit Code? 106 | if ( shell.getExitCode() ) { 107 | return error( "Cannot continue building, tests failed!" ); 108 | } 109 | } 110 | 111 | /** 112 | * Build the source 113 | * 114 | * @projectName The project name used for resources and slugs 115 | * @version The version you are building 116 | * @buldID The build identifier 117 | * @branch The branch you are building 118 | */ 119 | function buildSource( 120 | required projectName, 121 | version = "1.0.0", 122 | buildID = createUUID(), 123 | branch = "development" 124 | ){ 125 | // Build Notice ID 126 | print 127 | .line() 128 | .boldMagentaLine( 129 | "Building #arguments.projectName# v#arguments.version#+#arguments.buildID# from #cwd# using the #arguments.branch# branch." 
130 | ) 131 | .toConsole(); 132 | 133 | // Prepare exports directory 134 | variables.exportsDir = variables.artifactsDir & "/#projectName#/#arguments.version#"; 135 | directoryCreate( variables.exportsDir, true, true ); 136 | 137 | // Project Build Dir 138 | variables.projectBuildDir = variables.buildDir & "/#projectName#"; 139 | directoryCreate( 140 | variables.projectBuildDir, 141 | true, 142 | true 143 | ); 144 | 145 | // Copy source 146 | print.blueLine( "Copying source to build folder..." ).toConsole(); 147 | copy( 148 | variables.cwd, 149 | variables.projectBuildDir 150 | ); 151 | 152 | // Create build ID 153 | fileWrite( 154 | "#variables.projectBuildDir#/#projectName#-#version#+#buildID#", 155 | "Built with love on #dateTimeFormat( now(), "full" )#" 156 | ); 157 | 158 | // Updating Placeholders 159 | print.greenLine( "Updating version identifier to #arguments.version#" ).toConsole(); 160 | command( "tokenReplace" ) 161 | .params( 162 | path = "/#variables.projectBuildDir#/**", 163 | token = "@build.version@", 164 | replacement = arguments.version 165 | ) 166 | .run(); 167 | 168 | print.greenLine( "Updating build identifier to #arguments.buildID#" ).toConsole(); 169 | command( "tokenReplace" ) 170 | .params( 171 | path = "/#variables.projectBuildDir#/**", 172 | token = ( arguments.branch == "master" ? "@build.number@" : "+@build.number@" ), 173 | replacement = ( arguments.branch == "master" ? arguments.buildID : "-snapshot" ) 174 | ) 175 | .run(); 176 | 177 | // zip up source 178 | var destination = "#variables.exportsDir#/#projectName#-#version#.zip"; 179 | print.greenLine( "Zipping code to #destination#" ).toConsole(); 180 | cfzip( 181 | action = "zip", 182 | file = "#destination#", 183 | source = "#variables.projectBuildDir#", 184 | overwrite = true, 185 | recurse = true 186 | ); 187 | 188 | // Copy box.json for convenience 189 | fileCopy( 190 | "#variables.projectBuildDir#/box.json", 191 | variables.exportsDir 192 | ); 193 | } 194 | 195 | /** 196 | * Produce the API Docs 197 | */ 198 | function docs( 199 | required projectName, 200 | version = "1.0.0", 201 | outputDir = ".tmp/apidocs" 202 | ){ 203 | // Create project mapping 204 | fileSystemUtil.createMapping( arguments.projectName, variables.cwd ); 205 | // Generate Docs 206 | print.greenLine( "Generating API Docs, please wait..." ).toConsole(); 207 | directoryCreate( arguments.outputDir, true, true ); 208 | 209 | command( "docbox generate" ) 210 | .params( 211 | "source" = "models", 212 | "mapping" = "models", 213 | "strategy-projectTitle" = "#arguments.projectName# v#arguments.version#", 214 | "strategy-outputDir" = arguments.outputDir 215 | ) 216 | .run(); 217 | 218 | print.greenLine( "API Docs produced at #arguments.outputDir#" ).toConsole(); 219 | 220 | var destination = "#variables.exportsDir#/#projectName#-docs-#version#.zip"; 221 | print.greenLine( "Zipping apidocs to #destination#" ).toConsole(); 222 | cfzip( 223 | action = "zip", 224 | file = "#destination#", 225 | source = "#arguments.outputDir#", 226 | overwrite = true, 227 | recurse = true 228 | ); 229 | } 230 | 231 | /** 232 | * Build the latest changelog file: changelog-latest.md 233 | */ 234 | function latestChangelog(){ 235 | print.blueLine( "Building latest changelog..." ).toConsole(); 236 | 237 | if ( !fileExists( variables.cwd & "changelog.md" ) ) { 238 | return error( "Cannot continue building, changelog.md file doesn't exist!" 
); 239 | } 240 | 241 | fileWrite( 242 | variables.cwd & "changelog-latest.md", 243 | fileRead( variables.cwd & "changelog.md" ).split( "----" )[ 2 ].trim() & chr( 13 ) & chr( 10 ) 244 | ); 245 | 246 | print 247 | .greenLine( "Latest changelog file created at `changelog-latest.md`" ) 248 | .line() 249 | .line( fileRead( variables.cwd & "changelog-latest.md" ) ); 250 | } 251 | 252 | /********************************************* PRIVATE HELPERS *********************************************/ 253 | 254 | /** 255 | * Build Checksums 256 | */ 257 | private function buildChecksums(){ 258 | print.greenLine( "Building checksums" ).toConsole(); 259 | command( "checksum" ) 260 | .params( 261 | path = "#variables.exportsDir#/*.zip", 262 | algorithm = "SHA-512", 263 | extension = "sha512", 264 | write = true 265 | ) 266 | .run(); 267 | command( "checksum" ) 268 | .params( 269 | path = "#variables.exportsDir#/*.zip", 270 | algorithm = "md5", 271 | extension = "md5", 272 | write = true 273 | ) 274 | .run(); 275 | } 276 | 277 | /** 278 | * DirectoryCopy is broken in lucee 279 | */ 280 | private function copy( src, target, recurse = true ){ 281 | // process paths with excludes 282 | directoryList( 283 | src, 284 | false, 285 | "path", 286 | function( path ){ 287 | var isExcluded = false; 288 | variables.excludes.each( function( item ){ 289 | if ( path.replaceNoCase( variables.cwd, "", "all" ).reFindNoCase( item ) ) { 290 | isExcluded = true; 291 | } 292 | } ); 293 | return !isExcluded; 294 | } 295 | ).each( function( item ){ 296 | // Copy to target 297 | if ( fileExists( item ) ) { 298 | print.blueLine( "Copying #item#" ).toConsole(); 299 | fileCopy( item, target ); 300 | } else { 301 | print.greenLine( "Copying directory #item#" ).toConsole(); 302 | directoryCopy( 303 | item, 304 | target & "/" & item.replace( src, "" ), 305 | true 306 | ); 307 | } 308 | } ); 309 | } 310 | 311 | /** 312 | * Gets the last Exit code to be used 313 | **/ 314 | private function getExitCode(){ 315 | return ( createObject( "java", "java.lang.System" ).getProperty( "cfml.cli.exitCode" ) ?: 0 ); 316 | } 317 | 318 | } 319 | -------------------------------------------------------------------------------- /build/release.boxr: -------------------------------------------------------------------------------- 1 | # This recipe signifies a new release of the module by doing merges and bumps accordingly 2 | 3 | # Check out master and update it locally 4 | !git checkout -f master 5 | !git pull origin master 6 | 7 | # Merge development into it for release 8 | !git merge --no-ff development 9 | 10 | # Tag the master repo with the version from box.json 11 | echo "git tag v`package show version`" | run 12 | 13 | # Push all branches back out to github 14 | !git push origin --all 15 | 16 | # Push all tags 17 | !git push origin --tags 18 | 19 | # Check development again 20 | !git checkout -f development 21 | 22 | # Bump to prepare for a new release, do minor, change if needed and don't tag 23 | bump --minor --!tagVersion 24 | !git commit -a -m "version bump" 25 | !git push origin development -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ---- 9 | ## [Unreleased] 10 | 11 | ### Fixed 12 | 13 | * Set all `hash` usage algorithms to MD5 for Adobe change to default algorithm 14 | 15 | ## v5.7.1 => 2023-SEP-21 16 | 17 | ### Fixed 18 | 19 | * Added `entryPoint`, `modelNamespace` and `cfmapping` keys to ModuleConfig, to ensure mappings for downstream modules are available during framework load 20 | 21 | ## v5.7.0 => 2023-MAY-03 22 | 23 | ### Changed 24 | 25 | * Updates permission handling to account for updated AWS default bucket policies 26 | 27 | ## v5.6.0 => 2023-MAR-07 28 | 29 | ### Added 30 | 31 | * Support for overriding response headers like content type for pre-signed URLs 32 | 33 | ## v5.5.2 => 2023-FEB-07 34 | 35 | ### Fixed 36 | 37 | * Multi-part upload concurrency fixes 38 | 39 | ## v5.5.1 => 2023-FEB-03 40 | 41 | ### Added 42 | 43 | * Support for multi-part file uploads to conserve memory usage 44 | 45 | ## v5.4.1 => 2023-FEB-02 46 | 47 | 48 | ## v5.3.1 => 2023-FEB-02 49 | 50 | ## v5.2.0 => 2023-JAN-26 51 | 52 | ### Added 53 | 54 | * Add support for server side encryption 55 | * Add retry support for S3 connection failures 56 | 57 | ## v5.1.2 => 2022-OCT-19 58 | 59 | ### Added 60 | 61 | * Added property to ensure URLEndpointHostname can be retreived 62 | 63 | ## v5.1.1 => 2022-NOV-1 64 | 65 | ### Fixed 66 | 67 | * Fixes an issue when header content types were not present in the arguments scope 68 | 69 | ## v5.0.0 => 2022-OCT-19 70 | 71 | ### Changed / Compatibility 72 | 73 | * Dropped Adobe 2016 Support 74 | * Configuration setting: `encryption_charset` changed to `encryptionCharset` for consistency. **Breaking change** 75 | 76 | ### Added 77 | 78 | * Revamp of ACLs to allow any grant to be added to any object. 79 | * Ability to request `PUT` signed URLs so you don't have to upload to a middle server and then S3. You can now create a signed PUT operation that you can upload directly to S3. 80 | * Encoding of signed URLs to avoid issues with weird filenames 81 | * Preserve content type on copy 82 | * Ability to choose how many times to retry s3 operations when they fail with a 500 or 503. This can happen due to throttling or rate limiting. You can configure it with the new setting: `retriesOnError` and it defaults to 3. 83 | * New ColdBox Module template 84 | * Add bucket name to test suite 85 | * Github actions migration 86 | * Avoid error logs for `objectExists()` 87 | 88 | ### Fixed 89 | 90 | * @bdw429s Fixed tons of issues with filename encodings. :party: 91 | * 404 is not an "error" status when verifying for errors on requests 92 | * The argument name in `putObject()` was incorrect "arguments.content" instead of "arguments.data", this only happens when md5 == "auto" so it probably slipped by for some time. 93 | 94 | ---- 95 | 96 | ## v4.8.0 => 2021-JUL-06 97 | 98 | ### Added 99 | 100 | * Migrations to github actions 101 | * Added new argument to `downloadObject( getAsBinary : 'no' )` so you can get binary or non binary objects. Defaults to non binary. 102 | 103 | ---- 104 | 105 | ## v4.7.0 => 2021-MAR-24 106 | 107 | ### Added 108 | 109 | * Adobe 2021 to the testing matrix and supported engines 110 | 111 | ### Fixed 112 | 113 | * Adobe 2021 issues with date formatting 114 | * Watcher needed to use the root `.cfformat.json` 115 | 116 | ---- 117 | 118 | ## v4.6.0 => 2021-FEB-18 119 | 120 | ### Added 121 | 122 | * New method: `setAccessControlPolicy()` so you can add ACLs to buckets 123 | * `getBucket()` has been updated to use the ListObjectsv2 API - which is recommended by AWS for more detailed information. 
124 | * Implements SigV4-signed requests thanks to @sbleon's amazing work! 125 | * Added more formatting rules via cfformat 126 | * Added a `gitattributes` for cross OS compatibilities 127 | * Added a `markdownlint.json` for more control over markdown 128 | * Added new package script : `format:watch` to format and watch :) 129 | 130 | ### Changed 131 | 132 | * Updated tests to fire up in ColdBox 6 133 | * Handles some cleanup of parameters which were being passed as resource strings ( which were then being encoded and blowing up ). 134 | * Updated release recipe to match newer modules. 135 | 136 | ### Removed 137 | 138 | * Cleanup of old cfml engine files 139 | * Cleanup of old init code 140 | * Removed some settings from test harness 141 | 142 | ---- 143 | ## v4.5.0 => 2020-MAR-11 144 | 145 | * `Feature` : `SV4Util` is now a singleton for added performance and more configuration expansion by adding the sdk reference 146 | * `Improvement` : Better error messages when s3 goes :boom: 147 | * `Bug` : Fix for ACF double encoding 148 | 149 | ---- 150 | ## v4.4.0 => 2019-MAY-15 151 | 152 | * Reworked SSL setup to allow for dynamic creation of the URL entry point 153 | * Removed ACF11 officially; it is impossible to deal with their cfhttp junk! It works, but at your own risk. 154 | 155 | ---- 156 | ## v4.3.0 => 2019-APR-05 157 | 158 | * Removal of debugging code 159 | 160 | ---- 161 | ## v4.2.1 => 2019-MAR-26 162 | 163 | * Avoid double encoding on `copy`, `putObjectFile`, and `delete()` operations 164 | * Consolidate ssl to use `variables` instead of `arguments` 165 | 166 | ---- 167 | ## v4.2.0 => 2019-MAR-15 168 | 169 | * ACF compatibilities 170 | * Fixes for auth on folder commands 171 | * New constructor args: `defaultDelimiter` for folder operations, `defaultBucketname` so you can set a default bucket for all bucket related operations. 172 | * Avoid nasty error on bucket deletion 173 | * Add new method `objectExists()` boolean check for objects 174 | * Fix URI encoding on signatures for headers and query params 175 | 176 | ---- 177 | ## v4.1.1 => 2019-MAR-26 178 | 179 | * Left some dump/aborts 180 | 181 | ---- 182 | ## v4.1.0 => 2019-MAR-13 183 | 184 | * DigitalOcean Spaces compatibility 185 | * Region naming support: you can now pass the `awsRegion` argument to the constructor to select the AWS or DO region 186 | * SSL is now the default for all operations 187 | * Addition of two new constructor params: `awsRegion` and `awsDomain` to support regions and multi-domains for AWS and Digital Ocean 188 | * Added log debugging to calls and signatures if LogBox is on `debug` level 189 | 190 | ---- 191 | ## v4.0.1 => 2018-OCT-22 192 | 193 | * Fixes to models location, oopsy!
194 | 195 | ---- 196 | ## v4.0.0 => 2018-OCT-20 197 | 198 | * AWS Region Support 199 | * Migrated Module Layout to use Ortus Standard Module Layout 200 | * Added testing for all ACF Engines 201 | * Rework as generic Box module (compatibility change); you must move your `s3sdk` top level settings in ColdBox Config to `moduleSettings.s3sdk` 202 | * `deleteBucket()` returns **false** if bucket doesn't exist instead of throwing an exception 203 | * Few optimizations and documentation of the API 204 | 205 | ---- 206 | ## v3.0.1 207 | 208 | * Travis Updates and self-publishing 209 | 210 | ---- 211 | ## v3.0.0 212 | 213 | * Upgrade to ColdBox 4 standards 214 | * Upgrade to latest Amazon S3 SDK standards 215 | * Travis build process 216 | 217 | ---- 218 | ## v2.0 219 | 220 | * Original Spec as a ColdBox Plugin 221 | -------------------------------------------------------------------------------- /models/MiniLogBox.cfc: -------------------------------------------------------------------------------- 1 | component { 2 | 3 | MiniLogBox function init( boolean debug ){ 4 | variables.debug = arguments.debug; 5 | variables.logs = []; 6 | return this; 7 | } 8 | 9 | boolean function canDebug(){ 10 | return variables.debug; 11 | } 12 | 13 | function debug( required string msg, data ){ 14 | arrayAppend( variables.logs, arguments.msg ); 15 | if ( structKeyExists( arguments, "data" ) ) { 16 | arrayAppend( variables.logs, arguments.data ); 17 | } 18 | } 19 | 20 | function error( required string msg, data ){ 21 | arrayAppend( variables.logs, "Error: " & arguments.msg ); 22 | if ( structKeyExists( arguments, "data" ) ) { 23 | arrayAppend( variables.logs, arguments.data ); 24 | } 25 | } 26 | 27 | function warn( required string msg, data ){ 28 | arrayAppend( variables.logs, "Warn: " & arguments.msg ); 29 | if ( structKeyExists( arguments, "data" ) ) { 30 | arrayAppend( variables.logs, arguments.data ); 31 | } 32 | } 33 | 34 | array function getLogs(){ 35 | return variables.logs; 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /models/Sv2Util.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * Amazon Web Services Signature 2 Utility for ColdFusion 3 | **/ 4 | component singleton { 5 | 6 | /** 7 | * Creates a new instance of the utility for generating signatures using the supplied settings 8 | * 9 | * @return new instance initialized with specified settings 10 | */ 11 | Sv2Util function init(){ 12 | return this; 13 | } 14 | 15 | /** 16 | * Generates a version 2 signature and returns headers for the request 17 | * 18 | * @requestMethod - Request operation, ie PUT, GET, POST, etcetera. 19 | * @requestURI - Absolute path of the URI. Portion of the URL after the host, to the "?" beginning the query string 20 | * @requestHeaders - Structure of HTTP headers used for the request. 21 | * @requestParams - Structure containing any url parameters for the request. 22 | * @amzHeaders - Structure containing any amazon headers used to build the signature.
23 | */ 24 | public struct function generateSignatureData( 25 | required string requestMethod, 26 | required string hostName, 27 | required string requestURI, 28 | required any requestBody, 29 | required struct requestHeaders, 30 | required struct requestParams, 31 | required string accessKey, 32 | required string secretKey, 33 | required string regionName, 34 | required string serviceName, 35 | boolean signedPayload = true, 36 | array excludeHeaders = [], 37 | string amzDate, 38 | string dateStamp 39 | ){ 40 | var props = { 41 | requestHeaders : arguments.requestHeaders, 42 | requestParams : arguments.requestParams, 43 | canonicalURI : "", 44 | accessKey : arguments.accessKey, 45 | secretKey : arguments.secretKey, 46 | regionName : arguments.regionName, 47 | serviceName : arguments.serviceName, 48 | hostName : arguments.hostName, 49 | requestMethod : arguments.requestMethod, 50 | requestPayload : arguments.signedPayload ? hash256( arguments.requestBody ) : arguments.requestBody, 51 | excludeHeaders : arguments.excludeHeaders 52 | }; 53 | 54 | // Override current utc date and time 55 | if ( structKeyExists( arguments, "amzDate" ) || structKeyExists( arguments, "dateStamp" ) ) { 56 | props.dateStamp = arguments.dateStamp; 57 | props.amzDate = arguments.amzDate; 58 | } else { 59 | var utcDateTime = dateConvert( "local2UTC", now() ); 60 | // Generate UTC time stamps 61 | props.dateStamp = dateFormat( utcDateTime, "yyyymmdd" ); 62 | props.amzDate = props.dateStamp & "T" & timeFormat( utcDateTime, "HHmmss" ) & "Z"; 63 | } 64 | 65 | var sortedHeaders = structSort( props.requestHeaders, "text", "asc" ); 66 | for ( var header in sortedHeaders ) { 67 | props.canonicalURI &= lCase( trim( header ) ) & ":" & props.requestHeaders[ header ] & chr( 10 ); 68 | }; 69 | 70 | props.canonicalURI = props.requestMethod & chr( 10 ) 71 | & ( props.requestHeaders[ "content-md5" ] ?: "" ) & chr( 10 ) 72 | & ( props.requestHeaders[ "content-type" ] ?: "" ) & chr( 10 ) 73 | & props.amzDate & chr( 10 ) 74 | & props.canonicalURI 75 | & "/" & arguments.requestURI; 76 | 77 | // Calculate the hash of the information 78 | var digest = hMAC_SHA1( props.secretKey, props.canonicalURI ); 79 | // fix the returned data to be a proper signature 80 | props.signature = toBase64( digest ); 81 | props.authorizationHeader = "AWS #props.accessKey#:#props.signature#"; 82 | 83 | return props; 84 | } 85 | 86 | 87 | /** 88 | * NSA SHA-1 Algorithm: RFC 2104HMAC-SHA1 89 | */ 90 | private binary function HMAC_SHA1( required string signKey, required string signMessage ){ 91 | var jMsg = javacast( "string", arguments.signMessage ).getBytes( encryptionCharset ); 92 | var jKey = javacast( "string", arguments.signKey ).getBytes( encryptionCharset ); 93 | var key = createObject( "java", "javax.crypto.spec.SecretKeySpec" ).init( jKey, "HmacSHA1" ); 94 | var mac = createObject( "java", "javax.crypto.Mac" ).getInstance( key.getAlgorithm() ); 95 | 96 | mac.init( key ); 97 | mac.update( jMsg ); 98 | 99 | return mac.doFinal(); 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /models/Sv4Util.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * Amazon Web Services Signature 4 Utility for ColdFusion 3 | * Version Date: 2016-04-12 (Alpha) 4 | * 5 | * Copyright 2016 Leigh (cfsearching) 6 | * 7 | * Requirements: Adobe ColdFusion 10+ 8 | * AWS Signature 4 specifications: http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html 9 | * 10 | * 
Licensed under the Apache License, Version 2.0 (the "License"); 11 | * you may not use this file except in compliance with the License. 12 | * You may obtain a copy of the License at 13 | * 14 | * http://www.apache.org/licenses/LICENSE-2.0 15 | * 16 | * Unless required by applicable law or agreed to in writing, software 17 | * distributed under the License is distributed on an "AS IS" BASIS, 18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | * See the License for the specific language governing permissions and 20 | * limitations under the License. 21 | */ 22 | component singleton { 23 | 24 | /** 25 | * Creates a new instance of the utility for generating signatures using the supplied settings 26 | * 27 | * @return new instance initalized with specified settings 28 | */ 29 | Sv4Util function init(){ 30 | // Algorithms used in calculating the signature 31 | variables.signatureAlgorithm = "AWS4-HMAC-SHA256"; 32 | variables.hashAlorithm = "SHA256"; 33 | 34 | return this; 35 | } 36 | 37 | 38 | /** 39 | * Generates Signature 4 properties for the supplied request settings. 40 | * 41 | * @requestMethod - Request operation, ie PUT, GET, POST, etcetera. 42 | * @hostName - Target host name, example: bucketname.s3.amazonaws.com 43 | * @requestURI - Absolute path of the URI. Portion of the URL after the host, to the "?" beginning the query string 44 | * @requestBody - Body of the request. Either a string or binary value. 45 | * @requestHeaders - Structure of http headers for used the request. Mandatory host and date headers are automatically generated. 46 | * @requestParams - Structure containing any url parameters for the request. Mandatory parameters are automatically generated. 47 | * @excludeHeaders - (Optional) List of header names AWS can exclude from the signing process. Default is an empty array, which means all headers should be "signed" 48 | * @amzDate - (Optional) Override the automatic X-Amz-Date calculation with this value. Current UTC date. If supplied, @dateStamp is required. Format: yyyyMMddTHHnnssZ 49 | * @regionName - (Optional) Override the instance region name with this value. Example "us-east-1" 50 | * @serviceName - (Optional) Override the instance service name with this value. Example "s3" 51 | * @dateStamp - (Optional) Override the automatic dateStamp calculation with this value. Current UTC date (only). If supplied, @amzDate is required. Format: yyyyMMdd 52 | * @presigningDownloadURL - (Optional) Generates a signed request with all required parameters in the query string, and no headers except for Host. 
53 | * 54 | * @return Signature value, authorization header and all properties part of the signature calculation: ALGORITHM,AMZDATE,AUTHORIZATIONHEADER,CANONICALHEADERS,CANONICALQUERYSTRING,CANONICALREQUEST,CANONICALURI,CREDENTIALSCOPE,DATESTAMP,EXCLUDEHEADERS,HOSTNAME,REGIONNAME,REQUESTHEADERS,REQUESTMETHOD,REQUESTPARAMS,REQUESTPAYLOAD,SERVICENAME,SIGNATURE,SIGNEDHEADERS,SIGNKEYBYTES,STRINGTOSIGN 55 | */ 56 | public struct function generateSignatureData( 57 | required string requestMethod, 58 | required string hostName, 59 | required string requestURI, 60 | required any requestBody, 61 | required struct requestHeaders, 62 | required struct requestParams, 63 | required string accessKey, 64 | required string secretKey, 65 | required string regionName, 66 | required string serviceName, 67 | array excludeHeaders = [], 68 | string amzDate, 69 | string dateStamp, 70 | boolean presignDownloadURL = false 71 | ){ 72 | // Initialize properties 73 | var props = {}; 74 | var hasQueryParams = structCount( arguments.requestParams ) > 0; 75 | var utcDateTime = dateConvert( "local2UTC", now() ); 76 | 77 | // Generate UTC time stamps 78 | props.dateStamp = dateFormat( utcDateTime, "yyyymmdd" ); 79 | props.amzDate = props.dateStamp & "T" & timeFormat( utcDateTime, "HHmmss" ) & "Z"; 80 | 81 | // Override current utc date and time 82 | if ( structKeyExists( arguments, "amzDate" ) || structKeyExists( arguments, "dateStamp" ) ) { 83 | props.dateStamp = arguments.dateStamp; 84 | props.amzDate = arguments.amzDate; 85 | } 86 | 87 | props.accessKey = arguments.accessKey; 88 | props.secretKey = arguments.secretKey; 89 | props.regionName = arguments.regionName; 90 | props.serviceName = arguments.serviceName; 91 | 92 | // /////////////////////////////////// 93 | // Basic request properties 94 | // /////////////////////////////////// 95 | props.algorithm = variables.signatureAlgorithm; 96 | props.hostName = arguments.hostName; 97 | props.requestMethod = arguments.requestMethod; 98 | props.canonicalURI = buildCanonicalURI( requestURI = arguments.requestURI ); 99 | 100 | // For signed requests, the payload is a checksum 101 | props.requestPayload = hash256( arguments.requestBody ); 102 | props.credentialScope = buildCredentialScope( 103 | dateStamp = props.dateStamp, 104 | serviceName = props.serviceName, 105 | regionName = props.regionName 106 | ); 107 | 108 | 109 | // /////////////////////////////////// 110 | // Validate headers/parameters 111 | // /////////////////////////////////// 112 | props.requestHeaders = duplicate( arguments.requestHeaders ); 113 | props.requestParams = duplicate( arguments.requestParams ); 114 | 115 | // Host header is mandatory for ALL requests 116 | props.requestHeaders[ "Host" ] = arguments.hostName; 117 | 118 | // Apply mandatory headers and parameters 119 | if ( presignDownloadURL ) { 120 | // First, normalize request headers 121 | props.requestHeaders = cleanHeaders( props.requestHeaders ); 122 | props.excludeHeaders = cleanHeaderNames( arguments.excludeHeaders ); 123 | 124 | // Signed requests must include a checksum, ie hash of payload 125 | // props.requestParams["X-Amz-Content-Sha256"] = props.requestPayload; 126 | props.requestPayload = "UNSIGNED-PAYLOAD"; 127 | 128 | // Identify which headers will be included in the signing process 129 | props.signedHeaders = buildSignedHeaders( 130 | requestHeaders = props.requestHeaders, 131 | excludeNames = props.excludeHeaders 132 | ); 133 | 134 | // When presigning a download URL, canonical query string must also 135 | // include the 
parameters used as part of the signing process, ie hashing algorithm, 136 | // credential scope, date, and signed headers parameters. 137 | props.requestParams[ "X-Amz-Algorithm" ] = variables.signatureAlgorithm; 138 | props.requestParams[ "X-Amz-Credential" ] = "#props.accessKey#/#props.credentialScope#"; 139 | props.requestParams[ "X-Amz-SignedHeaders" ] = props.signedHeaders; 140 | props.requestParams[ "X-Amz-Date" ] = props.amzDate; 141 | 142 | // Finally, normalize url parameters 143 | props.requestParams = encodeQueryParams( queryParams = props.requestParams ); 144 | } 145 | // All other request types (PUT, DELETE, POST, ....) 146 | else { 147 | // Signed requests must include a checksum, ie hash of payload 148 | props.requestHeaders[ "X-Amz-Content-Sha256" ] = props.requestPayload; 149 | 150 | // Host header is mandatory for ALL requests 151 | props.requestHeaders[ "Host" ] = arguments.hostName; 152 | // Date header is mandatory when not passing values in url 153 | props.requestHeaders[ "X-Amz-Date" ] = props.amzDate; 154 | 155 | // Normalize headers and url parameters 156 | props.requestHeaders = cleanHeaders( props.requestHeaders ); 157 | props.excludeHeaders = cleanHeaderNames( arguments.excludeHeaders ); 158 | // Identify which headers will be included in the signing process 159 | props.signedHeaders = buildSignedHeaders( 160 | requestHeaders = props.requestHeaders, 161 | excludeNames = props.excludeHeaders 162 | ); 163 | props.requestParams = encodeQueryParams( queryParams = props.requestParams ); 164 | } 165 | 166 | 167 | // /////////////////////////////////////// 168 | // Generate signature 169 | // /////////////////////////////////////// 170 | 171 | // Generate header, query, and request strings 172 | props.canonicalQueryString = buildCanonicalQueryString( requestParams = props.requestParams ); 173 | props.canonicalHeaders = buildCanonicalHeaders( requestHeaders = props.requestHeaders ); 174 | props.canonicalRequest = buildCanonicalRequest( argumentCollection = props ); 175 | 176 | // Generate signature and authorization strings 177 | props.stringToSign = generateStringToSign( argumentCollection = props ); 178 | props.signKeyBytes = generateSignatureKey( argumentCollection = props ); 179 | props.signature = lCase( 180 | binaryEncode( hmacBinary( message = props.stringToSign, key = props.signKeyBytes ), "hex" ) 181 | ); 182 | props.authorizationHeader = buildAuthorizationHeader( argumentCollection = props ); 183 | 184 | // (Debugging) Convert binary values into human readable form 185 | props.signKeyBytes = binaryEncode( props.signKeyBytes, "hex" ); 186 | 187 | return props; 188 | } 189 | 190 | /** 191 | * Generates request string to sign 192 | * 193 | * @amzDate - Current timestamp in UTC. Format yyyyMMddTHHnnssZ 194 | * @credentialScope - String defining scope of request. See buildCredentialScope(). 
195 | * @canonicalRequest - Canonical request string 196 | * 197 | * @return - String to be signed 198 | */ 199 | private string function generateStringToSign( 200 | required string amzDate, 201 | required string credentialScope, 202 | required string canonicalRequest 203 | ){ 204 | // Format: Algorithm + '\n' + RequestDate + '\n' + CredentialScope + '\n' + HashedCanonicalRequest 205 | var elements = [ 206 | variables.signatureAlgorithm, 207 | arguments.amzDate, 208 | arguments.credentialScope, 209 | hash256( arguments.canonicalRequest ) 210 | ]; 211 | 212 | return arrayToList( elements, chr( 10 ) ); 213 | } 214 | 215 | /** 216 | * Generate canonical request string 217 | * 218 | * @requestMethod - Request operation, ie PUT, GET, POST, etcetera. 219 | * @canonicalURI - Canonical URL string. See buildCanonicalURI 220 | * @canonicalHeaders - Canonical header string. See buildCanonicalHeaders 221 | * @canonicalQueryString - Canonical query string. See buildCanonicalQueryString 222 | * @signedHeaders - List of signed headers. See buildSignedHeaders 223 | * @requestPayload - For signed requests, this is the hash of the request body. Otherwise, the raw request body 224 | */ 225 | private string function buildCanonicalRequest( 226 | required string requestMethod, 227 | required string canonicalURI, 228 | required string canonicalQueryString, 229 | required string canonicalHeaders, 230 | required string signedHeaders, 231 | required string requestPayload 232 | ){ 233 | var canonicalRequest = ""; 234 | 235 | // Build ordered list of elements in the request, delimited by new lines 236 | // Note: Headers and signed headers should never be empty. "Host" header is always required. 237 | canonicalRequest = arguments.requestMethod & chr( 10 ) 238 | & arguments.canonicalURI & chr( 10 ) 239 | & arguments.canonicalQueryString & chr( 10 ) 240 | & arguments.canonicalHeaders & chr( 10 ) 241 | & arguments.signedHeaders & chr( 10 ) 242 | & arguments.requestPayload; 243 | 244 | return canonicalRequest; 245 | } 246 | 247 | /** 248 | * Generates canonical query string 249 | * 257 | * 258 | * @requestParams Structure containing all parameters passed via the query string. 259 | * @isEncoded If true, the supplied parameters are already url encoded 260 | * 261 | * @return canonical query string 262 | */ 263 | private string function buildCanonicalQueryString( required struct requestParams, boolean isEncoded = true ){ 264 | var encodedParams = ""; 265 | var paramNames = ""; 266 | var paramPairs = ""; 267 | 268 | // Ensure parameter names and values are URL encoded first 269 | encodedParams = isEncoded ? arguments.requestParams : encodeQueryParams( arguments.requestParams ); 270 | 271 | // Extract and sort encoded parameter names 272 | paramNames = structKeyArray( encodedParams ); 273 | arraySort( paramNames, "text", "asc" ); 274 | 275 | // Build array of sorted name/value pairs 276 | paramPairs = []; 277 | arrayEach( paramNames, function( string param ){ 278 | arrayAppend( paramPairs, arguments.param & "=" & encodedParams[ arguments.param ] ); 279 | } ); 280 | 281 | // Finally, generate sorted list of parameters, delimited by "&" 282 | return arrayToList( paramPairs, "&" ); 283 | } 284 | 285 | 286 | /** 287 | * Generates a list of signed header names. 288 | * 289 | *

"...By adding this list of headers, you tell AWS which headers in the request 290 | * are part of the signing process and which ones AWS can ignore (for example, any 291 | * additional headers added by a proxy) for purposes of validating the request."

292 | * 293 | * @requestHeaders Raw headers to be included in request 294 | * @excludeNames Names of any headers AWS should ignore for the signing process 295 | * 296 | * @return Sorted list of signed header names, delimited by semi-colon ";" 297 | */ 298 | private string function buildSignedHeaders( required struct requestHeaders, required array excludeNames ){ 299 | var name = ""; 300 | var headerNames = []; 301 | var allHeaders = !arrayLen( arguments.excludeNames ); 302 | 303 | // Identify which headers are "signed" 304 | structEach( arguments.requestHeaders, function( string name, any value ){ 305 | if ( allHeaders || !arrayFindNoCase( excludeNames, arguments.name ) ) { 306 | arrayAppend( headerNames, arguments.name ); 307 | } 308 | } ); 309 | 310 | // Sort header names in ASCII order 311 | arraySort( headerNames, "text", "asc" ); 312 | 313 | // Return list of names 314 | return arrayToList( headerNames, ";" ); 315 | } 316 | 317 | /** 318 | * Generates a list of canonical headers 319 | * 320 | * @requestHeaders Structure containing headers to be included in request hash 321 | * 322 | * @return Sorted list of header pairs, delimited by new lines 323 | */ 324 | private string function buildCanonicalHeaders( required struct requestHeaders ){ 325 | var pairs = ""; 326 | var names = ""; 327 | var headers = ""; 328 | 329 | // Scrub the header names and values first 330 | headers = cleanHeaders( arguments.requestHeaders ); 331 | 332 | // Sort header names in ASCII order 333 | names = structKeyArray( headers ); 334 | arraySort( names, "text", "asc" ); 335 | 336 | // Build array of sorted header name and value pairs 337 | pairs = []; 338 | arrayEach( names, function( string key ){ 339 | arrayAppend( pairs, arguments.key & ":" & headers[ arguments.key ] ); 340 | } ); 341 | 342 | // Generate list. Note: List must END WITH a new line character 343 | return arrayToList( pairs, chr( 10 ) ) & chr( 10 ); 344 | } 345 | 346 | 347 | /** 348 | * Generates canonical URI. Encoded, absolute path component of the URI, 349 | * which is everything in the URI from the HTTP host to the question mark character ("?") 350 | * that begins the query string parameters (if any) 351 | * 352 | * @uriPath URI or path. If empty, "/" will be used 353 | * 354 | * @return URL encoded path 355 | */ 356 | public string function buildCanonicalURI( required string requestURI ){ 357 | var path = arguments.requestURI; 358 | // Return "/" for empty path 359 | if ( !len( trim( path ) ) ) { 360 | path = "/"; 361 | } 362 | 363 | // Convert to absolute path (if needed) 364 | if ( left( path, 1 ) != "/" ) { 365 | path = "/" & path; 366 | } 367 | 368 | return urlEncodePath( path ); 369 | } 370 | 371 | 372 | /** 373 | * Generates signing key for AWS Signature V4 374 | * 375 | *

Source: http://stackoverflow.com/questions/32513197/how-to-derive-a-sign-in-key-for-aws-signature-version-4-in-coldfusion

376 | * 377 | * @dateStamp Date stamp in yyyymmdd format. Example: 20150830 378 | * @regionName Region name that is part of the service's endpoint (alphanumeric). Example: "us-east-1" 379 | * @serviceName Service name that is part of the service's endpoint (alphanumeric). Example: "s3" 380 | * @algorithm HMAC algorithm. Default is "HMACSHA256" 381 | * 382 | * @return signing key in binary 383 | */ 384 | private binary function generateSignatureKey( 385 | required string dateStamp, 386 | required string regionName, 387 | required string serviceName, 388 | required string secretKey, 389 | string algorithm = "HMACSHA256" 390 | ){ 391 | var kSecret = charsetDecode( "AWS4" & arguments.secretKey, "UTF-8" ); 392 | var kDate = hmacBinary( arguments.dateStamp, kSecret ); 393 | // Region information as a lowercase alphanumeric string 394 | var kRegion = hmacBinary( lCase( arguments.regionName ), kDate ); 395 | // Service name information as a lowercase alphanumeric string 396 | var kService = hmacBinary( lCase( arguments.serviceName ), kRegion ); 397 | // A special termination string: aws4_request 398 | var kSigning = hmacBinary( "aws4_request", kService ); 399 | 400 | return kSigning; 401 | } 402 | 403 | 404 | /** 405 | * Generates string indicating the scope for which the signature is valid. Credential scope 406 | * is represented by a slash-separated string of dimensions in the following order: 407 | * 408 | * dateStamp / regionName / serviceName / terminationString 409 | * 410 | * @dateStamp - Current date in UTC (must be same as X-Amz-Date date). Format yyyyMMdd 411 | * @regionName - Name of the target region, UTF-8 encoded. Example "us-east-1" 412 | * @serviceName - Name of the target service, UTF-8 encoded. Example "s3" 413 | * 414 | * @return - formatted string. Example: 20150830/us-east-1/iam/aws4_request 415 | */ 416 | private string function buildCredentialScope( 417 | required string dateStamp, 418 | required string regionName, 419 | required string serviceName 420 | ){ 421 | return arguments.dateStamp & "/" & lCase( arguments.regionName ) & "/" & lCase( arguments.serviceName ) & "/" & "aws4_request"; 422 | } 423 | 424 | /** 425 | * Generates Authorization header string. 426 | * 427 | * Format: algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope 428 | * + ', ' + 'SignedHeaders=' + signed_headers + ', ' 429 | * + 'Signature=' + signature 430 | * 431 | * @dateStamp - Current date in UTC (must be same as X-Amz-Date date). Format yyyyMMdd 432 | * @regionName - Name of the target region, UTF-8 encoded. Example "us-east-1" 433 | * @serviceName - Name of the target service, UTF-8 encoded. Example "s3" 434 | * 435 | * @return - formatted string. Example: 20150830/us-east-1/iam/aws4_request 436 | */ 437 | private string function buildAuthorizationHeader( 438 | required struct requestHeaders, 439 | required string signedHeaders, 440 | required string credentialScope, 441 | required string signature, 442 | required string accessKey 443 | ){ 444 | var authHeader = variables.signatureAlgorithm & " " 445 | & "Credential=" & arguments.accessKey & "/" & arguments.credentialScope & "," 446 | & "SignedHeaders=" & arguments.signedHeaders & "," 447 | & "Signature=" & arguments.signature; 448 | 449 | 450 | return authHeader; 451 | } 452 | 453 | 454 | /** 455 | * Convenience method which generates a (binary) HMAC code for the specified message 456 | * 457 | * @message Message to sign 458 | * @key HMAC key in binary form 459 | * @algorithm Signing algorithm. 
[ Default is "HMACSHA256" ] 460 | * @encoding Character encoding of message string. [ Default is UTF-8 ] 461 | * 462 | * @return HMAC value for the specified message as binary (currently unsupported in CF11) 463 | */ 464 | private binary function hmacBinary( 465 | required string message, 466 | required binary key, 467 | string algorithm = "HMACSHA256", 468 | string encoding = "UTF-8" 469 | ){ 470 | // Generate HMAC and decode result into binary 471 | return binaryDecode( 472 | hmac( 473 | arguments.message, 474 | arguments.key, 475 | arguments.algorithm, 476 | arguments.encoding 477 | ), 478 | "hex" 479 | ); 480 | } 481 | 482 | 483 | /** 484 | * Convenience method that hashes the supplied value, with SHA256 485 | * 486 | * @text value to hash 487 | * 488 | * @return hashed value, in lower case 489 | */ 490 | private string function hash256( required any text ){ 491 | return lCase( hash( arguments.text, "SHA-256" ) ); 492 | } 493 | 494 | 495 | /** 496 | * URL encode query parameters and names 497 | * 498 | * @params Structure containing all query parameters for the request 499 | * 500 | * @return new structure with all parameter names and values encoded 501 | */ 502 | private struct function encodeQueryParams( required struct queryParams ){ 503 | // First encode parameter names and values 504 | var encodedParams = {}; 505 | structEach( arguments.queryParams, function( string key, string value ){ 506 | encodedParams[ urlEncodeForAWS( arguments.key ) ] = urlEncodeForAWS( arguments.value ); 507 | } ); 508 | return encodedParams; 509 | } 510 | 511 | /** 512 | * Scrubs header names and values: 513 | * 518 | * 519 | * @headers Header names and values to scrub 520 | * 521 | * @return structure of parsed header names and values 522 | */ 523 | private struct function cleanHeaders( required struct headers ){ 524 | var headerName = ""; 525 | var headerValue = ""; 526 | var cleaned = {}; 527 | 528 | structEach( arguments.headers, function( string key, string value ){ 529 | headerName = cleanHeader( arguments.key ); 530 | headerValue = cleanHeader( arguments.value ); 531 | cleaned[ lCase( headerName ) ] = headerValue; 532 | } ); 533 | 534 | return cleaned; 535 | } 536 | 537 | /** 538 | * Scrubs header names and values: 539 | * 544 | * 545 | * @headers Header names to scrub 546 | * 547 | * @return array of parsed header names 548 | */ 549 | private array function cleanHeaderNames( required array names ){ 550 | var headerName = ""; 551 | 552 | var cleaned = []; 553 | arrayEach( names, function( string headerName ){ 554 | arrayAppend( cleaned, cleanHeader( arguments.headerName ) ); 555 | } ); 556 | 557 | return cleaned; 558 | } 559 | 560 | 561 | /** 562 | * Removes extraneous white space from header names or values. 563 | * See http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html 564 | * 565 | * 569 | * 570 | * @text Text to scrub 571 | * 572 | * @return parsed text 573 | */ 574 | private string function cleanHeader( required string text ){ 575 | return reReplace( 576 | trim( arguments.text ), 577 | "\s+", 578 | chr( 32 ), 579 | "all" 580 | ); 581 | } 582 | 583 | 584 | /** 585 | * URL encodes the supplied string per RFC 3986, which defines the following as 586 | * unreserved characters that should NOT be encoded: 587 | * 588 | * A-Z, a-z, 0-9, hyphen ( - ), underscore ( _ ), period ( . ), and tilde ( ~ ). 
589 | * 590 | * @value string to encode 591 | * 592 | * @return URI encoded string 593 | */ 594 | private string function urlEncodeForAWS( string value ){ 595 | var encodedValue = encodeForURL( arguments.value ); 596 | // Reverse encoding of tilde "~" 597 | encodedValue = replace( encodedValue, encodeForURL( "~" ), "~", "all" ); 598 | // Fix encoding of spaces, ie replace '+' into "%20" 599 | encodedValue = replace( encodedValue, "+", "%20", "all" ); 600 | // Asterisk "*" should be encoded 601 | encodedValue = replace( encodedValue, "*", "%2A", "all" ); 602 | 603 | return encodedValue; 604 | } 605 | 606 | 607 | /** 608 | * URL encodes the supplied string per RFC 3986, which defines the following as 609 | * unreserved characters that should NOT be encoded: 610 | * 611 | * A-Z, a-z, 0-9, hyphen ( - ), underscore ( _ ), period ( . ), and tilde ( ~ ). 612 | * 613 | * @value string to encode 614 | * 615 | * @return URI encoded string 616 | */ 617 | public string function urlEncodePath( string value ){ 618 | var encodedValue = encodeForURL( arguments.value ); 619 | // Reverse encoding of tilde "~" 620 | encodedValue = replace( encodedValue, encodeForURL( "~" ), "~", "all" ); 621 | // Fix encoding of spaces, ie replace '+' into "%20" 622 | encodedValue = replace( encodedValue, "+", "%20", "all" ); 623 | // Asterisk "*" should be encoded 624 | encodedValue = replace( encodedValue, "*", "%2A", "all" ); 625 | // Forward slash "/" should NOT be encoded 626 | encodedValue = replace( encodedValue, "%2F", "/", "all" ); 627 | 628 | return encodedValue; 629 | } 630 | 631 | /** 632 | * Returns current UTC date and time in the following formats: 633 | * - dateStamp - Current UTC date, format: yyyymmdd 634 | * - timeStamp - Current UTC date and time, format: yyyymmddTHHnnssZ 635 | * 636 | * @return structure containing date and time strings 637 | */ 638 | public struct function getUTCStrings(){ 639 | var utcDateTime = dateConvert( "local2UTC", now() ); 640 | var result = {}; 641 | 642 | // Generate UTC time stamps 643 | result.dateStamp = dateFormat( utcDateTime, "yyyymmdd" ); 644 | result.amzDate = result.dateStamp & "T" & timeFormat( utcDateTime, "HHmmss" ) & "Z"; 645 | result.timeStamp = dateFormat( utcDateTime, "yyyy-mm-dd" ) & "T" & timeFormat( utcDateTime, "HH:mm:ss" ) & "Z"; 646 | return result; 647 | } 648 | 649 | } 650 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [![AWS S3 SDK CI](https://github.com/coldbox-modules/s3sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/coldbox-modules/s3sdk/actions/workflows/ci.yml) 2 | 3 | # Welcome to the Amazon S3, DigitalOcean Spaces SDK 4 | 5 | This SDK allows you to add Amazon S3, Digital Ocean Spaces capabilities to your ColdFusion (CFML) applications. It is also a ColdBox Module, so if you are using ColdBox, you get auto-registration and much more. 
6 | 7 | ## Resources 8 | 9 | * Source: https://github.com/coldbox-modules/s3sdk 10 | * Issues: https://github.com/coldbox-modules/s3sdk/issues 11 | * [Changelog](changelog.md) 12 | * S3 API Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Operations_Amazon_Simple_Storage_Service.html 13 | * DigitalOcean Spaces API Reference: https://developers.digitalocean.com/documentation/spaces/ 14 | 15 | ## Requirements 16 | 17 | * [BoxLang](https://www.boxlang.io/) 1+ 18 | * Lucee 5+ 19 | * Adobe 2018+ 20 | 21 | ## Installation 22 | 23 | This SDK can be installed as standalone or as a ColdBox Module. Either approach requires a simple CommandBox command: 24 | 25 | ```bash 26 | box install s3sdk 27 | ``` 28 | 29 | Then follow either the standalone or module instructions below. 30 | 31 | ### Standalone 32 | 33 | This SDK will be installed into a directory called `s3sdk` and then the SDK can be instantiated via `new s3sdk.models.AmazonS3()` with the following constructor arguments: 34 | 35 | ```js 36 | /** 37 | * Create a new S3SDK Instance 38 | * 39 | * @accessKey The Amazon access key. 40 | * @secretKey The Amazon secret key. 41 | * @awsDomain The domain used for the S3 service (amazonaws.com, digitalocean.com, storage.googleapis.com). Defaults to amazonaws.com 42 | * @awsRegion The Amazon region. Defaults to us-east-1 for amazonaws.com 43 | * @encryptionCharset The charset for the encryption. Defaults to UTF-8. 44 | * @signature The signature version to calculate, "V2" is deprecated but more compatible with other endpoints. "V4" requires Sv4Util.cfc & ESAPI on Lucee. Defaults to V4 45 | * @ssl True if the request should use SSL. Defaults to true. 46 | * @defaultTimeOut Default HTTP timeout for all requests. Defaults to 300. 47 | * @defaultDelimiter Delimiter to use for getBucket calls. "/" is standard to treat keys as file paths 48 | * @defaultBucketName Bucket name to use by default 49 | * @defaultCacheControl Default caching policy for objects. Defaults to: no-store, no-cache, must-revalidate 50 | * @defaultStorageClass Default storage class for objects that affects cost, access speed and durability. Defaults to STANDARD. 51 | * @defaultACL Default access control policy for objects and buckets. Defaults to public-read. 52 | * @autoContentType Tries to determine content type of file by file extension. Defaults to false. 53 | * @autoMD5 Calculates MD5 hash of content automatically. Defaults to false. 54 | * @debug Used to turn debugging on or off outside of logbox. Defaults to false. 55 | * @defaultEncryptionAlgorithm The default server side encryption algorithm to use. Usually "AES256". Not needed if using custom defaultEncryptionKey 56 | * @defaultEncryptionKey The default base64 encoded AES 256 bit key for server side encryption. 57 | * @urlStyle Specifies the format of the URL whether it is the `path` format or `virtual` format. Defaults to path. For more information see https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html 58 | * 59 | * @return An AmazonS3 instance.
60 | */ 61 | public AmazonS3 function init( 62 | required string accessKey, 63 | required string secretKey, 64 | string awsDomain = "amazonaws.com", 65 | string awsRegion = "us-east-1", 66 | string encryptionCharset = "UTF-8", 67 | string signature = "V4", 68 | boolean ssl = true, 69 | string defaultTimeOut= 300, 70 | string defaultDelimiter='/', 71 | string defaultBucketName='', 72 | string defaultCacheControl= "no-store, no-cache, must-revalidate", 73 | string defaultStorageClass= "STANDARD", 74 | string defaultACL= "public-read", 75 | boolean autoContentType= false, 76 | boolean autoMD5= false, 77 | boolean debug= false, 78 | string defaultEncryptionAlgorithm = "", 79 | string defaultEncryptionKey = "", 80 | string urlStyle = "path" 81 | ) 82 | ``` 83 | 84 | ### ColdBox Module 85 | 86 | This package is also a ColdBox module. The module can be configured by creating an `s3sdk` configuration structure in your `moduleSettings` struct in the application configuration file (`config/Coldbox.cfc`) with the following settings: 87 | 88 | ```js 89 | moduleSettings = { 90 | s3sdk = { 91 | // Your amazon, digital ocean access key 92 | accessKey = "", 93 | // Tries to determine content type of file by file extension when putting files. Defaults to false. 94 | autoContentType = false, 95 | // Calculates MD5 hash of content automatically. Defaults to false. 96 | autoMD5 = false, 97 | // Your AWS/Digital Ocean Domain Mapping: defaults to amazonaws.com 98 | awsDomain = "amazonaws.com", 99 | // Your AWS/Digital Ocean Region: Defaults to us-east-1 100 | awsregion = "us-east-1", 101 | // Used to turn debugging on or off outside of logbox. Defaults to false. 102 | debug = false, 103 | // Default access control policy for objects and buckets. Defaults to public-read. 104 | defaultACL = "public-read", 105 | // The default bucket name to root the operations on. 106 | defaultBucketName = "", 107 | // Default caching policy for objects. Defaults to: no-store, no-cache, must-revalidate 108 | defaultCacheControl = "no-store, no-cache, must-revalidate", 109 | // The default delimiter for folder operations 110 | defaultDelimiter = "/", 111 | // Default storage class for objects that affects cost, access speed and durability. Defaults to STANDARD. 112 | // AWS classes are: STANDARD,STANDARD_IA,INTELLIGENT_TIERING,ONEZONE_IA,GLACIER,DEEP_ARCHIVE 113 | // Google Cloud Storage Classes: regional,multi_regional,nearline,coldline, 114 | defaultStorageClass = "STANDARD", 115 | // Default HTTP timeout in seconds for all requests. Defaults to 300 seconds. 116 | defaultTimeOut = 300, 117 | // The default encryption character set: defaults to utf-8 118 | encryptionCharset = "utf-8", 119 | // How many times to retry the request before failing if the response is a 500 or 503 120 | retriesOnError : 3, 121 | // Your amazon, digital ocean secret key 122 | secretKey = "", 123 | // Service name that is part of the service's endpoint (alphanumeric). Example: "s3" 124 | // Only used for the v4 signatures 125 | serviceName : "s3", 126 | // The signature version to calculate, "V2" is deprecated but more compatible with other endpoints. "V4" requires Sv4Util.cfc & ESAPI on Lucee. Defaults to V4 127 | signature = "V4", 128 | // SSL mode or not on cfhttp calls and when generating put/get authenticated URLs: Defaults to true 129 | ssl = true, 130 | // Throw exceptions when s3 requests fail, else it swallows them up.
131 | throwOnRequestError : true, 132 | // What format of endpoint to use whether path or virtual 133 | urlStyle = "path" 134 | } 135 | }; 136 | ``` 137 | 138 | Then you can leverage the SDK CFC via the injection DSL: `AmazonS3@s3sdk` 139 | 140 | ## Usage 141 | 142 | Please check out the api docs: https://apidocs.ortussolutions.com/#/coldbox-modules/s3sdk/, choose your version and code away! 143 | 144 | ## Development 145 | 146 | See [Contributing](https://github.com/coldbox-modules/s3sdk/blob/development/CONTRIBUTING.md) 147 | -------------------------------------------------------------------------------- /server-adobe@2018.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"s3sdk-adobe@2018", 3 | "app":{ 4 | "serverHomeDirectory":".engine/adobe2018", 5 | "cfengine":"adobe@2018" 6 | }, 7 | "web":{ 8 | "http":{ 9 | "port":"60299" 10 | }, 11 | "rewrites":{ 12 | "enable":"true" 13 | }, 14 | "webroot":"test-harness", 15 | "aliases":{ 16 | "/moduleroot/s3sdk":"./" 17 | } 18 | }, 19 | "jvm":{ 20 | "heapSize":"1024" 21 | }, 22 | "openBrowser":"false", 23 | "cfconfig":{ 24 | "file":".cfconfig.json" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /server-adobe@2021.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"s3sdk-adobe@2021", 3 | "app":{ 4 | "serverHomeDirectory":".engine/adobe2021", 5 | "cfengine":"adobe@2021" 6 | }, 7 | "web":{ 8 | "http":{ 9 | "port":"60299" 10 | }, 11 | "rewrites":{ 12 | "enable":"true" 13 | }, 14 | "webroot":"test-harness", 15 | "aliases":{ 16 | "/moduleroot/s3sdk":"./" 17 | } 18 | }, 19 | "jvm":{ 20 | "heapSize":"1024" 21 | }, 22 | "openBrowser":"false", 23 | "cfconfig":{ 24 | "file":".cfconfig.json" 25 | }, 26 | "scripts":{ 27 | "onServerInstall":"cfpm install zip,debugger,mysql" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /server-adobe@2023.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"s3sdk-adobe@2023", 3 | "app":{ 4 | "serverHomeDirectory":".engine/adobe2023", 5 | "cfengine":"adobe@2023" 6 | }, 7 | "web":{ 8 | "http":{ 9 | "port":"60299" 10 | }, 11 | "rewrites":{ 12 | "enable":"true" 13 | }, 14 | "webroot":"test-harness", 15 | "aliases":{ 16 | "/moduleroot/s3sdk":"./" 17 | } 18 | }, 19 | "jvm":{ 20 | "heapSize":"1024" 21 | }, 22 | "openBrowser":"false", 23 | "cfconfig":{ 24 | "file":".cfconfig.json" 25 | }, 26 | "scripts":{ 27 | "onServerInstall":"cfpm install zip,debugger,mysql" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /server-boxlang-cfml@1.json: -------------------------------------------------------------------------------- 1 | { 2 | "app":{ 3 | "cfengine":"boxlang@be", 4 | "serverHomeDirectory":".engine/boxlang" 5 | }, 6 | "name":"s3sdk-boxlang-cfml@1", 7 | "force":true, 8 | "openBrowser":false, 9 | "web":{ 10 | "directoryBrowsing":true, 11 | "http":{ 12 | "port":"60299" 13 | }, 14 | "rewrites":{ 15 | "enable":"true" 16 | }, 17 | "webroot":"test-harness", 18 | "aliases":{ 19 | "/moduleroot/cbfs":"./", 20 | "/root":"./test-harness" 21 | } 22 | }, 23 | "JVM":{ 24 | "heapSize":"1024", 25 | "javaVersion":"openjdk21_jre", 26 | "args":"-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8888" 27 | }, 28 | "cfconfig":{ 29 | "file":".cfconfig.json" 30 | }, 31 | "env":{ 32 | "BOXLANG_DEBUG":true 33 | }, 34 | "scripts":{ 35 | 
"onServerInitialInstall":"install bx-compat-cfml@be,bx-esapi --noSave" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /server-lucee@5.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"s3sdk-lucee@5", 3 | "app":{ 4 | "serverHomeDirectory":".engine/lucee5", 5 | "cfengine":"lucee@5" 6 | }, 7 | "web":{ 8 | "http":{ 9 | "port":"60299" 10 | }, 11 | "rewrites":{ 12 | "enable":"true" 13 | }, 14 | "webroot":"test-harness", 15 | "aliases":{ 16 | "/moduleroot/s3sdk":"./" 17 | } 18 | }, 19 | "jvm":{ 20 | "heapSize":"1024" 21 | }, 22 | "openBrowser":"false", 23 | "cfconfig":{ 24 | "file":".cfconfig.json" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /test-harness/.cflintrc: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /test-harness/Application.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | ******************************************************************************** 3 | Copyright 2005-2007 ColdBox Framework by Luis Majano and Ortus Solutions, Corp 4 | www.ortussolutions.com 5 | ******************************************************************************** 6 | */ 7 | component{ 8 | 9 | // UPDATE THE NAME OF THE MODULE IN TESTING BELOW 10 | request.MODULE_NAME = "s3sdk"; 11 | request.MODULE_PATH = "s3sdk"; 12 | 13 | // Application properties 14 | this.name = hash( getCurrentTemplatePath() ); 15 | this.sessionManagement = true; 16 | this.sessionTimeout = createTimeSpan(0,0,15,0); 17 | this.setClientCookies = true; 18 | 19 | /************************************** 20 | LUCEE Specific Settings 21 | **************************************/ 22 | // buffer the output of a tag/function body to output in case of a exception 23 | this.bufferOutput = true; 24 | // Activate Gzip Compression 25 | this.compression = false; 26 | // Turn on/off white space managemetn 27 | this.whiteSpaceManagement = "smart"; 28 | // Turn on/off remote cfc content whitespace 29 | this.suppressRemoteComponentContent = false; 30 | 31 | // COLDBOX STATIC PROPERTY, DO NOT CHANGE UNLESS THIS IS NOT THE ROOT OF YOUR COLDBOX APP 32 | COLDBOX_APP_ROOT_PATH = getDirectoryFromPath( getCurrentTemplatePath() ); 33 | // The web server mapping to this application. 
Used for remote purposes or static purposes 34 | COLDBOX_APP_MAPPING = ""; 35 | // COLDBOX PROPERTIES 36 | COLDBOX_CONFIG_FILE = ""; 37 | // COLDBOX APPLICATION KEY OVERRIDE 38 | COLDBOX_APP_KEY = ""; 39 | 40 | // Mappings 41 | this.mappings[ "/root" ] = COLDBOX_APP_ROOT_PATH; 42 | 43 | // Map back to its root 44 | moduleRootPath = REReplaceNoCase( this.mappings[ "/root" ], "#request.MODULE_NAME#(\\|/)test-harness(\\|/)", "" ); 45 | modulePath = REReplaceNoCase( this.mappings[ "/root" ], "test-harness(\\|/)", "" ); 46 | 47 | // Module Root + Path Mappings 48 | this.mappings[ "/moduleroot" ] = moduleRootPath; 49 | this.mappings[ "/#request.MODULE_NAME#" ] = modulePath; 50 | 51 | // application start 52 | public boolean function onApplicationStart(){ 53 | application.cbBootstrap = new coldbox.system.Bootstrap( COLDBOX_CONFIG_FILE, COLDBOX_APP_ROOT_PATH, COLDBOX_APP_KEY, COLDBOX_APP_MAPPING ); 54 | application.cbBootstrap.loadColdbox(); 55 | return true; 56 | } 57 | 58 | // request start 59 | public boolean function onRequestStart(String targetPage){ 60 | 61 | // Process ColdBox Request 62 | application.cbBootstrap.onRequestStart( arguments.targetPage ); 63 | 64 | return true; 65 | } 66 | 67 | public void function onSessionStart(){ 68 | application.cbBootStrap.onSessionStart(); 69 | } 70 | 71 | public void function onSessionEnd( struct sessionScope, struct appScope ){ 72 | arguments.appScope.cbBootStrap.onSessionEnd( argumentCollection=arguments ); 73 | } 74 | 75 | public boolean function onMissingTemplate( template ){ 76 | return application.cbBootstrap.onMissingTemplate( argumentCollection=arguments ); 77 | } 78 | 79 | } 80 | -------------------------------------------------------------------------------- /test-harness/box.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"Tester", 3 | "version":"0.0.0", 4 | "slug":"tester", 5 | "private":true, 6 | "description":"", 7 | "dependencies":{ 8 | "coldbox":"^7" 9 | }, 10 | "devDependencies":{ 11 | "testbox":"be" 12 | }, 13 | "installPaths":{ 14 | "coldbox":"coldbox/", 15 | "testbox":"testbox/" 16 | }, 17 | "scripts":{ 18 | }, 19 | "testbox":{ 20 | "runner":"http://localhost:60299/tests/runner.cfm" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test-harness/config/Application.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * This is a protection Application cfm for the config file. 
You do not 3 | * need to modify this file 4 | */ 5 | component{ 6 | abort; 7 | } -------------------------------------------------------------------------------- /test-harness/config/Coldbox.cfc: -------------------------------------------------------------------------------- 1 | component { 2 | 3 | // Configure ColdBox Application 4 | function configure(){ 5 | // coldbox directives 6 | coldbox = { 7 | // Application Setup 8 | appName : "Module Tester", 9 | // Development Settings 10 | reinitPassword : "", 11 | handlersIndexAutoReload : true, 12 | modulesExternalLocation : [], 13 | // Implicit Events 14 | defaultEvent : "", 15 | requestStartHandler : "", 16 | requestEndHandler : "", 17 | applicationStartHandler : "", 18 | applicationEndHandler : "", 19 | sessionStartHandler : "", 20 | sessionEndHandler : "", 21 | missingTemplateHandler : "", 22 | // Error/Exception Handling 23 | exceptionHandler : "", 24 | onInvalidEvent : "", 25 | customErrorTemplate : "/coldbox/system/exceptions/Whoops.cfm", 26 | // Application Aspects 27 | handlerCaching : false, 28 | eventCaching : false 29 | }; 30 | 31 | settings = { 32 | "targetEngine" : getSystemSetting( "ENGINE", "localhost" ), 33 | "coldBoxVersion" : getSystemSetting( "COLDBOX_VERSION", "" ) 34 | }; 35 | 36 | // environment settings, create a detectEnvironment() method to detect it yourself. 37 | // create a function with the name of the environment so it can be executed if that environment is detected 38 | // the value of the environment is a list of regex patterns to match the cgi.http_host. 39 | environments = { development : "localhost,127\.0\.0\.1" }; 40 | 41 | // Module Directives 42 | modules = { 43 | // An array of modules names to load, empty means all of them 44 | include : [], 45 | // An array of modules names to NOT load, empty means none 46 | exclude : [] 47 | }; 48 | 49 | // Register interceptors as an array, we need order 50 | interceptors = []; 51 | 52 | // LogBox DSL 53 | logBox = { 54 | // Define Appenders 55 | appenders : { 56 | console : { class : "coldbox.system.logging.appenders.ConsoleAppender" }, 57 | files : { 58 | class : "coldbox.system.logging.appenders.RollingFileAppender", 59 | properties : { filename : "tester", filePath : "/#appMapping#/logs" } 60 | } 61 | }, 62 | // Root Logger 63 | root : { levelmax : "DEBUG", appenders : "*" }, 64 | // Implicit Level Categories 65 | info : [ "coldbox.system" ], 66 | debug : [ "s3sdk" ] 67 | }; 68 | 69 | moduleSettings = { 70 | s3sdk : { 71 | // Settings 72 | accessKey : getSystemSetting( "AWS_ACCESS_KEY" ), 73 | secretKey : getSystemSetting( "AWS_ACCESS_SECRET" ), 74 | defaultBucketName : getSystemSetting( 75 | "AWS_DEFAULT_BUCKET_NAME", 76 | "ortus3-s3sdk-bdd-#replace( settings.targetEngine, "@", "-" )#-#reReplace( 77 | settings.coldBoxVersion, 78 | "[^a-zA-Z0-9]", 79 | "", 80 | "all" 81 | )#" 82 | ), 83 | awsRegion : getSystemSetting( "AWS_REGION" ), 84 | awsDomain : getSystemSetting( "AWS_DOMAIN" ), 85 | ssl : getSystemSetting( "AWS_SSL", true ), 86 | urlStyle : getsystemSetting( "AWS_URLSTYLE", "path" ) 87 | } 88 | }; 89 | } 90 | 91 | function afterAspectsLoad( event, interceptData ){ 92 | controller 93 | .getModuleService() 94 | .registerModule( moduleName = request.MODULE_NAME, invocationPath = "moduleroot" ); 95 | } 96 | 97 | } 98 | -------------------------------------------------------------------------------- /test-harness/config/Router.cfc: -------------------------------------------------------------------------------- 1 | component { 2 | 3 | function configure(){ 4 | 
// Resources 5 | resources( "roles" ); 6 | 7 | // Your Application Routes 8 | addRoute( pattern = ":handler/:action?" ); 9 | } 10 | 11 | } 12 | -------------------------------------------------------------------------------- /test-harness/config/WireBox.cfc: -------------------------------------------------------------------------------- 1 | component extends="coldbox.system.ioc.config.Binder"{ 2 | 3 | /** 4 | * Configure WireBox, that's it! 5 | */ 6 | function configure(){ 7 | 8 | // The WireBox configuration structure DSL 9 | wireBox = { 10 | // Scope registration, automatically register a wirebox injector instance on any CF scope 11 | // By default it registeres itself on application scope 12 | scopeRegistration = { 13 | enabled = true, 14 | scope = "application", // server, cluster, session, application 15 | key = "wireBox" 16 | }, 17 | 18 | // DSL Namespace registrations 19 | customDSL = { 20 | // namespace = "mapping name" 21 | }, 22 | 23 | // Custom Storage Scopes 24 | customScopes = { 25 | // annotationName = "mapping name" 26 | }, 27 | 28 | // Package scan locations 29 | scanLocations = [], 30 | 31 | // Stop Recursions 32 | stopRecursions = [], 33 | 34 | // Parent Injector to assign to the configured injector, this must be an object reference 35 | parentInjector = "", 36 | 37 | // Register all event listeners here, they are created in the specified order 38 | listeners = [ 39 | // { class="", name="", properties={} } 40 | ] 41 | }; 42 | 43 | // Map Bindings below 44 | } 45 | 46 | } -------------------------------------------------------------------------------- /test-harness/handlers/Main.cfc: -------------------------------------------------------------------------------- 1 | component{ 2 | 3 | function index( event, rc, prc ){ 4 | return "s3sdk"; 5 | } 6 | 7 | } -------------------------------------------------------------------------------- /test-harness/index.cfm: -------------------------------------------------------------------------------- 1 |  2 | 9 | 10 | -------------------------------------------------------------------------------- /test-harness/layouts/Main.cfm: -------------------------------------------------------------------------------- 1 |  2 |

Module Tester 3 | 4 | #renderView()# 5 | 6 |
-------------------------------------------------------------------------------- /test-harness/tests/Application.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | ******************************************************************************** 3 | Copyright 2005-2007 ColdBox Framework by Luis Majano and Ortus Solutions, Corp 4 | www.ortussolutions.com 5 | ******************************************************************************** 6 | */ 7 | component { 8 | 9 | // UPDATE THE NAME OF THE MODULE IN TESTING BELOW 10 | request.MODULE_NAME = "s3sdk"; 11 | request.MODULE_PATH = "s3sdk"; 12 | 13 | // APPLICATION CFC PROPERTIES 14 | this.name = "s3sdk Testing Suite"; 15 | this.sessionManagement = true; 16 | this.sessionTimeout = createTimespan( 0, 0, 15, 0 ); 17 | this.applicationTimeout = createTimespan( 0, 0, 15, 0 ); 18 | this.setClientCookies = true; 19 | 20 | // Create testing mapping 21 | this.mappings[ "/tests" ] = getDirectoryFromPath( getCurrentTemplatePath() ); 22 | 23 | // The application root 24 | rootPath = reReplaceNoCase( this.mappings[ "/tests" ], "tests(\\|/)", "" ); 25 | this.mappings[ "/root" ] = rootPath; 26 | 27 | // The module root path 28 | moduleRootPath = reReplaceNoCase( 29 | rootPath, 30 | "#request.MODULE_PATH#(\\|/)test-harness(\\|/)", 31 | "" 32 | ); 33 | this.mappings[ "/moduleroot" ] = moduleRootPath; 34 | this.mappings[ "/#request.MODULE_NAME#" ] = moduleRootPath & "#request.MODULE_NAME#"; 35 | 36 | function onRequestStart( required targetPage ){ 37 | // Set a high timeout for long running tests 38 | setting requestTimeout ="9999"; 39 | // New ColdBox Virtual Application Starter 40 | request.coldBoxVirtualApp= new coldbox.system.testing.VirtualApp( appMapping = "/root" ); 41 | 42 | // ORM Reload for fresh results 43 | if ( structKeyExists( url, "fwreinit" ) ) { 44 | if ( structKeyExists( server, "lucee" ) ) { 45 | pagePoolClear(); 46 | } 47 | request.coldBoxVirtualApp.shutdown(); 48 | } 49 | 50 | // If hitting the runner or specs, prep our virtual app 51 | if ( getBaseTemplatePath().replace( expandPath( "/tests" ), "" ).reFindNoCase( "(runner|specs)" ) ) { 52 | request.coldBoxVirtualApp.startup(); 53 | } 54 | 55 | return true; 56 | } 57 | 58 | public function onRequestEnd(){ 59 | request.coldBoxVirtualApp.shutdown(); 60 | } 61 | 62 | } 63 | -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-presigned-url/get-presigned-url.authz: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host,Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-presigned-url/get-presigned-url.creq: -------------------------------------------------------------------------------- 1 | GET 2 | /test.txt 3 | X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host 4 | host:examplebucket.s3.amazonaws.com 5 | 6 | host 7 | UNSIGNED-PAYLOAD -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-presigned-url/get-presigned-url.req: -------------------------------------------------------------------------------- 1 | GET 
/test.txt?X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400 HTTP/1.1 2 | Host:examplebucket.s3.amazonaws.com 3 | -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-presigned-url/get-presigned-url.sts: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 2 | 20130524T000000Z 3 | 20130524/us-east-1/s3/aws4_request 4 | 3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-vanilla-query-unreserved-s3/get-vanilla-query-unreserved-s3.authz: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=e86fe49a4c0dda9163bed3b1b40d530d872eb612e2c366de300bfefdf356fd6a -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-vanilla-query-unreserved-s3/get-vanilla-query-unreserved-s3.creq: -------------------------------------------------------------------------------- 1 | GET 2 | / 3 | -._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz 4 | host:example.amazonaws.com 5 | x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 6 | x-amz-date:20150830T123600Z 7 | 8 | host;x-amz-content-sha256;x-amz-date 9 | e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-vanilla-query-unreserved-s3/get-vanilla-query-unreserved-s3.req: -------------------------------------------------------------------------------- 1 | GET /?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 2 | Host:example.amazonaws.com 3 | X-Amz-Date:20150830T123600Z -------------------------------------------------------------------------------- /test-harness/tests/fixtures/get-vanilla-query-unreserved-s3/get-vanilla-query-unreserved-s3.sts: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 2 | 20150830T123600Z 3 | 20150830/us-east-1/service/aws4_request 4 | facd9f59986c0db2aa22858efa7a21f6696099c027d015fa695286b9b5e39100 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-header-key-sort-s3/post-header-key-sort-s3.authz: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request,SignedHeaders=host;my-header1;x-amz-content-sha256;x-amz-date,Signature=ce153a4d3cd49f14e231c0fd5c87b341f370d3843ffc58e3a92d8b7a55a668d1 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-header-key-sort-s3/post-header-key-sort-s3.creq: -------------------------------------------------------------------------------- 1 | POST 2 | / 3 | 4 | host:example.amazonaws.com 5 | my-header1:value1 6 | x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 7 | x-amz-date:20150830T123600Z 8 | 9 | host;my-header1;x-amz-content-sha256;x-amz-date 10 | 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-header-key-sort-s3/post-header-key-sort-s3.req: -------------------------------------------------------------------------------- 1 | POST / HTTP/1.1 2 | Host:example.amazonaws.com 3 | My-Header1:value1 4 | X-Amz-Date:20150830T123600Z -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-header-key-sort-s3/post-header-key-sort-s3.sts: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 2 | 20150830T123600Z 3 | 20150830/us-east-1/service/aws4_request 4 | f0813a7aa38884105b1a28017093220fdc3af22c260bd719d0ab0f5d379e6f6b -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-vanilla-query-s3/post-vanilla-query-s3.authz: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=d43fd95e1dfefe02247ce8858649e1a063f9dd10f25f170f7ebda6ee3e9b6fbc -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-vanilla-query-s3/post-vanilla-query-s3.creq: -------------------------------------------------------------------------------- 1 | POST 2 | / 3 | Param1=value1 4 | host:example.amazonaws.com 5 | x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 6 | x-amz-date:20150830T123600Z 7 | 8 | host;x-amz-content-sha256;x-amz-date 9 | e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-vanilla-query-s3/post-vanilla-query-s3.req: -------------------------------------------------------------------------------- 1 | POST /?Param1=value1 HTTP/1.1 2 | Host:example.amazonaws.com 3 | X-Amz-Date:20150830T123600Z -------------------------------------------------------------------------------- /test-harness/tests/fixtures/post-vanilla-query-s3/post-vanilla-query-s3.sts: -------------------------------------------------------------------------------- 1 | AWS4-HMAC-SHA256 2 | 20150830T123600Z 3 | 20150830/us-east-1/service/aws4_request 4 | 737aad6241fece15acdc0a5c4d8f3727b19ff855b8507e9f59bb09dcc692938d -------------------------------------------------------------------------------- /test-harness/tests/index.cfm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | #testbox.init( directory=rootMapping & url.path ).run()# 27 | 28 |

Invalid incoming directory: #rootMapping & url.path#

51 | TestBox Browser
70 | v#testbox.getVersion()#
78 | TestBox Test Browser:
81 | Below is a listing of the files and folders starting from your root #rootPath#. You can click on individual tests to execute them,
82 | or click the Run All button on your left to execute a directory runner from the visible folder.
86 | Contents: #executePath#
97 | ✚ #qResults.name#
116 | -------------------------------------------------------------------------------- /test-harness/tests/runner.cfm: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /test-harness/tests/specs/AmazonS3Spec.cfc: -------------------------------------------------------------------------------- 1 | component extends="coldbox.system.testing.BaseTestCase" { 2 | 3 | this.loadColdbox = true; 4 | this.unloadColdbox = false; 5 | 6 | function beforeAll(){ 7 | super.beforeAll(); 8 | 9 | prepTmpFolder(); 10 | 11 | var moduleSettings = getWirebox().getInstance( "box:moduleSettings:s3sdk" ); 12 | 13 | variables.testBucket = moduleSettings.defaultBucketName; 14 | 15 | variables.s3 = new s3sdk.models.AmazonS3( 16 | accessKey = moduleSettings.accessKey, 17 | secretKey = moduleSettings.secretKey, 18 | awsRegion = moduleSettings.awsRegion, 19 | awsDomain = moduleSettings.awsDomain, 20 | ssl = moduleSettings.ssl, 21 | defaultBucketName = moduleSettings.defaultBucketName, 22 | defaultObjectOwnership = moduleSettings.defaultObjectOwnership, 23 | urlStyle = "path" 24 | ); 25 | 26 | getWirebox().autowire( s3 ); 27 | prepareMock( s3 ); 28 | s3.$property( propertyName = "log", mock = createLogStub() ); 29 | 30 | s3.putBucket( testBucket ); 31 | } 32 | 33 | private function prepTmpFolder(){ 34 | var targetPath = expandPath( "/tests/tmp" ); 35 | 36 | if ( !directoryExists( targetPath ) ) { 37 | directoryCreate( targetPath ); 38 | } 39 | 40 | if ( fileExists( targetPath & "/example.txt" ) ) { 41 | fileDelete( targetPath & "/example.txt" ); 42 | } 43 | } 44 | 45 | private function isOldACF(){ 46 | var isLucee = structKeyExists( server, "lucee" ); 47 | return !isLucee and listFind( "11,2016", listFirst( server.coldfusion.productVersion ) ); 48 | } 49 | 50 | function run(){ 51 | describe( "Amazon S3 SDK", function(){ 52 | describe( "objects", function(){ 53 | afterEach( function( currentSpec ){ 54 | // Add any test fixtures here that you create below 55 | s3.deleteObject( testBucket, "example.txt" ); 56 | s3.deleteObject( testBucket, "example-2.txt" ); 57 | s3.deleteObject( testBucket, "testFolder/example.txt" ); 58 | s3.deleteObject( testBucket, "testFolder/" ); 59 | s3.deleteObject( testBucket, "emptyFolder/" ); 60 | s3.deleteObject( testBucket, "big_file.txt" ); 61 | s3.deleteObject( testBucket, "exam%20p le (fo%2Fo)+,!@##$%^&*()_+~ ;:.txt" ); 62 | 63 | // Avoid these on cf11,2016 because their http sucks! 
64 | if ( !isOldACF() ) { 65 | s3.deleteObject( testBucket, "Word Doc Tests.txt" ); 66 | } 67 | var contents = s3.getBucket( testBucket ); 68 | s3.setDefaultBucketName( "" ); 69 | } ); 70 | 71 | it( "can store a new object", function(){ 72 | s3.putObject( 73 | bucketName = testBucket, 74 | uri = "example.txt", 75 | data = "Hello, world!", 76 | contentType = "auto" 77 | ); 78 | var md = s3.getObjectInfo( testBucket, "example.txt" ); 79 | debug( md ); 80 | expect( md ).notToBeEmpty(); 81 | expect( md[ "Content-Type" ] ).toBe( "text/plain" ); 82 | } ); 83 | 84 | it( "can store a new object from file", function(){ 85 | var filePath = expandPath( "/tests/tmp/example.txt" ); 86 | fileWrite( filePath, "file contents" ); 87 | sleepIfNIO(); 88 | s3.putObjectFile( 89 | bucketName = testBucket, 90 | uri = "example.txt", 91 | filepath = filePath, 92 | contentType = "auto" 93 | ); 94 | var md = s3.getObjectInfo( testBucket, "example.txt" ); 95 | 96 | expect( md ).notToBeEmpty(); 97 | expect( md[ "Content-Type" ] ).toBe( "text/plain" ); 98 | } ); 99 | 100 | it( "can perform a multi-part upload on a file over 5MB", function(){ 101 | var testFile = expandPath( "/tests/tmp/big_file.txt" ); 102 | var fileSize = round( s3.getMultiPartByteThreshold() * 1.2 ) 103 | fileWrite( 104 | testFile, 105 | repeatString( randRange( 0, 9 ), fileSize ), 106 | "utf-8" 107 | ); 108 | sleepIfNIO(); 109 | var uploadFileName = "big_file.txt"; 110 | var resp = s3.putObjectFile( 111 | bucketName = testBucket, 112 | uri = uploadFileName, 113 | filepath = testFile, 114 | contentType = "auto" 115 | ); 116 | expect( resp.contains( "multipart" ) ).toBeTrue(); 117 | var md = s3.getObjectInfo( testBucket, uploadFileName ); 118 | 119 | expect( md ).notToBeEmpty(); 120 | expect( md[ "Content-Length" ] ).toBe( fileSize ); 121 | expect( md[ "Content-Type" ] ).toBe( "text/plain" ); 122 | 123 | // Download the uploaded file 124 | s3.downloadObject( 125 | testBucket, 126 | "big_file.txt", 127 | expandPath( "/tests/tmp/big_file2.txt" ) 128 | ); 129 | sleepIfNIO(); 130 | // And confirm a hash of both file contents still matches 131 | expect( hash( fileRead( expandPath( "/tests/tmp/big_file2.txt" ) ) ) ).toBe( 132 | hash( fileRead( expandPath( "/tests/tmp/big_file.txt" ) ) ) 133 | ) 134 | } ); 135 | 136 | it( 137 | title = "can store a new object with spaces in the name", 138 | skip = isOldACF(), 139 | body = function(){ 140 | s3.putObject( 141 | testBucket, 142 | "Word Doc Tests.txt", 143 | "Hello, space world!" 144 | ); 145 | var md = s3.getObjectInfo( testBucket, "Word Doc Tests.txt" ); 146 | 147 | expect( md ).notToBeEmpty(); 148 | } 149 | ); 150 | 151 | it( "can store a new object with special chars in name", function(){ 152 | s3.putObject( 153 | testBucket, 154 | "exam%20p le (fo%2Fo)+,!@##$%^&*()_+~ ;:.txt", 155 | "Hello, world!" 156 | ); 157 | var md = s3.getObjectInfo( testBucket, "example.txt" ); 158 | debug( md ); 159 | expect( md ).notToBeEmpty(); 160 | } ); 161 | 162 | it( "can list all objects", function(){ 163 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 164 | s3.putObject( 165 | testBucket, 166 | "testFolder/example.txt", 167 | "Hello, world!" 
168 | ); 169 | var bucketContents = s3.getBucket( bucketName = testBucket, delimiter = "/" ); 170 | expect( bucketContents ).toBeArray(); 171 | expect( bucketContents ).toHaveLength( 2 ); 172 | for ( var item in bucketContents ) { 173 | if ( item.key == "testFolder" ) { 174 | expect( item.isDirectory ).toBeTrue(); 175 | } else { 176 | expect( item.isDirectory ).toBeFalse(); 177 | } 178 | } 179 | } ); 180 | 181 | it( "can list with prefix", function(){ 182 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 183 | s3.putObject( 184 | testBucket, 185 | "testFolder/example.txt", 186 | "Hello, world!" 187 | ); 188 | 189 | var bucketContents = s3.getBucket( testBucket, "example.txt" ); 190 | expect( bucketContents ).toBeArray(); 191 | expect( bucketContents ).toHaveLength( 1 ); 192 | 193 | var bucketContents = s3.getBucket( 194 | bucketName = testBucket, 195 | prefix = "testFolder/", 196 | delimiter = "/" 197 | ); 198 | 199 | expect( bucketContents ).toBeArray(); 200 | expect( bucketContents ).toHaveLength( 1 ); 201 | expect( bucketContents[ 1 ].isDirectory ).toBeFalse(); 202 | 203 | s3.putObject( testBucket, "emptyFolder/", "" ); 204 | var bucketContents = s3.getBucket( 205 | bucketName = testBucket, 206 | prefix = "emptyFolder/", 207 | delimiter = "/" 208 | ); 209 | 210 | expect( bucketContents ).toBeArray(); 211 | expect( bucketContents ).toHaveLength( 1 ); 212 | expect( bucketContents[ 1 ].isDirectory ).toBeTrue(); 213 | } ); 214 | 215 | it( "can list with and without delimter", function(){ 216 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 217 | s3.putObject( 218 | testBucket, 219 | "testFolder/example.txt", 220 | "Hello, world!" 221 | ); 222 | 223 | // With no delimiter, there is no concept of folders, so all keys just show up and everything is a "file" 224 | var bucketContents = s3.getBucket( bucketName = testBucket, delimiter = "" ); 225 | expect( bucketContents ).toBeArray(); 226 | expect( bucketContents ).toHaveLength( 2 ); 227 | 228 | bucketContents.each( function( item ){ 229 | expect( item.isDirectory ).toBeFalse(); 230 | } ); 231 | 232 | // With a delimiter of "/", we only get the top level items and "testFolder" shows as a directory 233 | var bucketContents = s3.getBucket( bucketName = testBucket, delimiter = "/" ); 234 | expect( bucketContents ).toBeArray(); 235 | expect( bucketContents ).toHaveLength( 2 ); 236 | 237 | bucketContents.each( function( item ){ 238 | if ( item.key == "testFolder" ) { 239 | expect( item.isDirectory ).toBeTrue(); 240 | } else { 241 | expect( item.isDirectory ).toBeFalse(); 242 | } 243 | } ); 244 | } ); 245 | 246 | it( "can check if an object exists", function(){ 247 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 248 | s3.putObject( testBucket, "emptyFolder/", "" ); 249 | s3.putObject( 250 | testBucket, 251 | "testFolder/example.txt", 252 | "Hello, world!" 
253 | ); 254 | 255 | var existsCheck = s3.objectExists( testBucket, "example.txt" ); 256 | expect( existsCheck ).toBeTrue(); 257 | 258 | var existsCheck = s3.objectExists( testBucket, "notHere.txt" ); 259 | expect( existsCheck ).toBeFalse(); 260 | 261 | var existsCheck = s3.objectExists( testBucket, "emptyFolder/" ); 262 | expect( existsCheck ).toBeTrue(); 263 | 264 | var existsCheck = s3.objectExists( testBucket, "testFolder/example.txt" ); 265 | expect( existsCheck ).toBeTrue(); 266 | 267 | if ( !isOldACF() ) { 268 | var existsCheck = s3.objectExists( testBucket, "Word Doc Tests.docx" ); 269 | expect( existsCheck ).toBeFalse(); 270 | } 271 | } ); 272 | 273 | it( "can delete an object from a bucket", function(){ 274 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 275 | s3.deleteObject( testBucket, "example.txt" ); 276 | var bucketContents = s3.getBucket( testBucket ); 277 | expect( bucketContents ).toBeArray(); 278 | expect( bucketContents ).toHaveLength( 0 ); 279 | } ); 280 | 281 | it( "can copy an object", function(){ 282 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 283 | var bucketContents = s3.getBucket( testBucket ); 284 | expect( bucketContents[ 1 ].key ).toBe( "example.txt" ); 285 | 286 | s3.copyObject( 287 | testBucket, 288 | "example.txt", 289 | testBucket, 290 | "example-2.txt" 291 | ); 292 | 293 | var bucketContents = s3.getBucket( testBucket ); 294 | expect( bucketContents ).toHaveLength( 2 ); 295 | } ); 296 | 297 | it( "can rename an object", function(){ 298 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 299 | s3.renameObject( 300 | testBucket, 301 | "example.txt", 302 | testBucket, 303 | "example-2.txt" 304 | ); 305 | 306 | var bucketContents = s3.getBucket( testBucket ); 307 | expect( bucketContents ).toHaveLength( 1 ); 308 | expect( bucketContents[ 1 ].key ).toBe( "example-2.txt" ); 309 | } ); 310 | 311 | it( "can get a file", function(){ 312 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 313 | var get = s3.getObject( testBucket, "example.txt" ); 314 | expect( get.error ).toBeFalse(); 315 | expect( get.response ).toBe( "Hello, world!" 
); 316 | } ); 317 | 318 | it( "can get object ACL", function(){ 319 | s3.putObject( 320 | bucketName = testBucket, 321 | uri = "example.txt", 322 | data = "Hello, world!", 323 | acl = s3.ACL_PUBLIC_READ 324 | ); 325 | var ACL = s3.getObjectACL( testBucket, "example.txt" ); 326 | expect( ACL ).toBeStruct(); 327 | expect( ACL ).toHaveKey( "owner" ); 328 | expect( ACL.owner ).toBeStruct(); 329 | expect( ACL.owner ).toHaveKey( "ID" ); 330 | expect( ACL.owner ).toHaveKey( "DisplayName" ); 331 | expect( ACL ).toHaveKey( "grants" ); 332 | expect( ACL.grants ).toBeStruct(); 333 | expect( ACL.grants ).toHaveKey( "FULL_CONTROL" ); 334 | expect( ACL.grants ).toHaveKey( "WRITE" ); 335 | expect( ACL.grants ).toHaveKey( "WRITE_ACP" ); 336 | expect( ACL.grants ).toHaveKey( "READ" ); 337 | expect( ACL.grants ).toHaveKey( "READ_ACP" ); 338 | expect( ACL.grants.FULL_CONTROL ).toBeArray(); 339 | expect( ACL.grants.WRITE ).toBeArray(); 340 | expect( ACL.grants.WRITE_ACP ).toBeArray(); 341 | expect( ACL.grants.READ ).toBeArray(); 342 | expect( ACL.grants.READ_ACP ).toBeArray(); 343 | } ); 344 | 345 | it( "can translate canned ACL headers", function(){ 346 | makePublic( s3, "applyACLHeaders" ); 347 | 348 | var ACL = s3.applyACLHeaders( acl = "canned-acl" ); 349 | expect( ACL ).toBeStruct(); 350 | expect( ACL ).toHaveKey( "x-amz-acl" ); 351 | expect( ACL[ "x-amz-acl" ] ).toBe( "canned-acl" ); 352 | } ); 353 | 354 | it( "can translate complex ACL headers", function(){ 355 | makePublic( s3, "applyACLHeaders" ); 356 | 357 | var ACL = s3.applyACLHeaders( 358 | acl = { 359 | "FULL_CONTROL" : [ 360 | { id : "12345" }, 361 | { uri : "http://acs.amazonaws.com/groups/global/AllUsers" }, 362 | { emailAddress : "xyz@amazon.com" } 363 | ], 364 | "WRITE" : [ 365 | { id : "12345" }, 366 | { uri : "http://acs.amazonaws.com/groups/global/AllUsers" }, 367 | { emailAddress : "xyz@amazon.com" } 368 | ], 369 | "WRITE_ACP" : [ 370 | { id : "12345" }, 371 | { uri : "http://acs.amazonaws.com/groups/global/AllUsers" }, 372 | { emailAddress : "xyz@amazon.com" } 373 | ], 374 | "READ" : [ 375 | { id : "12345" }, 376 | { uri : "http://acs.amazonaws.com/groups/global/AllUsers" }, 377 | { emailAddress : "xyz@amazon.com" } 378 | ], 379 | "READ_ACP" : [ 380 | { id : "12345" }, 381 | { uri : "http://acs.amazonaws.com/groups/global/AllUsers" }, 382 | { emailAddress : "xyz@amazon.com" } 383 | ] 384 | } 385 | ); 386 | expect( ACL ).toBeStruct(); 387 | expect( ACL ).toHaveKey( "x-amz-grant-full-control" ); 388 | expect( ACL[ "x-amz-grant-full-control" ] ).toBe( 389 | "id=""12345"", uri=""http://acs.amazonaws.com/groups/global/AllUsers"", emailAddress=""xyz@amazon.com""" 390 | ); 391 | expect( ACL ).toHaveKey( "x-amz-grant-write" ); 392 | expect( ACL[ "x-amz-grant-write" ] ).toBe( 393 | "id=""12345"", uri=""http://acs.amazonaws.com/groups/global/AllUsers"", emailAddress=""xyz@amazon.com""" 394 | ); 395 | expect( ACL ).toHaveKey( "x-amz-grant-write" ); 396 | expect( ACL[ "x-amz-grant-write" ] ).toBe( 397 | "id=""12345"", uri=""http://acs.amazonaws.com/groups/global/AllUsers"", emailAddress=""xyz@amazon.com""" 398 | ); 399 | expect( ACL ).toHaveKey( "x-amz-grant-read" ); 400 | expect( ACL[ "x-amz-grant-read" ] ).toBe( 401 | "id=""12345"", uri=""http://acs.amazonaws.com/groups/global/AllUsers"", emailAddress=""xyz@amazon.com""" 402 | ); 403 | expect( ACL ).toHaveKey( "x-amz-grant-read-acp" ); 404 | expect( ACL[ "x-amz-grant-read-acp" ] ).toBe( 405 | "id=""12345"", uri=""http://acs.amazonaws.com/groups/global/AllUsers"", emailAddress=""xyz@amazon.com""" 
406 | ); 407 | } ); 408 | 409 | it( "can download a file", function(){ 410 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 411 | var dl = s3.downloadObject( 412 | testBucket, 413 | "example.txt", 414 | expandPath( "/tests/tmp/example.txt" ) 415 | ); 416 | debug( dl ); 417 | expect( dl ).notToBeEmpty(); 418 | expect( dl.error ).toBeFalse(); 419 | } ); 420 | 421 | it( "validates missing bucketname", function(){ 422 | expect( function(){ 423 | s3.getBucket(); 424 | } ).toThrow( message = "bucketName is required" ); 425 | } ); 426 | 427 | it( "Allows default bucket name", function(){ 428 | s3.setDefaultBucketName( testBucket ); 429 | s3.getBucket(); 430 | } ); 431 | 432 | it( "Allows default delimiter", function(){ 433 | s3.setDefaultDelimiter( "/" ); 434 | 435 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 436 | s3.putObject( 437 | testBucket, 438 | "testFolder/example.txt", 439 | "Hello, world!" 440 | ); 441 | 442 | var bucketContents = s3.getBucket( bucketName = testBucket, prefix = "testFolder/" ); 443 | 444 | expect( bucketContents ).toBeArray(); 445 | expect( bucketContents ).toHaveLength( 1 ); 446 | expect( bucketContents[ 1 ].isDirectory ).toBeFalse(); 447 | } ); 448 | } ); 449 | 450 | describe( "buckets", function(){ 451 | it( "returns true if a bucket exists", function(){ 452 | expect( s3.hasBucket( testBucket ) ).toBeTrue(); 453 | } ); 454 | 455 | it( "can list the buckets associated with the account", function(){ 456 | expect( arrayLen( s3.listBuckets() ) ).toBeGTE( 1, "At least one bucket should be returned" ); 457 | } ); 458 | 459 | xit( "can delete a bucket", function(){ 460 | expect( s3.hasBucket( testBucket ) ).toBeTrue(); 461 | var results = s3.deleteBucket( testBucket ); 462 | expect( results ).toBeTrue(); 463 | s3.putBucket( testBucket ); 464 | } ); 465 | 466 | it( "can get bucketPublicAccess", function(){ 467 | var results = s3.getBucketPublicAccess( testBucket ); 468 | expect( results ).toHaveKey( "BlockPublicAcls" ); 469 | expect( results ).toHaveKey( "IgnorePublicAcls" ); 470 | expect( results ).toHaveKey( "BlockPublicPolicy" ); 471 | expect( results ).toHaveKey( "RestrictPublicBuckets" ); 472 | 473 | expect( results.BlockPublicAcls ).toBeBoolean(); 474 | expect( results.IgnorePublicAcls ).toBeBoolean(); 475 | expect( results.BlockPublicPolicy ).toBeBoolean(); 476 | expect( results.RestrictPublicBuckets ).toBeBoolean(); 477 | } ); 478 | 479 | it( "can set bucketPublicAccess", function(){ 480 | s3.putBucketPublicAccess( testBucket, true, true, true, true ); 481 | var results = s3.getBucketPublicAccess( testBucket ); 482 | 483 | expect( results.BlockPublicAcls ).toBeTrue(); 484 | expect( results.IgnorePublicAcls ).toBeTrue(); 485 | expect( results.BlockPublicPolicy ).toBeTrue(); 486 | expect( results.RestrictPublicBuckets ).toBeTrue(); 487 | 488 | s3.putBucketPublicAccess( testBucket, false, false, false, false ); 489 | var results = s3.getBucketPublicAccess( testBucket ); 490 | 491 | expect( results.BlockPublicAcls ).toBeFalse(); 492 | expect( results.IgnorePublicAcls ).toBeFalse(); 493 | expect( results.BlockPublicPolicy ).toBeFalse(); 494 | expect( results.RestrictPublicBuckets ).toBeFalse(); 495 | } ); 496 | 497 | it( "can set bucket ACL", function(){ 498 | s3.putBucketACL( testBucket, "private" ); 499 | } ); 500 | } ); 501 | 502 | describe( "Presigned URL", function(){ 503 | afterEach( function( currentSpec ){ 504 | var contents = s3.getBucket( testBucket ); 505 | contents 506 | .filter( ( obj ) => !obj.isDirectory ) 507 | .each( ( obj ) 
=> s3.deleteObject( testBucket, obj.key ) ); 508 | contents 509 | .filter( ( obj ) => obj.isDirectory ) 510 | .each( ( obj ) => s3.deleteObject( testBucket, obj.key ) ); 511 | } ); 512 | 513 | it( "can access via get", function(){ 514 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 515 | var presignedURL = s3.getAuthenticatedURL( bucketName = testBucket, uri = "example.txt" ); 516 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 517 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 518 | expect( local.httpResponse.fileContent ).toBe( "Hello, world!" ); 519 | } ); 520 | 521 | it( "can expire", function(){ 522 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 523 | var presignedURL = s3.getAuthenticatedURL( 524 | bucketName = testBucket, 525 | uri = "example.txt", 526 | minutesValid = 1 / 60 527 | ); 528 | sleep( 2000 ) 529 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 530 | 531 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "403", local.httpResponse.fileContent ); 532 | expect( local.httpResponse.fileContent ).toMatch( "expired" ); 533 | } ); 534 | 535 | it( "cannot PUT with a GET URL", function(){ 536 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 537 | var presignedURL = s3.getAuthenticatedURL( bucketName = testBucket, uri = "example.txt" ); 538 | 539 | cfhttp( 540 | url = "#presignedURL#", 541 | result = "local.httpResponse", 542 | method = "PUT" 543 | ) { 544 | cfhttpparam( type = "body", value = "Pre-Signed Put!" ); 545 | }; 546 | 547 | // If a presigned URL is created for a GET operation, it can't be used for anything else! 548 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "403", local.httpResponse.fileContent ); 549 | } ); 550 | 551 | it( "can put file", function(){ 552 | var presignedURL = s3.getAuthenticatedURL( 553 | bucketName = testBucket, 554 | uri = "presignedput.txt", 555 | method = "PUT" 556 | ); 557 | cfhttp( 558 | url = "#presignedURL#", 559 | result = "local.httpResponse", 560 | method = "PUT" 561 | ) { 562 | cfhttpparam( type = "body", value = "Pre-Signed Put!" ); 563 | }; 564 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 565 | 566 | var get = s3.getObject( testBucket, "presignedput.txt" ); 567 | 568 | expect( get.error ).toBeFalse(); 569 | // toString() since there is no content type set in this test, Adobe doesn't send back the file as a string, but a byte output stream 570 | expect( toString( get.response ) ).toBe( "Pre-Signed Put!" ); 571 | } ); 572 | 573 | it( "can put file with friends", function(){ 574 | var presignedURL = s3.getAuthenticatedURL( 575 | bucketName = testBucket, 576 | uri = "presignedputfriends.txt", 577 | method = "PUT", 578 | metaHeaders = { "custom-header" : "custom value" }, 579 | // If the following are left off, they are simply not verfied, meaning there is no issue if the actual CFHTTP call sends them with any value it choses. 580 | contentType = "text/plain", 581 | acl = "public-read" 582 | ); 583 | 584 | cfhttp( 585 | url = "#presignedURL#", 586 | result = "local.httpResponse", 587 | method = "PUT" 588 | ) { 589 | cfhttpparam( type = "body", value = "Pre-Signed Put!" 
); 590 | cfhttpparam( 591 | type = "header", 592 | name = "content-type", 593 | value = "text/plain" 594 | ); 595 | cfhttpparam( 596 | type = "header", 597 | name = "x-amz-acl", 598 | value = "public-read" 599 | ); 600 | cfhttpparam( 601 | type = "header", 602 | name = "x-amz-meta-custom-header", 603 | value = "custom value" 604 | ); 605 | }; 606 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 607 | 608 | var get = s3.getObject( testBucket, "presignedputfriends.txt" ); 609 | 610 | expect( get.error ).toBeFalse(); 611 | expect( get.response ).toBe( "Pre-Signed Put!" ); 612 | } ); 613 | 614 | it( "can enforce invalid ACL on PUT", function(){ 615 | var presignedURL = s3.getAuthenticatedURL( 616 | bucketName = testBucket, 617 | uri = "presignedputacl.txt", 618 | method = "PUT", 619 | acl = "public-read" 620 | ); 621 | 622 | cfhttp( 623 | url = "#presignedURL#", 624 | result = "local.httpResponse", 625 | method = "PUT" 626 | ) { 627 | cfhttpparam( type = "body", value = "Pre-Signed Put!" ); 628 | // ACL doesn't match! 629 | cfhttpparam( 630 | type = "header", 631 | name = "x-amz-acl", 632 | value = "public-read-write" 633 | ); 634 | }; 635 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "403", local.httpResponse.fileContent ); 636 | } ); 637 | 638 | it( "Can use presigned URL with forced response headers", function(){ 639 | s3.putObject( testBucket, "example.txt", "Hello, world!" ); 640 | var presignedURL = s3.getAuthenticatedURL( 641 | bucketName = testBucket, 642 | uri = "example.txt", 643 | responseHeaders = { 644 | "content-type" : "custom-type", 645 | "content-language" : "custom-language", 646 | "expires" : "custom-expires", 647 | "cache-control" : "custom-cache", 648 | "content-disposition" : "custom-disposition", 649 | "content-encoding" : "custom-encoding" 650 | } 651 | ); 652 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 653 | 654 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 655 | expect( local.httpResponse.fileContent ).toBe( "Hello, world!" ); 656 | expect( local.httpResponse.Responseheader[ "content-type" ] ).toBe( "custom-type" ); 657 | expect( local.httpResponse.Responseheader[ "content-language" ] ).toBe( "custom-language" ); 658 | expect( local.httpResponse.Responseheader[ "expires" ] ).toBe( "custom-expires" ); 659 | expect( local.httpResponse.Responseheader[ "cache-control" ] ).toBe( "custom-cache" ); 660 | expect( local.httpResponse.Responseheader[ "content-disposition" ] ).toBe( "custom-disposition" ); 661 | expect( local.httpResponse.Responseheader[ "content-encoding" ] ).toBe( "custom-encoding" ); 662 | } ); 663 | 664 | it( "Can use presigned URL with auto response content type", function(){ 665 | s3.putObject( 666 | testBucket, 667 | "example.txt", 668 | "Hello, world!", 669 | "", 670 | "wacky-content-type" 671 | ); 672 | var presignedURL = s3.getAuthenticatedURL( 673 | bucketName = testBucket, 674 | uri = "example.txt", 675 | responseHeaders = { "content-type" : "auto" } 676 | ); 677 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 678 | 679 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 680 | expect( local.httpResponse.fileContent ).toBe( "Hello, world!" 
); 681 | // Our explicit content type when storing the file is ignored and the corret type is automatically returned based on MIME type 682 | expect( local.httpResponse.Responseheader[ "content-type" ] ).toBe( "text/plain" ); 683 | } ); 684 | 685 | it( "Creating presigned URL with invalid response header throws error", function(){ 686 | expect( () => s3.getAuthenticatedURL( 687 | bucketName = testBucket, 688 | uri = "example.txt", 689 | responseHeaders = { "fake" : "" } 690 | ) ).toThrow(); 691 | } ); 692 | } ); 693 | } ); 694 | 695 | describe( "encryption", function(){ 696 | afterEach( function( currentSpec ){ 697 | // Add any test fixtures here that you create below 698 | s3.deleteObject( testBucket, "encrypted.txt" ); 699 | s3.deleteObject( testBucket, "encrypted-copy.txt" ); 700 | s3.deleteObject( testBucket, "encrypted2.txt" ); 701 | } ); 702 | 703 | it( "can put encrypted file", function(){ 704 | var data = "Hello, encrypted world!"; 705 | s3.putObject( 706 | bucketName = testBucket, 707 | uri = "encrypted.txt", 708 | data = data, 709 | encryptionAlgorithm = "AES256" 710 | ); 711 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted.txt" ); 712 | 713 | expect( o.error ).toBe( false ); 714 | expect( o.response ).toBe( data ); 715 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 716 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 717 | 718 | var o = s3.getObjectInfo( bucketName = testBucket, uri = "encrypted.txt" ); 719 | expect( o ).toHaveKey( "x-amz-server-side-encryption" ); 720 | expect( o[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 721 | } ); 722 | 723 | it( "can get presigned URL for encrypted file", function(){ 724 | var data = "Hello, encrypted world!"; 725 | s3.putObject( 726 | bucketName = testBucket, 727 | uri = "encrypted.txt", 728 | data = data, 729 | encryptionAlgorithm = "AES256" 730 | ); 731 | 732 | var presignedURL = s3.getAuthenticatedURL( bucketName = testBucket, uri = "encrypted.txt" ); 733 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 734 | 735 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 736 | expect( local.httpResponse.fileContent ).toBe( data ); 737 | } ); 738 | 739 | it( "can get presigned URL for encrypted file with custom encrypted key", function(){ 740 | var data = "Hello, encrypted world!"; 741 | var key = generateSecretKey( "AES", 256 ); 742 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( key ), "MD5" ), "hex" ) ); 743 | s3.putObject( 744 | bucketName = testBucket, 745 | uri = "encrypted.txt", 746 | data = data, 747 | encryptionAlgorithm = "AES256", 748 | encryptionKey = key 749 | ); 750 | 751 | var presignedURL = s3.getAuthenticatedURL( 752 | bucketName = testBucket, 753 | uri = "encrypted.txt", 754 | encryptionKey = key 755 | ); 756 | 757 | // Since the encryption details MUST be sent via HTTP headers, it is not possible to use this signed URL in a web browser 758 | // Per https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-and-presignedurl 759 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ) { 760 | cfhttpparam( 761 | type = "header", 762 | name = "x-amz-server-side-encryption-customer-algorithm", 763 | value = "AES256" 764 | ); 765 | cfhttpparam( 766 | type = "header", 767 | name = "x-amz-server-side-encryption-customer-key", 768 | value = key 769 | ); 770 | cfhttpparam( 771 | type = "header", 772 | name = 
"x-amz-server-side-encryption-customer-key-MD5", 773 | value = keyMD5 774 | ); 775 | }; 776 | 777 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 778 | expect( local.httpResponse.fileContent ).toBe( data ); 779 | } ); 780 | 781 | it( "can copy encrypted file", function(){ 782 | var data = "Hello, encrypted world!"; 783 | s3.putObject( 784 | bucketName = testBucket, 785 | uri = "encrypted.txt", 786 | data = data, 787 | encryptionAlgorithm = "AES256" 788 | ); 789 | var o = s3.copyObject( 790 | fromBucket = testBucket, 791 | fromURI = "encrypted.txt", 792 | toBucket = testBucket, 793 | toURI = "encrypted-copy.txt", 794 | encryptionAlgorithm = "AES256" 795 | ); 796 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted-copy.txt" ); 797 | 798 | expect( o.error ).toBe( false ); 799 | expect( o.response ).toBe( data ); 800 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 801 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 802 | } ); 803 | 804 | it( "can rename encrypted file", function(){ 805 | var data = "Hello, encrypted world!"; 806 | s3.putObject( 807 | bucketName = testBucket, 808 | uri = "encrypted.txt", 809 | data = data, 810 | encryptionAlgorithm = "AES256" 811 | ); 812 | var o = s3.renameObject( 813 | oldBucketName = testBucket, 814 | oldFileKey = "encrypted.txt", 815 | newBucketName = testBucket, 816 | newFileKey = "encrypted-copy.txt", 817 | encryptionAlgorithm = "AES256" 818 | ); 819 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted-copy.txt" ); 820 | 821 | expect( o.error ).toBe( false ); 822 | expect( o.response ).toBe( data ); 823 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 824 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 825 | } ); 826 | 827 | it( "can put encrypted with custom encryption key", function(){ 828 | var data = "Hello, encrypted world!"; 829 | var key = generateSecretKey( "AES", 256 ); 830 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( key ), "MD5" ), "hex" ) ); 831 | s3.putObject( 832 | bucketName = testBucket, 833 | uri = "encrypted.txt", 834 | data = data, 835 | encryptionKey = key 836 | ); 837 | var o = s3.getObject( 838 | bucketName = testBucket, 839 | uri = "encrypted.txt", 840 | encryptionKey = key 841 | ); 842 | 843 | expect( o.error ).toBe( false ); 844 | expect( o.response ).toBe( data ); 845 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 846 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 847 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 848 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 849 | } ); 850 | 851 | it( "can copy encrypted file with custom encryption key", function(){ 852 | var data = "Hello, encrypted world!"; 853 | var key = generateSecretKey( "AES", 256 ); 854 | // Store file with original encryption key 855 | s3.putObject( 856 | bucketName = testBucket, 857 | uri = "encrypted.txt", 858 | data = data, 859 | encryptionKey = key 860 | ); 861 | 862 | var newKey = generateSecretKey( "AES", 256 ); 863 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( newKey ), "MD5" ), "hex" ) ); 864 | 865 | // Copy file with new encryption key 866 | var o = s3.copyObject( 867 | fromBucket = testBucket, 868 | fromURI = "encrypted.txt", 869 | toBucket = 
testBucket, 870 | toURI = "encrypted-copy.txt", 871 | encryptionKey = newKey, 872 | encryptionKeySource = key 873 | ); 874 | 875 | var o = s3.getObject( 876 | bucketName = testBucket, 877 | uri = "encrypted-copy.txt", 878 | encryptionKey = newKey 879 | ); 880 | 881 | expect( o.error ).toBe( false ); 882 | expect( o.response ).toBe( data ); 883 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 884 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 885 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 886 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 887 | } ); 888 | 889 | it( "can rename encrypted file with custom encryption key", function(){ 890 | var data = "Hello, encrypted world!"; 891 | var key = generateSecretKey( "AES", 256 ); 892 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( key ), "MD5" ), "hex" ) ); 893 | // Store file with original encryption key 894 | s3.putObject( 895 | bucketName = testBucket, 896 | uri = "encrypted.txt", 897 | data = data, 898 | encryptionKey = key 899 | ); 900 | 901 | // Copy file with new encryption key 902 | var o = s3.renameObject( 903 | oldBucketName = testBucket, 904 | oldFileKey = "encrypted.txt", 905 | newBucketName = testBucket, 906 | newFileKey = "encrypted-copy.txt", 907 | encryptionKey = key 908 | ); 909 | 910 | var o = s3.getObject( 911 | bucketName = testBucket, 912 | uri = "encrypted-copy.txt", 913 | encryptionKey = key 914 | ); 915 | 916 | expect( o.error ).toBe( false ); 917 | expect( o.response ).toBe( data ); 918 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 919 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 920 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 921 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 922 | } ); 923 | 924 | it( "can put encrypted with custom encryption key and custom algorithm", function(){ 925 | var data = "Hello, encrypted world!"; 926 | var key = generateSecretKey( "AES", 256 ); 927 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( key ), "MD5" ), "hex" ) ); 928 | s3.putObject( 929 | bucketName = testBucket, 930 | uri = "encrypted.txt", 931 | data = data, 932 | encryptionKey = key, 933 | encryptionAlgorithm = "AES256" 934 | ); 935 | var o = s3.getObject( 936 | bucketName = testBucket, 937 | uri = "encrypted.txt", 938 | encryptionKey = key, 939 | encryptionAlgorithm = "AES256" 940 | ); 941 | 942 | expect( o.error ).toBe( false ); 943 | expect( o.response ).toBe( data ); 944 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 945 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 946 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 947 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 948 | 949 | var o = s3.getObjectInfo( 950 | bucketName = testBucket, 951 | uri = "encrypted.txt", 952 | encryptionKey = key, 953 | encryptionAlgorithm = "AES256" 954 | ); 955 | 956 | expect( o ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 957 | expect( o[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 958 | expect( o ).toHaveKey( 
"x-amz-server-side-encryption-customer-key-MD5" ); 959 | expect( o[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 960 | 961 | var filePath = expandPath( "/tests/tmp/example.txt" ); 962 | var o = s3.downloadObject( 963 | bucketName = testBucket, 964 | uri = "encrypted.txt", 965 | filepath = filePath, 966 | encryptionKey = key, 967 | encryptionAlgorithm = "AES256" 968 | ); 969 | 970 | expect( o ).toHaveKey( "error" ); 971 | expect( o.error ).toBe( false ); 972 | 973 | // Lucee doesn't return headers AND direct download a file because it's dumb 974 | // https://luceeserver.atlassian.net/browse/LDEV-4357 975 | if ( isNull( server.lucee ) ) { 976 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 977 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 978 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 979 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 980 | } 981 | expect( fileRead( filePath ) ).toBe( data ); 982 | } ); 983 | 984 | 985 | it( "can use default encryption algorithm", function(){ 986 | var data = "Hello, encrypted world!"; 987 | s3.setDefaultEncryptionAlgorithm( "AES256" ); 988 | 989 | s3.putObject( 990 | bucketName = testBucket, 991 | uri = "encrypted.txt", 992 | data = data 993 | ); 994 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted.txt" ); 995 | 996 | expect( o.error ).toBe( false ); 997 | expect( o.response ).toBe( data ); 998 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 999 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 1000 | 1001 | var o = s3.getObjectInfo( bucketName = testBucket, uri = "encrypted.txt" ); 1002 | expect( o ).toHaveKey( "x-amz-server-side-encryption" ); 1003 | expect( o[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 1004 | 1005 | 1006 | var presignedURL = s3.getAuthenticatedURL( bucketName = testBucket, uri = "encrypted.txt" ); 1007 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ); 1008 | 1009 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 1010 | expect( local.httpResponse.fileContent ).toBe( data ); 1011 | 1012 | var o = s3.copyObject( 1013 | fromBucket = testBucket, 1014 | fromURI = "encrypted.txt", 1015 | toBucket = testBucket, 1016 | toURI = "encrypted-copy.txt" 1017 | ); 1018 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted-copy.txt" ); 1019 | 1020 | expect( o.error ).toBe( false ); 1021 | expect( o.response ).toBe( data ); 1022 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 1023 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 1024 | 1025 | var o = s3.renameObject( 1026 | oldBucketName = testBucket, 1027 | oldFileKey = "encrypted.txt", 1028 | newBucketName = testBucket, 1029 | newFileKey = "encrypted2.txt" 1030 | ); 1031 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted2.txt" ); 1032 | 1033 | expect( o.error ).toBe( false ); 1034 | expect( o.response ).toBe( data ); 1035 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption" ); 1036 | expect( o.responseHeader[ "x-amz-server-side-encryption" ] ).toBe( "AES256" ); 1037 | 1038 | s3.setDefaultEncryptionAlgorithm( "" ); 1039 | } ); 1040 | 1041 | 1042 | 1043 | it( "can use default encryption key", function(){ 1044 | var data = "Hello, 
encrypted world!"; 1045 | var key = generateSecretKey( "AES", 256 ); 1046 | var keyMD5 = toBase64( binaryDecode( hash( toBinary( key ), "MD5" ), "hex" ) ); 1047 | s3.setDefaultEncryptionKey( key ); 1048 | 1049 | s3.putObject( 1050 | bucketName = testBucket, 1051 | uri = "encrypted.txt", 1052 | data = data 1053 | ); 1054 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted.txt" ); 1055 | 1056 | expect( o.error ).toBe( false ); 1057 | expect( o.response ).toBe( data ); 1058 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 1059 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 1060 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 1061 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 1062 | 1063 | var o = s3.getObjectInfo( bucketName = testBucket, uri = "encrypted.txt" ); 1064 | expect( o ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 1065 | expect( o[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 1066 | expect( o ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 1067 | expect( o[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 1068 | 1069 | 1070 | var presignedURL = s3.getAuthenticatedURL( bucketName = testBucket, uri = "encrypted.txt" ); 1071 | cfhttp( url = "#presignedURL#", result = "local.httpResponse" ) { 1072 | cfhttpparam( 1073 | type = "header", 1074 | name = "x-amz-server-side-encryption-customer-algorithm", 1075 | value = "AES256" 1076 | ); 1077 | cfhttpparam( 1078 | type = "header", 1079 | name = "x-amz-server-side-encryption-customer-key", 1080 | value = key 1081 | ); 1082 | cfhttpparam( 1083 | type = "header", 1084 | name = "x-amz-server-side-encryption-customer-key-MD5", 1085 | value = keyMD5 1086 | ); 1087 | }; 1088 | 1089 | expect( local.httpResponse.Responseheader.status_code ?: 0 ).toBe( "200", local.httpResponse.fileContent ); 1090 | expect( local.httpResponse.fileContent ).toBe( data ); 1091 | 1092 | var o = s3.copyObject( 1093 | fromBucket = testBucket, 1094 | fromURI = "encrypted.txt", 1095 | toBucket = testBucket, 1096 | toURI = "encrypted-copy.txt" 1097 | ); 1098 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted-copy.txt" ); 1099 | 1100 | expect( o.error ).toBe( false ); 1101 | expect( o.response ).toBe( data ); 1102 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 1103 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 1104 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 1105 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 1106 | 1107 | var o = s3.renameObject( 1108 | oldBucketName = testBucket, 1109 | oldFileKey = "encrypted.txt", 1110 | newBucketName = testBucket, 1111 | newFileKey = "encrypted2.txt" 1112 | ); 1113 | var o = s3.getObject( bucketName = testBucket, uri = "encrypted2.txt" ); 1114 | 1115 | expect( o.error ).toBe( false ); 1116 | expect( o.response ).toBe( data ); 1117 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-algorithm" ); 1118 | expect( o.responseHeader[ "x-amz-server-side-encryption-customer-algorithm" ] ).toBe( "AES256" ); 1119 | expect( o.responseHeader ).toHaveKey( "x-amz-server-side-encryption-customer-key-MD5" ); 1120 | expect( 
o.responseHeader[ "x-amz-server-side-encryption-customer-key-MD5" ] ).toBe( keyMD5 ); 1121 | 1122 | s3.setDefaultEncryptionKey( "" ); 1123 | } ); 1124 | } ); 1125 | } 1126 | 1127 | private function createLogStub(){ 1128 | return createStub() 1129 | .$( "canDebug", false ) 1130 | .$( "debug" ) 1131 | .$( "error" ) 1132 | .$( "warn" ); 1133 | } 1134 | 1135 | /** 1136 | * Boxlang uses java.nio which is non blocking so file operations may take a few ms to complete. 1137 | */ 1138 | function sleepIfNIO( duration = 50 ){ 1139 | if ( getMetadata( this ).name == "LocalProvider" && server.keyExists( "boxlang" ) ) { 1140 | sleep( duration ); 1141 | } 1142 | } 1143 | 1144 | } 1145 | -------------------------------------------------------------------------------- /test-harness/tests/specs/Sv4UtilSpec.cfc: -------------------------------------------------------------------------------- 1 | component extends="coldbox.system.testing.BaseTestCase" { 2 | 3 | function beforeAll(){ 4 | variables.sv4 = new s3sdk.models.Sv4Util(); 5 | 6 | variables.awsSigV4TestSuiteConfig = { 7 | // The following four are derived from the "credential scope" listed in the 8 | // SigV4 Test Suite docs at 9 | // https://docs.aws.amazon.com/general/latest/gr/signature-v4-test-suite.html 10 | accessKey : "AKIDEXAMPLE", 11 | dateStamp : "20150830", 12 | regionName : "us-east-1", 13 | serviceName : "service", 14 | // This is derived from the files in the SigV4 Test Suite. 15 | amzDate : "20150830T123600Z", 16 | // This comes straight from the SigV4 Test Suite docs. 17 | secretKey : "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" 18 | }; 19 | } 20 | 21 | function run(){ 22 | describe( "SigV4 utilities", function(){ 23 | describe( "get-presigned-url", function(){ 24 | it( "generateSignatureData", function(){ 25 | // The following are example data from 26 | // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html 27 | config = { 28 | accessKey : "AKIAIOSFODNN7EXAMPLE", 29 | dateStamp : "20130524", 30 | regionName : "us-east-1", 31 | serviceName : "s3", 32 | amzDate : "20130524T000000Z", 33 | secretKey : "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 34 | }; 35 | var testData = fixtureData( "get-presigned-url" ); 36 | 37 | var sigData = sv4.generateSignatureData( 38 | requestMethod = testData.method, 39 | hostName = testData.host, 40 | requestURI = testData.uri, 41 | requestBody = "", 42 | requestHeaders = testData.headers, 43 | requestParams = testData.urlParams, 44 | accessKey = config.accessKey, 45 | secretKey = config.secretKey, 46 | regionName = config.regionName, 47 | serviceName = config.serviceName, 48 | amzDate = config.amzDate, 49 | dateStamp = config.dateStamp, 50 | presignDownloadURL = true 51 | ); 52 | 53 | expect( sigData.canonicalRequest ).toBe( testData.canonicalRequest ); 54 | expect( sigData.stringToSign ).toBe( testData.stringToSign ); 55 | expect( sigData.authorizationHeader ).toBe( testData.authHeader ); 56 | } ); 57 | } ); 58 | 59 | describe( "adapted AWS SigV4 test suite tests", function(){ 60 | it( "signs get-vanilla-query-unreserved", function(){ 61 | var config = variables.awsSigV4TestSuiteConfig; 62 | var testData = fixtureData( "get-vanilla-query-unreserved-s3" ); 63 | expectCorrectSignature( config, testData ); 64 | } ); 65 | 66 | it( "signs post-header-key-sort", function(){ 67 | var config = variables.awsSigV4TestSuiteConfig; 68 | var testData = fixtureData( "post-header-key-sort-s3" ); 69 | expectCorrectSignature( config, testData ); 70 | } ); 71 | 72 | it( "signs post-vanilla-query", 
function(){ 73 | var config = variables.awsSigV4TestSuiteConfig; 74 | var testData = fixtureData( "post-vanilla-query-s3" ); 75 | expectCorrectSignature( config, testData ); 76 | } ); 77 | } ); 78 | } ); 79 | } 80 | 81 | private function expectCorrectSignature( required struct config, required struct testData ){ 82 | var sigData = sv4.generateSignatureData( 83 | requestMethod = testData.method, 84 | hostName = testData.host, 85 | requestURI = testData.uri, 86 | requestBody = "", 87 | requestHeaders = testData.headers, 88 | requestParams = testData.urlParams, 89 | accessKey = config.accessKey, 90 | secretKey = config.secretKey, 91 | regionName = config.regionName, 92 | serviceName = config.serviceName, 93 | amzDate = config.amzDate, 94 | dateStamp = config.dateStamp 95 | ); 96 | 97 | expect( sigData.canonicalRequest ).toBe( testData.canonicalRequest ); 98 | expect( sigData.stringToSign ).toBe( testData.stringToSign ); 99 | expect( sigData.authorizationHeader ).toBe( testData.authHeader ); 100 | } 101 | 102 | private function fixtureData( required string folderName ){ 103 | var folderPath = expandPath( "/tests/fixtures/#folderName#" ); 104 | var data = { 105 | request : fileRead( "#folderPath#/#folderName#.req" ).replace( chr( 13 ), "", "all" ), 106 | canonicalRequest : fileRead( "#folderPath#/#folderName#.creq" ).replace( chr( 13 ), "", "all" ), 107 | stringToSign : fileRead( "#folderPath#/#folderName#.sts" ).replace( chr( 13 ), "", "all" ), 108 | authHeader : fileRead( "#folderPath#/#folderName#.authz" ).replace( chr( 13 ), "", "all" ) 109 | }; 110 | data.method = data.request.listToArray( " " )[ 1 ]; 111 | data.host = data.request.listToArray( chr( 10 ) )[ 2 ].listToArray( ":" )[ 2 ]; 112 | data.uri = data.request.listToArray( " " )[ 2 ].reReplace( "\?.*$", "" ); 113 | data.headers = headersFromRequestFile( data.request ); 114 | data.urlParams = urlParamsFromRequestFile( data.request ); 115 | return data; 116 | } 117 | 118 | // TODO: Handle multi-line headers 119 | private function headersFromRequestFile( file ){ 120 | var lines = file.listToArray( chr( 10 ) ); 121 | var lineNumberAfterHeaders = lines.find( "" ); 122 | if ( !lineNumberAfterHeaders ) { 123 | lineNumberAfterHeaders = lines.len(); 124 | } 125 | var headers = lines.slice( 2, lineNumberAfterHeaders - 1 ); 126 | return headers.reduce( function( memo, el ){ 127 | var colonPos = el.find( ":" ); 128 | var name = el.left( colonPos - 1 ); 129 | var value = el.right( el.len() - colonPos ); 130 | memo[ "#name#" ] = value; 131 | return memo; 132 | }, {} ); 133 | } 134 | 135 | private function urlParamsFromRequestFile( file ){ 136 | var uri = file.listToArray( " " )[ 2 ]; 137 | var params = {}; 138 | if ( !uri.find( "?" ) ) { 139 | return params; 140 | } 141 | var queryString = uri.listToArray( "?" 
)[ 2 ]; 142 | return queryString 143 | .listToArray( "&" ) 144 | .reduce( function( memo, el ){ 145 | var eqPos = el.find( "=" ); 146 | var name = el.left( eqPos - 1 ); 147 | var value = el.right( el.len() - eqPos ); 148 | memo[ "#name#" ] = value; 149 | return memo; 150 | }, params ); 151 | } 152 | 153 | } 154 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/buildKeyName.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | describe( "The buildKeyName function should...", function(){ 17 | beforeEach( function(){ 18 | uri = mockData( $num = 1, $type = "words:1" )[ 1 ]; 19 | bucketName = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | var moduleSettings = getWirebox().getInstance( "box:moduleSettings:s3sdk" ); 21 | 22 | testObj = new s3sdk.models.AmazonS3( 23 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | ssl = true, 28 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 29 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 30 | ); 31 | } ); 32 | it( "If the urlStyle is path and a bucket is submitted, return bucket\uri", function(){ 33 | testObj.setUrlStyle( "path" ); 34 | testme = testObj.buildKeyName( uri, bucketName ); 35 | expect( testme ).tobe( "#bucketName#/#uri#" ); 36 | } ); 37 | it( "If the urlStyle is path and a bucket is not submitted, return uri", function(){ 38 | testObj.setUrlStyle( "path" ); 39 | testme = testObj.buildKeyName( uri ); 40 | expect( testme ).tobe( uri ); 41 | } ); 42 | it( "If the urlStyle is path and the bucket is an empty string, return uri", function(){ 43 | testObj.setUrlStyle( "path" ); 44 | testme = testObj.buildKeyName( uri, "" ); 45 | expect( testme ).tobe( uri ); 46 | } ); 47 | it( "If the urlStyle is virtual, return uri", function(){ 48 | testObj.setUrlStyle( "virtual" ); 49 | testme = testObj.buildKeyName( uri, bucketname ); 50 | expect( testme ).tobe( uri ); 51 | } ); 52 | } ); 53 | } 54 | 55 | } 56 | 57 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/buildUrlEndpoint.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 
17 | describe( "The buildUrlEndpoint function should ...", function(){ 18 | beforeEach( function(){ 19 | domain = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | region = mockData( $num = 1, $type = "words:1" )[ 1 ]; 21 | bucketName = mockData( $num = 1, $type = "words:1" )[ 1 ]; 22 | keyName = mockData( $num = 1, $type = "words:1" )[ 1 ]; 23 | testObj = new s3sdk.models.AmazonS3( 24 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 28 | ssl = true, 29 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 30 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 31 | ); 32 | testObj.setAwsRegion( region ); 33 | } ); 34 | it( "If the urlStyle is path - the default - , build according to https://s3.region-code.amazonaws.com/bucket-name/key-name", function(){ 35 | testObj.setUrlStyle( "path" ); 36 | testObj.setawsDomain( "amazonaws.com" ); 37 | var testme = testObj.buildUrlEndpoint( bucketName ); 38 | expect( testme.getURLEndpointHostname() ).tobe( "s3.#region#.amazonaws.com" ); 39 | expect( testme.getURLEndpoint() ).tobe( "https://s3.#region#.amazonaws.com" ); 40 | } ); 41 | it( "If the urlStyle is path and the domain is not amazonaws.com, do not include s3", function(){ 42 | testObj.setUrlStyle( "path" ); 43 | testObj.setawsDomain( domain ); 44 | var testme = testObj.buildUrlEndpoint( bucketName ); 45 | expect( testme.getURLEndpointHostname() ).tobe( "#region#.#domain#" ); 46 | expect( testme.getURLEndpoint() ).tobe( "https://#region#.#domain#" ); 47 | } ); 48 | it( "If the urlStyle is path and the region is empty, do not include it", function(){ 49 | testObj.setUrlStyle( "path" ); 50 | testObj.setawsDomain( "amazonaws.com" ); 51 | testObj.setawsRegion( "" ); 52 | var testme = testObj.buildUrlEndpoint( bucketName ); 53 | expect( testme.getURLEndpointHostname() ).tobe( "s3.amazonaws.com" ); 54 | expect( testme.getURLEndpoint() ).tobe( "https://s3.amazonaws.com" ); 55 | } ); 56 | it( "If the urlStyle is virtual and the domain has amazonaws.com, build according to https://bucket-name.s3.region-code.amazonaws.com/key-name", function(){ 57 | testObj.setUrlStyle( "virtual" ); 58 | testObj.setawsDomain( "amazonaws.com" ); 59 | var testme = testObj.buildUrlEndpoint( bucketName ); 60 | expect( testme.getURLEndpointHostname() ).tobe( "#bucketName#.s3.#region#.amazonaws.com" ); 61 | expect( testme.getURLEndpoint() ).tobe( "https://#bucketName#.s3.#region#.amazonaws.com" ); 62 | } ); 63 | it( "If the urlStyle is virtual and the domain has amazonaws.com, but a bucket name is not submitted, do not include it", function(){ 64 | testObj.setUrlStyle( "virtual" ); 65 | testObj.setawsDomain( "amazonaws.com" ); 66 | var testme = testObj.buildUrlEndpoint(); 67 | expect( testme.getURLEndpointHostname() ).tobe( "s3.#region#.amazonaws.com" ); 68 | expect( testme.getURLEndpoint() ).tobe( "https://s3.#region#.amazonaws.com" ); 69 | } ); 70 | it( "If the urlStyle is virtual and the domain has amazonaws.com, but a region is not set, do not include it", function(){ 71 | testObj.setUrlStyle( "virtual" ); 72 | testObj.setawsDomain( "amazonaws.com" ); 73 | testObj.setawsRegion( "" ); 74 | var testme = testObj.buildUrlEndpoint( bucketName ); 75 | expect( testme.getURLEndpointHostname() ).tobe( "#bucketName#.s3.amazonaws.com" ); 76 | expect( testme.getURLEndpoint() ).tobe( 
"https://#bucketName#.s3.amazonaws.com" ); 77 | } ); 78 | it( "If the urlStyle is virtual and the domain does not have amazonaws.com, do not alter the domain name", function(){ 79 | testObj.setUrlStyle( "virtual" ); 80 | testObj.setawsDomain( "mydomain.com" ); 81 | testObj.setawsRegion( "" ); 82 | var testme = testObj.buildUrlEndpoint( bucketName ); 83 | expect( testme.getURLEndpointHostname() ).tobe( "mydomain.com" ); 84 | expect( testme.getURLEndpoint() ).tobe( "https://mydomain.com" ); 85 | } ); 86 | it( "If the urlStyle is path and the domain does not have amazonaws.com, do not alter the domain name", function(){ 87 | testObj.setUrlStyle( "virtual" ); 88 | testObj.setawsDomain( "mydomain.com" ); 89 | testObj.setawsRegion( "" ); 90 | var testme = testObj.buildUrlEndpoint( bucketName ); 91 | expect( testme.getURLEndpointHostname() ).tobe( "mydomain.com" ); 92 | expect( testme.getURLEndpoint() ).tobe( "https://mydomain.com" ); 93 | } ); 94 | it( "It should return an instance of AmazonS3", function(){ 95 | testObj.setUrlStyle( "path" ); 96 | testObj.setawsDomain( "amazonaws.com" ); 97 | var testme = testObj.buildUrlEndpoint( bucketName ); 98 | expect( testme ).tobeInstanceOf( "AmazonS3" ); 99 | } ); 100 | } ); 101 | } 102 | 103 | } 104 | 105 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/createSignatureUtil.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 
17 | describe( "The createSignatureUtil function should...", function(){ 18 | beforeEach( function(){ 19 | testObj = new s3sdk.models.AmazonS3( 20 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 21 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 22 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 23 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | ssl = true, 25 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 27 | ); 28 | } ); 29 | it( "If V2 is submitted, return SV2Util", function(){ 30 | testme = testObj.createSignatureUtil( "V2" ); 31 | expect( testme ).tobeInstanceOf( "Sv2Util" ); 32 | } ); 33 | it( "If V4 is submitted, return SV2Util", function(){ 34 | testme = testObj.createSignatureUtil( "V4" ); 35 | expect( testme ).tobeInstanceOf( "Sv4Util" ); 36 | } ); 37 | } ); 38 | } 39 | 40 | } 41 | 42 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/getBucketLocation.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 17 | describe( "The setAwsRegion function should...", function(){ 18 | beforeEach( function(){ 19 | accessKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | secretKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 21 | bucketName = mockData( $num = 1, $type = "words:1" )[ 1 ]; 22 | 23 | testObj = testObj = new s3sdk.models.AmazonS3( 24 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 28 | ssl = true, 29 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 30 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 31 | ); 32 | prepareMock( testObj ); 33 | testObj.setAccessKey( accessKey ); 34 | testObj.setSecretKey( secretKey ); 35 | testObj.$( method = "requireBucketName" ); 36 | testObj.$( method = "s3Request", returns = createResponse() ); 37 | } ); 38 | it( "Should call requireBucketName 1x ", function(){ 39 | testme = testObj.getBucketLocation( bucketName ); 40 | expect( testObj.$count( "requireBucketName" ) ).tobe( 1 ); 41 | } ); 42 | it( "Should call s3Request 1x ", function(){ 43 | testme = testObj.getBucketLocation( bucketName ); 44 | expect( testObj.$count( "s3Request" ) ).tobe( 1 ); 45 | } ); 46 | it( "If an error is returned from s3Request, it should throw an error with the message from s3 as the error message ", function(){ 47 | var message = mockdata( $num = 1, $type = "words:10" )[ 1 ]; 48 | testObj.$( method = "s3Request", returns = createResponse( error = true, message = message ) ); 49 | expect( function(){ 50 | testObj.getBucketLocation( bucketName ); 51 | } ).tothrow( type = "application", message = message ); 52 | } ); 53 | } ); 54 | } 55 | 56 | function 
createResponse( 57 | required boolean error = false, 58 | string location = "", 59 | message = "" 60 | ){ 61 | return { 62 | "response" : xmlParse( "<LocationConstraint> 63 | #arguments.location#</LocationConstraint>" ), 64 | "message" : arguments.message, 65 | "error" : arguments.error, 66 | "responseheader" : { 67 | "Date" : "Tue, 19 Sep 2023 16:09:11 GMT", 68 | "Server" : "AmazonS3", 69 | "Transfer-Encoding" : "chunked", 70 | "x-amz-id-2" : "8CxOH41yj+NlQLaKGmgFGRpImXai9QnR+nNT5biih8eeYBWSZ1R65tUW1C6uw9eTvj5435wzWPg=", 71 | "x-amz-request-id" : "263PWVHD32Y7P5Q9", 72 | "status_code" : 200, 73 | "Content-Type" : "application/xml", 74 | "explanation" : "OK" 75 | } 76 | }; 77 | } 78 | 79 | } 80 | 81 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/init.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 17 | describe( "By default the init function should...", function(){ 18 | beforeEach( function(){ 19 | accessKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | secretKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 21 | 22 | testObj = new s3sdk.models.AmazonS3( 23 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | ssl = true, 28 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 29 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 30 | ); 31 | prepareMock( testObj ); 32 | testObj.$( method = "createSignatureUtil" ); 33 | } ); 34 | it( "Have the accessKey set", function(){ 35 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 36 | expect( testme.getAccessKey() ).tobe( accessKey ); 37 | } ); 38 | it( "Have the secretKey set", function(){ 39 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 40 | expect( testme.getSecretKey() ).tobe( secretKey ); 41 | } ); 42 | it( "Have the awsDomain set to amazonaws.com", function(){ 43 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 44 | expect( testme.getawsDomain() ).tobe( "amazonaws.com" ); 45 | } ); 46 | it( "Have the awsRegion set to us-east-1", function(){ 47 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 48 | expect( testme.getawsRegion() ).tobe( "us-east-1" ); 49 | } ); 50 | it( "Have the encryptionCharset set to UTF-8", function(){ 51 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 52 | expect( testme.getencryptionCharset() ).tobe( "UTF-8" ); 53 | } ); 54 | it( "Have the ssl set to true", function(){ 55 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 56 | expect( testme.getssl() ).tobeTrue(); 57 | } ); 58 | it( "Have the defaultTimeOut set to 300", function(){ 59 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 60 | expect( testme.getdefaultTimeOut()
).tobe( 300 ); 61 | } ); 62 | it( "Have the defaultDelimiter set to /", function(){ 63 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 64 | expect( testme.getdefaultDelimiter() ).tobe( "/" ); 65 | } ); 66 | it( "Have the defaultBucketName set to blank string", function(){ 67 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 68 | expect( testme.getdefaultBucketName().len() ).tobe( 0 ); 69 | } ); 70 | it( "Have the defaultCacheControl set to no-store, no-cache, must-revalidate", function(){ 71 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 72 | expect( testme.getdefaultCacheControl() ).tobe( "no-store, no-cache, must-revalidate" ); 73 | } ); 74 | it( "Have the defaultStorageClass set to STANDARD", function(){ 75 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 76 | expect( testme.getdefaultStorageClass() ).tobe( "STANDARD" ); 77 | } ); 78 | it( "Have the defaultACL set to public-read", function(){ 79 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 80 | expect( testme.getdefaultACL() ).tobe( "public-read" ); 81 | } ); 82 | it( "Have the throwOnRequestError set to true", function(){ 83 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 84 | expect( testme.getthrowOnRequestError() ).tobeTrue(); 85 | } ); 86 | it( "Have the retriesOnError set to 3", function(){ 87 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 88 | expect( testme.getretriesOnError() ).tobe( 3 ); 89 | } ); 90 | it( "Have the autoContentType set to false", function(){ 91 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 92 | expect( testme.getautoContentType() ).tobeFalse(); 93 | } ); 94 | it( "Have the autoMD5 set to an empty string since the signature type defaults to V4", function(){ 95 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 96 | expect( testme.getautoMD5().len() ).tobe( 0 ); 97 | } ); 98 | it( "Have the serviceName set to s3", function(){ 99 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 100 | expect( testme.getserviceName() ).tobe( "s3" ); 101 | } ); 102 | it( "Have the defaultEncryptionAlgorithm set to an empty string", function(){ 103 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 104 | expect( testme.getdefaultEncryptionAlgorithm().len() ).toBe( 0 ); 105 | } ); 106 | it( "Have the defaultEncryptionKey set to an empty string", function(){ 107 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 108 | expect( testme.getdefaultEncryptionKey().len() ).tobe( 0 ); 109 | } ); 110 | it( "Have the multiPartByteThreshold set to 5242880", function(){ 111 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 112 | expect( testme.getmultiPartByteThreshold() ).tobe( 5242880 ); 113 | } ); 114 | it( "Have the defaultObjectOwnership set to ObjectWriter", function(){ 115 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 116 | expect( testme.getdefaultObjectOwnership() ).tobe( "ObjectWriter" ); 117 | } ); 118 | it( "Have the defaultBlockPublicAcls set to False", function(){ 119 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 120 | expect( testme.getdefaultBlockPublicAcls() ).tobeFalse(); 121 | } ); 122 | it( "Have the defaultIgnorePublicAcls set to False", function(){ 123 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 124 |
expect( testme.getdefaultIgnorePublicAcls() ).tobeFalse(); 125 | } ); 126 | it( "Have the defaultBlockPublicPolicy set to False", function(){ 127 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 128 | expect( testme.getdefaultBlockPublicPolicy() ).tobeFalse(); 129 | } ); 130 | it( "Have the defaultRestrictPublicBuckets set to False", function(){ 131 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 132 | expect( testme.getdefaultRestrictPublicBuckets() ).tobeFalse(); 133 | } ); 134 | it( "Have the urlStyle set to path", function(){ 135 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 136 | expect( testme.geturlStyle() ).tobe( "path" ); 137 | } ); 138 | it( "Have the ssl set to true", function(){ 139 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 140 | expect( testme.getSSL() ).tobeTrue(); 141 | } ); 142 | it( "Have the mimeTypes set", function(){ 143 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 144 | expect( testme.getMimeTypes() ).tobeTypeOf( "struct" ); 145 | 146 | var mimeTypes = returnMimeTypes(); 147 | var targetMimeTypes = testme.getMimeTypes(); 148 | expect( mimeTypes.keyArray().len() ).tobe( targetMimeTypes.keyArray().len() ); 149 | mimeTypes.each( function( item ){ 150 | expect( targetMimeTypes ).toHaveKey( item ); 151 | expect( targetMimeTypes[ item ] ).tobe( 152 | mimeTypes[ item ], 153 | "#mimeTypes[ item ]# was the wrong value in " 154 | ); 155 | } ); 156 | targetMimeTypes.each( function( item ){ 157 | expect( mimeTypes ).toHaveKey( item ); 158 | expect( mimeTypes[ item ] ).tobe( 159 | targetMimeTypes[ item ], 160 | "#targetMimeTypes[ item ]# changed - update test reference " 161 | ); 162 | } ); 163 | } ); 164 | 165 | 166 | 167 | it( "If the signature type is V2, Have the autoMD5 set to auto", function(){ 168 | testme = testObj.init( 169 | accessKey = accessKey, 170 | secretKey = secretKey, 171 | signatureType = "V2" 172 | ); 173 | expect( testme.getautoMD5() ).tobe( "auto" ); 174 | } ); 175 | it( "If arguments.autoMD5 is true , Have the autoMD5 set to auto", function(){ 176 | testme = testObj.init( 177 | accessKey = accessKey, 178 | secretKey = secretKey, 179 | autoMD5 = true 180 | ); 181 | expect( testme.getautoMD5() ).tobe( "auto" ); 182 | } ); 183 | it( "Should call createSignatureUtil 1x passing in the submitted signatureType", function(){ 184 | testme = testObj.init( accessKey = accessKey, secretKey = secretKey ); 185 | expect( testObj.$count( "createSignatureUtil" ) ).tobe( 1 ); 186 | expect( testObj._mockCallLoggers ).toHaveKey( "createSignatureUtil" ); 187 | expect( testObj._mockCallLoggers.createSignatureUtil.len() ).tobe( 1 ); 188 | // expect(testObj._mockCallLoggers.createSignatureUtil[1].len()).tobe(1); 189 | expect( testObj._mockCallLoggers.createSignatureUtil[ 1 ][ 1 ] ).tobe( "v4" ); 190 | } ); 191 | } ); 192 | } 193 | 194 | function returnMimeTypes(){ 195 | return { 196 | htm : "text/html", 197 | html : "text/html", 198 | js : "application/x-javascript", 199 | txt : "text/plain", 200 | xml : "text/xml", 201 | rss : "application/rss+xml", 202 | css : "text/css", 203 | gz : "application/x-gzip", 204 | gif : "image/gif", 205 | jpe : "image/jpeg", 206 | jpeg : "image/jpeg", 207 | jpg : "image/jpeg", 208 | png : "image/png", 209 | swf : "application/x-shockwave-flash", 210 | ico : "image/x-icon", 211 | flv : "video/x-flv", 212 | doc : "application/msword", 213 | xls : "application/vnd.ms-excel", 214 | pdf : "application/pdf", 215 | htc : 
"text/x-component", 216 | svg : "image/svg+xml", 217 | eot : "application/vnd.ms-fontobject", 218 | ttf : "font/ttf", 219 | otf : "font/opentype", 220 | woff : "application/font-woff", 221 | woff2 : "font/woff2" 222 | }; 223 | } 224 | 225 | } 226 | 227 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/requireBucketName.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 17 | describe( "The setAwsRegion function should...", function(){ 18 | beforeEach( function(){ 19 | bucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ]; 20 | testObj = testObj = new s3sdk.models.AmazonS3( 21 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 22 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 23 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | ssl = true, 26 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 28 | ); 29 | makePublic( 30 | testObj, 31 | "requireBucketName", 32 | "requireBucketNamePublic" 33 | ); 34 | } ); 35 | it( "If a bucketname is submitted, do nothing", function(){ 36 | testme = testObj.requireBucketNamePublic( bucketName ); 37 | expect( isNull( testme ) ).tobeTrue(); 38 | } ); 39 | it( "If a bucketname is blank, throw an application error", function(){ 40 | expect( function(){ 41 | testObj.requireBucketNamePublic( "" ); 42 | } ).toThrow( "application" ); 43 | } ); 44 | it( "If a bucketname is null, throw an application error", function(){ 45 | expect( function(){ 46 | testObj.requireBucketNamePublic(); 47 | } ).toThrow( "application" ); 48 | } ); 49 | } ); 50 | } 51 | 52 | } 53 | 54 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/setAuth.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 
17 | describe( "The setAuth function should...", function(){ 18 | beforeEach( function(){ 19 | accessKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | secretKey = mockData( $num = 1, $type = "words:1" )[ 1 ]; 21 | 22 | testObj = new s3sdk.models.AmazonS3( 23 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 27 | ssl = true, 28 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 29 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 30 | ); 31 | } ); 32 | it( "Set the accessKey submitted", function(){ 33 | testme = testObj.setAuth( accessKey, secretKey ); 34 | expect( testme.getAccessKey() ).tobe( accessKey ); 35 | } ); 36 | it( "Set the secretKey submitted", function(){ 37 | testme = testObj.setAuth( accessKey, secretKey ); 38 | expect( testme.getSecretKey() ).tobe( secretKey ); 39 | } ); 40 | } ); 41 | } 42 | 43 | } 44 | 45 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/setAwsDomain.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 
17 | describe( "The setAWSDomain function should...", function(){ 18 | beforeEach( function(){ 19 | domain = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | 21 | testObj = new s3sdk.models.AmazonS3( 22 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 23 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | ssl = true, 27 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 28 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 29 | ); 30 | prepareMock( testObj ); 31 | testObj.$( method = "buildUrlEndpoint", returns = testObj ); 32 | } ); 33 | it( "Set the domain submitted", function(){ 34 | testme = testObj.setAwsDomain( domain ); 35 | expect( testme.getAwsDomain() ).tobe( domain ); 36 | } ); 37 | it( "call buildUrlEndpoint 1x", function(){ 38 | testme = testObj.setAwsDomain( domain ); 39 | expect( testObj.$count( "buildUrlEndpoint" ) ).tobe( 1 ); 40 | } ); 41 | } ); 42 | } 43 | 44 | } 45 | 46 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/setAwsRegion.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 
17 | describe( "The setAwsRegion function should...", function(){ 18 | beforeEach( function(){ 19 | region = mockData( $num = 1, $type = "words:1" )[ 1 ]; 20 | 21 | testObj = new s3sdk.models.AmazonS3( 22 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 23 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 25 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | ssl = true, 27 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 28 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 29 | ); 30 | prepareMock( testObj ); 31 | testObj.$( method = "buildUrlEndpoint", returns = testObj ); 32 | } ); 33 | it( "Set the region submitted", function(){ 34 | testme = testObj.setAwsRegion( region ); 35 | expect( testme.getAwsREgion() ).tobe( region ); 36 | } ); 37 | it( "call buildUrlEndpoint 1x", function(){ 38 | testme = testObj.setAwsRegion( region ); 39 | expect( testObj.$count( "buildUrlEndpoint" ) ).tobe( 1 ); 40 | } ); 41 | } ); 42 | } 43 | 44 | } 45 | 46 | -------------------------------------------------------------------------------- /test-harness/tests/specs/models/AmazonS3/setSSL.cfc: -------------------------------------------------------------------------------- 1 | /** 2 | * My BDD Test 3 | */ 4 | component extends="coldbox.system.testing.BaseTestCase" { 5 | 6 | /*********************************** LIFE CYCLE Methods ***********************************/ 7 | this.unloadColdbox = false; 8 | // executes before all suites+specs in the run() method 9 | function beforeAll(){ 10 | super.beforeAll(); 11 | } 12 | 13 | /*********************************** BDD SUITES ***********************************/ 14 | 15 | function run( testResults, testBox ){ 16 | // all your suites go here. 17 | describe( "The setAwsRegion function should...", function(){ 18 | beforeEach( function(){ 19 | testObj = new s3sdk.models.AmazonS3( 20 | accessKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 21 | secretKey = mockdata( $num = 1, $type = "words:1" )[ 1 ], 22 | awsRegion = mockdata( $num = 1, $type = "words:1" )[ 1 ], 23 | awsDomain = mockdata( $num = 1, $type = "words:1" )[ 1 ], 24 | ssl = true, 25 | defaultBucketName = mockdata( $num = 1, $type = "words:1" )[ 1 ], 26 | defaultObjectOwnership = mockdata( $num = 1, $type = "words:1" )[ 1 ] 27 | ); 28 | prepareMock( testObj ); 29 | testObj.$( method = "buildUrlEndpoint", returns = testObj ); 30 | } ); 31 | it( "Set the ssl submitted", function(){ 32 | testme = testObj.setSSL( true ); 33 | expect( testme.getSSL() ).tobe( true ); 34 | } ); 35 | it( "call buildUrlEndpoint 1x", function(){ 36 | testme = testObj.setSSL( true ); 37 | expect( testObj.$count( "buildUrlEndpoint" ) ).tobe( 1 ); 38 | } ); 39 | } ); 40 | } 41 | 42 | } 43 | 44 | -------------------------------------------------------------------------------- /test-harness/tests/tmp/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coldbox-modules/s3sdk/bfbf1a1fded5e55121dbc571540cbe4bdf13b16e/test-harness/tests/tmp/.gitkeep --------------------------------------------------------------------------------