├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── f5_cla.yml │ ├── lint-pr-title.yml │ ├── ossf_scorecard.yml │ └── s3-gateway.yml ├── .gitignore ├── .tool-versions ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile.buildkit.plus ├── Dockerfile.latest-njs ├── Dockerfile.oss ├── Dockerfile.plus ├── Dockerfile.unprivileged ├── GNUmakefile ├── LICENSE ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── common ├── docker-entrypoint.d │ ├── 00-check-for-required-env.sh │ └── 22-enable_js_fetch_trusted_certificate.sh ├── docker-entrypoint.sh └── etc │ └── nginx │ ├── include │ ├── awscredentials.js │ ├── awssig2.js │ ├── awssig4.js │ ├── listing.xsl │ ├── s3gateway.js │ └── utils.js │ ├── nginx.conf │ └── templates │ ├── cache.conf.template │ ├── default.conf.template │ └── gateway │ ├── cors.conf.template │ ├── js_fetch_trusted_certificate.conf.template │ ├── s3_location.conf.template │ ├── s3_location_common.conf.template │ ├── s3_server.conf.template │ ├── s3listing_location.conf.template │ ├── v2_headers.conf.template │ ├── v2_js_vars.conf.template │ ├── v4_headers.conf.template │ └── v4_js_vars.conf.template ├── deployments ├── ecs │ └── cloudformation │ │ └── s3gateway.yaml └── s3_express │ ├── .terraform.lock.hcl │ ├── .tool-versions │ ├── README.md │ ├── main.tf │ ├── settings.s3express.example │ ├── test_data │ └── test.txt │ ├── variables.tf │ └── versions.tf ├── docs ├── development.md ├── getting_started.md └── img │ ├── nginx-s3-gateway-directory-listing-path-prefix.png │ └── nginx-s3-gateway-signature-flow.png ├── examples ├── brotli-compression │ ├── Dockerfile.oss │ ├── Dockerfile.plus │ └── etc │ │ └── nginx │ │ └── conf.d │ │ └── brotli_compression.conf ├── gzip-compression │ ├── Dockerfile.oss │ ├── Dockerfile.plus │ └── etc │ │ └── nginx │ │ └── conf.d │ │ └── gzip_compression.conf └── modsecurity │ ├── Dockerfile.oss │ 
├── Dockerfile.plus │ ├── etc │ └── nginx │ │ ├── conf.d │ │ └── gateway │ │ │ └── modsecurity.conf │ │ └── modsec │ │ ├── main.conf │ │ ├── modsecurity.conf │ │ └── unicode.mapping │ └── usr │ └── local │ └── nginx │ └── conf │ └── owasp-modsecurity-crs │ └── crs-setup.conf ├── jsdoc └── conf.json ├── oss └── etc │ └── nginx │ ├── conf.d │ └── gateway │ │ └── server_variables.conf │ └── templates │ └── upstreams.conf.template ├── package-lock.json ├── package.json ├── plus ├── docker-entrypoint.d │ ├── 10-listen-on-ipv6-by-default.sh │ └── 20-envsubst-on-templates.sh └── etc │ ├── nginx │ ├── conf.d │ │ ├── gateway │ │ │ └── server_variables.conf │ │ ├── instance_credential_cache.conf │ │ └── v4_signing_key_cache.conf │ └── templates │ │ └── upstreams.conf.template │ └── ssl │ └── nginx │ └── .gitignore ├── settings.example ├── standalone_ubuntu_oss_install.sh ├── test.sh └── test ├── data ├── .gitignore └── bucket-1 │ ├── a.txt │ ├── a │ ├── abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.txt │ ├── c │ │ └── あ │ ├── plus+plus.txt │ └── これは This is ASCII системы חן .txt │ ├── b │ ├── c │ │ ├── '(1).txt │ │ ├── = │ │ ├── @ │ │ └── d.txt │ ├── e.txt │ ├── クズ箱 │ │ └── ゴミ.txt │ └── ブツブツ.txt │ ├── index.html │ ├── statichost │ ├── index.html │ └── noindexdir │ │ ├── multipledir │ │ └── index.html │ │ └── noindex.html │ ├── test │ └── index.html │ └── системы │ ├── %bad%file%name% │ └── system.txt ├── docker-compose.yaml ├── integration └── test_api.sh └── unit ├── awscredentials_test.js ├── awssig2_test.js ├── awssig4_test.js ├── s3gateway_test.js └── utils_test.js /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | ##################### 2 | # Main global owner # 3 | ##################### 4 | 5 | * @nginx/s3-gateway 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: 🐛 Bug report 3 | description: Create a report to help us improve 4 | labels: 5 | - bug 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to fill out this bug report! 11 | 12 | Before you continue filling out this report, please take a moment to check that your bug has not been [already reported on GitHub][issue search] 🙌 13 | 14 | Remember to redact any sensitive information such as authentication credentials and/or license keys! 15 | 16 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum]. 17 | 18 | [issue search]: https://github.com/nginx/nginx-s3-gateway/issues 19 | [discussions]: https://github.com/nginx/nginx-s3-gateway/discussions 20 | [forum]: https://community.nginx.org 21 | 22 | - type: textarea 23 | id: overview 24 | attributes: 25 | label: Bug Overview 26 | description: A clear and concise overview of the bug. 27 | placeholder: When I do "X" with the NGINX S3 gateway, "Y" happens instead of "Z". 28 | validations: 29 | required: true 30 | 31 | - type: textarea 32 | id: behavior 33 | attributes: 34 | label: Expected Behavior 35 | description: A clear and concise description of what you expected to happen. 36 | placeholder: When I do "X" with the NGINX S3 gateway, I expect "Z" to happen. 37 | validations: 38 | required: true 39 | 40 | - type: textarea 41 | id: steps 42 | attributes: 43 | label: Steps to Reproduce the Bug 44 | description: Detail the series of steps required to reproduce the bug. 45 | placeholder: When I use the NGINX S3 gateway using "X", the NGINX S3 gateway fails with "Y" error message. If I check the terminal outputs and/or logs, I see the following info. 
46 | validations: 47 | required: true 48 | 49 | - type: textarea 50 | id: environment 51 | attributes: 52 | label: Environment Details 53 | description: Please provide details about your environment. 54 | value: | 55 | - Version of the S3 container used: [DockerHub/GCR] 56 | - Commit/Tag (if building the NGINX S3 gateway from source): 57 | - Version of NGINX Open Source or NGINX Plus: [OSS/Plus] 58 | - Version of NGINX JavaScript: [0.8.8/0.8.7/etc...] 59 | - Target deployment platforms [e.g. AWS/GCP/local cluster/etc...]: 60 | - S3 backend implementation: [e.g. AWS, Ceph, NetApp StorageGrid, etc...] 61 | - Authentication method: [e.g. IAM, IAM with Fargate, IAM with K8S, AWS Credentials, etc...] 62 | validations: 63 | required: true 64 | 65 | - type: textarea 66 | id: context 67 | attributes: 68 | label: Additional Context 69 | description: Add any other context about the problem here. 70 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this issue in here. 71 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: false 3 | contact_links: 4 | - name: 💬 Talk to the NGINX community! 
5 | url: https://community.nginx.org 6 | about: A community forum for NGINX users, developers, and contributors 7 | - name: 📝 Code of Conduct 8 | url: https://www.contributor-covenant.org/version/2/1/code_of_conduct 9 | about: NGINX follows the Contributor Covenant Code of Conduct to ensure a safe and inclusive community 10 | - name: 💼 For commercial & enterprise users 11 | url: https://www.f5.com/products/nginx 12 | about: F5 offers a wide range of NGINX products for commercial & enterprise users 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ✨ Feature request 3 | description: Suggest an idea for this project 4 | labels: 5 | - enhancement 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to fill out this feature request! 11 | 12 | Before you continue filling out this request, please take a moment to check that your feature has not been [already requested on GitHub][issue search] 🙌 13 | 14 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum]. 15 | 16 | [issue search]: https://github.com/nginx/nginx-s3-gateway/issues 17 | [discussions]: https://github.com/nginx/nginx-s3-gateway/discussions 18 | [forum]: https://community.nginx.org 19 | 20 | - type: textarea 21 | id: overview 22 | attributes: 23 | label: Feature Overview 24 | description: A clear and concise description of what the feature request is. 25 | placeholder: I would like the NGINX S3 gateway to be able to do "X". 26 | validations: 27 | required: true 28 | 29 | - type: textarea 30 | id: alternatives 31 | attributes: 32 | label: Alternatives Considered 33 | description: Detail any potential alternative solutions/workarounds you've used or considered. 
34 | placeholder: I have done/might be able to do "X" with the NGINX S3 gateway by doing "Y". 35 | 36 | - type: textarea 37 | id: context 38 | attributes: 39 | label: Additional Context 40 | description: Add any other context about the problem here. 41 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this feature request here. 42 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: docker 5 | directory: / 6 | schedule: 7 | interval: weekly 8 | day: monday 9 | time: "00:00" 10 | 11 | - package-ecosystem: github-actions 12 | directory: / 13 | schedule: 14 | interval: weekly 15 | day: monday 16 | time: "00:00" 17 | groups: 18 | docker-github-actions: 19 | patterns: 20 | - docker/* 21 | official-github-actions: 22 | patterns: 23 | - actions/* 24 | 25 | - package-ecosystem: npm 26 | directory: / 27 | schedule: 28 | interval: weekly 29 | day: monday 30 | time: "00:00" 31 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Proposed changes 2 | 3 | Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) in this PR description (not in the title of the PR). 4 | 5 | ### Checklist 6 | 7 | Before creating a pull request (PR), run through this checklist and mark each as complete: 8 | 9 | - [ ] I have read the [contributing guidelines](/CONTRIBUTING.md). 
10 | - [ ] I have signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md). 11 | - [ ] The PR title follows the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/). 12 | - [ ] If applicable, I have added tests that prove my fix is effective or that my feature works. 13 | - [ ] If applicable, I have checked that any relevant tests pass after adding my changes. 14 | - [ ] I have updated any relevant documentation (e.g. [`README.md`](/README.md)). 15 | -------------------------------------------------------------------------------- /.github/workflows/f5_cla.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: F5 CLA 3 | on: 4 | issue_comment: 5 | types: [created] 6 | pull_request_target: 7 | types: [opened, closed, synchronize] 8 | permissions: read-all 9 | jobs: 10 | f5-cla: 11 | name: F5 CLA 12 | runs-on: ubuntu-24.04 13 | permissions: 14 | actions: write 15 | pull-requests: write 16 | statuses: write 17 | steps: 18 | - name: Run F5 Contributor License Agreement (CLA) assistant 19 | if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have hereby read the F5 CLA and agree to its terms') || github.event_name == 'pull_request_target' 20 | uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1 21 | with: 22 | # Path to the CLA document. 23 | path-to-document: https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md 24 | # Custom CLA messages. 25 | custom-notsigned-prcomment: '🎉 Thank you for your contribution! It appears you have not yet signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md), which is required for your changes to be incorporated into an F5 Open Source Software (OSS) project. 
Please kindly read the [F5 CLA](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md) and reply on a new comment with the following text to agree:' 26 | custom-pr-sign-comment: 'I have hereby read the F5 CLA and agree to its terms' 27 | custom-allsigned-prcomment: '✅ All required contributors have signed the F5 CLA for this PR. Thank you!' 28 | # Remote repository storing CLA signatures. 29 | remote-organization-name: f5 30 | remote-repository-name: f5-cla-data 31 | # Branch where CLA signatures are stored. 32 | branch: main 33 | path-to-signatures: signatures/signatures.json 34 | # Comma separated list of usernames for maintainers or any other individuals who should not be prompted for a CLA. 35 | # NOTE: You will want to edit the usernames to suit your project needs. 36 | allowlist: bot* 37 | # Do not lock PRs after a merge. 38 | lock-pullrequest-aftermerge: false 39 | env: 40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 41 | PERSONAL_ACCESS_TOKEN: ${{ secrets.F5_CLA_TOKEN }} 42 | -------------------------------------------------------------------------------- /.github/workflows/lint-pr-title.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lint PR Title 3 | on: 4 | pull_request_target: 5 | types: [opened, edited, synchronize] 6 | permissions: read-all 7 | jobs: 8 | main: 9 | name: Validate PR title 10 | runs-on: ubuntu-24.04 11 | permissions: 12 | pull-requests: write 13 | steps: 14 | - name: Check PR title 15 | id: lint_pr_title 16 | uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3 17 | env: 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | 20 | - name: Comment on the PR if the title doesn't follow the Conventional Commits specification 21 | if: always() && (steps.lint_pr_title.outputs.error_message != null) 22 | uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2 23 | with: 24 | header: pr-title-lint-error 25 | message: 
| 26 | Thank you for opening this pull request! 👋🏼 27 | 28 | In order to streamline our release process and maintain a consistent commit history, we require pull request (PR) titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/). 29 | 30 | Based on our linter, it looks like your proposed title needs to be adjusted. Here's the error message we received: 31 | 32 | ``` 33 | ${{ steps.lint_pr_title.outputs.error_message }} 34 | ``` 35 | 36 | - name: Delete the previous comment once the PR title has been updated 37 | if: ${{ steps.lint_pr_title.outputs.error_message == null }} 38 | uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2 39 | with: 40 | header: pr-title-lint-error 41 | delete: true 42 | -------------------------------------------------------------------------------- /.github/workflows/ossf_scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow uses actions that are not certified by GitHub. They are provided by a third-party and are governed by separate terms of service, privacy policy, and support documentation. 3 | name: OSSF Scorecard 4 | on: 5 | # For Branch-Protection check. Only the default branch is supported. See https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection. 6 | branch_protection_rule: 7 | # To guarantee Maintained check is occasionally updated. See https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained. 8 | schedule: 9 | - cron: "0 0 * * 1" 10 | push: 11 | branches: [main] 12 | workflow_dispatch: 13 | # Declare default permissions as read only. 14 | permissions: read-all 15 | jobs: 16 | analysis: 17 | name: Scorecard analysis 18 | runs-on: ubuntu-24.04 19 | permissions: 20 | # Needed if using Code Scanning alerts. 21 | security-events: write 22 | # Needed for GitHub OIDC token if publish_results is true. 
23 | id-token: write 24 | steps: 25 | - name: Check out the codebase 26 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 27 | with: 28 | persist-credentials: false 29 | 30 | - name: Run analysis 31 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 32 | with: 33 | results_file: results.sarif 34 | results_format: sarif 35 | # Publish the results for public repositories to enable scorecard badges. For more details, see https://github.com/ossf/scorecard-action#publishing-results. 36 | publish_results: true 37 | 38 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF format to the repository Actions tab. 39 | - name: Upload artifact 40 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 41 | with: 42 | name: SARIF file 43 | path: results.sarif 44 | retention-days: 5 45 | 46 | # Upload the results to GitHub's code scanning dashboard. 47 | - name: Upload SARIF results to code scanning 48 | uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 49 | with: 50 | sarif_file: results.sarif 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/intellij+all,node,vim,emacs,macos 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=intellij+all,node,vim,emacs,macos 3 | 4 | ### Emacs ### 5 | # -*- mode: gitignore; -*- 6 | *~ 7 | \#*\# 8 | /.emacs.desktop 9 | /.emacs.desktop.lock 10 | *.elc 11 | auto-save-list 12 | tramp 13 | .\#* 14 | 15 | # Org-mode 16 | .org-id-locations 17 | *_archive 18 | 19 | # flymake-mode 20 | *_flymake.* 21 | 22 | # eshell files 23 | /eshell/history 24 | /eshell/lastdir 25 | 26 | # elpa packages 27 | /elpa/ 28 | 29 | # reftex files 30 | *.rel 31 | 32 | # AUCTeX auto folder 33 | 
/auto/ 34 | 35 | # cask packages 36 | .cask/ 37 | dist/ 38 | 39 | # Flycheck 40 | flycheck_*.el 41 | 42 | # server auth directory 43 | /server/ 44 | 45 | # projectiles files 46 | .projectile 47 | 48 | # directory configuration 49 | .dir-locals.el 50 | 51 | # network security 52 | /network-security.data 53 | 54 | 55 | ### Intellij+all ### 56 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 57 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 58 | 59 | # User-specific stuff 60 | .idea/**/workspace.xml 61 | .idea/**/tasks.xml 62 | .idea/**/usage.statistics.xml 63 | .idea/**/dictionaries 64 | .idea/**/shelf 65 | 66 | # AWS User-specific 67 | .idea/**/aws.xml 68 | 69 | # Generated files 70 | .idea/**/contentModel.xml 71 | 72 | # Sensitive or high-churn files 73 | .idea/**/dataSources/ 74 | .idea/**/dataSources.ids 75 | .idea/**/dataSources.local.xml 76 | .idea/**/sqlDataSources.xml 77 | .idea/**/dynamic.xml 78 | .idea/**/uiDesigner.xml 79 | .idea/**/dbnavigator.xml 80 | 81 | # Gradle 82 | .idea/**/gradle.xml 83 | .idea/**/libraries 84 | 85 | # Gradle and Maven with auto-import 86 | # When using Gradle or Maven with auto-import, you should exclude module files, 87 | # since they will be recreated, and may cause churn. Uncomment if using 88 | # auto-import. 
89 | # .idea/artifacts 90 | # .idea/compiler.xml 91 | # .idea/jarRepositories.xml 92 | # .idea/modules.xml 93 | # .idea/*.iml 94 | # .idea/modules 95 | # *.iml 96 | # *.ipr 97 | 98 | # CMake 99 | cmake-build-*/ 100 | 101 | # Mongo Explorer plugin 102 | .idea/**/mongoSettings.xml 103 | 104 | # File-based project format 105 | *.iws 106 | 107 | # IntelliJ 108 | out/ 109 | 110 | # jsdoc build directory 111 | reference/ 112 | 113 | # minio storage 114 | data/ 115 | 116 | # mpeltonen/sbt-idea plugin 117 | .idea_modules/ 118 | 119 | # JIRA plugin 120 | atlassian-ide-plugin.xml 121 | 122 | # Cursive Clojure plugin 123 | .idea/replstate.xml 124 | 125 | # SonarLint plugin 126 | .idea/sonarlint/ 127 | 128 | # Crashlytics plugin (for Android Studio and IntelliJ) 129 | com_crashlytics_export_strings.xml 130 | crashlytics.properties 131 | crashlytics-build.properties 132 | fabric.properties 133 | 134 | # Editor-based Rest Client 135 | .idea/httpRequests 136 | 137 | # Android studio 3.1+ serialized cache file 138 | .idea/caches/build_file_checksums.ser 139 | 140 | ### Intellij+all Patch ### 141 | # Ignore everything but code style settings and run configurations 142 | # that are supposed to be shared within teams. 
143 | 144 | .idea/* 145 | 146 | !.idea/codeStyles 147 | !.idea/runConfigurations 148 | 149 | ### macOS ### 150 | # General 151 | .DS_Store 152 | .AppleDouble 153 | .LSOverride 154 | 155 | # Icon must end with two \r 156 | Icon 157 | 158 | 159 | # Thumbnails 160 | ._* 161 | 162 | # Files that might appear in the root of a volume 163 | .DocumentRevisions-V100 164 | .fseventsd 165 | .Spotlight-V100 166 | .TemporaryItems 167 | .Trashes 168 | .VolumeIcon.icns 169 | .com.apple.timemachine.donotpresent 170 | 171 | # Directories potentially created on remote AFP share 172 | .AppleDB 173 | .AppleDesktop 174 | Network Trash Folder 175 | Temporary Items 176 | .apdisk 177 | 178 | ### macOS Patch ### 179 | # iCloud generated files 180 | *.icloud 181 | 182 | ### Node ### 183 | # Logs 184 | logs 185 | *.log 186 | npm-debug.log* 187 | yarn-debug.log* 188 | yarn-error.log* 189 | lerna-debug.log* 190 | .pnpm-debug.log* 191 | 192 | # Diagnostic reports (https://nodejs.org/api/report.html) 193 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 194 | 195 | # Runtime data 196 | pids 197 | *.pid 198 | *.seed 199 | *.pid.lock 200 | 201 | # Directory for instrumented libs generated by jscoverage/JSCover 202 | lib-cov 203 | 204 | # Coverage directory used by tools like istanbul 205 | coverage 206 | *.lcov 207 | 208 | # nyc test coverage 209 | .nyc_output 210 | 211 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 212 | .grunt 213 | 214 | # Bower dependency directory (https://bower.io/) 215 | bower_components 216 | 217 | # node-waf configuration 218 | .lock-wscript 219 | 220 | # Compiled binary addons (https://nodejs.org/api/addons.html) 221 | build/Release 222 | 223 | # Dependency directories 224 | node_modules/ 225 | jspm_packages/ 226 | 227 | # Snowpack dependency directory (https://snowpack.dev/) 228 | web_modules/ 229 | 230 | # TypeScript cache 231 | *.tsbuildinfo 232 | 233 | # Optional npm cache directory 234 | .npm 235 | 236 | # Optional eslint cache 237 
| .eslintcache 238 | 239 | # Optional stylelint cache 240 | .stylelintcache 241 | 242 | # Microbundle cache 243 | .rpt2_cache/ 244 | .rts2_cache_cjs/ 245 | .rts2_cache_es/ 246 | .rts2_cache_umd/ 247 | 248 | # Optional REPL history 249 | .node_repl_history 250 | 251 | # Output of 'npm pack' 252 | *.tgz 253 | 254 | # Yarn Integrity file 255 | .yarn-integrity 256 | 257 | # dotenv environment variable files 258 | .env 259 | .env.development.local 260 | .env.test.local 261 | .env.production.local 262 | .env.local 263 | 264 | # parcel-bundler cache (https://parceljs.org/) 265 | .cache 266 | .parcel-cache 267 | 268 | # Next.js build output 269 | .next 270 | out 271 | 272 | # Nuxt.js build / generate output 273 | .nuxt 274 | dist 275 | 276 | # Gatsby files 277 | .cache/ 278 | # Comment in the public line in if your project uses Gatsby and not Next.js 279 | # https://nextjs.org/blog/next-9-1#public-directory-support 280 | # public 281 | 282 | # vuepress build output 283 | .vuepress/dist 284 | 285 | # vuepress v2.x temp and cache directory 286 | .temp 287 | 288 | # Docusaurus cache and generated files 289 | .docusaurus 290 | 291 | # Serverless directories 292 | .serverless/ 293 | 294 | # FuseBox cache 295 | .fusebox/ 296 | 297 | # DynamoDB Local files 298 | .dynamodb/ 299 | 300 | # TernJS port file 301 | .tern-port 302 | 303 | # Stores VSCode versions used for testing VSCode extensions 304 | .vscode-test 305 | 306 | # yarn v2 307 | .yarn/cache 308 | .yarn/unplugged 309 | .yarn/build-state.yml 310 | .yarn/install-state.gz 311 | .pnp.* 312 | 313 | ### Node Patch ### 314 | # Serverless Webpack directories 315 | .webpack/ 316 | 317 | # Optional stylelint cache 318 | 319 | # SvelteKit build / generate output 320 | .svelte-kit 321 | 322 | ### Vim ### 323 | # Swap 324 | [._]*.s[a-v][a-z] 325 | !*.svg # comment out if you don't need vector files 326 | [._]*.sw[a-p] 327 | [._]s[a-rt-v][a-z] 328 | [._]ss[a-gi-z] 329 | [._]sw[a-p] 330 | 331 | # Session 332 | Session.vim 333 | 
Sessionx.vim 334 | 335 | # Temporary 336 | .netrwhist 337 | # Auto-generated tag files 338 | tags 339 | # Persistent undo 340 | [._]*.un~ 341 | 342 | # End of https://www.toptal.com/developers/gitignore/api/intellij+all,node,vim,emacs,macos 343 | 344 | # Test data files 345 | test-settings.* 346 | s3-requests.http 347 | httpRequests/ 348 | 349 | .bin/ 350 | 351 | # Created by https://www.toptal.com/developers/gitignore/api/terraform 352 | # Edit at https://www.toptal.com/developers/gitignore?templates=terraform 353 | 354 | ### Terraform ### 355 | # Local .terraform directories 356 | **/.terraform/* 357 | 358 | # .tfstate files 359 | *.tfstate 360 | *.tfstate.* 361 | 362 | # Crash log files 363 | crash.log 364 | crash.*.log 365 | 366 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 367 | # password, private keys, and other secrets. These should not be part of version 368 | # control as they are data points which are potentially sensitive and subject 369 | # to change depending on the environment. 
370 | *.tfvars 371 | *.tfvars.json 372 | 373 | # Ignore override files as they are usually used to override resources locally and so 374 | # are not checked in 375 | override.tf 376 | override.tf.json 377 | *_override.tf 378 | *_override.tf.json 379 | 380 | # Include override files you do wish to add to version control using negated pattern 381 | # !example_override.tf 382 | 383 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 384 | # example: *tfplan* 385 | 386 | # Ignore CLI configuration files 387 | .terraformrc 388 | terraform.rc 389 | .tfplan 390 | # End of https://www.toptal.com/developers/gitignore/api/terraform 391 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | nodejs 20.8.0 2 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. 6 | 7 | We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 8 | 9 | ## Our Standards 10 | 11 | Examples of behavior that contributes to a positive environment for our community include: 12 | 13 | - Demonstrating empathy and kindness toward other people. 14 | - Being respectful of differing opinions, viewpoints, and experiences. 
15 | - Giving and gracefully accepting constructive feedback. 16 | - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. 17 | - Focusing on what is best not just for us as individuals, but for the overall community. 18 | 19 | Examples of unacceptable behavior include: 20 | 21 | - The use of sexualized language or imagery, and sexual attention or advances of any kind. 22 | - Trolling, insulting or derogatory comments, and personal or political attacks. 23 | - Public or private harassment. 24 | - Publishing others' private information, such as a physical or email address, without their explicit permission. 25 | - Other conduct which could reasonably be considered inappropriate in a professional setting. 26 | 27 | ## Enforcement Responsibilities 28 | 29 | Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 32 | 33 | ## Scope 34 | 35 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 36 | 37 | ## Enforcement 38 | 39 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. 
40 | 41 | All community leaders are obligated to respect the privacy and security of the reporter of any incident. 42 | 43 | ## Enforcement Guidelines 44 | 45 | Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 46 | 47 | ### 1. Correction 48 | 49 | **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. 50 | 51 | **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 52 | 53 | ### 2. Warning 54 | 55 | **Community Impact**: A violation through a single incident or series of actions. 56 | 57 | **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 58 | 59 | ### 3. Temporary Ban 60 | 61 | **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 62 | 63 | **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 64 | 65 | ### 4. Permanent Ban 66 | 67 | **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. 
68 | 69 | **Consequence**: A permanent ban from any sort of public interaction within the community. 70 | 71 | ## Attribution 72 | 73 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at . 74 | 75 | Community Impact Guidelines were inspired by 76 | [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion). 77 | 78 | For answers to common questions about this code of conduct, see the FAQ at . Translations are available at . 79 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | The following is a set of guidelines for contributing to this project. We really appreciate that you are considering contributing! 4 | 5 | #### Table Of Contents 6 | 7 | [Getting Started](#getting-started) 8 | 9 | [Contributing](#contributing) 10 | 11 | [Code Guidelines](#code-guidelines) 12 | 13 | [Code of Conduct](/CODE_OF_CONDUCT.md) 14 | 15 | ## Getting Started 16 | 17 | Refer to the [Getting Started Guide](docs/getting_started.md) for how to build and run the gateway. 18 | 19 | ## Contributing 20 | 21 | ### Report a Bug 22 | 23 | To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](/SECURITY.md).** 24 | 25 | ### Suggest a Feature or Enhancement 26 | 27 | To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request template](/.github/feature_request_template.md). Please ensure the feature or enhancement has not already been suggested. 
28 | 29 | ### Open a Pull Request (PR) 30 | 31 | - Fork the repo, create a branch, implement your changes, add any relevant tests, and submit a PR when your changes are **tested** and ready for review. 32 | - Fill in the [PR template](/.github/pull_request_template.md). 33 | 34 | **Note:** If you'd like to implement a new feature, please consider creating a [feature request issue](/.github/ISSUE_TEMPLATE/feature_request.yml) first to start a discussion about the feature. 35 | 36 | #### F5 Contributor License Agreement (CLA) 37 | 38 | F5 requires all external contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/.github/blob/main/CLA/cla-markdown.md)) before any of their changes can be incorporated into an F5 Open Source repository. 39 | 40 | If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs. 41 | 42 | ## Code Guidelines 43 | 44 | 45 | 46 | ### Git Guidelines 47 | 48 | - Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR. 49 | - If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated. 50 | - Follow the guidelines of writing a good commit message as described here and summarized in the next few points: 51 | - In the subject line, use the present tense ("Add feature" not "Added feature"). 52 | - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to..."). 53 | - Limit the subject line to 72 characters or less. 54 | - Reference issues and pull requests liberally after the subject line.
55 | - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`). 56 | -------------------------------------------------------------------------------- /Dockerfile.buildkit.plus: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm-slim@sha256:b1211f6d19afd012477bd34fdcabb6b663d680e0f4b0537da6e6b0fd057a3ec3 2 | 3 | # Create RELEASE argument 4 | ARG RELEASE=bookworm 5 | 6 | # NJS env vars 7 | ENV NGINX_VERSION=34 8 | ENV NGINX_PKG_RELEASE=1~${RELEASE} 9 | ENV NJS_VERSION=0.9.0 10 | ENV NJS_PKG_RELEASE=1~${RELEASE} 11 | 12 | # Proxy cache env vars 13 | ENV PROXY_CACHE_MAX_SIZE=10g 14 | ENV PROXY_CACHE_INACTIVE=60m 15 | ENV PROXY_CACHE_SLICE_SIZE=1m 16 | ENV PROXY_CACHE_VALID_OK=1h 17 | ENV PROXY_CACHE_VALID_NOTFOUND=1m 18 | ENV PROXY_CACHE_VALID_FORBIDDEN=30s 19 | 20 | # CORS env vars 21 | ENV CORS_ENABLED=0 22 | ENV CORS_ALLOW_PRIVATE_NETWORK_ACCESS="" 23 | 24 | # S3 proxy env vars 25 | ENV DIRECTORY_LISTING_PATH_PREFIX="" 26 | ENV STRIP_LEADING_DIRECTORY_PATH="" 27 | ENV PREFIX_LEADING_DIRECTORY_PATH="" 28 | 29 | # We create an NGINX Plus image based on the official NGINX Plus Dockerfiles (https://gist.github.com/nginx-gists/36e97fc87efb5cf0039978c8e41a34b5) and modify it by: 30 | # 1. Explicitly installing the version of njs coded in the environment variable above. 31 | # 2. Adding configuration files needed for proxying private S3 buckets. 32 | # 3. Adding a directory for proxied objects to be stored. 33 | # 4. Adding the entrypoint scripts found in the base NGINX OSS Docker image with a modified version that explicitly sets resolvers. 
34 | 35 | # Download your NGINX license certificate and key from the F5 customer portal (https://account.f5.com) and copy it to the build context 36 | RUN --mount=type=secret,id=nginx-crt,dst=nginx-repo.crt \ 37 | --mount=type=secret,id=nginx-key,dst=nginx-repo.key \ 38 | set -x \ 39 | # Create nginx user/group first, to be consistent throughout Docker variants 40 | && groupadd --system --gid 101 nginx \ 41 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \ 42 | && apt-get update \ 43 | && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates gnupg1 lsb-release \ 44 | && \ 45 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \ 46 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 47 | export GNUPGHOME="$(mktemp -d)"; \ 48 | found=''; \ 49 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \ 50 | for server in \ 51 | hkp://keyserver.ubuntu.com:80 \ 52 | pgp.mit.edu \ 53 | ; do \ 54 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ 55 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ 56 | done; \ 57 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ 58 | done; \ 59 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \ 60 | rm -rf "$GNUPGHOME"; \ 61 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ 62 | # Install the latest release of NGINX Plus and/or NGINX Plus modules (written and maintained by F5) 63 | && nginxPackages=" \ 64 | nginx-plus=${NGINX_VERSION}-${NGINX_PKG_RELEASE} \ 65 | nginx-plus-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_PKG_RELEASE} \ 66 | nginx-plus-module-xslt=${NGINX_VERSION}-${NGINX_PKG_RELEASE} \ 67 | " \ 68 | && echo "Acquire::https::pkgs.nginx.com::Verify-Peer \"true\";" > 
/etc/apt/apt.conf.d/90nginx \ 69 | && echo "Acquire::https::pkgs.nginx.com::Verify-Host \"true\";" >> /etc/apt/apt.conf.d/90nginx \ 70 | && echo "Acquire::https::pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";" >> /etc/apt/apt.conf.d/90nginx \ 71 | && echo "Acquire::https::pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";" >> /etc/apt/apt.conf.d/90nginx \ 72 | && echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ 73 | && mkdir -p /etc/ssl/nginx \ 74 | && cat nginx-repo.crt > /etc/ssl/nginx/nginx-repo.crt \ 75 | && cat nginx-repo.key > /etc/ssl/nginx/nginx-repo.key \ 76 | && apt-get update \ 77 | && apt-get install --no-install-recommends --no-install-suggests -y $nginxPackages curl gettext-base \ 78 | && apt-get remove --purge -y lsb-release \ 79 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx-plus.list \ 80 | && rm -rf /etc/apt/apt.conf.d/90nginx /etc/ssl/nginx \ 81 | # Forward request logs to Docker log collector 82 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 83 | && ln -sf /dev/stderr /var/log/nginx/error.log 84 | 85 | EXPOSE 80 86 | 87 | STOPSIGNAL SIGTERM 88 | 89 | CMD ["nginx", "-g", "daemon off;"] 90 | 91 | # Copy files from the OSS NGINX Docker container such that the container 92 | # startup is the same. 
93 | COPY plus/etc/nginx /etc/nginx 94 | COPY common/etc /etc 95 | COPY common/docker-entrypoint.sh /docker-entrypoint.sh 96 | COPY common/docker-entrypoint.d /docker-entrypoint.d/ 97 | COPY plus/docker-entrypoint.d /docker-entrypoint.d/ 98 | 99 | RUN set -x \ 100 | && mkdir -p /var/cache/nginx/s3_proxy \ 101 | && chown nginx:nginx /var/cache/nginx/s3_proxy \ 102 | && chmod -R -v +x /docker-entrypoint.sh /docker-entrypoint.d/*.sh; 103 | 104 | ENTRYPOINT ["/docker-entrypoint.sh"] 105 | -------------------------------------------------------------------------------- /Dockerfile.latest-njs: -------------------------------------------------------------------------------- 1 | # This container image removes the existing njs package from the inherited image 2 | # (which could be OSS NGINX or NGINX Plus), builds njs from the latest 3 | # source, and installs it. 4 | FROM nginx-s3-gateway 5 | 6 | RUN set -eux \ 7 | export DEBIAN_FRONTEND=noninteractive; \ 8 | apt-get update -qq; \ 9 | apt-get install --no-install-recommends --no-install-suggests --yes make gcc libc6-dev curl expect libpcre2-dev libpcre3-dev libedit-dev libreadline-dev libssl-dev libpcre2-posix3 libxml2-dev libxslt1-dev zlib1g-dev; \ 10 | mkdir -p /tmp/nginx /tmp/njs-latest; \ 11 | curl --retry 6 --location "https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz" \ 12 | | gunzip | tar --extract --strip-components=1 --directory /tmp/nginx; \ 13 | curl --retry 6 --location 'https://hg.nginx.org/njs/archive/tip.tar.gz' \ 14 | | gunzip | tar --extract --strip-components=1 --directory /tmp/njs-latest; \ 15 | cd /tmp/njs-latest; \ 16 | ./configure; \ 17 | make -j "$(nproc)"; \ 18 | cp build/njs /usr/bin/njs; \ 19 | cd /tmp/nginx; \ 20 | ./configure \ 21 | --add-dynamic-module=/tmp/njs-latest/nginx \ 22 | --without-http_gzip_module \ 23 | --without-http_rewrite_module \ 24 | --prefix=/etc/nginx \ 25 | --sbin-path=/usr/sbin/nginx \ 26 | --modules-path=/usr/lib/nginx/modules \ 27 | --conf-path=/etc/nginx/nginx.conf 
\ 28 | --error-log-path=/var/log/nginx/error.log \ 29 | --http-log-path=/var/log/nginx/access.log \ 30 | --pid-path=/var/run/nginx.pid \ 31 | --lock-path=/var/run/nginx.lock \ 32 | --http-client-body-temp-path=/var/cache/nginx/client_temp \ 33 | --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ 34 | --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ 35 | --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ 36 | --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ 37 | --user=nginx --group=nginx --with-compat --with-file-aio \ 38 | --with-stream \ 39 | --with-mail \ 40 | --with-threads \ 41 | --with-compat \ 42 | --with-cc-opt="-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-${NGINX_VERSION}/debian/debuild-base/nginx-${NGINX_VERSION}=. -fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC"; \ 43 | make -j "$(nproc)"; \ 44 | cp objs/ngx_stream_js_module.so /usr/lib/nginx/modules; \ 45 | cp objs/ngx_http_js_module.so /usr/lib/nginx/modules; \ 46 | apt-get purge --yes --auto-remove make gcc libc6-dev expect libpcre2-dev libpcre3-dev libedit-dev libreadline-dev libssl-dev zlib1g-dev; \ 47 | rm -rf \ 48 | /var/lib/apt/lists/* \ 49 | /tmp/* 50 | -------------------------------------------------------------------------------- /Dockerfile.oss: -------------------------------------------------------------------------------- 1 | FROM nginx:1.27.5@sha256:fb39280b7b9eba5727c884a3c7810002e69e8f961cc373b89c92f14961d903a0 2 | 3 | # NJS env vars 4 | ENV NJS_VERSION=0.9.0 5 | ENV NJS_RELEASE=1~bookworm 6 | 7 | # Proxy cache env vars 8 | ENV PROXY_CACHE_MAX_SIZE=10g 9 | ENV PROXY_CACHE_INACTIVE=60m 10 | ENV PROXY_CACHE_SLICE_SIZE=1m 11 | ENV PROXY_CACHE_VALID_OK=1h 12 | ENV PROXY_CACHE_VALID_NOTFOUND=1m 13 | ENV PROXY_CACHE_VALID_FORBIDDEN=30s 14 | 15 | # CORS env vars 16 | ENV CORS_ENABLED=0 17 | ENV CORS_ALLOW_PRIVATE_NETWORK_ACCESS="" 18 | 19 | # S3 proxy env vars 20 | ENV DIRECTORY_LISTING_PATH_PREFIX="" 21 | ENV 
STRIP_LEADING_DIRECTORY_PATH="" 22 | ENV PREFIX_LEADING_DIRECTORY_PATH="" 23 | 24 | # We modify the NGINX base image by: 25 | # 1. Explicitly installing the version of njs coded in the environment variable above. 26 | # 2. Adding configuration files needed for proxying private S3 buckets. 27 | # 3. Adding a directory for proxied objects to be stored. 28 | # 4. Replacing the entrypoint script with a modified version that explicitly sets resolvers. 29 | 30 | RUN set -x \ 31 | && echo "deb [signed-by=/etc/apt/keyrings/nginx-archive-keyring.gpg] https://nginx.org/packages/mainline/debian/ $(echo $PKG_RELEASE | cut -f2 -d~) nginx" >> /etc/apt/sources.list.d/nginx.list; \ 32 | apt-get update \ 33 | && apt-get install --no-install-recommends --no-install-suggests -y \ 34 | libedit2 \ 35 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \ 36 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list 37 | 38 | COPY oss/etc /etc 39 | COPY common/etc /etc 40 | COPY common/docker-entrypoint.sh /docker-entrypoint.sh 41 | COPY common/docker-entrypoint.d /docker-entrypoint.d/ 42 | 43 | RUN set -x \ 44 | && mkdir -p /var/cache/nginx/s3_proxy \ 45 | && chown nginx:nginx /var/cache/nginx/s3_proxy \ 46 | && chmod -R -v +x /docker-entrypoint.sh /docker-entrypoint.d/*.sh; 47 | -------------------------------------------------------------------------------- /Dockerfile.plus: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm-slim@sha256:b1211f6d19afd012477bd34fdcabb6b663d680e0f4b0537da6e6b0fd057a3ec3 2 | 3 | # Create RELEASE argument 4 | ARG RELEASE=bookworm 5 | 6 | # NJS env vars 7 | ENV NGINX_VERSION=34 8 | ENV NGINX_PKG_RELEASE=1~${RELEASE} 9 | ENV NJS_VERSION=0.9.0 10 | ENV NJS_PKG_RELEASE=1~${RELEASE} 11 | 12 | # Proxy cache env vars 13 | ENV PROXY_CACHE_MAX_SIZE=10g 14 | ENV PROXY_CACHE_INACTIVE=60m 15 | ENV PROXY_CACHE_SLICE_SIZE=1m 16 | ENV 
PROXY_CACHE_VALID_OK=1h 17 | ENV PROXY_CACHE_VALID_NOTFOUND=1m 18 | ENV PROXY_CACHE_VALID_FORBIDDEN=30s 19 | 20 | # CORS env vars 21 | ENV CORS_ENABLED=0 22 | ENV CORS_ALLOW_PRIVATE_NETWORK_ACCESS="" 23 | 24 | # S3 proxy env vars 25 | ENV DIRECTORY_LISTING_PATH_PREFIX="" 26 | ENV STRIP_LEADING_DIRECTORY_PATH="" 27 | ENV PREFIX_LEADING_DIRECTORY_PATH="" 28 | 29 | # We create an NGINX Plus image based on the official NGINX Plus Dockerfiles (https://gist.github.com/nginx-gists/36e97fc87efb5cf0039978c8e41a34b5) and modify it by: 30 | # 1. Explicitly installing the version of njs coded in the environment variable above. 31 | # 2. Adding configuration files needed for proxying private S3 buckets. 32 | # 3. Adding a directory for proxied objects to be stored. 33 | # 4. Adding the entrypoint scripts found in the base NGINX OSS Docker image with a modified version that explicitly sets resolvers. 34 | 35 | # Download your NGINX license certificate and key from the F5 customer portal (https://account.f5.com) and copy it to the build context 36 | COPY plus/etc/ssl /etc/ssl 37 | 38 | RUN set -x \ 39 | # Create nginx user/group first, to be consistent throughout Docker variants 40 | && groupadd --system --gid 101 nginx \ 41 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \ 42 | && apt-get update \ 43 | && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates gnupg1 lsb-release \ 44 | && \ 45 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \ 46 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 47 | export GNUPGHOME="$(mktemp -d)"; \ 48 | found=''; \ 49 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \ 50 | for server in \ 51 | hkp://keyserver.ubuntu.com:80 \ 52 | pgp.mit.edu \ 53 | ; do \ 54 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ 55 | gpg1 --keyserver 
"$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ 56 | done; \ 57 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ 58 | done; \ 59 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \ 60 | rm -rf "$GNUPGHOME"; \ 61 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ 62 | # Install the latest release of NGINX Plus and/or NGINX Plus modules (written and maintained by F5) 63 | && nginxPackages=" \ 64 | nginx-plus=${NGINX_VERSION}-${NGINX_PKG_RELEASE} \ 65 | nginx-plus-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_PKG_RELEASE} \ 66 | nginx-plus-module-xslt=${NGINX_VERSION}-${NGINX_PKG_RELEASE} \ 67 | " \ 68 | && echo "Acquire::https::pkgs.nginx.com::Verify-Peer \"true\";" > /etc/apt/apt.conf.d/90nginx \ 69 | && echo "Acquire::https::pkgs.nginx.com::Verify-Host \"true\";" >> /etc/apt/apt.conf.d/90nginx \ 70 | && echo "Acquire::https::pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";" >> /etc/apt/apt.conf.d/90nginx \ 71 | && echo "Acquire::https::pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";" >> /etc/apt/apt.conf.d/90nginx \ 72 | && echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://pkgs.nginx.com/plus/debian `lsb_release -cs` nginx-plus\n" > /etc/apt/sources.list.d/nginx-plus.list \ 73 | && apt-get update \ 74 | && apt-get install --no-install-recommends --no-install-suggests -y $nginxPackages curl gettext-base \ 75 | && apt-get remove --purge -y lsb-release \ 76 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx-plus.list \ 77 | && rm -rf /etc/apt/apt.conf.d/90nginx /etc/ssl/nginx \ 78 | # Forward request logs to Docker log collector 79 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 80 | && ln -sf /dev/stderr /var/log/nginx/error.log 81 | 82 | EXPOSE 80 83 | 84 | STOPSIGNAL SIGTERM 85 | 86 | CMD ["nginx", "-g", "daemon off;"] 87 | 88 | # Copy files from the OSS NGINX 
Docker container such that the container 89 | # startup is the same. 90 | COPY plus/etc/nginx /etc/nginx 91 | COPY common/etc /etc 92 | COPY common/docker-entrypoint.sh /docker-entrypoint.sh 93 | COPY common/docker-entrypoint.d /docker-entrypoint.d/ 94 | COPY plus/docker-entrypoint.d /docker-entrypoint.d/ 95 | 96 | RUN set -x \ 97 | && mkdir -p /var/cache/nginx/s3_proxy \ 98 | && chown nginx:nginx /var/cache/nginx/s3_proxy \ 99 | && chmod -R -v +x /docker-entrypoint.sh /docker-entrypoint.d/*.sh; 100 | 101 | ENTRYPOINT ["/docker-entrypoint.sh"] 102 | -------------------------------------------------------------------------------- /Dockerfile.unprivileged: -------------------------------------------------------------------------------- 1 | # This container images makes the necessary modifications in the 2 | # inherited image (which could be OSS NGINX or NGINX Plus) in order 3 | # to allow running NGINX S3 Gateway as a non root user. 4 | # Steps are based on the official unprivileged container: 5 | # https://github.com/nginxinc/docker-nginx-unprivileged/blob/main/Dockerfile-debian.template 6 | FROM nginx-s3-gateway 7 | 8 | # Implement changes required to run NGINX as an unprivileged user 9 | RUN sed -i "/^server {/a \ listen 8080;" /etc/nginx/templates/default.conf.template \ 10 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 11 | && sed -i 's#http://127.0.0.1:80#http://127.0.0.1:8080#g' /etc/nginx/include/s3gateway.js \ 12 | && sed -i 's,/var/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 13 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 14 | # Nginx user must own the cache and etc directory to write cache and tweak the nginx config 15 | && chown -R nginx:0 /var/cache/nginx \ 16 | && chmod -R g+w /var/cache/nginx \ 17 | && chown -R nginx:0 /etc/nginx \ 18 | && chmod -R 
g+w /etc/nginx 19 | 20 | EXPOSE 8080 21 | 22 | USER nginx 23 | -------------------------------------------------------------------------------- /GNUmakefile: -------------------------------------------------------------------------------- 1 | MAKE_MAJOR_VER := $(shell echo $(MAKE_VERSION) | cut -d'.' -f1) 2 | 3 | ifneq ($(shell test $(MAKE_MAJOR_VER) -gt 3; echo $$?),0) 4 | $(error Make version $(MAKE_VERSION) is not supported, please install GNU Make 4.x) 5 | endif 6 | 7 | AWK ?= $(shell command -v gawk 2> /dev/null || command -v awk 2> /dev/null) 8 | 9 | Q = $(if $(filter 1,$V),,@) 10 | M = $(shell printf "\033[34;1m▶\033[0m") 11 | 12 | # Use docker based commitsar if it isn't in the path 13 | ifeq ($(COMMITSAR),) 14 | COMMITSAR = $(COMMITSAR_DOCKER) 15 | endif 16 | 17 | .PHONY: help 18 | help: 19 | @grep --no-filename -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ 20 | $(AWK) 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-28s\033[0m %s\n", $$1, $$2}' | sort 21 | 22 | .PHONY: test 23 | test: ## Run all tests 24 | $Q $(CURDIR)/test.sh --type oss --unprivileged false --latest-njs false 25 | 26 | # Check if the 'open' command exists on the system 27 | OPEN := $(shell command -v open 2> /dev/null) 28 | 29 | # Define the open command based on availability 30 | ifeq ($(OPEN),) 31 | OPEN_COMMAND = xdg-open 32 | else 33 | OPEN_COMMAND = open 34 | endif 35 | 36 | docs_destination_directory = "reference" 37 | 38 | .PHONY: docs 39 | docs: 40 | npx jsdoc -c $(CURDIR)/jsdoc/conf.json -d $(CURDIR)/$(docs_destination_directory) || true 41 | 42 | .PHONY: jsdoc 43 | jsdoc: docs ## Build JSDoc output 44 | 45 | .PHONY: jsdoc-open 46 | jsdoc-open: docs 47 | $(OPEN_COMMAND) $(CURDIR)/$(docs_destination_directory)/index.html 48 | 49 | .PHONY: clean 50 | clean: ## Clean up build artifacts 51 | $Q rm -rf $(CURDIR)/$(docs_destination_directory) 52 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![NGINX S3 Gateway CI/CD](https://github.com/nginxinc/nginx-s3-gateway/actions/workflows/s3-gateway.yml/badge.svg)](https://github.com/nginxinc/nginx-s3-gateway/actions/workflows/s3-gateway.yml) 2 | [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/nginxinc/nginx-s3-gateway/badge)](https://securityscorecards.dev/viewer/?uri=github.com/nginxinc/nginx-s3-gateway) 3 | [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) 4 | [![Community Support](https://badgen.net/badge/support/community/cyan?icon=awesome)](/SUPPORT.md) 5 | [![Community Forum](https://img.shields.io/badge/community-forum-009639?logo=discourse&link=https%3A%2F%2Fcommunity.nginx.org)](https://community.nginx.org) 6 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/license/apache-2-0) 7 | [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](/CODE_OF_CONDUCT.md) 8 | 9 | # NGINX S3 Gateway 10 | 11 | ## Introduction 12 | 13 | This project provides a working configuration of NGINX configured to act as 14 | an authenticating and caching gateway for to AWS S3 or another S3 compatible 15 | service. This allows you to proxy a private S3 bucket without requiring users 16 | to authenticate to it. 
Within the proxy layer, additional functionality can be 17 | configured such as: 18 | 19 | * Listing the contents of a S3 bucket 20 | * Providing an authentication gateway using an alternative authentication 21 | system to S3 22 | * Caching frequently accessed S3 objects for lower latency delivery and 23 | protection against S3 outages 24 | * For internal/micro services that can't authenticate against the S3 API 25 | (e.g. don't have libraries available) the gateway can provide a means 26 | to accessing S3 objects without authentication 27 | * Compressing objects ([gzip](examples/gzip-compression), [brotli](examples/brotli-compression)) from gateway to end user 28 | * Protecting S3 bucket from arbitrary public access and traversal 29 | * Rate limiting S3 objects 30 | * Protecting a S3 bucket with a [WAF](examples/modsecurity) 31 | * Serving static assets from a S3 bucket alongside a dynamic application 32 | endpoints all in a single RESTful directory structure 33 | 34 | All such functionality can be enabled within a standard NGINX configuration 35 | because this project is nothing other than NGINX with additional configuration 36 | that allows for proxying S3. It can be used as-is if the predefined 37 | configuration is sufficient, or it can serve as a base example for a more 38 | customized configuration. 39 | 40 | If the predefined configuration does not meet your needs, it is best to borrow 41 | from the patterns in this project and build your own configuration. For example, 42 | if you want to enable SSL/TLS and compression in your NGINX S3 gateway 43 | configuration, you will need to look at other documentation because this 44 | project does not enable those features of NGINX. 45 | 46 | ## Usage 47 | 48 | This project can be run as a stand-alone container or as a Systemd service. 49 | Both modes use the same NGINX configuration and are functionally equal in terms 50 | features. 
However, in the case of running as a Systemd service, other services 51 | can be configured that additional functionality such as [certbot](https://certbot.eff.org/) 52 | for [Let's Encrypt](https://letsencrypt.org/) support. 53 | 54 | ## Getting Started 55 | 56 | Refer to the [Getting Started Guide](docs/getting_started.md) for how to build 57 | and run the gateway. 58 | 59 | ## Directory Structure and File Descriptions 60 | 61 | ``` 62 | common/ contains files used by both NGINX OSS and Plus configurations 63 | etc/nginx/include/ 64 | awscredentials.js common library to read and write credentials 65 | awssig2.js common library to build AWS signature 2 66 | awssig4.js common library to build AWS signature 4 and get a session token 67 | s3gateway.js common library to integrate the s3 storage from NGINX OSS and Plus 68 | utils.js common library to be reused by all of NJS codebases 69 | deployments/ contains files used for deployment technologies such as 70 | CloudFormation 71 | docs/ contains documentation about the project 72 | examples/ contains additional `Dockerfile` examples that extend the base 73 | configuration 74 | jsdoc JSDoc configuration files 75 | oss/ contains files used solely in NGINX OSS configurations 76 | plus/ contains files used solely in NGINX Plus configurations 77 | test/ contains automated tests for validang that the examples work 78 | Dockerfile.oss Dockerfile that configures NGINX OSS to act as a S3 gateway 79 | Dockerfile.plus Dockerfile that builds a NGINX Plus instance that is configured 80 | equivelently to NGINX OSS - instance is configured to act as a 81 | S3 gateway with NGINX Plus additional features enabled 82 | Dockerfile.buildkit.plus Dockerfile with the same configuration as Dockerfile.plus, but 83 | with support for hiding secrets using Docker's Buildkit 84 | Dockerfile.latest-njs Dockerfile that inherits from the last build of the gateway and 85 | then builds and installs the latest version of njs from source 86 | 
Dockerfile.unprivileged Dockerfiles that inherits from the last build of the gateway and 87 | makes the necessary modifications to allow running the container 88 | as a non root, unprivileged user. 89 | package.json Node.js package file used only for generating JSDoc 90 | settings.example Docker env file example 91 | standalone_ubuntu_oss_install.sh install script that will install the gateway as a Systemd service 92 | test.sh test launcher 93 | ``` 94 | 95 | ## Development 96 | 97 | Refer to the [Development Guide](docs/development.md) for more information about 98 | extending or testing the gateway. 99 | 100 | ## Contributing 101 | 102 | Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project. 103 | 104 | ## License 105 | 106 | [Apache License, Version 2.0](/LICENSE) 107 | 108 | © [F5, Inc.](https://www.f5.com/) 2020 - 2025 109 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Latest Versions 4 | 5 | We advise users to run or update to the most recent release of this project. Older versions of this project may not have all enhancements and/or bug fixes applied to them. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security vulnerabilities: 10 | 11 | - If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/services/support). 12 | - If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 product to the F5 Security Incident Response Team at . 13 | 14 | For more information visit [https://www.f5.com/services/support/report-a-vulnerability](https://www.f5.com/services/support/report-a-vulnerability). 
15 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | ## Ask a Question 4 | 5 | We use GitHub for tracking bugs and feature requests related to this project. 6 | 7 | Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`. Alternatively, start a GitHub discussion! 8 | 9 | ## NGINX Specific Questions and/or Issues 10 | 11 | This isn't the right place to get support for NGINX specific questions, but the following resources are available below. Thanks for your understanding! 12 | 13 | ### Community Forum 14 | 15 | We have a community [forum](https://community.nginx.org/)! If you have any questions and/or issues, try checking out the [`Troubleshooting`](https://community.nginx.org/c/troubleshooting/8) and [`How do I...?`](https://community.nginx.org/c/how-do-i/9) categories. Both fellow community members and NGINXers might be able to help you! :) 16 | 17 | ### Documentation 18 | 19 | For a comprehensive list of all NGINX directives, check out . 20 | 21 | For a comprehensive list of administration and deployment guides for all NGINX products, check out . 22 | 23 | ### Mailing List 24 | 25 | Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at ! 26 | 27 | ## Contributing 28 | 29 | Please see the [contributing guide](/CONTRIBUTING.md) or the [Getting Started](/README.md#getting-started) guide, for guidelines on how to best contribute to this project. 30 | 31 | ## Community Support 32 | 33 | This project does **not** offer commercial support. Community support is offered on a best effort basis through either GitHub issues/PRs/discussions or through any of our active communities. 
34 | -------------------------------------------------------------------------------- /common/docker-entrypoint.d/00-check-for-required-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2020 F5 Networks 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # This script checks to see that required environment variables were correctly 19 | # passed into the Docker container. 20 | 21 | set -e 22 | 23 | failed=0 24 | 25 | required=("S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" 26 | "S3_REGION" "S3_STYLE" "ALLOW_DIRECTORY_LIST" "AWS_SIGS_VERSION" 27 | "CORS_ENABLED") 28 | 29 | # Require some form of authentication to be configured. 30 | 31 | # a) Using container credentials. This is indicated by AWS_CONTAINER_CREDENTIALS_RELATIVE_URI being set. 32 | # See https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html 33 | # Example: We are running inside an ECS task. 34 | if [[ -v AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ]]; then 35 | echo "Running inside an ECS task, using container credentials" 36 | 37 | elif [[ -v S3_SESSION_TOKEN ]]; then 38 | echo "Deprecated the S3_SESSION_TOKEN! 
Use the environment variable of AWS_SESSION_TOKEN instead" 39 | failed=1 40 | 41 | elif [[ -v AWS_SESSION_TOKEN ]]; then 42 | echo "S3 Session token specified - not using IMDS for credentials" 43 | 44 | # b) Using Instance Metadata Service (IMDS) credentials, if IMDS is present at http://169.254.169.254. 45 | # See https://docs.aws.amazon.com/sdkref/latest/guide/feature-imds-credentials.html. 46 | # Example: We are running inside an EC2 instance. 47 | elif TOKEN=`curl -X PUT --silent --fail --connect-timeout 2 --max-time 2 "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600"` && curl -H "X-aws-ec2-metadata-token: $TOKEN" --output /dev/null --silent --head --fail --connect-timeout 2 --max-time 5 "http://169.254.169.254"; then 48 | echo "Running inside an EC2 instance, using IMDS for credentials" 49 | 50 | # c) Using assume role credentials. This is indicated by AWS_WEB_IDENTITY_TOKEN_FILE being set. 51 | # See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html. 52 | # Example: We are running inside an EKS cluster with IAM roles for service accounts enabled. 53 | elif [[ -v AWS_WEB_IDENTITY_TOKEN_FILE ]]; then 54 | echo "Running inside EKS with IAM roles for service accounts" 55 | if [[ ! -v AWS_ROLE_SESSION_NAME ]]; then 56 | # The default value is set as a nginx-s3-gateway unless the value is defined. 57 | AWS_ROLE_SESSION_NAME="nginx-s3-gateway" 58 | fi 59 | 60 | elif [[ -v S3_ACCESS_KEY_ID ]]; then 61 | echo "Deprecated the S3_ACCESS_KEY_ID! Use the environment variable of AWS_ACCESS_KEY_ID instead" 62 | failed=1 63 | 64 | elif [[ -v S3_SECRET_KEY ]]; then 65 | echo "Deprecated the S3_SECRET_KEY! Use the environment variable of AWS_SECRET_ACCESS_KEY instead" 66 | failed=1 67 | 68 | elif [[ -v AWS_SECRET_KEY ]]; then 69 | echo "AWS_SECRET_KEY is not a valid setting! 
Use the environment variable of AWS_SECRET_ACCESS_KEY instead" 70 | failed=1 71 | 72 | 73 | # If none of the options above is used, require static credentials. 74 | # See https://docs.aws.amazon.com/sdkref/latest/guide/feature-static-credentials.html. 75 | else 76 | required+=("AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY") 77 | fi 78 | 79 | if [[ -v S3_DEBUG ]]; then 80 | echo "Deprecated the S3_DEBUG! Use the environment variable of DEBUG instead" 81 | failed=1 82 | fi 83 | 84 | for name in ${required[@]}; do 85 | if [[ ! -v $name ]]; then 86 | >&2 echo "Required ${name} environment variable missing" 87 | failed=1 88 | fi 89 | done 90 | 91 | if [ "${S3_SERVER_PROTO}" != "http" ] && [ "${S3_SERVER_PROTO}" != "https" ]; then 92 | >&2 echo "S3_SERVER_PROTO contains an invalid value (${S3_SERVER_PROTO}). Valid values: http, https" 93 | failed=1 94 | fi 95 | 96 | if [ "${AWS_SIGS_VERSION}" != "2" ] && [ "${AWS_SIGS_VERSION}" != "4" ]; then 97 | >&2 echo "AWS_SIGS_VERSION contains an invalid value (${AWS_SIGS_VERSION}). 
Valid values: 2, 4" 98 | failed=1 99 | fi 100 | 101 | parseBoolean() { 102 | case "$1" in 103 | TRUE | true | True | YES | Yes | 1) 104 | echo 1 105 | ;; 106 | *) 107 | echo 0 108 | ;; 109 | esac 110 | } 111 | 112 | if [ -n "${HEADER_PREFIXES_TO_STRIP+x}" ]; then 113 | if [[ "${HEADER_PREFIXES_TO_STRIP}" =~ [A-Z] ]]; then 114 | >&2 echo "HEADER_PREFIXES_TO_STRIP must not contain uppercase characters" 115 | failed=1 116 | fi 117 | fi 118 | 119 | 120 | if [ $failed -gt 0 ]; then 121 | exit 1 122 | fi 123 | 124 | echo "S3 Backend Environment" 125 | echo "Service: ${S3_SERVICE:-s3}" 126 | echo "Access Key ID: ${AWS_ACCESS_KEY_ID}" 127 | echo "Origin: ${S3_SERVER_PROTO}://${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" 128 | echo "Region: ${S3_REGION}" 129 | echo "Addressing Style: ${S3_STYLE}" 130 | echo "AWS Signatures Version: v${AWS_SIGS_VERSION}" 131 | echo "DNS Resolvers: ${DNS_RESOLVERS}" 132 | echo "Directory Listing Enabled: ${ALLOW_DIRECTORY_LIST}" 133 | echo "Directory Listing Path Prefix: ${DIRECTORY_LISTING_PATH_PREFIX}" 134 | echo "Provide Index Pages Enabled: ${PROVIDE_INDEX_PAGE}" 135 | echo "Append slash for directory enabled: ${APPEND_SLASH_FOR_POSSIBLE_DIRECTORY}" 136 | echo "Stripping the following headers from responses: x-amz-;${HEADER_PREFIXES_TO_STRIP}" 137 | echo "Allow the following headers from responses (these take precendence over the above): ${HEADER_PREFIXES_ALLOWED}" 138 | echo "CORS Enabled: ${CORS_ENABLED}" 139 | echo "CORS Allow Private Network Access: ${CORS_ALLOW_PRIVATE_NETWORK_ACCESS}" 140 | -------------------------------------------------------------------------------- /common/docker-entrypoint.d/22-enable_js_fetch_trusted_certificate.sh: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2022 F5 Networks 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | set -e 18 | 19 | if [ -f /etc/nginx/conf.d/gateway/js_fetch_trusted_certificate.conf ] && [ -n "${JS_TRUSTED_CERT_PATH+x}" ]; then 20 | if [ ! -f "${JS_TRUSTED_CERT_PATH}" ]; then 21 | >&2 echo "JS_TRUSTED_CERT_PATH environment variable error: no file found at the path: ${JS_TRUSTED_CERT_PATH}" 22 | exit 1 23 | fi 24 | 25 | echo "js_fetch_trusted_certificate ${JS_TRUSTED_CERT_PATH};" >> /etc/nginx/conf.d/gateway/js_fetch_trusted_certificate.conf 26 | echo "Enabling js_fetch_trusted_certificate ${JS_TRUSTED_CERT_PATH}" 27 | fi 28 | -------------------------------------------------------------------------------- /common/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2020 F5 Networks 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | 18 | # vim:sw=4:ts=4:et 19 | 20 | set -e 21 | 22 | parseBoolean() { 23 | case "$1" in 24 | TRUE | true | True | YES | Yes | 1) 25 | echo 1 26 | ;; 27 | *) 28 | echo 0 29 | ;; 30 | esac 31 | } 32 | 33 | # This line is an addition to the NGINX Docker image's entrypoint script. 34 | if [ -z ${DNS_RESOLVERS+x} ]; then 35 | resolvers="" 36 | 37 | # This method of pulling individual nameservers from 38 | # /etc/resolv.conf taken from the entrypoint script in the 39 | # official docker image. 40 | # https://github.com/nginxinc/docker-nginx/blob/master/entrypoint/15-local-resolvers.envsh 41 | for ip in $(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf) 42 | do 43 | if echo "${ip}" | grep -q ':'; then 44 | resolvers="$resolvers [${ip}]" 45 | else 46 | resolvers="$resolvers $ip" 47 | fi 48 | done 49 | export DNS_RESOLVERS="${resolvers}" 50 | fi 51 | 52 | # Normalize the CORS_ENABLED environment variable to a numeric value 53 | # so that it can be easily parsed in the nginx configuration. 54 | export CORS_ENABLED="$(parseBoolean "${CORS_ENABLED}")" 55 | 56 | # By enabling CORS, we also need to enable the OPTIONS method which 57 | # is not normally used as part of the gateway. The following variable 58 | # defines the set of acceptable headers. 59 | if [ "${CORS_ENABLED}" == "1" ]; then 60 | export LIMIT_METHODS_TO="GET HEAD OPTIONS" 61 | export LIMIT_METHODS_TO_CSV="GET, HEAD, OPTIONS" 62 | else 63 | export LIMIT_METHODS_TO="GET HEAD" 64 | export LIMIT_METHODS_TO_CSV="GET, HEAD" 65 | fi 66 | 67 | if [ -z "${CORS_ALLOWED_ORIGIN+x}" ]; then 68 | export CORS_ALLOWED_ORIGIN="*" 69 | fi 70 | 71 | # See documentation for this feature. 
We do not parse this as a boolean 72 | # since "true" and "false" are the required values of the header this populates 73 | if [ "${CORS_ALLOW_PRIVATE_NETWORK_ACCESS}" != "true" ] && [ "${CORS_ALLOW_PRIVATE_NETWORK_ACCESS}" != "false" ]; then 74 | export CORS_ALLOW_PRIVATE_NETWORK_ACCESS="" 75 | fi 76 | 77 | # This is the primary logic to determine the s3 host used for the 78 | # upstream (the actual proxying action) as well as the `Host` header 79 | # 80 | # It is currently slightly more complex than necessary because we are transitioning 81 | # to a new logic which is defined by "virtual-v2". "virtual-v2" is the recommended setting 82 | # for all deployments. 83 | 84 | # S3_UPSTREAM needs the port specified. The port must 85 | # correspond to https/http in the proxy_pass directive. 86 | if [ "${S3_STYLE}" == "virtual-v2" ]; then 87 | export S3_UPSTREAM="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" 88 | export S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" 89 | elif [ "${S3_STYLE}" == "path" ]; then 90 | export S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" 91 | export S3_HOST_HEADER="${S3_SERVER}:${S3_SERVER_PORT}" 92 | else 93 | export S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" 94 | export S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}" 95 | fi 96 | 97 | 98 | # Nothing is modified under this line 99 | 100 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 101 | exec 3>&1 102 | else 103 | exec 3>/dev/null 104 | fi 105 | 106 | if [ "$1" = "nginx" -o "$1" = "nginx-debug" ]; then 107 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 108 | echo >&3 "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 109 | 110 | echo >&3 "$0: Looking for shell scripts in /docker-entrypoint.d/" 111 | find "/docker-entrypoint.d/" -follow -type f -print | sort -n | while read -r f; do 112 | case "$f" in 113 | *.sh) 114 | if [ -x "$f" ]; then 115 | echo >&3 "$0: Launching 
$f"; 116 | "$f" 117 | else 118 | # warn on shell scripts without exec bit 119 | echo >&3 "$0: Ignoring $f, not executable"; 120 | fi 121 | ;; 122 | *) echo >&3 "$0: Ignoring $f";; 123 | esac 124 | done 125 | 126 | echo >&3 "$0: Configuration complete; ready for start up" 127 | else 128 | echo >&3 "$0: No files found in /docker-entrypoint.d/, skipping configuration" 129 | fi 130 | fi 131 | 132 | exec "$@" 133 | -------------------------------------------------------------------------------- /common/etc/nginx/include/awssig2.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 F5, Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | /** 18 | * @module awssig2 19 | * @alias AwsSig2 20 | */ 21 | 22 | import utils from "./utils.js"; 23 | 24 | const mod_hmac = require('crypto'); 25 | 26 | /** 27 | * Create HTTP Authorization header for authenticating with an AWS compatible 28 | * v2 API. 
29 | * 30 | * @param r {NginxHTTPRequest} HTTP request object 31 | * @param uri {string} The URI-encoded version of the absolute path component URL to create a request 32 | * @param httpDate {string} RFC2616 timestamp used to sign the request 33 | * @param credentials {Credentials} Credential object with AWS credentials in it (AccessKeyId, SecretAccessKey, SessionToken) 34 | * @returns {string} HTTP Authorization header value 35 | */ 36 | function signatureV2(r, uri, httpDate, credentials) { 37 | const method = r.method; 38 | const hmac = mod_hmac.createHmac('sha1', credentials.secretAccessKey); 39 | const stringToSign = method + '\n\n\n' + httpDate + '\n' + uri; 40 | 41 | utils.debug_log(r, 'AWS v2 Auth Signing String: [' + stringToSign + ']'); 42 | 43 | const signature = hmac.update(stringToSign).digest('base64'); 44 | 45 | return `AWS ${credentials.accessKeyId}:${signature}`; 46 | } 47 | 48 | export default { 49 | signatureV2 50 | } 51 | -------------------------------------------------------------------------------- /common/etc/nginx/include/awssig4.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 F5, Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | /** 18 | * @module awssig4 19 | * @alias AwsSig4 20 | */ 21 | 22 | import awscred from "./awscredentials.js"; 23 | import utils from "./utils.js"; 24 | 25 | const mod_hmac = require('crypto'); 26 | 27 | /** 28 | * Constant defining the headers being signed. 29 | * @type {string} 30 | */ 31 | const DEFAULT_SIGNED_HEADERS = 'host;x-amz-content-sha256;x-amz-date'; 32 | 33 | /** 34 | * Create HTTP Authorization header for authenticating with an AWS compatible 35 | * v4 API. 36 | * 37 | * @param r {NginxHTTPRequest} HTTP request object 38 | * @param timestamp {Date} timestamp associated with request (must fall within a skew) 39 | * @param region {string} API region associated with request 40 | * @param service {string} service code (for example, s3, lambda) 41 | * @param uri {string} The URI-encoded version of the absolute path component URL to create a canonical request 42 | * @param queryParams {string} The URL-encoded query string parameters to create a canonical request 43 | * @param host {string} HTTP host header value 44 | * @param credentials {Credentials} Credential object with AWS credentials in it (AccessKeyId, SecretAccessKey, SessionToken) 45 | * @returns {string} HTTP Authorization header value 46 | */ 47 | function signatureV4(r, timestamp, region, service, uri, queryParams, host, credentials) { 48 | const eightDigitDate = utils.getEightDigitDate(timestamp); 49 | const amzDatetime = utils.getAmzDatetime(timestamp, eightDigitDate); 50 | const canonicalRequest = _buildCanonicalRequest(r, 51 | r.method, uri, queryParams, host, amzDatetime, credentials.sessionToken); 52 | const signature = _buildSignatureV4(r, amzDatetime, eightDigitDate, 53 | credentials, region, service, canonicalRequest); 54 | const authHeader = 'AWS4-HMAC-SHA256 Credential=' 55 | .concat(credentials.accessKeyId, '/', eightDigitDate, '/', region, '/', service, '/aws4_request,', 56 | 'SignedHeaders=', _signedHeaders(r, credentials.sessionToken), ',Signature=', signature); 
57 | 58 | utils.debug_log(r, 'AWS v4 Auth header: [' + authHeader + ']'); 59 | 60 | return authHeader; 61 | } 62 | 63 | /** 64 | * Creates a canonical request that will later be signed 65 | * 66 | * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html | Creating a Canonical Request} 67 | * @param method {string} HTTP method 68 | * @param uri {string} URI associated with request 69 | * @param queryParams {string} query parameters associated with request 70 | * @param host {string} HTTP Host header value 71 | * @param amzDatetime {string} ISO8601 timestamp string to sign request with 72 | * @returns {string} string with concatenated request parameters 73 | * @private 74 | */ 75 | function _buildCanonicalRequest(r, 76 | method, uri, queryParams, host, amzDatetime, sessionToken) { 77 | const payloadHash = awsHeaderPayloadHash(r); 78 | let canonicalHeaders = 'host:' + host + '\n' + 79 | 'x-amz-content-sha256:' + payloadHash + '\n' + 80 | 'x-amz-date:' + amzDatetime + '\n'; 81 | 82 | if (sessionToken && sessionToken.length > 0) { 83 | canonicalHeaders += 'x-amz-security-token:' + sessionToken + '\n' 84 | } 85 | 86 | let canonicalRequest = method + '\n'; 87 | canonicalRequest += uri + '\n'; 88 | canonicalRequest += queryParams + '\n'; 89 | canonicalRequest += canonicalHeaders + '\n'; 90 | canonicalRequest += _signedHeaders(r, sessionToken) + '\n'; 91 | canonicalRequest += payloadHash; 92 | return canonicalRequest; 93 | } 94 | 95 | /** 96 | * Creates a signature for use authenticating against an AWS compatible API. 
97 | * 98 | * @see {@link https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html | AWS V4 Signing Process} 99 | * @param r {NginxHTTPRequest} HTTP request object 100 | * @param amzDatetime {string} ISO8601 timestamp string to sign request with 101 | * @param eightDigitDate {string} date in the form of 'YYYYMMDD' 102 | * @param creds {object} AWS credentials 103 | * @param region {string} API region associated with request 104 | * @param service {string} service code (for example, s3, lambda) 105 | * @param canonicalRequest {string} string with concatenated request parameters 106 | * @returns {string} hex encoded hash of signature HMAC value 107 | * @private 108 | */ 109 | function _buildSignatureV4( 110 | r, amzDatetime, eightDigitDate, creds, region, service, canonicalRequest) { 111 | utils.debug_log(r, 'AWS v4 Auth Canonical Request: [' + canonicalRequest + ']'); 112 | 113 | const canonicalRequestHash = mod_hmac.createHash('sha256') 114 | .update(canonicalRequest) 115 | .digest('hex'); 116 | 117 | utils.debug_log(r, 'AWS v4 Auth Canonical Request Hash: [' + canonicalRequestHash + ']'); 118 | 119 | const stringToSign = _buildStringToSign( 120 | amzDatetime, eightDigitDate, region, service, canonicalRequestHash); 121 | 122 | utils.debug_log(r, 'AWS v4 Auth Signing String: [' + stringToSign + ']'); 123 | 124 | let kSigningHash; 125 | 126 | /* If we have a keyval zone and key defined for caching the signing key hash, 127 | * then signing key caching will be enabled. By caching signing keys we can 128 | * accelerate the signing process because we will have four less HMAC 129 | * operations that have to be performed per incoming request. The signing 130 | * key expires every day, so our cache key can persist for 24 hours safely. 131 | */ 132 | if ("variables" in r && r.variables.cache_signing_key_enabled == 1) { 133 | // cached value is in the format: [eightDigitDate]:[signingKeyHash] 134 | const cached = "signing_key_hash" in r.variables ? 
r.variables.signing_key_hash : ""; 135 | const fields = _splitCachedValues(cached); 136 | const cachedEightDigitDate = fields[0]; 137 | const cacheIsValid = fields.length === 2 && eightDigitDate === cachedEightDigitDate; 138 | 139 | // If true, use cached value 140 | if (cacheIsValid) { 141 | utils.debug_log(r, 'AWS v4 Using cached Signing Key Hash'); 142 | /* We are forced to JSON encode the string returned from the HMAC 143 | * operation because it is in a very specific format that include 144 | * binary data and in order to preserve that data when persisting 145 | * we encode it as JSON. By doing so we can gracefully decode it 146 | * when reading from the cache. */ 147 | kSigningHash = Buffer.from(JSON.parse(fields[1])); 148 | // Otherwise, generate a new signing key hash and store it in the cache 149 | } else { 150 | kSigningHash = _buildSigningKeyHash(creds.secretAccessKey, eightDigitDate, region, service); 151 | utils.debug_log(r, 'Writing key: ' + eightDigitDate + ':' + kSigningHash.toString('hex')); 152 | r.variables.signing_key_hash = eightDigitDate + ':' + JSON.stringify(kSigningHash); 153 | } 154 | // Otherwise, don't use caching at all (like when we are using NGINX OSS) 155 | } else { 156 | kSigningHash = _buildSigningKeyHash(creds.secretAccessKey, eightDigitDate, region, service); 157 | } 158 | 159 | utils.debug_log(r, 'AWS v4 Signing Key Hash: [' + kSigningHash.toString('hex') + ']'); 160 | 161 | const signature = mod_hmac.createHmac('sha256', kSigningHash) 162 | .update(stringToSign).digest('hex'); 163 | 164 | utils.debug_log(r, 'AWS v4 Authorization Header: [' + signature + ']'); 165 | 166 | return signature; 167 | } 168 | 169 | /** 170 | * Creates a string to sign by concatenating together multiple parameters required 171 | * by the signatures algorithm. 
172 | * 173 | * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html | String to Sign} 174 | * @param amzDatetime {string} ISO8601 timestamp string to sign request with 175 | * @param eightDigitDate {string} date in the form of 'YYYYMMDD' 176 | * @param region {string} region associated with server API 177 | * @param service {string} service code (for example, s3, lambda) 178 | * @param canonicalRequestHash {string} hex encoded hash of canonical request string 179 | * @returns {string} a concatenated string of the passed parameters formatted for signatures 180 | * @private 181 | */ 182 | function _buildStringToSign(amzDatetime, eightDigitDate, region, service, canonicalRequestHash) { 183 | return 'AWS4-HMAC-SHA256\n' + 184 | amzDatetime + '\n' + 185 | eightDigitDate + '/' + region + '/' + service + '/aws4_request\n' + 186 | canonicalRequestHash; 187 | } 188 | 189 | /** 190 | * Creates a string containing the headers that need to be signed as part of v4 191 | * signature authentication. 192 | * 193 | * @param r {NginxHTTPRequest} HTTP request object 194 | * @param sessionToken {string|undefined} AWS session token if present 195 | * @returns {string} semicolon delimited string of the headers needed for signing 196 | * @private 197 | */ 198 | function _signedHeaders(r, sessionToken) { 199 | let headers = DEFAULT_SIGNED_HEADERS; 200 | if (sessionToken && sessionToken.length > 0) { 201 | headers += ';x-amz-security-token'; 202 | } 203 | return headers; 204 | } 205 | 206 | /** 207 | * Creates a signing key HMAC. This value is used to sign the request made to 208 | * the API. 209 | * 210 | * @param kSecret {string} secret access key 211 | * @param eightDigitDate {string} date in the form of 'YYYYMMDD' 212 | * @param region {string} region associated with server API 213 | * @param service {string} name of service that request is for e.g. 
s3, lambda 214 | * @returns {ArrayBuffer} signing HMAC 215 | * @private 216 | */ 217 | function _buildSigningKeyHash(kSecret, eightDigitDate, region, service) { 218 | const kDate = mod_hmac.createHmac('sha256', 'AWS4'.concat(kSecret)) 219 | .update(eightDigitDate).digest(); 220 | const kRegion = mod_hmac.createHmac('sha256', kDate) 221 | .update(region).digest(); 222 | const kService = mod_hmac.createHmac('sha256', kRegion) 223 | .update(service).digest(); 224 | const kSigning = mod_hmac.createHmac('sha256', kService) 225 | .update('aws4_request').digest(); 226 | 227 | return kSigning; 228 | } 229 | 230 | /** 231 | * Splits the cached values into an array with two elements or returns an 232 | * empty array if the input string is invalid. The first element contains 233 | * the eight digit date string and the second element contains a JSON string 234 | * of the kSigningHash. 235 | * 236 | * @param cached {string} input string to parse 237 | * @returns {Array} array containing eight digit date and kSigningHash or empty 238 | * @private 239 | */ 240 | function _splitCachedValues(cached) { 241 | const matchedPos = cached.indexOf(':', 0); 242 | // Do a sanity check on the position returned, if it isn't sane, return 243 | // an empty array and let the caller logic process it. 244 | if (matchedPos < 0 || matchedPos + 1 > cached.length) { 245 | return [] 246 | } 247 | 248 | const eightDigitDate = cached.substring(0, matchedPos); 249 | const kSigningHash = cached.substring(matchedPos + 1); 250 | 251 | return [eightDigitDate, kSigningHash] 252 | } 253 | 254 | /** 255 | * Outputs the timestamp used to sign the request, so that it can be added to 256 | * the 'x-amz-date' header and sent by NGINX. The output format is 257 | * ISO 8601: YYYYMMDD'T'HHMMSS'Z'. 
258 | * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-date-handling.html | Handling dates in Signature Version 4} 259 | * 260 | * @param _r {NginxHTTPRequest} HTTP request object (not used, but required for NGINX configuration) 261 | * @returns {string} ISO 8601 timestamp 262 | */ 263 | function awsHeaderDate(_r) { 264 | return utils.getAmzDatetime( 265 | awscred.Now(), 266 | utils.getEightDigitDate(awscred.Now()) 267 | ); 268 | } 269 | 270 | /** 271 | * Return a payload hash in the header 272 | * 273 | * @param r {NginxHTTPRequest} HTTP request object 274 | * @returns {string} payload hash 275 | */ 276 | function awsHeaderPayloadHash(r) { 277 | const reqBody = r.variables.request_body ? r.variables.request_body: ''; 278 | const payloadHash = mod_hmac.createHash('sha256', 'utf8') 279 | .update(reqBody) 280 | .digest('hex'); 281 | return payloadHash; 282 | } 283 | 284 | export default { 285 | awsHeaderDate, 286 | awsHeaderPayloadHash, 287 | signatureV4, 288 | // These functions do not need to be exposed, but they are exposed so that 289 | // unit tests can run against them. 290 | _buildCanonicalRequest, 291 | _buildSignatureV4, 292 | _buildSigningKeyHash, 293 | _splitCachedValues 294 | } 295 | -------------------------------------------------------------------------------- /common/etc/nginx/include/listing.xsl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 22 | 23 | 24 | No Files Available for Listing 25 | 26 |

No Files Available for Listing

27 | 28 | 29 |
30 | 31 | 32 | <!DOCTYPE html> 33 | 35 | 36 | 37 | <xsl:value-of select="$globalPrefix"/> 38 | 39 | 40 | 41 |

Index of /

42 |
43 | 44 | 45 | 46 | 48 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 59 | 60 | 61 | 63 | 65 | 66 | 68 | 70 | 71 | 72 |
Filename 47 | File Size 49 | Date
57 | .. 58 |
73 | 74 | 75 |
76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 87 | 88 | 89 | // 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 106 | 108 | 109 | 110 | 111 | 112 | / 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 127 | 128 | 129 | 130 | 131 | 132 | 141 | / 142 | 143 |
144 | -------------------------------------------------------------------------------- /common/etc/nginx/include/utils.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 F5, Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | /** 18 | * @module utils 19 | * @alias Utils 20 | */ 21 | 22 | /** 23 | * Flag indicating debug mode operation. If true, additional information 24 | * about signature generation will be logged. 25 | * @type {boolean} 26 | */ 27 | const DEBUG = parseBoolean(process.env['DEBUG']); 28 | 29 | 30 | /** 31 | * Checks to see if all the elements of the passed array are present as keys 32 | * in the running process' environment variables. Alternatively, if a single 33 | * string is passed, it will check for the presence of that string. 
34 | * @param envVars {Array|string} array of expected keys or single expected key 35 | * @returns {boolean} true if all keys are set as environment variables 36 | */ 37 | function areAllEnvVarsSet(envVars) { 38 | if (envVars instanceof Array) { 39 | const envVarsLen = envVars.length; 40 | for (let i = 0; i < envVarsLen; i++) { 41 | if (!process.env[envVars[i]]) { 42 | return false; 43 | } 44 | } 45 | return true; 46 | } 47 | return envVars in process.env; 48 | } 49 | 50 | /** 51 | * Parses a string delimited by semicolons into an array of values 52 | * @param string {string|null} value representing a array of strings 53 | * @returns {Array} a list of values 54 | */ 55 | function parseArray(string) { 56 | if (string == null || !string || string === ';') { 57 | return []; 58 | } 59 | 60 | // Exclude trailing delimiter 61 | if (string.endsWith(';')) { 62 | return string.substr(0, string.length - 1).split(';'); 63 | } 64 | 65 | return string.split(';') 66 | } 67 | 68 | /** 69 | * Parses a string to and returns a boolean value based on its value. If the 70 | * string can't be parsed, this method returns false. 71 | * 72 | * @param string {*} value representing a boolean 73 | * @returns {boolean} boolean value of string 74 | */ 75 | function parseBoolean(string) { 76 | switch(string) { 77 | case "TRUE": 78 | case "true": 79 | case "True": 80 | case "YES": 81 | case "yes": 82 | case "Yes": 83 | case "1": 84 | return true; 85 | default: 86 | return false; 87 | } 88 | } 89 | 90 | /** 91 | * Outputs a log message to the request logger if debug messages are enabled. 92 | * 93 | * @param r {NginxHTTPRequest} HTTP request object 94 | * @param msg {string} message to log 95 | */ 96 | function debug_log(r, msg) { 97 | if (DEBUG && "log" in r) { 98 | r.log(msg); 99 | } 100 | } 101 | 102 | /** 103 | * Pads the supplied number with leading zeros. 
104 | * 105 | * @param num {number|string} number to pad 106 | * @param size number of leading zeros to pad 107 | * @returns {string} a string with leading zeros 108 | * @private 109 | */ 110 | function padWithLeadingZeros(num, size) { 111 | const s = "0" + num; 112 | return s.substr(s.length-size); 113 | } 114 | 115 | /** 116 | * Creates a string in the ISO601 date format (YYYYMMDD'T'HHMMSS'Z') based on 117 | * the supplied timestamp and date. The date is not extracted from the timestamp 118 | * because that operation is already done once during the signing process. 119 | * 120 | * @param timestamp {Date} timestamp to extract date from 121 | * @param eightDigitDate {string} 'YYYYMMDD' format date string that was already extracted from timestamp 122 | * @returns {string} string in the format of YYYYMMDD'T'HHMMSS'Z' 123 | * @private 124 | */ 125 | function getAmzDatetime(timestamp, eightDigitDate) { 126 | const hours = timestamp.getUTCHours(); 127 | const minutes = timestamp.getUTCMinutes(); 128 | const seconds = timestamp.getUTCSeconds(); 129 | 130 | return ''.concat( 131 | eightDigitDate, 132 | 'T', padWithLeadingZeros(hours, 2), 133 | padWithLeadingZeros(minutes, 2), 134 | padWithLeadingZeros(seconds, 2), 135 | 'Z'); 136 | } 137 | 138 | /** 139 | * Formats a timestamp into a date string in the format 'YYYYMMDD'. 140 | * 141 | * @param timestamp {Date} timestamp 142 | * @returns {string} a formatted date string based on the input timestamp 143 | * @private 144 | */ 145 | function getEightDigitDate(timestamp) { 146 | const year = timestamp.getUTCFullYear(); 147 | const month = timestamp.getUTCMonth() + 1; 148 | const day = timestamp.getUTCDate(); 149 | 150 | return ''.concat(padWithLeadingZeros(year, 4), 151 | padWithLeadingZeros(month,2), 152 | padWithLeadingZeros(day,2)); 153 | } 154 | 155 | 156 | /** 157 | * Checks to see if the given environment variable is present. If not, an error 158 | * is thrown. 
159 | * @param envVarName {string} environment variable to check for 160 | * @private 161 | */ 162 | function requireEnvVar(envVarName) { 163 | const isSet = envVarName in process.env; 164 | 165 | if (!isSet) { 166 | throw('Required environment variable ' + envVarName + ' is missing'); 167 | } 168 | } 169 | 170 | export default { 171 | areAllEnvVarsSet, 172 | debug_log, 173 | getAmzDatetime, 174 | getEightDigitDate, 175 | padWithLeadingZeros, 176 | parseArray, 177 | parseBoolean, 178 | requireEnvVar 179 | } 180 | -------------------------------------------------------------------------------- /common/etc/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /dev/stdout info; 5 | pid /var/run/nginx.pid; 6 | 7 | # NJS module used for implementing S3 authentication 8 | load_module modules/ngx_http_js_module.so; 9 | load_module modules/ngx_http_xslt_filter_module.so; 10 | 11 | # Preserve S3 environment variables for worker threads 12 | env AWS_ACCESS_KEY_ID; 13 | env AWS_SECRET_ACCESS_KEY; 14 | env AWS_SESSION_TOKEN; 15 | env AWS_CONTAINER_CREDENTIALS_RELATIVE_URI; 16 | env S3_BUCKET_NAME; 17 | env S3_SERVER; 18 | env S3_SERVER_PORT; 19 | env S3_SERVER_PROTO; 20 | env S3_REGION; 21 | env AWS_SIGS_VERSION; 22 | env DEBUG; 23 | env S3_STYLE; 24 | env S3_SERVICE; 25 | env ALLOW_DIRECTORY_LIST; 26 | env PROVIDE_INDEX_PAGE; 27 | env APPEND_SLASH_FOR_POSSIBLE_DIRECTORY; 28 | env DIRECTORY_LISTING_PATH_PREFIX; 29 | env PROXY_CACHE_MAX_SIZE; 30 | env PROXY_CACHE_INACTIVE; 31 | env PROXY_CACHE_SLICE_SIZE; 32 | env PROXY_CACHE_VALID_OK; 33 | env PROXY_CACHE_SLICE_SIZE; 34 | env PROXY_CACHE_VALID_NOTFOUND; 35 | env PROXY_CACHE_VALID_FORBIDDEN; 36 | env HEADER_PREFIXES_TO_STRIP; 37 | env FOUR_O_FOUR_ON_EMPTY_BUCKET; 38 | env STRIP_LEADING_DIRECTORY_PATH; 39 | env PREFIX_LEADING_DIRECTORY_PATH; 40 | 41 | events { 42 | worker_connections 1024; 43 | } 44 | 45 | http { 46 | include 
/etc/nginx/mime.types; 47 | default_type application/octet-stream; 48 | 49 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 50 | '$status $body_bytes_sent "$http_referer" ' 51 | '"$http_user_agent" "$http_x_forwarded_for"'; 52 | 53 | access_log /var/log/nginx/access.log main; 54 | 55 | sendfile on; 56 | #tcp_nopush on; 57 | 58 | keepalive_timeout 65; 59 | 60 | #gzip on; 61 | 62 | include /etc/nginx/conf.d/*.conf; 63 | } 64 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/cache.conf.template: -------------------------------------------------------------------------------- 1 | # Settings for S3 cache 2 | 3 | proxy_cache_path /var/cache/nginx/s3_proxy 4 | levels=1:2 5 | keys_zone=s3_cache:10m 6 | max_size=$PROXY_CACHE_MAX_SIZE 7 | inactive=$PROXY_CACHE_INACTIVE 8 | use_temp_path=off; 9 | 10 | 11 | proxy_cache_path /var/cache/nginx/s3_proxy_slices 12 | levels=1:2 13 | keys_zone=s3_cache_slices:10m 14 | max_size=$PROXY_CACHE_MAX_SIZE 15 | inactive=$PROXY_CACHE_INACTIVE 16 | use_temp_path=off; 17 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/default.conf.template: -------------------------------------------------------------------------------- 1 | js_import /etc/nginx/include/awscredentials.js; 2 | js_import /etc/nginx/include/s3gateway.js; 3 | 4 | 5 | # We include only the variables needed for the authentication signatures that 6 | # we plan to use. 7 | include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_js_vars.conf; 8 | 9 | # Extracts only the path from the requested URI. This strips out all query 10 | # parameters and anchors in order to prevent extraneous data from being sent 11 | # to S3. 
12 | map $request_uri $uri_full_path { 13 | "~^(?P.*?)(\?.*)*$" $path; 14 | } 15 | 16 | # Remove/replace a portion of request URL (if configured) 17 | map $uri_full_path $uri_path { 18 | "~^$STRIP_LEADING_DIRECTORY_PATH(.*)" $PREFIX_LEADING_DIRECTORY_PATH$1; 19 | default $PREFIX_LEADING_DIRECTORY_PATH$uri_full_path; 20 | } 21 | 22 | # S3_HOST_HEADER is set in the startup script 23 | # (either ./common/docker-entrypoint.sh or ./standalone_ubuntu_oss_install.sh) 24 | # based on the S3_STYLE configuration option. 25 | js_var $s3_host ${S3_HOST_HEADER}; 26 | 27 | js_var $indexIsEmpty true; 28 | js_var $forIndexPage true; 29 | # This creates the HTTP authentication header to be sent to S3 30 | js_set $s3auth s3gateway.s3auth; 31 | js_set $awsSessionToken awscredentials.sessionToken; 32 | js_set $s3uri s3gateway.s3uri; 33 | 34 | server { 35 | include /etc/nginx/conf.d/gateway/server_variables.conf; 36 | 37 | # Don't display the NGINX version number because we don't want to reveal 38 | # information that could be used to find an exploit. 39 | server_tokens off; 40 | 41 | # Uncomment this for a HTTP header that will let you know the cache status 42 | # of an object. 43 | # add_header X-Cache-Status $upstream_cache_status; 44 | 45 | # Proxy caching configuration. Customize this for your needs. 46 | proxy_cache s3_cache; 47 | proxy_cache_valid 200 302 ${PROXY_CACHE_VALID_OK}; 48 | proxy_cache_valid 404 ${PROXY_CACHE_VALID_NOTFOUND}; 49 | proxy_cache_valid 403 ${PROXY_CACHE_VALID_FORBIDDEN}; 50 | proxy_cache_methods GET HEAD; 51 | # When this is enabled a HEAD request to NGINX will result in a GET 52 | # request upstream. Unfortunately, proxy_cache_convert_head has to be 53 | # disabled because there is no way for the signatures generation code to 54 | # get access to the metadata in the GET request that is sent upstream. 
55 | proxy_cache_convert_head off; 56 | proxy_cache_revalidate on; 57 | proxy_cache_background_update on; 58 | proxy_cache_lock on; 59 | proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504; 60 | proxy_cache_key "$request_method$host$uri"; 61 | 62 | # If you need to support proxying range request, refer to this article: 63 | # https://www.nginx.com/blog/smart-efficient-byte-range-caching-nginx/ 64 | 65 | # Do not proxy the S3 SOAP API. The S3 API has a less-documented feature 66 | # where the object name "soap" is used for the SOAP API. We don't allow 67 | # access to it. 68 | location /soap { 69 | return 404; 70 | } 71 | 72 | location /health { 73 | return 200; 74 | } 75 | 76 | location / { 77 | # This value is templated in based on the value of $CORS_ENABLED. When 78 | # CORS is enabled, acceptable methods are GET, HEAD, and OPTIONS. 79 | # Otherwise, they are GET and HEAD. 80 | limit_except ${LIMIT_METHODS_TO} {} 81 | 82 | # CORS is implemented by returning the appropriate headers as part of 83 | # the response to an OPTIONS request. If you want to customize the 84 | # CORS response, the cors.conf.template file can be overwritten and 85 | # extended to meet your needs. 86 | include /etc/nginx/conf.d/gateway/cors.conf; 87 | 88 | auth_request /aws/credentials/retrieve; 89 | 90 | # Redirect to the proper location based on the client request - either 91 | # @s3, @s3PreListing or @error405. 
92 | 93 | js_content s3gateway.redirectToS3; 94 | } 95 | 96 | location /aws/credentials/retrieve { 97 | internal; 98 | js_content awscredentials.fetchCredentials; 99 | 100 | include /etc/nginx/conf.d/gateway/js_fetch_trusted_certificate.conf; 101 | } 102 | 103 | # This is the primary location that proxies the request to s3 104 | # See the included s3_location_common.conf file for all logic 105 | location @s3 { 106 | include /etc/nginx/conf.d/gateway/s3_location_common.conf; 107 | } 108 | 109 | # Same as the primary location above but handling and caching 110 | # byte range requests efficiently 111 | location @s3_sliced { 112 | proxy_cache s3_cache_slices; 113 | proxy_cache_valid 200 302 206 ${PROXY_CACHE_VALID_OK}; 114 | proxy_cache_key "$request_method$host$uri$slice_range"; 115 | 116 | slice ${PROXY_CACHE_SLICE_SIZE}; 117 | proxy_set_header Range $slice_range; 118 | include /etc/nginx/conf.d/gateway/s3_location_common.conf; 119 | } 120 | 121 | location @s3PreListing { 122 | # We include only the headers needed for the authentication signatures that 123 | # we plan to use. 124 | include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; 125 | 126 | # The CORS configuration needs to be imported in several places in order for 127 | # it to be applied within different contexts. 128 | include /etc/nginx/conf.d/gateway/cors.conf; 129 | 130 | # Don't allow any headers from the client - we don't want them messing 131 | # with S3 at all. 132 | proxy_pass_request_headers off; 133 | 134 | # Enable passing of the server name through TLS Server Name Indication extension. 
135 | proxy_ssl_server_name on; 136 | proxy_ssl_name ${S3_SERVER}; 137 | 138 | # Set the Authorization header to the AWS Signatures credentials 139 | proxy_set_header Authorization $s3auth; 140 | proxy_set_header X-Amz-Security-Token $awsSessionToken; 141 | 142 | # We set the host as the bucket name to inform the S3 API of the bucket 143 | proxy_set_header Host $s3_host; 144 | 145 | # Use keep alive connections in order to improve performance 146 | proxy_http_version 1.1; 147 | proxy_set_header Connection ''; 148 | 149 | # We strip off all of the AWS specific headers from the server so that 150 | # there is nothing identifying the object as having originated in an 151 | # object store. 152 | js_header_filter s3gateway.editHeaders; 153 | 154 | # Apply XSL transformation to the XML returned from S3 directory listing 155 | # results such that we can output an HTML directory contents list. 156 | xslt_stylesheet /etc/nginx/include/listing.xsl; 157 | xslt_string_param rootPath '${DIRECTORY_LISTING_PATH_PREFIX}'; 158 | xslt_types application/xml; 159 | 160 | # We apply an output filter to the XML input received from S3 before it 161 | # is passed to XSLT in order to determine if the resource is not a valid 162 | # S3 directory. If it isn't a valid directory, we do a dirty hack to 163 | # corrupt the contents of the XML causing the XSLT to fail and thus 164 | # nginx to return a 404 to the client. If you don't care about empty 165 | # directory listings for invalid directories, remove this. 166 | js_body_filter s3gateway.filterListResponse; 167 | 168 | # Catch all errors from S3 and sanitize them so that the user can't 169 | # gain intelligence about the S3 bucket being proxied. 
170 | proxy_intercept_errors on; 171 | 172 | # Comment out this line to receive the error messages returned by S3 173 | error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; 174 | 175 | js_content s3gateway.loadContent; 176 | include /etc/nginx/conf.d/gateway/s3listing_location.conf; 177 | } 178 | 179 | location @s3Directory { 180 | # We include only the headers needed for the authentication signatures that 181 | # we plan to use. 182 | include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; 183 | 184 | # Necessary for determining the correct URI to construct. 185 | set $forIndexPage false; 186 | 187 | # The CORS configuration needs to be imported in several places in order for 188 | # it to be applied within different contexts. 189 | include /etc/nginx/conf.d/gateway/cors.conf; 190 | 191 | # Don't allow any headers from the client - we don't want them messing 192 | # with S3 at all. 193 | proxy_pass_request_headers off; 194 | 195 | # Enable passing of the server name through TLS Server Name Indication extension. 196 | proxy_ssl_server_name on; 197 | proxy_ssl_name ${S3_SERVER}; 198 | 199 | # Set the Authorization header to the AWS Signatures credentials 200 | proxy_set_header Authorization $s3auth; 201 | proxy_set_header X-Amz-Security-Token $awsSessionToken; 202 | 203 | # We set the host as the bucket name to inform the S3 API of the bucket 204 | proxy_set_header Host $s3_host; 205 | 206 | # Use keep alive connections in order to improve performance 207 | proxy_http_version 1.1; 208 | proxy_set_header Connection ''; 209 | 210 | # We strip off all of the AWS specific headers from the server so that 211 | # there is nothing identifying the object as having originated in an 212 | # object store. 
213 | js_header_filter s3gateway.editHeaders; 214 | 215 | # Apply XSL transformation to the XML returned from S3 directory listing 216 | # results such that we can output an HTML directory contents list. 217 | xslt_stylesheet /etc/nginx/include/listing.xsl; 218 | xslt_string_param rootPath '${DIRECTORY_LISTING_PATH_PREFIX}'; 219 | xslt_types application/xml; 220 | 221 | # We apply an output filter to the XML input received from S3 before it 222 | # is passed to XSLT in order to determine if the resource is not a valid 223 | # S3 directory. If it isn't a valid directory, we do a dirty hack to 224 | # corrupt the contents of the XML causing the XSLT to fail and thus 225 | # nginx to return a 404 to the client. If you don't care about empty 226 | # directory listings for invalid directories, remove this. 227 | js_body_filter s3gateway.filterListResponse; 228 | 229 | # Catch all errors from S3 and sanitize them so that the user can't 230 | # gain intelligence about the S3 bucket being proxied. 231 | proxy_intercept_errors on; 232 | 233 | # Comment out this line to receive the error messages returned by S3 234 | error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; 235 | 236 | proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3Uri; 237 | include /etc/nginx/conf.d/gateway/s3listing_location.conf; 238 | } 239 | 240 | location ~ /index.html$ { 241 | # Configuration for handling locations ending with /index.html 242 | 243 | # Necessary for determining the correct URI to construct. 244 | set $forIndexPage true; 245 | 246 | # We include only the headers needed for the authentication signatures that 247 | # we plan to use. 248 | include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; 249 | 250 | # The CORS configuration needs to be imported in several places in order for 251 | # it to be applied within different contexts. 
252 | include /etc/nginx/conf.d/gateway/cors.conf; 253 | 254 | # Don't allow any headers from the client - we don't want them messing 255 | # with S3 at all. 256 | proxy_pass_request_headers off; 257 | 258 | # Enable passing of the server name through TLS Server Name Indication extension. 259 | proxy_ssl_server_name on; 260 | proxy_ssl_name ${S3_SERVER}; 261 | 262 | # Set the Authorization header to the AWS Signatures credentials 263 | proxy_set_header Authorization $s3auth; 264 | proxy_set_header X-Amz-Security-Token $awsSessionToken; 265 | 266 | # We set the host as the bucket name to inform the S3 API of the bucket 267 | proxy_set_header Host $s3_host; 268 | 269 | # Use keep alive connections in order to improve performance 270 | proxy_http_version 1.1; 271 | proxy_set_header Connection ''; 272 | 273 | # We strip off all of the AWS specific headers from the server so that 274 | # there is nothing identifying the object as having originated in an 275 | # object store. 276 | js_header_filter s3gateway.editHeaders; 277 | 278 | # Catch all errors from S3 and sanitize them so that the user can't 279 | # gain intelligence about the S3 bucket being proxied. 280 | proxy_intercept_errors on; 281 | 282 | # Comment out this line to receive the error messages returned by S3 283 | error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; 284 | 285 | proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3uri; 286 | include /etc/nginx/conf.d/gateway/s3listing_location.conf; 287 | } 288 | 289 | location @error404 { 290 | # The CORS configuration needs to be imported in several places in order for 291 | # it to be applied within different contexts. 
292 | include /etc/nginx/conf.d/gateway/cors.conf; 293 | 294 | return 404; 295 | } 296 | 297 | location @trailslashControl { 298 | # Checks if requesting a folder without trailing slash, and return 302 299 | # appending a slash to it when using for static site hosting. 300 | js_content s3gateway.trailslashControl; 301 | } 302 | 303 | location @trailslash { 304 | # 302 to request without slashes 305 | # Adding a ? to the end of the replacement param in `rewrite` prevents it from 306 | # appending the query string. 307 | rewrite ^ $scheme://$http_host$uri/$is_args$query_string? redirect; 308 | } 309 | 310 | # Provide a hint to the client on 405 errors of the acceptable request methods 311 | error_page 405 @error405; 312 | location @error405 { 313 | add_header Allow "${LIMIT_METHODS_TO_CSV}" always; 314 | return 405; 315 | } 316 | 317 | include /etc/nginx/conf.d/gateway/s3_server.conf; 318 | } 319 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/cors.conf.template: -------------------------------------------------------------------------------- 1 | set $request_cors "${request_method}_${CORS_ENABLED}"; 2 | 3 | if ($request_cors = "OPTIONS_1") { 4 | add_header 'Access-Control-Allow-Origin' '${CORS_ALLOWED_ORIGIN}'; 5 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; 6 | # 7 | # Custom headers and headers various browsers *should* be OK with but aren't 8 | # 9 | add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'; 10 | # 11 | # Tell client that this pre-flight info is valid for 20 days 12 | # 13 | add_header 'Access-Control-Max-Age' 1728000; 14 | # 15 | # Allow/deny Private Network Access CORS requests. 
16 | # https://developer.chrome.com/blog/private-network-access-preflight/ 17 | # 18 | add_header 'Access-Control-Allow-Private-Network' '${CORS_ALLOW_PRIVATE_NETWORK_ACCESS}'; 19 | 20 | add_header 'Content-Type' 'text/plain; charset=utf-8'; 21 | add_header 'Content-Length' 0; 22 | return 204; 23 | } 24 | 25 | if ($request_cors = "GET_1") { 26 | add_header 'Access-Control-Allow-Origin' '${CORS_ALLOWED_ORIGIN}' always; 27 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; 28 | add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range' always; 29 | add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always; 30 | } 31 | 32 | if ($request_cors = "HEAD_1") { 33 | add_header 'Access-Control-Allow-Origin' '${CORS_ALLOWED_ORIGIN}' always; 34 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; 35 | add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range' always; 36 | add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always; 37 | } 38 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/js_fetch_trusted_certificate.conf.template: -------------------------------------------------------------------------------- 1 | # Enable js_fetch_trusted_certificate if you are seeing SSL Handshake error when connecing to S3 2 | # The following often works for connecting to AWS services: 3 | # js_fetch_trusted_certificate /etc/ssl/certs/Amazon_Root_CA_1.pem; 4 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/s3_location.conf.template: -------------------------------------------------------------------------------- 1 | # This file is intentional left as blank. 
2 | # Use this file to add additional configuration to the "location @s3" 3 | # block with default.conf.template 4 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/s3_location_common.conf.template: -------------------------------------------------------------------------------- 1 | # We include only the headers needed for the authentication signatures that 2 | # we plan to use. 3 | include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; 4 | 5 | # The CORS configuration needs to be imported in several places in order for 6 | # it to be applied within different contexts. 7 | include /etc/nginx/conf.d/gateway/cors.conf; 8 | 9 | # Don't allow any headers from the client - we don't want them messing 10 | # with S3 at all. 11 | proxy_pass_request_headers off; 12 | 13 | # Enable passing of the server name through TLS Server Name Indication extension. 14 | proxy_ssl_server_name on; 15 | proxy_ssl_name ${S3_SERVER}; 16 | 17 | # Set the Authorization header to the AWS Signatures credentials 18 | proxy_set_header Authorization $s3auth; 19 | proxy_set_header X-Amz-Security-Token $awsSessionToken; 20 | 21 | # We set the host as the bucket name to inform the S3 API of the bucket 22 | proxy_set_header Host $s3_host; 23 | 24 | # Use keep alive connections in order to improve performance 25 | proxy_http_version 1.1; 26 | proxy_set_header Connection ''; 27 | 28 | # We strip off all of the AWS specific headers from the server so that 29 | # there is nothing identifying the object as having originated in an 30 | # object store. 31 | js_header_filter s3gateway.editHeaders; 32 | 33 | # Catch all errors from S3 and sanitize them so that the user can't 34 | # gain intelligence about the S3 bucket being proxied. 
35 | proxy_intercept_errors on; 36 | 37 | # Comment out this line to receive the error messages returned by S3 38 | error_page 400 401 402 403 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; 39 | 40 | error_page 404 @trailslashControl; 41 | 42 | proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3uri; 43 | 44 | include /etc/nginx/conf.d/gateway/s3_location.conf; 45 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/s3_server.conf.template: -------------------------------------------------------------------------------- 1 | # This file is intentionally left blank. 2 | # Use this file to add additional configuration to the "server" 3 | # block in default.conf.template 4 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/s3listing_location.conf.template: -------------------------------------------------------------------------------- 1 | # This file is intentionally left blank. 2 | # Use this file to add additional configuration to the "location @s3Listing" 3 | # block in default.conf.template 4 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/v2_headers.conf.template: -------------------------------------------------------------------------------- 1 | # This header is needed when doing v2 signature authentication. It 2 | # specifies the timestamp in which the signature was generated. 3 | proxy_set_header Date $httpDate; 4 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/v2_js_vars.conf.template: -------------------------------------------------------------------------------- 1 | # This header is needed when doing v2 signature authentication.
It 2 | # specifies the timestamp in which the signature was generated and is used with 3 | # the HTTP Date header. 4 | js_set $httpDate s3gateway.s3date; 5 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/v4_headers.conf.template: -------------------------------------------------------------------------------- 1 | # This header is needed when doing v4 signature authentication. It 2 | # specifies the timestamp in which the signature was generated. 3 | proxy_set_header x-amz-date $awsDate; 4 | 5 | # All HTTP bodies are empty because we are only doing GET/HEAD requests, 6 | # so we can hardcode the body checksum. 7 | proxy_set_header x-amz-content-sha256 $awsPayloadHash; 8 | -------------------------------------------------------------------------------- /common/etc/nginx/templates/gateway/v4_js_vars.conf.template: -------------------------------------------------------------------------------- 1 | # This header is needed when doing v4 signature authentication. It 2 | # specifies the timestamp in which the signature was generated and is used with 3 | # the x-amz-date header. 
4 | js_import /etc/nginx/include/awssig4.js; 5 | 6 | js_set $awsDate awssig4.awsHeaderDate; 7 | js_set $awsPayloadHash awssig4.awsHeaderPayloadHash; 8 | -------------------------------------------------------------------------------- /deployments/ecs/cloudformation/s3gateway.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Parameters: 3 | NewBucketName: 4 | Default: 5 | Description: S3 Bucket Name 6 | Type: String 7 | Subnet1: 8 | Default: 9 | Description: ID of the first subnet to be used for resources 10 | Type: String 11 | Subnet2: 12 | Default: 13 | Description: ID of the second subnet to be used for resources 14 | Type: String 15 | VpcId: 16 | Default: 17 | Description: ID of the VPC to be used for resources 18 | Type: String 19 | ContainerName: 20 | Default: s3gateway 21 | Description: Name of the NGINX Container. No need to change this 22 | Type: String 23 | ResourceNamePrefix: 24 | Default: nginx-s3-gateway 25 | Description: Common prefix used for resource names. 
No need to change this 26 | Type: String 27 | Outputs: 28 | PublicDNS: 29 | Description: DNS name of load balancer 30 | Value: !GetAtt 'ALB.DNSName' 31 | Resources: 32 | ALB: 33 | Properties: 34 | Name: !Join 35 | - '-' 36 | - - !Ref 'ResourceNamePrefix' 37 | - alb 38 | SecurityGroups: 39 | - !GetAtt 'S3GatewaySG.GroupId' 40 | SubnetMappings: 41 | - SubnetId: !Ref 'Subnet1' 42 | - SubnetId: !Ref 'Subnet2' 43 | Type: application 44 | Type: AWS::ElasticLoadBalancingV2::LoadBalancer 45 | ALBHttpListener: 46 | Properties: 47 | DefaultActions: 48 | - ForwardConfig: 49 | TargetGroups: 50 | - TargetGroupArn: !Ref 'ALBTargetGroup' 51 | Type: forward 52 | LoadBalancerArn: !Ref 'ALB' 53 | Port: 80 54 | Protocol: HTTP 55 | Type: AWS::ElasticLoadBalancingV2::Listener 56 | ALBTargetGroup: 57 | Properties: 58 | Name: !Join 59 | - '-' 60 | - - !Ref 'ResourceNamePrefix' 61 | - log-group 62 | Port: 80 63 | Protocol: HTTP 64 | ProtocolVersion: HTTP1 65 | TargetGroupAttributes: 66 | - Key: deregistration_delay.timeout_seconds 67 | Value: '150' 68 | TargetType: ip 69 | VpcId: !Ref 'VpcId' 70 | Type: AWS::ElasticLoadBalancingV2::TargetGroup 71 | Cluster: 72 | Properties: 73 | ClusterName: !Join 74 | - '-' 75 | - - !Ref 'ResourceNamePrefix' 76 | - cluster 77 | Type: AWS::ECS::Cluster 78 | ECSLogGroup: 79 | Properties: 80 | LogGroupName: !Join 81 | - '-' 82 | - - !Ref 'ResourceNamePrefix' 83 | - logs 84 | RetentionInDays: 14 85 | Type: AWS::Logs::LogGroup 86 | ECSTaskExecutionPolicy: 87 | Properties: 88 | PolicyDocument: 89 | Statement: 90 | - Action: 91 | - logs:CreateLogStream 92 | - logs:CreateLogGroup 93 | - logs:PutLogEvents 94 | Effect: Allow 95 | Resource: 96 | - '*' 97 | Version: '2012-10-17' 98 | PolicyName: !Join 99 | - '-' 100 | - - !Ref 'ResourceNamePrefix' 101 | - ecs-task-execution-policy 102 | Roles: 103 | - !Ref 'ECSTaskExecutionRole' 104 | Type: AWS::IAM::Policy 105 | ECSTaskExecutionRole: 106 | Properties: 107 | AssumeRolePolicyDocument: 108 | Statement: 109 | - Action: 
110 | - sts:AssumeRole 111 | Effect: Allow 112 | Principal: 113 | Service: 114 | - ecs-tasks.amazonaws.com 115 | Version: '2012-10-17' 116 | Description: An IAM role to enable ECS agents to perform AWS operations such as creating CloudWatch logs. 117 | RoleName: !Join 118 | - '-' 119 | - - !Ref 'ResourceNamePrefix' 120 | - ecs-task-execution-role 121 | Type: AWS::IAM::Role 122 | ECSTaskPolicy: 123 | Properties: 124 | PolicyDocument: 125 | Statement: 126 | - Action: 127 | - s3:GetObject 128 | - s3:ListBucket 129 | Effect: Allow 130 | Resource: 131 | - !Sub 132 | - arn:aws:s3:::${bucketName}/* 133 | - bucketName: !Ref 'NewBucketName' 134 | - !Sub 135 | - arn:aws:s3:::${bucketName} 136 | - bucketName: !Ref 'NewBucketName' 137 | Version: '2012-10-17' 138 | PolicyName: !Join 139 | - '-' 140 | - - !Ref 'ResourceNamePrefix' 141 | - ecs-task-policy 142 | Roles: 143 | - !Ref 'ECSTaskRole' 144 | Type: AWS::IAM::Policy 145 | ECSTaskRole: 146 | Properties: 147 | AssumeRolePolicyDocument: 148 | Statement: 149 | - Action: 150 | - sts:AssumeRole 151 | Effect: Allow 152 | Principal: 153 | Service: 154 | - ecs-tasks.amazonaws.com 155 | Version: '2012-10-17' 156 | Description: An IAM role to enable ECS containers to perform AWS operations such as accessing S3 buckets. 
157 | RoleName: !Join 158 | - '-' 159 | - - !Ref 'ResourceNamePrefix' 160 | - ecs-task-role 161 | Type: AWS::IAM::Role 162 | S3Bucket: 163 | Properties: 164 | BucketName: !Ref 'NewBucketName' 165 | Type: AWS::S3::Bucket 166 | S3GatewaySG: 167 | Properties: 168 | GroupDescription: Security group for NGINX S3 Gateway Infra 169 | GroupName: !Join 170 | - '-' 171 | - - !Ref 'ResourceNamePrefix' 172 | - sg 173 | SecurityGroupEgress: 174 | - CidrIp: '0.0.0.0/0' 175 | Description: Allow all outbound IPv4 traffic 176 | IpProtocol: '-1' 177 | - CidrIpv6: ::/0 178 | Description: Allow all outbound IPv6 traffic 179 | IpProtocol: '-1' 180 | SecurityGroupIngress: 181 | - CidrIp: '0.0.0.0/0' 182 | Description: Allow HTTP Traffic on 80 183 | FromPort: 80 184 | IpProtocol: tcp 185 | ToPort: 80 186 | - CidrIp: '0.0.0.0/0' 187 | Description: Allow HTTPS Traffic on 443 188 | FromPort: 443 189 | IpProtocol: tcp 190 | ToPort: 443 191 | VpcId: !Ref 'VpcId' 192 | Type: AWS::EC2::SecurityGroup 193 | S3GatewayService: 194 | DependsOn: ALBHttpListener 195 | Properties: 196 | Cluster: !Ref 'Cluster' 197 | DesiredCount: 1 198 | LaunchType: FARGATE 199 | LoadBalancers: 200 | - ContainerName: !Ref 'ContainerName' 201 | ContainerPort: 80 202 | TargetGroupArn: !Ref 'ALBTargetGroup' 203 | NetworkConfiguration: 204 | AwsvpcConfiguration: 205 | AssignPublicIp: ENABLED 206 | SecurityGroups: 207 | - !GetAtt 'S3GatewaySG.GroupId' 208 | Subnets: 209 | - !Ref 'Subnet1' 210 | - !Ref 'Subnet2' 211 | ServiceName: !Join 212 | - '-' 213 | - - !Ref 'ResourceNamePrefix' 214 | - service 215 | TaskDefinition: !Ref 'TaskDefinition' 216 | Type: AWS::ECS::Service 217 | TaskDefinition: 218 | DependsOn: S3Bucket 219 | Properties: 220 | ContainerDefinitions: 221 | - Environment: 222 | - Name: ALLOW_DIRECTORY_LIST 223 | Value: 'true' 224 | - Name: AWS_SIGS_VERSION 225 | Value: '4' 226 | - Name: S3_BUCKET_NAME 227 | Value: !Ref 'S3Bucket' 228 | - Name: S3_REGION 229 | Value: !Ref 'AWS::Region' 230 | - Name: 
S3_SERVER_PORT 231 | Value: '443' 232 | - Name: S3_SERVER_PROTO 233 | Value: https 234 | - Name: S3_SERVER 235 | Value: !Join 236 | - . 237 | - - s3 238 | - !Ref 'AWS::Region' 239 | - amazonaws.com 240 | - Name: S3_STYLE 241 | Value: default 242 | - Name: DEBUG 243 | Value: 'false' 244 | Image: ghcr.io/nginxinc/nginx-s3-gateway/nginx-oss-s3-gateway:latest-njs-oss 245 | LogConfiguration: 246 | LogDriver: awslogs 247 | Options: 248 | awslogs-group: !Ref 'ECSLogGroup' 249 | awslogs-region: !Ref 'AWS::Region' 250 | awslogs-stream-prefix: ecs 251 | Name: !Ref 'ContainerName' 252 | PortMappings: 253 | - ContainerPort: 80 254 | HostPort: 80 255 | Cpu: '1024' 256 | ExecutionRoleArn: !GetAtt 'ECSTaskExecutionRole.Arn' 257 | Family: !Ref 'ContainerName' 258 | Memory: '2048' 259 | NetworkMode: awsvpc 260 | RequiresCompatibilities: 261 | - FARGATE 262 | TaskRoleArn: !GetAtt 'ECSTaskRole.Arn' 263 | Type: AWS::ECS::TaskDefinition 264 | -------------------------------------------------------------------------------- /deployments/s3_express/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.45.0" 6 | constraints = "5.45.0" 7 | hashes = [ 8 | "h1:8m3+C1VNevzU/8FsABoKp2rTOx3Ue7674INfhfk0TZY=", 9 | "zh:1379bcf45aef3d486ee18b4f767bfecd40a0056510d26107f388be3d7994c368", 10 | "zh:1615a6f5495acfb3a0cb72324587261dd4d72711a3cc51aff13167b14531501e", 11 | "zh:18b69a0f33f8b1862fbd3f200756b7e83e087b73687085f2cf9c7da4c318e3e6", 12 | "zh:2c5e7aecd197bc3d3b19290bad8cf4c390c2c6a77bb165da4e11f53f2dfe2e54", 13 | "zh:3794da9bef97596e3bc60e12cdd915bda5ec2ed62cd1cd93723d58b4981905fe", 14 | "zh:40a5e45ed91801f83db76dffd467dcf425ea2ca8642327cf01119601cb86021c", 15 | "zh:4abfc3f53d0256a7d5d1fa5e931e4601b02db3d1da28f452341d3823d0518f1a", 16 | "zh:4eb0e98078f79aeb06b5ff6115286dc2135d12a80287885698d04036425494a2", 17 | "zh:75470efbadea4a8d783642497acaeec5077fc4a7f3df3340defeaa1c7de29bf7", 18 | "zh:8861a0b4891d5fa2fa7142f236ae613cea966c45b5472e3915a4ac3abcbaf487", 19 | "zh:8bf6f21cd9390b742ca0b4393fde92616ca9e6553fb75003a0999006ad233d35", 20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 21 | "zh:ad73008a044e75d337acda910fb54d8b81a366873c8a413fec1291034899a814", 22 | "zh:bf261713b0b8bebfe8c199291365b87d9043849f28a2dc764bafdde73ae43693", 23 | "zh:da3bafa1fd830be418dfcc730e85085fe67c0d415c066716f2ac350a2306f40a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /deployments/s3_express/.tool-versions: -------------------------------------------------------------------------------- 1 | terraform 1.8.1 2 | -------------------------------------------------------------------------------- /deployments/s3_express/README.md: -------------------------------------------------------------------------------- 1 | # Purpose 2 | This Terraform script sets up an AWS S3 Express One Zone bucket for testing. 
3 | 4 | ## Usage 5 | Use environment variables to authenticate: 6 | 7 | ```bash 8 | export AWS_ACCESS_KEY_ID="anaccesskey" 9 | export AWS_SECRET_ACCESS_KEY="asecretkey" 10 | export AWS_REGION="us-west-2" 11 | ``` 12 | 13 | Generate a plan: 14 | ```bash 15 | terraform plan -out=plan.tfplan \ 16 | > -var="bucket_name=my-bucket-name--usw2-az1--x-s3" \ 17 | > -var="region=us-west-2" \ 18 | > -var="availability_zone_id=usw2-az1" \ 19 | > -var="owner_email=my_email@foo.com" 20 | ``` 21 | > [!NOTE] 22 | > Note that AWS S3 Express One Zone is only available in [certain regions and availability zones](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints). If you get an error like `api error InvalidBucketName` even though you have met the [naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html), this likely means you have chosen a bad region/availability zone combination. 23 | 24 | 25 | If you are comfortable with the plan, apply it: 26 | ``` 27 | terraform apply "plan.tfplan" 28 | ``` 29 | 30 | Then build the image (you can also use the latest release) 31 | ```bash 32 | docker build --file Dockerfile.oss --tag nginx-s3-gateway:oss --tag nginx-s3-gateway . 33 | ``` 34 | 35 | Configure and run the image: 36 | 37 | ```bash 38 | docker run --rm --env-file ./settings.s3express.example --publish 80:80 --name nginx-s3-gateway \ 39 | nginx-s3-gateway:oss 40 | ``` 41 | 42 | Confirm that it is working.
The terraform script will prepopulate the bucket with a single test object 43 | ```bash 44 | curl http://localhost:80/test.txt 45 | ``` 46 | -------------------------------------------------------------------------------- /deployments/s3_express/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | resource "aws_s3_directory_bucket" "example" { 6 | bucket = var.bucket_name 7 | location { 8 | name = var.availability_zone_id 9 | } 10 | 11 | force_destroy = true 12 | } 13 | 14 | data "aws_partition" "current" {} 15 | data "aws_caller_identity" "current" {} 16 | 17 | data "aws_iam_policy_document" "example" { 18 | statement { 19 | effect = "Allow" 20 | 21 | actions = [ 22 | "s3express:*", 23 | ] 24 | 25 | resources = [ 26 | aws_s3_directory_bucket.example.arn, 27 | ] 28 | 29 | principals { 30 | type = "AWS" 31 | identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] 32 | } 33 | } 34 | } 35 | 36 | resource "aws_s3_bucket_policy" "example" { 37 | bucket = aws_s3_directory_bucket.example.bucket 38 | policy = data.aws_iam_policy_document.example.json 39 | } 40 | 41 | # The filemd5() function is available in Terraform 0.11.12 and later 42 | # For Terraform 0.11.11 and earlier, use the md5() function and the file() function: 43 | # etag = "${md5(file("path/to/file"))}" 44 | # etag = filemd5("path/to/file") 45 | resource "aws_s3_object" "example" { 46 | bucket = aws_s3_directory_bucket.example.bucket 47 | key = "test.txt" 48 | source = "${path.root}/test_data/test.txt" 49 | } 50 | 51 | 52 | -------------------------------------------------------------------------------- /deployments/s3_express/settings.s3express.example: -------------------------------------------------------------------------------- 1 | S3_BUCKET_NAME=my-bucket-name--usw2-az1--x-s3 2 | AWS_ACCESS_KEY_ID=ZZZZZZZZZZZZZZZZZZZZ 3 | 
AWS_SECRET_ACCESS_KEY=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 4 | AWS_SESSION_TOKEN=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 5 | S3_SERVER=s3express-usw2-az1.us-west-2.amazonaws.com 6 | S3_SERVER_PORT=443 7 | S3_SERVER_PROTO=https 8 | S3_REGION=us-west-2 9 | S3_STYLE=virtual-v2 10 | S3_SERVICE=s3express 11 | DEBUG=true 12 | AWS_SIGS_VERSION=4 13 | ALLOW_DIRECTORY_LIST=false 14 | PROVIDE_INDEX_PAGE=false 15 | APPEND_SLASH_FOR_POSSIBLE_DIRECTORY=false 16 | DIRECTORY_LISTING_PATH_PREFIX="" 17 | PROXY_CACHE_MAX_SIZE=10g 18 | PROXY_CACHE_SLICE_SIZE="1m" 19 | PROXY_CACHE_INACTIVE=60m 20 | PROXY_CACHE_VALID_OK=1h 21 | PROXY_CACHE_VALID_NOTFOUND=1m 22 | PROXY_CACHE_VALID_FORBIDDEN=30s 23 | -------------------------------------------------------------------------------- /deployments/s3_express/test_data/test.txt: -------------------------------------------------------------------------------- 1 | Congratulations, friend. You are using Amazon S3 Express One Zone. 2 | 🚂🚂🚂 Choo-choo~ 🚂🚂🚂 -------------------------------------------------------------------------------- /deployments/s3_express/variables.tf: -------------------------------------------------------------------------------- 1 | # Format for bucket name [bucket_name]--[azid]--x-s3 2 | variable "bucket_name" { 3 | type = string 4 | default = "example--usw2-az2--x-s3" 5 | } 6 | 7 | variable "owner_email" { 8 | type = string 9 | } 10 | 11 | variable "region" { 12 | type = string 13 | default = "us-west-2" 14 | } 15 | 16 | # "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#az-ids" 17 | variable "availability_zone_id" { 18 | type = string 19 | default = "usw2-az2" 20 | } 21 | -------------------------------------------------------------------------------- /deployments/s3_express/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "5.45.0" 6 | } 7 | 
} 8 | } 9 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development Guide 2 | 3 | ## Integrating with AWS Signature 4 | 5 | Update the following files when enhancing `nginx-s3-gateway` to integrate with AWS signature whenever AWS releases a new version of signature or you have a new PR: 6 | 7 | - NGINX Proxy: [`/etc/nginx/conf.d/default.conf`](/common/etc/nginx/templates/gateway/s3_location.conf.template) 8 | - AWS Credentials Lib: [`/etc/nginx/include/awscredentials.js`](/common/etc/nginx/include/awscredentials.js) 9 | > Note: The `fetchCredentials()` is going to be part of here soon. 10 | 11 | - AWS Signature Lib per version: 12 | - [`/etc/nginx/include/awssig2.js`](/common/etc/nginx/include/awssig2.js) 13 | - [`/etc/nginx/include/awssig4.js`](/common/etc/nginx/include/awssig4.js) 14 | 15 | - S3 Integration Lib: [`/etc/nginx/include/s3gateway.js`](/common/etc/nginx/include/s3gateway.js) 16 | - Common Lib for all of NJS: [`/etc/nginx/include/utils.js`](/common/etc/nginx/include/utils.js) 17 | 18 | ![](./img/nginx-s3-gateway-signature-flow.png) 19 | 20 | ## Extending the Gateway 21 | 22 | ### Extending gateway configuration via container images 23 | 24 | #### `conf.d` Directory 25 | 26 | On the container image, all files with the extension `.conf` in the 27 | directory `/etc/nginx/conf.d` will be loaded into the configuration 28 | of the base `http` block within the main NGINX configuration. 29 | 30 | This allows for extension of the configuration by adding additional 31 | configuration files into the container image extending the base 32 | gateway image. 
33 | 34 | #### Stub Files 35 | 36 | On the container image there are three NGINX configuration stub files: 37 | 38 | * [`/etc/nginx/conf.d/s3_server.conf`](/common/etc/nginx/templates/gateway/s3_server.conf.template) 39 | * [`/etc/nginx/conf.d/s3_location.conf`](/common/etc/nginx/templates/gateway/s3_location.conf.template) 40 | * [`/etc/nginx/conf.d/s3listing_location.conf`](/common/etc/nginx/templates/gateway/s3listing_location.conf.template) 41 | 42 | Each of these files can be overwritten in a container image that inherits 43 | from the S3 Gateway container image, so that additional NGINX configuration 44 | directives can be inserted into the gateway configuration. 45 | 46 | ### Examples 47 | 48 | In the [examples/ directory](/examples), there are `Dockerfile` examples that 49 | show how to extend the base functionality of the NGINX S3 Gateway by adding 50 | additional modules. 51 | 52 | * [Enabling Brotli Compression in Docker](/examples/brotli-compression) 53 | * [Enabling GZip Compression in Docker](/examples/gzip-compression) 54 | * [Installing Modsecurity in Docker](/examples/modsecurity) 55 | 56 | ## Testing 57 | 58 | Automated tests require `docker`, `docker-compose`, `curl`, `md5sum` (or `md5` on MacOS), and `mc` (the minio [cli tool](https://github.com/penpyt/asdf-mc)) to be 59 | installed. To run all unit tests and integration tests, run the following command.
60 | If you invoke the test script with a plus parameter, you will need to add your 61 | NGINX repository keys to the `plus/etc/ssl/nginx` directory 62 | 63 | ``` 64 | $ ./test.sh 65 | ``` 66 | -------------------------------------------------------------------------------- /docs/img/nginx-s3-gateway-directory-listing-path-prefix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nginx/nginx-s3-gateway/5652fa1d1be939f846de05a33e9651a0e601f259/docs/img/nginx-s3-gateway-directory-listing-path-prefix.png -------------------------------------------------------------------------------- /docs/img/nginx-s3-gateway-signature-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nginx/nginx-s3-gateway/5652fa1d1be939f846de05a33e9651a0e601f259/docs/img/nginx-s3-gateway-signature-flow.png -------------------------------------------------------------------------------- /examples/brotli-compression/Dockerfile.oss: -------------------------------------------------------------------------------- 1 | FROM nginxinc/nginx-s3-gateway@sha256:8aa48324479b3653b5936183cc97f2ca1aa9078d229042f1bca357834bd906f4 2 | ENV BROTLI_VERSION "v1.0.0rc" 3 | 4 | # Build Brotli module from source because there is no repository package 5 | RUN set -eux \ 6 | export DEBIAN_FRONTEND=noninteractive; \ 7 | apt-get update -qq; \ 8 | apt-get install -y -qq build-essential libpcre3-dev git libbrotli1 libbrotli-dev; \ 9 | curl -o /tmp/nginx.tar.gz --retry 6 -Ls "http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"; \ 10 | mkdir /tmp/nginx /tmp/brotli; \ 11 | tar -C /tmp/nginx --strip-components 1 -xzf /tmp/nginx.tar.gz; \ 12 | curl -o /tmp/brotli.tar.gz --retry 6 -Ls "https://github.com/google/ngx_brotli/archive/${BROTLI_VERSION}.tar.gz"; \ 13 | tar -C "/tmp/brotli" --strip-components 1 -xzf /tmp/brotli.tar.gz; \ 14 | cd /tmp/nginx; \ 15 | ./configure 
--add-dynamic-module=/tmp/brotli \ 16 | --without-http_gzip_module \ 17 | --prefix=/etc/nginx \ 18 | --sbin-path=/usr/sbin/nginx \ 19 | --modules-path=/usr/lib/nginx/modules \ 20 | --conf-path=/etc/nginx/nginx.conf \ 21 | --error-log-path=/var/log/nginx/error.log \ 22 | --http-log-path=/var/log/nginx/access.log \ 23 | --pid-path=/var/run/nginx.pid \ 24 | --lock-path=/var/run/nginx.lock \ 25 | --http-client-body-temp-path=/var/cache/nginx/client_temp \ 26 | --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ 27 | --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ 28 | --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ 29 | --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ 30 | --user=nginx --group=nginx --with-compat --with-file-aio \ 31 | --with-threads \ 32 | --with-compat \ 33 | --with-cc-opt="-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-${NGINX_VERSION}/debian/debuild-base/nginx-${NGINX_VERSION}=. -fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC" \ 34 | --with-ld-opt='-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'; \ 35 | make -j $(nproc) modules; \ 36 | cp /tmp/nginx/objs/ngx_http_brotli_filter_module.so /usr/lib/nginx/modules; \ 37 | cp /tmp/nginx/objs/ngx_http_brotli_static_module.so /usr/lib/nginx/modules; \ 38 | apt-get purge -y --auto-remove build-essential libpcre3-dev git libbrotli-dev; \ 39 | rm -rf /var/lib/apt/lists/* /var/tmp/* /tmp/* 40 | 41 | # Update configuration to load module 42 | RUN sed -i '1s#^#load_module modules/ngx_http_brotli_filter_module.so;\n\n#' /etc/nginx/nginx.conf 43 | 44 | COPY etc/nginx/conf.d /etc/nginx/conf.d 45 | -------------------------------------------------------------------------------- /examples/brotli-compression/Dockerfile.plus: -------------------------------------------------------------------------------- 1 | FROM nginx-plus-s3-gateway 2 | 3 | # Install Brotli from the NGINX Plus repository 4 | RUN set -eux \ 5 | export DEBIAN_FRONTEND=noninteractive; \ 6 |
apt-get -qq update; \ 7 | apt-get -qq install --no-install-recommends --no-install-suggests -y nginx-plus-module-brotli; \ 8 | rm -rf /var/lib/apt/lists/* 9 | 10 | # Update configuration to load module 11 | RUN sed -i '1s#^#load_module modules/ngx_http_brotli_filter_module.so;\n\n#' /etc/nginx/nginx.conf 12 | 13 | COPY etc/nginx/conf.d /etc/nginx/conf.d 14 | -------------------------------------------------------------------------------- /examples/brotli-compression/etc/nginx/conf.d/brotli_compression.conf: -------------------------------------------------------------------------------- 1 | brotli on; 2 | brotli_comp_level 6; 3 | brotli_buffers 16 8k; 4 | brotli_min_length 1024; 5 | brotli_types 6 | application/atom+xml 7 | application/geo+json 8 | application/javascript 9 | application/x-javascript 10 | application/json 11 | application/ld+json 12 | application/manifest+json 13 | application/rdf+xml 14 | application/rss+xml 15 | application/vnd.ms-fontobject 16 | application/wasm 17 | application/x-web-app-manifest+json 18 | application/xhtml+xml 19 | application/xml 20 | font/eot 21 | font/otf 22 | font/ttf 23 | image/bmp 24 | image/svg+xml 25 | text/cache-manifest 26 | text/calendar 27 | text/css 28 | text/javascript 29 | text/markdown 30 | text/plain 31 | text/xml 32 | text/vcard 33 | text/vnd.rim.location.xloc 34 | text/vtt 35 | text/x-component 36 | text/x-cross-domain-policy; 37 | -------------------------------------------------------------------------------- /examples/gzip-compression/Dockerfile.oss: -------------------------------------------------------------------------------- 1 | FROM nginxinc/nginx-s3-gateway@sha256:8aa48324479b3653b5936183cc97f2ca1aa9078d229042f1bca357834bd906f4 2 | 3 | COPY etc/nginx/conf.d /etc/nginx/conf.d 4 | -------------------------------------------------------------------------------- /examples/gzip-compression/Dockerfile.plus: -------------------------------------------------------------------------------- 1 | FROM 
nginx-plus-s3-gateway 2 | 3 | COPY etc/nginx/conf.d /etc/nginx/conf.d 4 | -------------------------------------------------------------------------------- /examples/gzip-compression/etc/nginx/conf.d/gzip_compression.conf: -------------------------------------------------------------------------------- 1 | # Credit: https://github.com/h5bp/server-configs-nginx/ 2 | 3 | # ---------------------------------------------------------------------- 4 | # | Compression | 5 | # ---------------------------------------------------------------------- 6 | 7 | # https://nginx.org/en/docs/http/ngx_http_gzip_module.html 8 | 9 | # Enable gzip compression. 10 | # Default: off 11 | gzip on; 12 | 13 | # Compression level (1-9). 14 | # 5 is a perfect compromise between size and CPU usage, offering about 75% 15 | # reduction for most ASCII files (almost identical to level 9). 16 | # Default: 1 17 | gzip_comp_level 6; 18 | 19 | # Don't compress anything that's already small and unlikely to shrink much if at 20 | # all (the default is 20 bytes, which is bad as that usually leads to larger 21 | # files after gzipping). 22 | # Default: 20 23 | gzip_min_length 256; 24 | 25 | # Compress data even for clients that are connecting to us via proxies, 26 | # identified by the "Via" header (required for CloudFront). 27 | # Default: off 28 | gzip_proxied any; 29 | 30 | # Tell proxies to cache both the gzipped and regular version of a resource 31 | # whenever the client's Accept-Encoding capabilities header varies; 32 | # Avoids the issue where a non-gzip capable client (which is extremely rare 33 | # today) would display gibberish if their proxy gave them the gzipped version. 34 | # Default: off 35 | gzip_vary on; 36 | 37 | # Compress all output labeled with one of the following MIME-types. 38 | # `text/html` is always compressed by gzip module. 
39 | # Default: text/html 40 | gzip_types 41 | application/atom+xml 42 | application/geo+json 43 | application/javascript 44 | application/x-javascript 45 | application/json 46 | application/ld+json 47 | application/manifest+json 48 | application/rdf+xml 49 | application/rss+xml 50 | application/vnd.ms-fontobject 51 | application/wasm 52 | application/x-web-app-manifest+json 53 | application/xhtml+xml 54 | application/xml 55 | font/eot 56 | font/otf 57 | font/ttf 58 | image/bmp 59 | image/svg+xml 60 | text/cache-manifest 61 | text/calendar 62 | text/css 63 | text/javascript 64 | text/markdown 65 | text/plain 66 | text/xml 67 | text/vcard 68 | text/vnd.rim.location.xloc 69 | text/vtt 70 | text/x-component 71 | text/x-cross-domain-policy; 72 | -------------------------------------------------------------------------------- /examples/modsecurity/Dockerfile.oss: -------------------------------------------------------------------------------- 1 | FROM nginxinc/nginx-s3-gateway@sha256:8aa48324479b3653b5936183cc97f2ca1aa9078d229042f1bca357834bd906f4 2 | 3 | ENV MODSECURITY_VERSION "v1.0.1" 4 | ENV OWASP_RULESET_VERSION "v3.3.0" 5 | ENV OWASP_RULESET_CHECKSUM "1f4002b5cf941a9172b6250cea7e3465a85ef6ee" 6 | 7 | # Build modsecurity module from source because there is no repository package 8 | # Download OWASP ruleset 9 | RUN set -eux \ 10 | export DEBIAN_FRONTEND=noninteractive; \ 11 | apt-get update -qq; \ 12 | apt-get install -y -qq build-essential libpcre3-dev git libmodsecurity3 libmodsecurity-dev curl libdigest-sha-perl; \ 13 | curl -o /tmp/nginx.tar.gz --retry 6 -Ls "http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"; \ 14 | mkdir /tmp/nginx /tmp/modsecurity; \ 15 | tar -C /tmp/nginx --strip-components 1 -xzf /tmp/nginx.tar.gz; \ 16 | curl -o /tmp/modsecurity.tar.gz --retry 6 -Ls "https://github.com/SpiderLabs/ModSecurity-nginx/archive/${MODSECURITY_VERSION}.tar.gz"; \ 17 | tar -C "/tmp/modsecurity" --strip-components 1 -xzf /tmp/modsecurity.tar.gz; \ 18 | cd 
/tmp/nginx; \ 19 | ./configure --add-dynamic-module=/tmp/modsecurity \ 20 | --without-http_gzip_module \ 21 | --prefix=/etc/nginx \ 22 | --sbin-path=/usr/sbin/nginx \ 23 | --modules-path=/usr/lib/nginx/modules \ 24 | --conf-path=/etc/nginx/nginx.conf \ 25 | --error-log-path=/var/log/nginx/error.log \ 26 | --http-log-path=/var/log/nginx/access.log \ 27 | --pid-path=/var/run/nginx.pid \ 28 | --lock-path=/var/run/nginx.lock \ 29 | --http-client-body-temp-path=/var/cache/nginx/client_temp \ 30 | --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ 31 | --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ 32 | --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ 33 | --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ 34 | --user=nginx --group=nginx --with-compat --with-file-aio \ 35 | --with-threads \ 36 | --with-compat \ 37 | --with-cc-opt="-g -O2 -fdebug-prefix-map=/data/builder/debuild/nginx-${NGINX_VERSION}/debian/debuild-base/nginx-${NGINX_VERSION}=. -fstack-protector-strong -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC" \ 38 | --with-ld-opt='-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'; \ 39 | make -j $(nproc) modules; \ 40 | cp /tmp/nginx/objs/ngx_http_modsecurity_module.so /usr/lib/nginx/modules; \ 41 | curl -o /tmp/coreruleset.tar.gz --retry 6 -Ls "https://github.com/coreruleset/coreruleset/archive/${OWASP_RULESET_VERSION}.tar.gz"; \ 42 | echo "${OWASP_RULESET_CHECKSUM} /tmp/coreruleset.tar.gz" | shasum -c; \ 43 | mkdir -p /usr/local/nginx/conf/owasp-modsecurity-crs; \ 44 | tar -C /usr/local/nginx/conf/owasp-modsecurity-crs --strip-components 1 -xzf /tmp/coreruleset.tar.gz; \ 45 | apt-get purge -y --auto-remove build-essential libpcre3-dev git libmodsecurity-dev curl libdigest-sha-perl; \ 46 | rm -rf /var/lib/apt/lists/* /var/tmp/* /tmp/* 47 | 48 | # Update configuration to load module 49 | RUN sed -i '1s#^#load_module modules/ngx_http_modsecurity_module.so;\n#' /etc/nginx/nginx.conf 50 | 51 | # Update configuration to include modsecurity 
52 | RUN sed -i 's#server {#server \{\n include /etc/nginx/conf.d/gateway/modsecurity.conf;#' /etc/nginx/templates/default.conf.template 53 | 54 | COPY etc/nginx /etc/nginx 55 | COPY usr/local /usr/local 56 | -------------------------------------------------------------------------------- /examples/modsecurity/Dockerfile.plus: -------------------------------------------------------------------------------- 1 | FROM nginx-plus-s3-gateway 2 | 3 | ENV OWASP_RULESET_VERSION "v3.3.0" 4 | ENV OWASP_RULESET_CHECKSUM "1f4002b5cf941a9172b6250cea7e3465a85ef6ee" 5 | 6 | # Install modsecurity from the NGINX Plus repository and download OWASP ruleset 7 | RUN set -eux \ 8 | export DEBIAN_FRONTEND=noninteractive; \ 9 | apt-get -qq update; \ 10 | apt-get -qq install --no-install-recommends --no-install-suggests -y curl libdigest-sha-perl nginx-plus-module-modsecurity; \ 11 | curl -o /tmp/coreruleset.tar.gz --retry 6 -Ls "https://github.com/coreruleset/coreruleset/archive/${OWASP_RULESET_VERSION}.tar.gz"; \ 12 | echo "${OWASP_RULESET_CHECKSUM} /tmp/coreruleset.tar.gz" | shasum -c; \ 13 | mkdir -p /usr/local/nginx/conf/owasp-modsecurity-crs; \ 14 | tar -C /usr/local/nginx/conf/owasp-modsecurity-crs --strip-components 1 -xzf /tmp/coreruleset.tar.gz; \ 15 | apt-get -qq purge curl libdigest-sha-perl; \ 16 | rm -rf /var/lib/apt/lists/* 17 | 18 | # Update configuration to load module 19 | RUN sed -i '1s#^#load_module modules/ngx_http_modsecurity_module.so;\n#' /etc/nginx/nginx.conf 20 | 21 | # Update configuration to include modsecurity 22 | RUN sed -i 's#server {#server \{\n include /etc/nginx/conf.d/gateway/modsecurity.conf;#' /etc/nginx/templates/default.conf.template 23 | 24 | COPY etc/nginx /etc/nginx 25 | COPY usr/local /usr/local 26 | -------------------------------------------------------------------------------- /examples/modsecurity/etc/nginx/conf.d/gateway/modsecurity.conf: -------------------------------------------------------------------------------- 1 | modsecurity on; 2 | 
modsecurity_rules_file /etc/nginx/modsec/main.conf; 3 | -------------------------------------------------------------------------------- /examples/modsecurity/etc/nginx/modsec/main.conf: -------------------------------------------------------------------------------- 1 | # Include the recommended configuration 2 | Include modsecurity.conf 3 | 4 | # Rules from OWASP ruleset 5 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/crs-setup.conf 6 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-901-INITIALIZATION.conf 7 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-905-COMMON-EXCEPTIONS.conf 8 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-911-METHOD-ENFORCEMENT.conf 9 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-912-DOS-PROTECTION.conf 10 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-913-SCANNER-DETECTION.conf 11 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf 12 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-921-PROTOCOL-ATTACK.conf 13 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/REQUEST-949-BLOCKING-EVALUATION.conf 14 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/RESPONSE-950-DATA-LEAKAGES.conf 15 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/RESPONSE-959-BLOCKING-EVALUATION.conf 16 | Include /usr/local/nginx/conf/owasp-modsecurity-crs/rules/RESPONSE-980-CORRELATION.conf 17 | -------------------------------------------------------------------------------- /examples/modsecurity/etc/nginx/modsec/modsecurity.conf: -------------------------------------------------------------------------------- 1 | # -- Rule engine initialization ---------------------------------------------- 2 | 3 | # Enable ModSecurity, attaching it to every transaction. Use detection 4 | # only to start with, because that minimises the chances of post-installation 5 | # disruption. 
6 | # 7 | SecRuleEngine On 8 | 9 | 10 | # -- Request body handling --------------------------------------------------- 11 | 12 | # Allow ModSecurity to access request bodies. If you don't, ModSecurity 13 | # won't be able to see any POST parameters, which opens a large security 14 | # hole for attackers to exploit. 15 | # 16 | SecRequestBodyAccess On 17 | 18 | 19 | # Enable XML request body parser. 20 | # Initiate XML Processor in case of xml content-type 21 | # 22 | SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \ 23 | "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML" 24 | 25 | # Enable JSON request body parser. 26 | # Initiate JSON Processor in case of JSON content-type; change accordingly 27 | # if your application does not use 'application/json' 28 | # 29 | SecRule REQUEST_HEADERS:Content-Type "application/json" \ 30 | "id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON" 31 | 32 | # Maximum request body size we will accept for buffering. If you support 33 | # file uploads then the value given on the first line has to be as large 34 | # as the largest file you are willing to accept. The second value refers 35 | # to the size of data, with files excluded. You want to keep that value as 36 | # low as practical. 37 | # 38 | SecRequestBodyLimit 13107200 39 | SecRequestBodyNoFilesLimit 131072 40 | 41 | # What do do if the request body size is above our configured limit. 42 | # Keep in mind that this setting will automatically be set to ProcessPartial 43 | # when SecRuleEngine is set to DetectionOnly mode in order to minimize 44 | # disruptions when initially deploying ModSecurity. 45 | # 46 | SecRequestBodyLimitAction Reject 47 | 48 | # Verify that we've correctly processed the request body. 
49 | # As a rule of thumb, when failing to process a request body 50 | # you should reject the request (when deployed in blocking mode) 51 | # or log a high-severity alert (when deployed in detection-only mode). 52 | # 53 | SecRule REQBODY_ERROR "!@eq 0" \ 54 | "id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2" 55 | 56 | # By default be strict with what we accept in the multipart/form-data 57 | # request body. If the rule below proves to be too strict for your 58 | # environment consider changing it to detection-only. You are encouraged 59 | # _not_ to remove it altogether. 60 | # 61 | SecRule MULTIPART_STRICT_ERROR "!@eq 0" \ 62 | "id:'200003',phase:2,t:none,log,deny,status:400, \ 63 | msg:'Multipart request body failed strict validation: \ 64 | PE %{REQBODY_PROCESSOR_ERROR}, \ 65 | BQ %{MULTIPART_BOUNDARY_QUOTED}, \ 66 | BW %{MULTIPART_BOUNDARY_WHITESPACE}, \ 67 | DB %{MULTIPART_DATA_BEFORE}, \ 68 | DA %{MULTIPART_DATA_AFTER}, \ 69 | HF %{MULTIPART_HEADER_FOLDING}, \ 70 | LF %{MULTIPART_LF_LINE}, \ 71 | SM %{MULTIPART_MISSING_SEMICOLON}, \ 72 | IQ %{MULTIPART_INVALID_QUOTING}, \ 73 | IP %{MULTIPART_INVALID_PART}, \ 74 | IH %{MULTIPART_INVALID_HEADER_FOLDING}, \ 75 | FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'" 76 | 77 | # Did we see anything that might be a boundary? 78 | # 79 | # Here is a short description about the ModSecurity Multipart parser: the 80 | # parser returns with value 0, if all "boundary-like" line matches with 81 | # the boundary string which given in MIME header. In any other cases it returns 82 | # with different value, eg. 1 or 2. 
# RFC 1341 describes the multipart content-type, and its syntax must contain
120 | # 121 | SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \ 122 | "id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'" 123 | 124 | 125 | # PCRE Tuning 126 | # We want to avoid a potential RegEx DoS condition 127 | # 128 | SecPcreMatchLimit 1000 129 | SecPcreMatchLimitRecursion 1000 130 | 131 | # Some internal errors will set flags in TX and we will need to look for these. 132 | # All of these are prefixed with "MSC_". The following flags currently exist: 133 | # 134 | # MSC_PCRE_LIMITS_EXCEEDED: PCRE match limits were exceeded. 135 | # 136 | SecRule TX:/^MSC_/ "!@streq 0" \ 137 | "id:'200005',phase:2,t:none,deny,msg:'ModSecurity internal error flagged: %{MATCHED_VAR_NAME}'" 138 | 139 | 140 | # -- Response body handling -------------------------------------------------- 141 | 142 | # Allow ModSecurity to access response bodies. 143 | # You should have this directive enabled in order to identify errors 144 | # and data leakage issues. 145 | # 146 | # Do keep in mind that enabling this directive does increases both 147 | # memory consumption and response latency. 148 | # 149 | SecResponseBodyAccess On 150 | 151 | # Which response MIME types do you want to inspect? You should adjust the 152 | # configuration below to catch documents but avoid static files 153 | # (e.g., images and archives). 154 | # 155 | SecResponseBodyMimeType text/plain text/html text/xml 156 | 157 | # Buffer response bodies of up to 512 KB in length. 158 | SecResponseBodyLimit 524288 159 | 160 | # What happens when we encounter a response body larger than the configured 161 | # limit? By default, we process what we have and let the rest through. 162 | # That's somewhat less secure, but does not break any legitimate pages. 
163 | # 164 | SecResponseBodyLimitAction ProcessPartial 165 | 166 | 167 | # -- Filesystem configuration ------------------------------------------------ 168 | 169 | # The location where ModSecurity stores temporary files (for example, when 170 | # it needs to handle a file upload that is larger than the configured limit). 171 | # 172 | # This default setting is chosen due to all systems have /tmp available however, 173 | # this is less than ideal. It is recommended that you specify a location that's private. 174 | # 175 | SecTmpDir /tmp/ 176 | 177 | # The location where ModSecurity will keep its persistent data. This default setting 178 | # is chosen due to all systems have /tmp available however, it 179 | # too should be updated to a place that other users can't access. 180 | # 181 | SecDataDir /var/tmp/ 182 | 183 | 184 | # -- File uploads handling configuration ------------------------------------- 185 | 186 | # The location where ModSecurity stores intercepted uploaded files. This 187 | # location must be private to ModSecurity. You don't want other users on 188 | # the server to access the files, do you? 189 | # 190 | #SecUploadDir /opt/modsecurity/var/upload/ 191 | 192 | # By default, only keep the files that were determined to be unusual 193 | # in some way (by an external inspection script). For this to work you 194 | # will also need at least one file inspection rule. 195 | # 196 | #SecUploadKeepFiles RelevantOnly 197 | 198 | # Uploaded files are by default created with permissions that do not allow 199 | # any other user to access them. You may need to relax that if you want to 200 | # interface ModSecurity to an external program (e.g., an anti-virus). 201 | # 202 | #SecUploadFileMode 0600 203 | 204 | 205 | # -- Debug log configuration ------------------------------------------------- 206 | 207 | # The default debug log configuration is to duplicate the error, warning 208 | # and notice messages from the error log. 
209 | # 210 | #SecDebugLog /opt/modsecurity/var/log/debug.log 211 | #SecDebugLogLevel 3 212 | 213 | 214 | # -- Audit log configuration ------------------------------------------------- 215 | 216 | # Log the transactions that are marked by a rule, as well as those that 217 | # trigger a server error (determined by a 5xx or 4xx, excluding 404, 218 | # level response status codes). 219 | # 220 | SecAuditEngine RelevantOnly 221 | SecAuditLogRelevantStatus "^(?:5|4(?!04))" 222 | 223 | # Log everything we know about a transaction. 224 | SecAuditLogParts ABIJDEFHZ 225 | 226 | # Use a single file for logging. This is much easier to look at, but 227 | # assumes that you will use the audit log only ocassionally. 228 | # 229 | SecAuditLogType Serial 230 | #SecAuditLog /var/log/modsec_audit.log 231 | SecAuditLog /dev/stdout 232 | 233 | # Specify the path for concurrent audit logging. 234 | #SecAuditLogStorageDir /opt/modsecurity/var/audit/ 235 | 236 | 237 | # -- Miscellaneous ----------------------------------------------------------- 238 | 239 | # Use the most commonly used application/x-www-form-urlencoded parameter 240 | # separator. There's probably only one application somewhere that uses 241 | # something else so don't expect to change this value. 242 | # 243 | SecArgumentSeparator & 244 | 245 | # Settle on version 0 (zero) cookies, as that is what most applications 246 | # use. Using an incorrect cookie version may open your installation to 247 | # evasion attacks (against the rules that examine named cookies). 248 | # 249 | SecCookieFormat 0 250 | 251 | # Specify your Unicode Code Point. 252 | # This mapping is used by the t:urlDecodeUni transformation function 253 | # to properly map encoded data to your language. Properly setting 254 | # these directives helps to reduce false positives and negatives. 
255 | # 256 | SecUnicodeMapFile unicode.mapping 20127 257 | 258 | # Improve the quality of ModSecurity by sharing information about your 259 | # current ModSecurity version and dependencies versions. 260 | # The following information will be shared: ModSecurity version, 261 | # Web Server version, APR version, PCRE version, Lua version, Libxml2 262 | # version, Anonymous unique id for host. 263 | SecStatusEngine On 264 | 265 | -------------------------------------------------------------------------------- /jsdoc/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "plugins": [ 3 | "node_modules/better-docs/typescript" 4 | ], 5 | "source": { 6 | "include": ["./common/etc/nginx/include", "./node_modules/njs-types"], 7 | "includePattern": "\\.(jsx|js|ts|tsx)$", 8 | "excludePattern": "(^|\\/|\\\\)_" 9 | }, 10 | "opts": { 11 | "template": "./node_modules/better-docs", 12 | "readme": "README.md", 13 | "verbose": true 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /oss/etc/nginx/conf.d/gateway/server_variables.conf: -------------------------------------------------------------------------------- 1 | # Variable indicating to the awssig4.js script that singing key 2 | # caching is turned off. This feature uses the keyval store, so it 3 | # is only enabled when using NGINX Plus. 4 | set $cache_signing_key_enabled 0; 5 | 6 | # Variable indicating to the awscredentials.js script that session token 7 | # caching is turned on. This feature uses the keyval store, so it 8 | # is only enabled when using NGINX Plus. 9 | set $cache_instance_credentials_enabled 0; 10 | -------------------------------------------------------------------------------- /oss/etc/nginx/templates/upstreams.conf.template: -------------------------------------------------------------------------------- 1 | # This configuration should dynamically reload S3 backends 2 | # as they change in DNS. 
3 | 4 | # Use NGINX's non-blocking DNS resolution 5 | resolver ${DNS_RESOLVERS}; 6 | 7 | upstream storage_urls { 8 | zone s3_backends 64k; 9 | 10 | server ${S3_UPSTREAM} resolve; 11 | } 12 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "better-docs": "^2.7.3", 4 | "jsdoc": "^4.0.4", 5 | "njs-types": "^0.8.2", 6 | "taffydb": "^2.7.3" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /plus/docker-entrypoint.d/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Copyright 2020 F5 Networks 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # vim:sw=4:ts=4:et 19 | 20 | set -e 21 | 22 | ME=$(basename $0) 23 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 24 | 25 | # check if we have ipv6 available 26 | if [ ! -f "/proc/net/if_inet6" ]; then 27 | echo >&3 "$ME: error: ipv6 not available" 28 | exit 0 29 | fi 30 | 31 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 32 | echo >&3 "$ME: error: /$DEFAULT_CONF_FILE is not a file or does not exist" 33 | exit 0 34 | fi 35 | 36 | # check if the file can be modified, e.g. 
not on a r/o filesystem 37 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { echo >&3 "$ME: error: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 38 | 39 | # check if the file is already modified, e.g. on a container restart 40 | grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { echo >&3 "$ME: error: IPv6 listen already enabled"; exit 0; } 41 | 42 | if [ -f "/etc/os-release" ]; then 43 | . /etc/os-release 44 | else 45 | echo >&3 "$ME: error: can not guess the operating system" 46 | exit 0 47 | fi 48 | 49 | echo >&3 "$ME: Getting the checksum of /$DEFAULT_CONF_FILE" 50 | 51 | case "$ID" in 52 | "debian") 53 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 54 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 55 | echo >&3 "$ME: error: /$DEFAULT_CONF_FILE differs from the packaged version" 56 | exit 0 57 | } 58 | ;; 59 | "alpine") 60 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 61 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 62 | echo >&3 "$ME: error: /$DEFAULT_CONF_FILE differs from the packages version" 63 | exit 0 64 | } 65 | ;; 66 | *) 67 | echo >&3 "$ME: error: Unsupported distribution" 68 | exit 0 69 | ;; 70 | esac 71 | 72 | # enable ipv6 on default.conf listen sockets 73 | sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE 74 | 75 | echo >&3 "$ME: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 76 | 77 | exit 0 78 | -------------------------------------------------------------------------------- /plus/docker-entrypoint.d/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright 2020 F5 Networks 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | set -e 20 | 21 | ME=$(basename $0) 22 | 23 | auto_envsubst() { 24 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 25 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 26 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 27 | 28 | local template defined_envs relative_path output_path subdir 29 | defined_envs=$(printf '${%s} ' $(env | cut -d= -f1)) 30 | [ -d "$template_dir" ] || return 0 31 | if [ ! -w "$output_dir" ]; then 32 | echo >&3 "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 33 | return 0 34 | fi 35 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 36 | relative_path="${template#$template_dir/}" 37 | output_path="$output_dir/${relative_path%$suffix}" 38 | subdir=$(dirname "$relative_path") 39 | # create a subdirectory where the template file exists 40 | mkdir -p "$output_dir/$subdir" 41 | echo >&3 "$ME: Running envsubst on $template to $output_path" 42 | envsubst "$defined_envs" < "$template" > "$output_path" 43 | done 44 | } 45 | 46 | auto_envsubst 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /plus/etc/nginx/conf.d/gateway/server_variables.conf: -------------------------------------------------------------------------------- 1 | # Variable indicating to the s3gateway.js script that singing key 2 | # caching is turned on. This feature uses the keyval store, so it 3 | # is only enabled when using NGINX Plus. 
4 | set $cache_signing_key_enabled 1; 5 | 6 | # Variable indicating to the s3gateway.js script that session token 7 | # caching is turned on. This feature uses the keyval store, so it 8 | # is only enabled when using NGINX Plus. 9 | set $cache_instance_credentials_enabled 1; 10 | -------------------------------------------------------------------------------- /plus/etc/nginx/conf.d/instance_credential_cache.conf: -------------------------------------------------------------------------------- 1 | # This key value zone allows us to cache a portion of the cryptographic 2 | # signatures used by AWS v4 signatures. 3 | keyval_zone zone=instance_credential_cache:32k type=string timeout=6h; 4 | keyval 'instance_credential' $instance_credential_json zone=instance_credential_cache; 5 | -------------------------------------------------------------------------------- /plus/etc/nginx/conf.d/v4_signing_key_cache.conf: -------------------------------------------------------------------------------- 1 | # This key value zone allows us to cache a portion of the cryptographic 2 | # signatures used by AWS v4 signatures. 3 | keyval_zone zone=aws_signing_cache:32k type=string timeout=24h; 4 | keyval 'aws_signing_key_hash' $signing_key_hash zone=aws_signing_cache; 5 | -------------------------------------------------------------------------------- /plus/etc/nginx/templates/upstreams.conf.template: -------------------------------------------------------------------------------- 1 | # This configuration should dynamically reload S3 backends 2 | # as they change in DNS. 
3 | 4 | # Use NGINX's non-blocking DNS resolution 5 | resolver ${DNS_RESOLVERS}; 6 | 7 | upstream storage_urls { 8 | zone s3_backends 64k; 9 | 10 | server ${S3_UPSTREAM} resolve; 11 | } 12 | -------------------------------------------------------------------------------- /plus/etc/ssl/nginx/.gitignore: -------------------------------------------------------------------------------- 1 | nginx-repo.crt 2 | nginx-repo.key 3 | -------------------------------------------------------------------------------- /settings.example: -------------------------------------------------------------------------------- 1 | S3_BUCKET_NAME=my-bucket 2 | AWS_ACCESS_KEY_ID=ZZZZZZZZZZZZZZZZZZZZ 3 | AWS_SECRET_ACCESS_KEY=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 4 | AWS_SESSION_TOKEN=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 5 | S3_SERVER=s3.us-east-1.amazonaws.com 6 | S3_SERVER_PORT=443 7 | S3_SERVER_PROTO=https 8 | S3_REGION=us-east-1 9 | S3_STYLE=virtual-v2 10 | S3_SERVICE=s3 11 | DEBUG=false 12 | AWS_SIGS_VERSION=4 13 | ALLOW_DIRECTORY_LIST=false 14 | PROVIDE_INDEX_PAGE=false 15 | APPEND_SLASH_FOR_POSSIBLE_DIRECTORY=false 16 | DIRECTORY_LISTING_PATH_PREFIX="" 17 | PROXY_CACHE_MAX_SIZE=10g 18 | PROXY_CACHE_SLICE_SIZE="1m" 19 | PROXY_CACHE_INACTIVE=60m 20 | PROXY_CACHE_VALID_OK=1h 21 | PROXY_CACHE_VALID_NOTFOUND=1m 22 | PROXY_CACHE_VALID_FORBIDDEN=30s 23 | STRIP_LEADING_DIRECTORY_PATH=/somepath -------------------------------------------------------------------------------- /test/data/.gitignore: -------------------------------------------------------------------------------- 1 | bucket-1/a/%@!*()=$#^&|.txt 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/a.txt: -------------------------------------------------------------------------------- 1 | Let go, or be dragged. 
2 | -------------------------------------------------------------------------------- /test/data/bucket-1/a/abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.txt: -------------------------------------------------------------------------------- 1 | "Where the is not one thing, what then?" 2 | "Throw it away!" 3 | "With not one thing, what there is to throw away?" 4 | "Then carry it off!" 5 | -------------------------------------------------------------------------------- /test/data/bucket-1/a/c/あ: -------------------------------------------------------------------------------- 1 | aaaaaaaaaahhhhhhhhhhhhh 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/a/plus+plus.txt: -------------------------------------------------------------------------------- 1 | 代悲白頭翁   Lament for the White-Haired Old Man 2 | 洛陽城東桃李花 In the east of Luoyang City, Peach blossoms abound 3 | 飛來飛去落誰家 Their petals float around, coming and going, to whose house will they fall? 4 | 洛陽女児惜顔色 Girls in Luoyang cherish their complexion 5 | 行逢落花長歎息 They breathe a deep sigh upon seeing the petals fall 6 | 今年花落顔色改 This year the petals fall and their complexion changes 7 | 明年花開復誰在 Who will be there when the flowers bloom next year? 
8 | 已見松柏摧為薪 I've seen the pines and cypresses destroyed and turned into firewood 9 | 更聞桑田変成海 I hear that the mulberry fields have fallen into the sea 10 | 古人無復洛城東 The people of old never came back to the east of Luoyang City 11 | 今人還對落花風 The people of today likewise face the falling flowers in the wind 12 | 年年歳歳花相似 Year after year, flowers look alike 13 | 歳歳年年人不同 Year after year, the people are not the same 14 | 寄言全盛紅顔子 I want you to get this message, my child, you are in your prime, with a rosy complexion 15 | 應憐半死白頭翁 Take pity on the half-dead white-haired old man 16 | 此翁白頭真可憐 You really must take pity on this white-haired old man 17 | 伊昔紅顔美少年 For once upon a time, I used to be a red-faced handsome young man 18 | 公子王孫芳樹下 A child of noble birth under a fragrant tree 19 | 清歌妙舞落花前 Singing and dancing in front of the falling petals 20 | 光禄池臺開錦繍 At the platform before the mirror pond, beautiful autumn leaves opening all around 21 | 将軍楼閣畫神仙 The general’s pavilion is painted with gods and goddesses 22 | 一朝臥病無相識 Once I was sick and no one knew me 23 | 三春行楽在誰邉 Who will be at the shore for the spring outing? 24 | 宛轉蛾眉能幾時 For how long will the moths gracefully turn about? 25 | 須臾鶴髪亂如絲 The crane’s feathers are like tangled threads for just a moment 26 | 但看古来歌舞地 Yet, look at the ancient places of song and dance 27 | 惟有黄昏鳥雀悲 Only in twilight, do the birds lament 28 | -------------------------------------------------------------------------------- /test/data/bucket-1/a/これは This is ASCII системы חן .txt: -------------------------------------------------------------------------------- 1 | What name do you use to call out to the emptiness? 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/b/c/'(1).txt: -------------------------------------------------------------------------------- 1 | In the midst of movement and chaos, keep stillness inside of you. 
-------------------------------------------------------------------------------- /test/data/bucket-1/b/c/=: -------------------------------------------------------------------------------- 1 | This is an awful filename. 2 | このフィール名を選ばないでください 3 | -------------------------------------------------------------------------------- /test/data/bucket-1/b/c/@: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nginx/nginx-s3-gateway/5652fa1d1be939f846de05a33e9651a0e601f259/test/data/bucket-1/b/c/@ -------------------------------------------------------------------------------- /test/data/bucket-1/b/c/d.txt: -------------------------------------------------------------------------------- 1 | When thoughts arise, then do all things arise. When thoughts vanish, then do all things vanish. 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/b/e.txt: -------------------------------------------------------------------------------- 1 | If only you could hear the sound of snow. 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/b/クズ箱/ゴミ.txt: -------------------------------------------------------------------------------- 1 | What is the sound of two hands clapping? 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/b/ブツブツ.txt: -------------------------------------------------------------------------------- 1 | Relax. Nothing is under control. 
2 | -------------------------------------------------------------------------------- /test/data/bucket-1/index.html: -------------------------------------------------------------------------------- 1 | Test whether redirects to index.html -------------------------------------------------------------------------------- /test/data/bucket-1/statichost/index.html: -------------------------------------------------------------------------------- 1 | static index -------------------------------------------------------------------------------- /test/data/bucket-1/statichost/noindexdir/multipledir/index.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nginx/nginx-s3-gateway/5652fa1d1be939f846de05a33e9651a0e601f259/test/data/bucket-1/statichost/noindexdir/multipledir/index.html -------------------------------------------------------------------------------- /test/data/bucket-1/statichost/noindexdir/noindex.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nginx/nginx-s3-gateway/5652fa1d1be939f846de05a33e9651a0e601f259/test/data/bucket-1/statichost/noindexdir/noindex.html -------------------------------------------------------------------------------- /test/data/bucket-1/test/index.html: -------------------------------------------------------------------------------- 1 |

This is an index page of the d directory

-------------------------------------------------------------------------------- /test/data/bucket-1/системы/%bad%file%name%: -------------------------------------------------------------------------------- 1 | Filename encoding issues are hard. 2 | -------------------------------------------------------------------------------- /test/data/bucket-1/системы/system.txt: -------------------------------------------------------------------------------- 1 | The rewrite will not fix it. 2 | -------------------------------------------------------------------------------- /test/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nginx-s3-gateway: 5 | # If minio client is up and running properly, we are reasonably sure that 6 | # minio has properly started. That's why we depend on it here. 7 | depends_on: 8 | minio: 9 | condition: service_healthy 10 | image: "nginx-s3-gateway" 11 | ports: 12 | - "8989:${NGINX_INTERNAL_PORT-80}/tcp" 13 | links: 14 | - "minio" 15 | restart: "no" 16 | environment: 17 | S3_BUCKET_NAME: "bucket-1" 18 | AWS_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE" 19 | AWS_SECRET_ACCESS_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 20 | S3_SERVER: "minio" 21 | S3_SERVER_PORT: "9000" 22 | S3_SERVER_PROTO: "http" 23 | S3_REGION: "us-east-1" 24 | DEBUG: "true" 25 | S3_STYLE: "${S3_STYLE:-virtual-v2}" 26 | S3_SERVICE: "s3" 27 | ALLOW_DIRECTORY_LIST: 28 | PROVIDE_INDEX_PAGE: 29 | APPEND_SLASH_FOR_POSSIBLE_DIRECTORY: 30 | STRIP_LEADING_DIRECTORY_PATH: 31 | PREFIX_LEADING_DIRECTORY_PATH: 32 | AWS_SIGS_VERSION: 33 | STATIC_SITE_HOSTING: 34 | PROXY_CACHE_MAX_SIZE: "10g" 35 | PROXY_CACHE_SLICE_SIZE: "1m" 36 | PROXY_CACHE_INACTIVE: "60m" 37 | PROXY_CACHE_VALID_OK: "1h" 38 | PROXY_CACHE_VALID_NOTFOUND: "1m" 39 | PROXY_CACHE_VALID_FORBIDDEN: "30s" 40 | 41 | minio: 42 | image: quay.io/minio/minio:RELEASE.2023-06-09T07-32-12Z 43 | hostname: bucket-1.minio 44 | ports: 45 | - "9090:9000/tcp" 46 
| restart: "no" 47 | command: minio server /data 48 | environment: 49 | MINIO_ADDRESS: :9000 50 | MINIO_ROOT_USER: "AKIAIOSFODNN7EXAMPLE" 51 | MINIO_ROOT_PASSWORD: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 52 | MINIO_REGION_NAME: "us-east-1" 53 | MINIO_DOMAIN: "minio" 54 | MINIO_BROWSER: "off" 55 | healthcheck: 56 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 57 | interval: 5s 58 | timeout: 3s 59 | retries: 3 60 | # It may be useful to enable minio client so that you can debug what calls 61 | # were made to minio by the gateway. 62 | # minio-client: 63 | # depends_on: 64 | # - "minio" 65 | # image: "minio/mc" 66 | # restart: "no" 67 | # command: "admin trace --verbose nginx-test-gateway" 68 | # environment: 69 | # MC_HOST_nginx-test-gateway: "http://AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY@minio:9000" 70 | -------------------------------------------------------------------------------- /test/unit/awssig2_test.js: -------------------------------------------------------------------------------- 1 | #!env njs 2 | 3 | /* 4 | * Copyright 2023 F5, Inc. 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | import awssig2 from "include/awssig2.js"; 20 | 21 | 22 | function _runSignatureV2(r) { 23 | r.log = function(msg) { 24 | console.log(msg); 25 | } 26 | const timestamp = new Date('2020-08-11T19:42:14Z'); 27 | const bucket = 'test-bucket-1'; 28 | const accessKey = 'test-access-key-1'; 29 | const secret = 'pvgoBEA1z7zZKqN9RoKVksKh31AtNou+pspn+iyb' 30 | const creds = { 31 | accessKeyId:accessKey, secretAccessKey: secret, sessionToken: null 32 | }; 33 | 34 | const httpDate = timestamp.toUTCString(); 35 | const expected = 'AWS test-access-key-1:VviSS4cFhUC6eoB4CYqtRawzDrc='; 36 | const req_uri = '/'.concat(bucket, r.variables.uri_path); 37 | let signature = awssig2.signatureV2(r, req_uri, httpDate, creds); 38 | 39 | if (signature !== expected) { 40 | throw 'V2 signature hash was not created correctly.\n' + 41 | 'Actual: [' + signature + ']\n' + 42 | 'Expected: [' + expected + ']'; 43 | } 44 | } 45 | 46 | function testSignatureV2() { 47 | printHeader('testSignatureV2'); 48 | // Note: since this is a read-only gateway, host, query parameters and all 49 | // client headers will be ignored. 
50 | var r = { 51 | "remoteAddress" : "172.17.0.1", 52 | "headersIn" : { 53 | "Connection" : "keep-alive", 54 | "Accept-Encoding" : "gzip, deflate", 55 | "Accept-Language" : "en-US,en;q=0.7,ja;q=0.3", 56 | "Host" : "localhost:8999", 57 | "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0", 58 | "DNT" : "1", 59 | "Cache-Control" : "max-age=0", 60 | "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", 61 | "Upgrade-Insecure-Requests" : "1" 62 | }, 63 | "uri" : "/a/c/ramen.jpg", 64 | "method" : "GET", 65 | "httpVersion" : "1.1", 66 | "headersOut" : {}, 67 | "args" : { 68 | "foo" : "bar" 69 | }, 70 | "variables" : { 71 | "uri_path": "/a/c/ramen.jpg" 72 | }, 73 | "status" : 0 74 | }; 75 | 76 | _runSignatureV2(r); 77 | } 78 | 79 | async function test() { 80 | testSignatureV2(); 81 | } 82 | 83 | function printHeader(testName) { 84 | console.log(`\n## ${testName}`); 85 | } 86 | 87 | test(); 88 | console.log('Finished unit tests for awssig2.js'); 89 | -------------------------------------------------------------------------------- /test/unit/awssig4_test.js: -------------------------------------------------------------------------------- 1 | #!env njs 2 | 3 | /* 4 | * Copyright 2023 F5, Inc. 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | import awssig4 from "include/awssig4.js"; 20 | import utils from "include/utils.js"; 21 | 22 | 23 | function testBuildSigningKeyHashWithReferenceInputs() { 24 | printHeader('testBuildSigningKeyHashWithReferenceInputs'); 25 | var kSecret = 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY'; 26 | var date = '20150830'; 27 | var service = 'iam'; 28 | var region = 'us-east-1'; 29 | var expected = 'c4afb1cc5771d871763a393e44b703571b55cc28424d1a5e86da6ed3c154a4b9'; 30 | var signingKeyHash = awssig4._buildSigningKeyHash(kSecret, date, region, service).toString('hex'); 31 | 32 | if (signingKeyHash !== expected) { 33 | throw 'Signing key hash was not created correctly.\n' + 34 | 'Actual: [' + signingKeyHash + ']\n' + 35 | 'Expected: [' + expected + ']'; 36 | } 37 | } 38 | 39 | function testBuildSigningKeyHashWithTestSuiteInputs() { 40 | printHeader('testBuildSigningKeyHashWithTestSuiteInputs'); 41 | var kSecret = 'pvgoBEA1z7zZKqN9RoKVksKh31AtNou+pspn+iyb'; 42 | var date = '20200811'; 43 | var service = 's3'; 44 | var region = 'us-west-2'; 45 | var expected = 'a48701bfe803103e89051f55af2297dd76783bbceb5eb416dab71e0eadcbc4f6'; 46 | var signingKeyHash = awssig4._buildSigningKeyHash(kSecret, date, region, service).toString('hex'); 47 | 48 | if (signingKeyHash !== expected) { 49 | throw 'Signing key hash was not created correctly.\n' + 50 | 'Actual: [' + signingKeyHash + ']\n' + 51 | 'Expected: [' + expected + ']'; 52 | } 53 | } 54 | 55 | function _runSignatureV4(r) { 56 | r.log = function(msg) { 57 | console.log(msg); 58 | } 59 | var timestamp = new Date('2020-08-11T19:42:14Z'); 60 | var eightDigitDate = utils.getEightDigitDate(timestamp); 61 | var amzDatetime = utils.getAmzDatetime(timestamp, eightDigitDate); 62 | var bucket = 'ez-test-bucket-1' 63 | var secret = 'pvgoBEA1z7zZKqN9RoKVksKh31AtNou+pspn+iyb' 64 | var creds = {secretAccessKey: secret, sessionToken: null}; 65 | var region = 'us-west-2'; 66 | var service = 's3'; 67 | var server = 
's3-us-west-2.amazonaws.com'; 68 | 69 | const req = { 70 | uri : r.variables.uri_path, 71 | queryParams : '', 72 | host: bucket.concat('.', server) 73 | } 74 | const canonicalRequest = awssig4._buildCanonicalRequest(r, 75 | r.method, req.uri, req.queryParams, req.host, amzDatetime, creds.sessionToken); 76 | 77 | var expected = 'cf4dd9e1d28c74e2284f938011efc8230d0c20704f56f67e4a3bfc2212026bec'; 78 | var signature = awssig4._buildSignatureV4(r, 79 | amzDatetime, eightDigitDate, creds, region, service, canonicalRequest); 80 | 81 | if (signature !== expected) { 82 | throw 'V4 signature hash was not created correctly.\n' + 83 | 'Actual: [' + signature + ']\n' + 84 | 'Expected: [' + expected + ']'; 85 | } 86 | } 87 | 88 | function testSignatureV4() { 89 | printHeader('testSignatureV4'); 90 | // Note: since this is a read-only gateway, host, query parameters and all 91 | // client headers will be ignored. 92 | var r = { 93 | "remoteAddress" : "172.17.0.1", 94 | "headersIn" : { 95 | "Connection" : "keep-alive", 96 | "Accept-Encoding" : "gzip, deflate", 97 | "Accept-Language" : "en-US,en;q=0.7,ja;q=0.3", 98 | "Host" : "localhost:8999", 99 | "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0", 100 | "DNT" : "1", 101 | "Cache-Control" : "max-age=0", 102 | "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", 103 | "Upgrade-Insecure-Requests" : "1" 104 | }, 105 | "uri" : "/a/c/ramen.jpg", 106 | "method" : "GET", 107 | "httpVersion" : "1.1", 108 | "headersOut" : {}, 109 | "args" : { 110 | "foo" : "bar" 111 | }, 112 | "variables" : { 113 | "request_body": "", 114 | "uri_path": "/a/c/ramen.jpg" 115 | }, 116 | "status" : 0 117 | }; 118 | 119 | _runSignatureV4(r); 120 | } 121 | 122 | function testSignatureV4Cache() { 123 | printHeader('testSignatureV4Cache'); 124 | // Note: since this is a read-only gateway, host, query parameters and all 125 | // client headers will be ignored. 
126 | var r = { 127 | "remoteAddress" : "172.17.0.1", 128 | "headersIn" : { 129 | "Connection" : "keep-alive", 130 | "Accept-Encoding" : "gzip, deflate", 131 | "Accept-Language" : "en-US,en;q=0.7,ja;q=0.3", 132 | "Host" : "localhost:8999", 133 | "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0", 134 | "DNT" : "1", 135 | "Cache-Control" : "max-age=0", 136 | "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", 137 | "Upgrade-Insecure-Requests" : "1" 138 | }, 139 | "uri" : "/a/c/ramen.jpg", 140 | "method" : "GET", 141 | "httpVersion" : "1.1", 142 | "headersOut" : {}, 143 | "args" : { 144 | "foo" : "bar" 145 | }, 146 | "variables": { 147 | "cache_signing_key_enabled": 1, 148 | "request_body": "", 149 | "uri_path": "/a/c/ramen.jpg" 150 | }, 151 | "status" : 0 152 | }; 153 | 154 | _runSignatureV4(r); 155 | 156 | if (!"signing_key_hash" in r.variables) { 157 | throw "Hash key not written to r.variables.signing_key_hash"; 158 | } 159 | 160 | _runSignatureV4(r); 161 | } 162 | 163 | async function test() { 164 | testBuildSigningKeyHashWithReferenceInputs(); 165 | testBuildSigningKeyHashWithTestSuiteInputs(); 166 | testSignatureV4(); 167 | testSignatureV4Cache(); 168 | } 169 | 170 | function printHeader(testName) { 171 | console.log(`\n## ${testName}`); 172 | } 173 | 174 | test(); 175 | console.log('Finished unit tests for awssig4.js'); 176 | -------------------------------------------------------------------------------- /test/unit/utils_test.js: -------------------------------------------------------------------------------- 1 | #!env njs 2 | 3 | /* 4 | * Copyright 2023 F5, Inc. 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | import utils from "include/utils.js"; 20 | 21 | function testParseArray() { 22 | printHeader('testParseArray'); 23 | 24 | function testParseNull() { 25 | console.log(' ## testParseNull'); 26 | const actual = utils.parseArray(null); 27 | if (!Array.isArray(actual) || actual.length > 0) { 28 | throw 'Null not parsed into an empty array'; 29 | } 30 | } 31 | function testParseEmptyString() { 32 | console.log(' ## testParseEmptyString'); 33 | const actual = utils.parseArray(''); 34 | if (!Array.isArray(actual) || actual.length > 0) { 35 | throw 'Empty string not parsed into an empty array'; 36 | } 37 | } 38 | function testParseSingleValue() { 39 | console.log(' ## testParseSingleValue'); 40 | const value = 'Single Value'; 41 | const actual = utils.parseArray(value); 42 | if (!Array.isArray(actual) || actual.length !== 1) { 43 | throw 'Single value not parsed into an array with a single element'; 44 | } 45 | if (actual[0] !== value) { 46 | throw `Unexpected array element: ${actual[0]}` 47 | } 48 | } 49 | function testParseMultipleValues() { 50 | console.log(' ## testParseMultipleValues'); 51 | const values = ['string 1', 'something else', 'Yet another value']; 52 | const textValues = values.join(';'); 53 | const actual = utils.parseArray(textValues); 54 | if (!Array.isArray(actual) || actual.length !== values.length) { 55 | throw 'Multiple values not parsed into an array with the expected length'; 56 | } 57 | for (let i = 0; i < values.length; i++) { 58 | if (values[i] !== actual[i]) { 59 | throw `Unexpected 
array element [${i}]: ${actual[i]}` 60 | } 61 | } 62 | } 63 | 64 | function testParseMultipleValuesTrailingDelimiter() { 65 | console.log(' ## testParseMultipleValuesTrailingDelimiter'); 66 | const values = ['string 1', 'something else', 'Yet another value']; 67 | const textValues = values.join(';'); 68 | const actual = utils.parseArray(textValues + ';'); 69 | if (!Array.isArray(actual) || actual.length !== values.length) { 70 | throw 'Multiple values not parsed into an array with the expected length'; 71 | } 72 | for (let i = 0; i < values.length; i++) { 73 | if (values[i] !== actual[i]) { 74 | throw `Unexpected array element [${i}]: ${actual[i]}` 75 | } 76 | } 77 | } 78 | 79 | testParseNull(); 80 | testParseEmptyString(); 81 | testParseSingleValue(); 82 | testParseMultipleValues(); 83 | testParseMultipleValuesTrailingDelimiter(); 84 | } 85 | 86 | function testAmzDatetime() { 87 | printHeader('testAmzDatetime'); 88 | var timestamp = new Date('2020-08-03T02:01:09.004Z'); 89 | var eightDigitDate = utils.getEightDigitDate(timestamp); 90 | var amzDatetime = utils.getAmzDatetime(timestamp, eightDigitDate); 91 | var expected = '20200803T020109Z'; 92 | 93 | if (amzDatetime !== expected) { 94 | throw 'Amazon date time was not created correctly.\n' + 95 | 'Actual: [' + amzDatetime + ']\n' + 96 | 'Expected: [' + expected + ']'; 97 | } 98 | } 99 | 100 | function testEightDigitDate() { 101 | printHeader('testEightDigitDate'); 102 | var timestamp = new Date('2020-08-03T02:01:09.004Z'); 103 | var eightDigitDate = utils.getEightDigitDate(timestamp); 104 | var expected = '20200803'; 105 | 106 | if (eightDigitDate !== expected) { 107 | throw 'Eight digit date was not created correctly.\n' + 108 | 'Actual: ' + eightDigitDate + '\n' + 109 | 'Expected: ' + expected; 110 | } 111 | } 112 | 113 | function testPad() { 114 | printHeader('testPad'); 115 | var padSingleDigit = utils.padWithLeadingZeros(3, 2); 116 | var expected = '03'; 117 | 118 | if (padSingleDigit !== expected) { 119 | 
throw 'Single digit 3 was not padded with leading zero.\n' + 120 | 'Actual: ' + padSingleDigit + '\n' + 121 | 'Expected: ' + expected; 122 | } 123 | } 124 | 125 | function testAreAllEnvVarsSet() { 126 | function testAreAllEnvVarsSetStringFound() { 127 | console.log(' ## testAreAllEnvVarsSetStringFound'); 128 | const key = 'TEST_ENV_VAR_KEY'; 129 | process.env[key] = 'some value'; 130 | const actual = utils.areAllEnvVarsSet(key); 131 | if (!actual) { 132 | throw 'Environment variable that was set not indicated as present'; 133 | } 134 | } 135 | 136 | function testAreAllEnvVarsSetStringNotFound() { 137 | console.log(' ## testAreAllEnvVarsSetStringNotFound'); 138 | const actual = utils.areAllEnvVarsSet('UNKNOWN_ENV_VAR_KEY'); 139 | if (actual) { 140 | throw 'Unknown environment variable indicated as being present'; 141 | } 142 | } 143 | 144 | function testAreAllEnvVarsSetStringArrayFound() { 145 | console.log(' ## testAreAllEnvVarsSetStringArrayFound'); 146 | const keys = ['TEST_ENV_VAR_KEY_1', 'TEST_ENV_VAR_KEY_2', 'TEST_ENV_VAR_KEY_3']; 147 | for (let i = 0; i < keys.length; i++) { 148 | process.env[keys[i]] = 'something'; 149 | } 150 | const actual = utils.areAllEnvVarsSet(keys); 151 | if (!actual) { 152 | throw 'Environment variables that were set not indicated as present'; 153 | } 154 | } 155 | 156 | function testAreAllEnvVarsSetStringArrayNotFound() { 157 | console.log(' ## testAreAllEnvVarsSetStringArrayNotFound'); 158 | const keys = ['UNKNOWN_ENV_VAR_KEY_1', 'UNKNOWN_ENV_VAR_KEY_2', 'UNKNOWN_ENV_VAR_KEY_3']; 159 | const actual = utils.areAllEnvVarsSet(keys); 160 | if (actual) { 161 | throw 'Unknown environment variables that were not set indicated as present'; 162 | } 163 | } 164 | 165 | function testAreAllEnvVarsSetStringArrayWithSomeSet() { 166 | console.log(' ## testAreAllEnvVarsSetStringArrayWithSomeSet'); 167 | const keys = ['TEST_ENV_VAR_KEY_1', 'UNKNOWN_ENV_VAR_KEY_2', 'UNKNOWN_ENV_VAR_KEY_3']; 168 | process.env[keys[0]] = 'something'; 169 | 170 | const 
actual = utils.areAllEnvVarsSet(keys); 171 | if (actual) { 172 | throw 'Unknown environment variables that were not set indicated as present'; 173 | } 174 | } 175 | 176 | printHeader('testAreAllEnvVarsSet'); 177 | testAreAllEnvVarsSetStringFound(); 178 | testAreAllEnvVarsSetStringNotFound(); 179 | testAreAllEnvVarsSetStringArrayFound(); 180 | testAreAllEnvVarsSetStringArrayNotFound(); 181 | testAreAllEnvVarsSetStringArrayWithSomeSet(); 182 | } 183 | 184 | 185 | async function test() { 186 | testAmzDatetime(); 187 | testEightDigitDate(); 188 | testPad(); 189 | testParseArray(); 190 | testAreAllEnvVarsSet(); 191 | } 192 | 193 | function printHeader(testName) { 194 | console.log(`\n## ${testName}`); 195 | } 196 | 197 | test(); 198 | console.log('Finished unit tests for utils.js'); 199 | --------------------------------------------------------------------------------