├── .github ├── PULL_REQUEST_TEMPLATE.md ├── actions │ └── setup │ │ └── action.yml ├── dependabot.yml └── workflows │ ├── audit.yml │ ├── ci.yml │ ├── minio.yml │ └── publish.yml ├── .gitignore ├── .mergify.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── codegen ├── Cargo.toml └── src │ ├── main.rs │ ├── v1 │ ├── access.rs │ ├── aws_conv.rs │ ├── aws_proxy.rs │ ├── dto.rs │ ├── error.rs │ ├── headers.rs │ ├── minio.rs │ ├── mod.rs │ ├── ops.rs │ ├── rust.rs │ ├── s3_trait.rs │ ├── smithy.rs │ ├── sts.rs │ ├── utils.rs │ └── xml.rs │ └── v2 │ └── mod.rs ├── crates ├── s3s-aws │ ├── Cargo.toml │ ├── LICENSE │ └── src │ │ ├── body.rs │ │ ├── connector.rs │ │ ├── conv │ │ ├── builtin.rs │ │ ├── generated.rs │ │ └── mod.rs │ │ ├── error.rs │ │ ├── event_stream.rs │ │ ├── lib.rs │ │ └── proxy │ │ ├── generated.rs │ │ ├── meta.rs │ │ └── mod.rs ├── s3s-e2e │ ├── Cargo.toml │ ├── LICENSE │ ├── build.rs │ └── src │ │ ├── advanced.rs │ │ ├── basic.rs │ │ ├── main.rs │ │ └── utils.rs ├── s3s-fs │ ├── Cargo.toml │ ├── LICENSE │ ├── src │ │ ├── checksum.rs │ │ ├── error.rs │ │ ├── fs.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ ├── s3.rs │ │ └── utils.rs │ └── tests │ │ └── it_aws.rs ├── s3s-model │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── smithy.rs ├── s3s-policy │ ├── Cargo.toml │ ├── LICENSE │ └── src │ │ ├── lib.rs │ │ ├── model.rs │ │ ├── pattern.rs │ │ └── tests.rs ├── s3s-proxy │ ├── Cargo.toml │ ├── LICENSE │ └── src │ │ └── main.rs ├── s3s-test │ ├── Cargo.toml │ ├── LICENSE │ └── src │ │ ├── build.rs │ │ ├── cli.rs │ │ ├── error.rs │ │ ├── lib.rs │ │ ├── report.rs │ │ ├── runner.rs │ │ ├── tcx.rs │ │ └── traits.rs └── s3s │ ├── Cargo.toml │ ├── LICENSE │ ├── examples │ ├── axum.rs │ └── tokio_util.rs │ ├── src │ ├── access │ │ ├── context.rs │ │ ├── generated.rs │ │ └── mod.rs │ ├── auth │ │ ├── mod.rs │ │ ├── secret_key.rs │ │ └── simple_auth.rs │ ├── checksum.rs │ ├── crypto.rs │ ├── dto │ │ ├── build_error.rs │ │ ├── content_type.rs │ │ ├── copy_source.rs │ │ ├── event.rs │ │ ├── event_stream.rs │ │ ├── generated.rs │ │ ├── mod.rs │ │ ├── range.rs │ │ ├── streaming_blob.rs │ │ └── timestamp.rs │ ├── error │ │ ├── generated.rs │ │ └── mod.rs │ ├── header │ │ ├── generated.rs │ │ └── mod.rs │ ├── host.rs │ ├── http │ │ ├── aws_chunked_stream.rs │ │ ├── body.rs │ │ ├── de.rs │ │ ├── keep_alive_body.rs │ │ ├── mod.rs │ │ ├── multipart.rs │ │ ├── ordered_headers.rs │ │ ├── ordered_qs.rs │ │ ├── request.rs │ │ ├── response.rs │ │ └── ser.rs │ ├── lib.rs │ ├── ops │ │ ├── generated.rs │ │ ├── get_object.rs │ │ ├── mod.rs │ │ ├── signature.rs │ │ └── tests.rs │ ├── path.rs │ ├── protocol.rs │ ├── route.rs │ ├── s3_op.rs │ ├── s3_trait.rs │ ├── service.rs │ ├── sig_v2 │ │ ├── authorization_v2.rs │ │ ├── methods.rs │ │ ├── mod.rs │ │ └── presigned_url_v2.rs │ ├── sig_v4 │ │ ├── amz_content_sha256.rs │ │ ├── amz_date.rs │ │ ├── authorization_v4.rs │ │ ├── methods.rs │ │ ├── mod.rs │ │ ├── post_signature.rs │ │ └── presigned_url_v4.rs │ ├── stream.rs │ ├── utils │ │ ├── crypto.rs │ │ ├── format.rs │ │ ├── mod.rs │ │ └── parser.rs │ └── xml │ │ ├── de.rs │ │ ├── generated.rs │ │ ├── mod.rs │ │ └── ser.rs │ └── tests │ ├── dto.rs │ └── xml.rs ├── data ├── crawl.py ├── minio-patches.json ├── s3.json ├── s3_error_codes.json └── sts.json ├── docs └── arch │ ├── arch.drawio │ └── arch.svg ├── justfile ├── pyproject.toml ├── rustfmt.toml ├── scripts ├── e2e-fs.sh ├── e2e-minio.sh ├── e2e-mint.sh ├── install.py ├── license.py ├── minio.sh ├── mint.sh ├── 
report-mint.py ├── s3s-e2e.sh ├── s3s-fs.sh └── s3s-proxy.sh └── uv.lock /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 14 | -------------------------------------------------------------------------------- /.github/actions/setup/action.yml: -------------------------------------------------------------------------------- 1 | name: "setup" 2 | description: "setup environment for s3s" 3 | runs: 4 | using: "composite" 5 | steps: 6 | - uses: taiki-e/install-action@just 7 | - uses: astral-sh/setup-uv@v3 8 | with: 9 | enable-cache: true 10 | - uses: dtolnay/rust-toolchain@stable 11 | - uses: Swatinem/rust-cache@v2 12 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "monthly" 12 | groups: 13 | dependencies: 14 | patterns: 15 | - "*" 16 | -------------------------------------------------------------------------------- /.github/workflows/audit.yml: -------------------------------------------------------------------------------- 1 | name: Audit 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - 'feat/**' 8 | paths: 9 | - '**/Cargo.toml' 10 | - '**/Cargo.lock' 11 | pull_request: 12 | branches: 13 | - main 14 | - 'feat/**' 15 | paths: 16 | - '**/Cargo.toml' 17 | - '**/Cargo.lock' 18 | schedule: 19 | - cron: '0 0 * * 0' # at midnight of each sunday 20 | workflow_dispatch: 21 | 22 | jobs: 23 | audit: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - uses: dtolnay/rust-toolchain@stable 28 | - uses: taiki-e/install-action@cargo-audit 29 | - run: cargo audit -D warnings 30 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | - 'feat/**' 11 | schedule: 12 | - cron: '0 0 * * 0' # at midnight of each sunday 13 | workflow_dispatch: 14 | 15 | jobs: 16 | skip-check: 17 | permissions: 18 | actions: write 19 | contents: read 20 | runs-on: ubuntu-latest 21 | outputs: 22 | should_skip: ${{ steps.skip_check.outputs.should_skip }} 23 | steps: 24 | - id: skip_check 25 | uses: fkirc/skip-duplicate-actions@v5 26 | with: 27 | cancel_others: true 28 | paths_ignore: '["*.md"]' 29 | 30 | python: 31 | needs: skip-check 32 | if: needs.skip-check.outputs.should_skip != 'true' 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v4 36 | - uses: taiki-e/install-action@just 37 | - uses: astral-sh/setup-uv@v3 38 | with: 39 | enable-cache: true 40 | - run: just ci-python 41 | 42 | rust: 43 | needs: skip-check 44 | if: needs.skip-check.outputs.should_skip != 'true' 45 | runs-on: ubuntu-latest 46 | strategy: 47 | fail-fast: false 48 | matrix: 49 | toolchain: 50 | - 1.85.0 # MSRV 51 | - stable 52 | - nightly 53 | steps: 54 | 
- uses: actions/checkout@v4 55 | - uses: taiki-e/install-action@just 56 | - uses: dtolnay/rust-toolchain@master 57 | with: 58 | toolchain: ${{ matrix.toolchain }} 59 | components: rustfmt, clippy 60 | - uses: Swatinem/rust-cache@v2 61 | - run: just ci-rust 62 | 63 | cross-test: 64 | needs: skip-check 65 | if: needs.skip-check.outputs.should_skip != 'true' 66 | runs-on: ${{ matrix.os }} 67 | strategy: 68 | fail-fast: false 69 | matrix: 70 | os: 71 | - windows-latest 72 | - macos-latest 73 | steps: 74 | - uses: actions/checkout@v4 75 | - uses: taiki-e/install-action@just 76 | - uses: dtolnay/rust-toolchain@stable 77 | - uses: Swatinem/rust-cache@v2 78 | - run: just test 79 | 80 | mint-proxy-minio: 81 | name: e2e (mint, s3s-proxy, minio) 82 | needs: skip-check 83 | if: needs.skip-check.outputs.should_skip != 'true' 84 | runs-on: ubuntu-latest 85 | steps: 86 | - uses: actions/checkout@v4 87 | - uses: ./.github/actions/setup 88 | - run: docker pull minio/mint:edge 89 | - run: docker pull minio/minio:latest 90 | - run: just install s3s-proxy 91 | - run: ./scripts/e2e-mint.sh 92 | - run: ./scripts/report-mint.py /tmp/mint/log.json 93 | - uses: actions/upload-artifact@v4 94 | with: 95 | name: mint-proxy-minio.logs 96 | path: ./target/s3s-proxy.log 97 | 98 | e2e-fs: 99 | name: e2e (s3s-e2e, s3s-fs) 100 | needs: skip-check 101 | if: needs.skip-check.outputs.should_skip != 'true' 102 | runs-on: ubuntu-latest 103 | steps: 104 | - uses: actions/checkout@v4 105 | - uses: ./.github/actions/setup 106 | - run: just install s3s-e2e 107 | - run: just install s3s-fs 108 | - run: ./scripts/e2e-fs.sh --filter '^Basic' 109 | - uses: actions/upload-artifact@v4 110 | with: 111 | name: e2e-fs.logs 112 | path: ./target/s3s-fs.log 113 | 114 | e2e-minio: 115 | name: e2e (s3s-e2e, minio) 116 | needs: skip-check 117 | if: needs.skip-check.outputs.should_skip != 'true' 118 | runs-on: ubuntu-latest 119 | steps: 120 | - uses: actions/checkout@v4 121 | - uses: ./.github/actions/setup 122 | - run: docker pull minio/minio:latest 123 | - run: just install s3s-e2e 124 | - run: ./scripts/e2e-minio.sh 125 | -------------------------------------------------------------------------------- /.github/workflows/minio.yml: -------------------------------------------------------------------------------- 1 | name: MinIO 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: 8 | 9 | jobs: 10 | sync: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: taiki-e/install-action@just 15 | - uses: dtolnay/rust-toolchain@stable 16 | - uses: Swatinem/rust-cache@v2 17 | - run: | 18 | git checkout main 19 | git checkout -B minio 20 | just codegen 21 | - uses: actions-js/push@master 22 | with: 23 | github_token: ${{ secrets.GITHUB_TOKEN }} 24 | branch: minio 25 | force: true 26 | message: "sync codegen minio" 27 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: taiki-e/install-action@just 14 | - uses: dtolnay/rust-toolchain@nightly 15 | - name: Publish all crates 16 | env: 17 | CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_IO_API_TOKEN }} 18 | run: | 19 | cargo publish -p s3s --dry-run 20 | 21 | # fundamental 22 | cargo publish -p s3s 23 | cargo publish -p s3s-aws 24 | 25 | # supporting 26 | 
cargo publish -p s3s-model 27 | cargo publish -p s3s-policy 28 | cargo publish -p s3s-test 29 | 30 | # binary 31 | cargo publish -p s3s-proxy 32 | cargo publish -p s3s-fs 33 | cargo publish -p s3s-e2e 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | .vscode 4 | .idea 5 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - merge_method: fast-forward 3 | name: default 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | [Unreleased]: https://github.com/Nugine/s3s/compare/v0.11.0...HEAD 11 | 12 | ## [v0.11.0] - 2025-03-28 13 | 14 | [v0.11.0]: https://github.com/Nugine/s3s/compare/v0.10.1...v0.11.0 15 | 16 | Tracking in [#267](https://github.com/Nugine/s3s/issues/267). 17 | 18 | MSRV of this minor version: 1.85.0 19 | 20 | ### s3s 21 | 22 | **BREAKING**: Following the latest model definitions in [aws-sdk-rust](https://github.com/awslabs/aws-sdk-rust), `s3s::dto` is updated. 23 | + You may come across some type changes reported by rustc. 24 | + The migration is not hard but requires some time. 25 | 26 | **BREAKING**: More request parameters are accepted via upgrading model definitions. 27 | + S3 preconditions ([#241](https://github.com/Nugine/s3s/issues/241)) 28 | + PutObject write_offset_bytes ([#249](https://github.com/Nugine/s3s/issues/249)) 29 | 30 | **BREAKING**: Policy-based access control is supported in `s3s::access` ([#161](https://github.com/Nugine/s3s/issues/161)) 31 | + Add `S3Access` trait for access control. 32 | + Add `S3ServiceBuilder::set_access`. 33 | + Move `S3Auth::check_access` to `S3Access::check`. 34 | 35 | **BREAKING**: Multi-domain is supported in `s3s::host`. ([#175](https://github.com/Nugine/s3s/issues/175)) 36 | + Add `S3Host` trait for parsing host header. 37 | + Change `S3ServiceBuilder::set_base_domain` to `S3ServiceBuilder::set_host`. 38 | + Add `SingleDomain` parser. 39 | + Add `MultiDomain` parser. 40 | 41 | Custom route is supported in `s3s::route` ([#195](https://github.com/Nugine/s3s/issues/195)) 42 | + Add `S3Route` trait for custom route protected by signature verification. 43 | + Add `S3ServiceBuilder::set_route`. 44 | + Signature v4 supports AWS STS requests ([#208](https://github.com/Nugine/s3s/pull/208)) 45 | + Add example using [axum](https://github.com/tokio-rs/axum) web framework ([#263](https://github.com/Nugine/s3s/pull/263)) 46 | 47 | Unstable `minio` branch: 48 | + Add `minio` branch for MinIO compatibility. 49 | + This branch is automatically force-rebased to the latest `main` branch. 
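A rough sketch of the `S3Access` hook described above (the trait shape follows `s3s::access`; the allow-all policy and the builder wiring comment are illustrative only):

```rust
use s3s::S3Result;
use s3s::access::{S3Access, S3AccessContext};

struct AllowAll;

#[async_trait::async_trait]
impl S3Access for AllowAll {
    // Replaces the old `S3Auth::check_access` hook; called before the operation input is deserialized.
    async fn check(&self, _cx: &mut S3AccessContext<'_>) -> S3Result<()> {
        Ok(())
    }
}

// Wiring (illustrative): call `builder.set_access(AllowAll)` on an `S3ServiceBuilder`.
```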
50 | 51 | Other notable changes 52 | + feat(s3s): export xml module ([#189](https://github.com/Nugine/s3s/pull/189)) 53 | + fix(s3s/ops): allow presigned url requests with up to 15 minutes clock skew ([#216](https://github.com/Nugine/s3s/pull/216)) 54 | + handle fmt message with implicit arguments in s3_error macro ([#228](https://github.com/Nugine/s3s/pull/228)) 55 | + feat(s3s/dto): ignore empty strings ([#244](https://github.com/Nugine/s3s/pull/244)) 56 | + feat(model): extra error codes ([#255](https://github.com/Nugine/s3s/pull/255)) 57 | + feat(s3s/checksum): add crc64nvme ([#256](https://github.com/Nugine/s3s/pull/256)) 58 | + feat(s3s/xml): support xmlns ([#265](https://github.com/Nugine/s3s/pull/265)) 59 | 60 | ### s3s-model 61 | 62 | + Add crate `s3s-model` for S3 model definitions. 63 | 64 | ### s3s-policy 65 | 66 | + Add crate `s3s-policy` for S3 policy language. 67 | + Add grammar model types for serialization and deserialization in `s3s_policy::model`. 68 | + Add `PatternSet` for matching multiple patterns in `s3s_policy::pattern`. 69 | 70 | ### s3s-test 71 | 72 | + Add crate `s3s-test` for custom test framework. 73 | 74 | ### s3s-e2e 75 | 76 | + Add crate `s3s-e2e` for S3 compatibility tests. 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Development Guide 2 | 3 | ## Requirements 4 | 5 | | Toolchain | Version | 6 | | :-----------------------------------: | :-----: | 7 | | [Rust](https://rustup.rs/) | ^1.85.0 | 8 | | [just](https://github.com/casey/just) | ^1.36.0 | 9 | | [uv](https://github.com/astral-sh/uv) | ^0.5.0 | 10 | | Docker | - | 11 | 12 | ## Workflow 13 | 14 | Get the source code 15 | 16 | ```bash 17 | git clone https://github.com/Nugine/s3s.git 18 | cd s3s 19 | ``` 20 | 21 | ### Run basic checks and tests 22 | 23 | ```bash 24 | just dev 25 | ``` 26 | 27 | ### Run the codegen 28 | 29 | ```bash 30 | just crawl 31 | just codegen 32 | ``` 33 | 34 | It should change nothing if you are running the latest code. 35 | 36 | ### Open documentation 37 | 38 | ```bash 39 | just doc 40 | ``` 41 | 42 | ### Play the test server 43 | 44 | Install `s3s-fs` from source 45 | 46 | ```bash 47 | cargo install --path crates/s3s-fs --features binary 48 | ``` 49 | 50 | You can also use the shortcut 51 | 52 | ```bash 53 | just install s3s-fs 54 | ``` 55 | 56 | Or install from crates.io 57 | 58 | ```bash 59 | cargo install s3s-fs --features binary 60 | ``` 61 | 62 | Run `s3s-fs` with [example configuration](./scripts/s3s-fs.sh) 63 | 64 | ```bash 65 | ./scripts/s3s-fs.sh 66 | ``` 67 | 68 | Credentials used in the example configuration: 69 | 70 | ``` 71 | Access Key: AKEXAMPLES3S 72 | Secret Key: SKEXAMPLES3S 73 | ``` 74 | 75 | Then you can explore it with your favorite S3 client! 76 | 77 | ### Run E2E tests 78 | 79 | Install `s3s-proxy` 80 | 81 | ```bash 82 | just install s3s-proxy 83 | ``` 84 | 85 | Run the combined server and save logs 86 | 87 | ```bash 88 | ./scripts/s3s-proxy.sh | tee target/s3s-proxy.log 89 | ``` 90 | 91 | Open a new terminal, then run the test suite 92 | 93 | ```bash 94 | ./scripts/mint.sh | tee target/mint.log 95 | ``` 96 | 97 | ## Git 98 | 99 | ### Commit Message 100 | 101 | We follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification. 
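Commit messages use the `type(scope): description` form, for example:

```
feat(s3s/checksum): add crc64nvme
fix(s3s/ops): allow presigned url requests with up to 15 minutes clock skew
```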
102 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["crates/*", "codegen"] 3 | resolver = "3" 4 | 5 | [workspace.package] 6 | edition = "2024" 7 | repository = "https://github.com/Nugine/s3s" 8 | license = "Apache-2.0" 9 | rust-version = "1.85.0" 10 | 11 | [workspace.lints.rust] 12 | unsafe_code = "forbid" 13 | 14 | [workspace.lints.clippy] 15 | # deny 16 | all = { level = "deny", priority = -1 } 17 | pedantic = { level = "deny", priority = -1 } 18 | cargo = { level = "deny", priority = -1 } 19 | self_named_module_files = "deny" 20 | # warn 21 | dbg_macro = "warn" 22 | # allow 23 | module_name_repetitions = "allow" 24 | multiple_crate_versions = "allow" 25 | 26 | [profile.release] 27 | debug = "line-tables-only" 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # s3s 2 | 3 | [![Apache 2.0 licensed][license-badge]][license-url] 4 | [![Unsafe Forbidden][unsafe-forbidden-badge]][unsafe-forbidden-url] 5 | 6 | [license-badge]: https://img.shields.io/badge/license-Apache--2.0-blue.svg 7 | [license-url]: ./LICENSE 8 | [unsafe-forbidden-badge]: https://img.shields.io/badge/unsafe-forbidden-success.svg 9 | [unsafe-forbidden-url]: https://github.com/rust-secure-code/safety-dance/ 10 | 11 | S3 Service Adapter 12 | 13 | 14 | | crate | version | docs | 15 | | :------------------------- | :-----------------------------------------------------------------------------------------: | :------------------------------------------------------------------: | 16 | | [s3s](./crates/s3s/) | [![Crates.io](https://img.shields.io/crates/v/s3s.svg)](https://crates.io/crates/s3s) | [![Docs](https://docs.rs/s3s/badge.svg)](https://docs.rs/s3s/) | 17 | | [s3s-aws](./crates/s3s-aws/) | [![Crates.io](https://img.shields.io/crates/v/s3s-aws.svg)](https://crates.io/crates/s3s-aws) | [![Docs](https://docs.rs/s3s-aws/badge.svg)](https://docs.rs/s3s-aws/) | 18 | | [s3s-fs](./crates/s3s-fs/) | [![Crates.io](https://img.shields.io/crates/v/s3s-fs.svg)](https://crates.io/crates/s3s-fs) | [![Docs](https://docs.rs/s3s-fs/badge.svg)](https://docs.rs/s3s-fs/) | 19 | 20 | This experimental project intends to offer an ergonomic adapter for building S3-compatible services. 21 | 22 | `s3s` implements Amazon S3 REST API in the form of a generic [hyper](https://github.com/hyperium/hyper) service. S3-compatible services can focus on the S3 API itself and don't have to care about the HTTP layer. 23 | 24 | `s3s-aws` provides useful types and integration with [`aws-sdk-s3`](https://crates.io/crates/aws-sdk-s3). 25 | 26 | `s3s-fs` implements the S3 API based on file system, as a sample implementation. It is designed for integration testing, which can be used to [mock an S3 client](https://github.com/Nugine/s3s/blob/main/crates/s3s-fs/tests/it_aws.rs). It also provides a binary for debugging. [Play it!](./CONTRIBUTING.md#play-the-test-server) 27 | 28 | ## How it works 29 | 30 | ![architecture diagram](docs/arch/arch.svg) 31 | 32 | The diagram above shows how `s3s` works. 33 | 34 | `s3s` converts HTTP requests to operation inputs before calling the user-defined service. 35 | 36 | `s3s` converts operation outputs or errors to HTTP responses after calling the user-defined service. 
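A minimal sketch of the user-defined side (the `S3` trait and `S3ServiceBuilder` come from this crate; `MyStore` and the handler body are illustrative):

```rust
use s3s::dto::{GetObjectInput, GetObjectOutput};
use s3s::service::{S3Service, S3ServiceBuilder};
use s3s::{S3, S3Request, S3Response, S3Result, s3_error};

struct MyStore;

#[async_trait::async_trait]
impl S3 for MyStore {
    // Every operation has a default `NotImplemented` body; override only what you need.
    async fn get_object(&self, req: S3Request<GetObjectInput>) -> S3Result<S3Response<GetObjectOutput>> {
        let _key = &req.input.key;
        Err(s3_error!(NoSuchKey, "this sample store is empty"))
    }
}

// `S3Service` is a generic hyper service; serve it with hyper directly or adapt it to axum/tower.
fn build_service() -> S3Service {
    S3ServiceBuilder::new(MyStore).build()
}
```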
37 | 38 | The data types, serialization and deserialization are generated from the smithy model in [aws-sdk-rust](https://github.com/awslabs/aws-sdk-rust) repository. We apply manual hacks to fix some problems in [smithy server codegen](https://smithy-lang.github.io/smithy-rs/design/server/overview.html) and make `s3s` ready to use now. 39 | 40 | ## Security 41 | 42 | `S3Service` and other adapters in this project have no security protection. If they are exposed to the Internet directly, they may be **attacked**. 43 | 44 | It is up to the user to implement security enhancements such as **HTTP body length limit**, rate limit and back pressure. 45 | 46 | ## Contributing 47 | 48 | + [Development Guide](./CONTRIBUTING.md) 49 | 50 | ## Sponsor 51 | 52 | We have a reward funds pool for contributors: 53 | 54 | If my open-source work has been helpful to you, please [sponsor me](https://github.com/Nugine#sponsor). 55 | 56 | Every little bit helps. Thank you! 57 | -------------------------------------------------------------------------------- /codegen/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-codegen" 3 | version = "0.0.0" 4 | edition.workspace = true 5 | repository.workspace = true 6 | license.workspace = true 7 | rust-version.workspace = true 8 | publish = false 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | scoped-writer = "0.3.0" 15 | heck = "0.5.0" 16 | nugine-rust-utils = "0.3.1" 17 | std-next = "0.1.8" 18 | numeric_cast = "0.3.0" 19 | regex = "1.11.1" 20 | serde = { version = "1.0.219", features = ["derive"] } 21 | serde_json = { version = "1.0.140", features = ["preserve_order"] } 22 | serde_urlencoded = "0.7.1" 23 | s3s-model = { version = "0.12.0-dev", path = "../crates/s3s-model" } 24 | http = "1.3.1" 25 | -------------------------------------------------------------------------------- /codegen/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::single_match_else, // 3 | clippy::wildcard_imports, 4 | clippy::match_same_arms, 5 | clippy::let_underscore_untyped, 6 | )] 7 | 8 | mod v1; 9 | mod v2; 10 | 11 | fn main() { 12 | v1::run(); 13 | v2::run(); 14 | } 15 | -------------------------------------------------------------------------------- /codegen/src/v1/access.rs: -------------------------------------------------------------------------------- 1 | use super::ops::Operations; 2 | 3 | use crate::declare_codegen; 4 | 5 | use heck::ToSnakeCase; 6 | use scoped_writer::g; 7 | 8 | pub fn codegen(ops: &Operations) { 9 | declare_codegen!(); 10 | 11 | g([ 12 | "#![allow(clippy::doc_markdown)]", 13 | "", 14 | "use super::S3AccessContext;", 15 | "", 16 | "use crate::dto::*;", 17 | "use crate::error::S3Result;", 18 | "use crate::protocol::S3Request;", 19 | "", 20 | "#[async_trait::async_trait]", 21 | "pub trait S3Access: Send + Sync + 'static {", 22 | "", 23 | ]); 24 | 25 | g([ 26 | "/// Checks whether the current request has accesses to the resources.", 27 | "///", 28 | "/// This method is called before deserializing the operation input.", 29 | "///", 30 | "/// By default, this method rejects all anonymous requests", 31 | "/// and returns [`AccessDenied`](crate::S3ErrorCode::AccessDenied) error.", 32 | "///", 33 | "/// An access control provider can override this method to implement custom logic.", 34 | "///", 35 | "/// Common fields in the context:", 36 | "/// + [`cx.credentials()`](S3AccessContext::credentials)", 37 | "/// + 
[`cx.s3_path()`](S3AccessContext::s3_path)", 38 | "/// + [`cx.s3_op().name()`](crate::S3Operation::name)", 39 | "/// + [`cx.extensions_mut()`](S3AccessContext::extensions_mut)", 40 | "async fn check(&self, cx: &mut S3AccessContext<'_>) -> S3Result<()> {", 41 | " super::default_check(cx)", 42 | "}", 43 | ]); 44 | 45 | for op in ops.values() { 46 | let method_name = op.name.to_snake_case(); 47 | let input = &op.input; 48 | 49 | g!("/// Checks whether the {} request has accesses to the resources.", op.name); 50 | g!("/// "); 51 | g!("/// This method returns `Ok(())` by default."); 52 | g!("async fn {method_name}(&self, _req: &mut S3Request<{input}>) -> S3Result<()> {{"); 53 | g!("Ok(())"); 54 | g!("}}"); 55 | g!(); 56 | } 57 | 58 | g!("}}"); 59 | g!(); 60 | } 61 | -------------------------------------------------------------------------------- /codegen/src/v1/aws_proxy.rs: -------------------------------------------------------------------------------- 1 | use super::dto::RustTypes; 2 | use super::ops::Operations; 3 | use super::rust; 4 | 5 | use crate::declare_codegen; 6 | 7 | use std::format as f; 8 | 9 | use heck::ToSnakeCase; 10 | use scoped_writer::g; 11 | 12 | pub fn codegen(ops: &Operations, rust_types: &RustTypes) { 13 | declare_codegen!(); 14 | 15 | g([ 16 | "use super::*;", 17 | "", 18 | "use crate::conv::{try_from_aws, try_into_aws};", 19 | "", 20 | "use s3s::S3;", 21 | "use s3s::{S3Request, S3Response};", 22 | "use s3s::S3Result;", 23 | "", 24 | "use tracing::debug;", 25 | "", 26 | ]); 27 | 28 | g!("#[async_trait::async_trait]"); 29 | g!("impl S3 for Proxy {{"); 30 | 31 | for op in ops.values() { 32 | let method_name = op.name.to_snake_case(); 33 | let s3s_input = f!("s3s::dto::{}", op.input); 34 | let s3s_output = f!("s3s::dto::{}", op.output); 35 | 36 | g!("#[tracing::instrument(skip(self, req))]"); 37 | g!("async fn {method_name}(&self, req: S3Request<{s3s_input}>) -> S3Result> {{"); 38 | 39 | g!("let input = req.input;"); 40 | g!("debug!(?input);"); 41 | 42 | if op.smithy_input == "Unit" { 43 | g!("let result = self.0.{method_name}().send().await;"); 44 | } else { 45 | g!("let mut b = self.0.{method_name}();"); 46 | let rust::Type::Struct(ty) = &rust_types[op.input.as_str()] else { panic!() }; 47 | 48 | let flattened_fields = if ty.name == "SelectObjectContentInput" { 49 | let rust::Type::Struct(flattened_ty) = &rust_types["SelectObjectContentRequest"] else { panic!() }; 50 | flattened_ty.fields.as_slice() 51 | } else { 52 | &[] 53 | }; 54 | 55 | for field in ty.fields.iter().chain(flattened_fields) { 56 | if field.is_custom_extension { 57 | continue; 58 | } 59 | 60 | let s3s_field_name = match ty.name.as_str() { 61 | "SelectObjectContentInput" if field.name == "request" => continue, 62 | "SelectObjectContentInput" if field.position == "xml" => f!("request.{}", field.name), 63 | _ => field.name.clone(), 64 | }; 65 | let aws_field_name = match ty.name.as_str() { 66 | "SelectObjectContentInput" => field.name.as_str(), 67 | _ => match s3s_field_name.as_str() { 68 | "checksum_crc32c" => "checksum_crc32_c", 69 | "checksum_crc64nvme" => "checksum_crc64_nvme", 70 | "type_" => "type", 71 | s => s, 72 | }, 73 | }; 74 | 75 | // // hack 76 | // if op.name == "PutObject" && field.type_ == "ChecksumAlgorithm" { 77 | // assert!(field.option_type); 78 | // let default_val = "aws_sdk_s3::model::ChecksumAlgorithm::Sha256"; 79 | // let val = f!("try_into_aws(input.{s3s_field_name})?.or(Some({default_val}))"); 80 | // g!("b = b.set_{aws_field_name}({val});"); 81 | // continue; 82 | // } 83 | 84 
| if field.option_type { 85 | g!("b = b.set_{aws_field_name}(try_into_aws(input.{s3s_field_name})?);"); 86 | } else { 87 | g!("b = b.set_{aws_field_name}(Some(try_into_aws(input.{s3s_field_name})?));"); 88 | } 89 | } 90 | g!("let result = b.send().await;"); 91 | } 92 | 93 | g([ 94 | "match result {", 95 | " Ok(output) => {", 96 | " let headers = super::meta::build_headers(&output)?;", 97 | " let output = try_from_aws(output)?;", 98 | " debug!(?output);", 99 | " Ok(S3Response::with_headers(output, headers))", 100 | " },", 101 | " Err(e) => Err(wrap_sdk_error!(e)),", 102 | "}", 103 | ]); 104 | 105 | g!("}}"); 106 | g!(); 107 | } 108 | 109 | g!("}}"); 110 | } 111 | -------------------------------------------------------------------------------- /codegen/src/v1/headers.rs: -------------------------------------------------------------------------------- 1 | use super::smithy; 2 | 3 | use crate::declare_codegen; 4 | 5 | use std::collections::BTreeSet; 6 | 7 | use heck::ToShoutySnakeCase; 8 | use scoped_writer::g; 9 | use stdx::default::default; 10 | 11 | pub fn codegen(model: &smithy::Model) { 12 | let mut headers: BTreeSet<&str> = default(); 13 | 14 | for (name, shape) in &model.shapes { 15 | if name.ends_with("Request") || name.ends_with("Output") { 16 | let smithy::Shape::Structure(sh) = shape else { panic!() }; 17 | 18 | for member in sh.members.values() { 19 | if let Some(header) = member.traits.http_header() { 20 | headers.insert(header); 21 | } 22 | } 23 | } 24 | } 25 | 26 | { 27 | headers.insert("x-amz-content-sha256"); 28 | headers.insert("x-amz-date"); 29 | headers.insert("authorization"); 30 | headers.insert("host"); 31 | headers.insert("x-amz-decoded-content-length"); 32 | headers.insert("x-amz-request-id"); 33 | headers.insert("x-amz-id-2"); 34 | } 35 | 36 | declare_codegen!(); 37 | 38 | g([ 39 | "#![allow(clippy::declare_interior_mutable_const)]", 40 | "", 41 | "use hyper::header::HeaderName;", 42 | "", 43 | ]); 44 | 45 | for header in headers { 46 | let name = to_constant_name(header); 47 | if header.starts_with("x-amz-") || header == "Content-MD5" || header.starts_with("x-minio") { 48 | let value = header.to_ascii_lowercase(); 49 | g!("pub const {name}: HeaderName = HeaderName::from_static({value:?});",); 50 | } else { 51 | g!("pub use hyper::header::{name};"); 52 | } 53 | g!(); 54 | } 55 | } 56 | 57 | pub fn to_constant_name(header_name: &str) -> String { 58 | if header_name == "ETag" { 59 | "ETAG".into() 60 | } else { 61 | header_name.to_shouty_snake_case() 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /codegen/src/v1/minio.rs: -------------------------------------------------------------------------------- 1 | use scoped_writer::g; 2 | 3 | use super::smithy; 4 | 5 | fn git_branch() -> String { 6 | let output = std::process::Command::new("git") 7 | .args(["rev-parse", "--abbrev-ref", "HEAD"]) 8 | .output() 9 | .unwrap(); 10 | let stdout = core::str::from_utf8(&output.stdout).unwrap(); 11 | stdout.trim().to_owned() 12 | } 13 | 14 | fn is_minio_branch() -> bool { 15 | let branch_name = git_branch(); 16 | matches!(branch_name.as_str(), "minio" | "feat/minio") 17 | } 18 | 19 | /// 20 | pub fn patch(model: &mut smithy::Model) { 21 | if !is_minio_branch() { 22 | return; 23 | } 24 | 25 | let patches = smithy::Model::load_json("data/minio-patches.json").unwrap(); 26 | 27 | for (shape_name, patch) in patches.shapes { 28 | match model.shapes.get_mut(&shape_name) { 29 | None => { 30 | model.shapes.insert(shape_name, patch); 31 | } 32 | 
Some(shape) => match shape { 33 | smithy::Shape::Structure(shape) => { 34 | let smithy::Shape::Structure(patch) = patch else { panic!() }; 35 | for (field_name, member) in patch.members { 36 | assert!(shape.members.insert(field_name, member).is_none()); 37 | } 38 | } 39 | _ => unimplemented!(), 40 | }, 41 | } 42 | } 43 | } 44 | 45 | #[allow(clippy::too_many_lines)] 46 | pub fn codegen_in_dto() { 47 | if !is_minio_branch() { 48 | return; 49 | } 50 | 51 | let code = r#" 52 | 53 | #[derive(Debug, Default)] 54 | pub struct CachedTags(std::sync::OnceLock>); 55 | 56 | impl CachedTags { 57 | pub fn reset(&mut self) { 58 | self.0.take(); 59 | } 60 | 61 | fn test<'a>( 62 | &self, 63 | get_and_tags: impl FnOnce() -> Option<&'a [Tag]>, 64 | get_tag: impl FnOnce() -> Option<&'a Tag>, 65 | object_tags: &Map, 66 | ) -> bool { 67 | let cached_tags = self.0.get_or_init(|| { 68 | let mut map = Map::new(); 69 | 70 | if let Some(tags) = get_and_tags() { 71 | for tag in tags { 72 | let (Some(k), Some(v)) = (&tag.key, &tag.value) else {continue}; 73 | if !k.is_empty() { 74 | map.insert(k.clone(), v.clone()); 75 | } 76 | } 77 | } 78 | 79 | if let Some(tag) = get_tag() { 80 | let (Some(k), Some(v)) = (&tag.key, &tag.value) else { return map }; 81 | if !k.is_empty() { 82 | map.insert(k.clone(), v.clone()); 83 | } 84 | } 85 | 86 | map 87 | }); 88 | 89 | if cached_tags.is_empty() { 90 | return true; 91 | } 92 | 93 | if object_tags.is_empty() { 94 | return false; 95 | } 96 | 97 | let (mut lhs, mut rhs) = (cached_tags, object_tags); 98 | if lhs.len() > rhs.len() { 99 | std::mem::swap(&mut lhs, &mut rhs); 100 | } 101 | 102 | for (k, v) in lhs { 103 | if rhs.get(k) == Some(v) { 104 | return true; 105 | } 106 | } 107 | 108 | false 109 | } 110 | } 111 | 112 | impl super::LifecycleRuleFilter { 113 | pub fn test_tags(&self, object_tags: &Map) -> bool { 114 | self.cached_tags.test( 115 | || self.and.as_ref().and_then(|and| and.tags.as_deref()), 116 | || self.tag.as_ref(), 117 | object_tags, 118 | ) 119 | } 120 | } 121 | 122 | impl super::ReplicationRuleFilter { 123 | pub fn test_tags(&self, object_tags: &Map) -> bool { 124 | self.cached_tags.test( 125 | || self.and.as_ref().and_then(|and| and.tags.as_deref()), 126 | || self.tag.as_ref(), 127 | object_tags, 128 | ) 129 | } 130 | } 131 | 132 | #[cfg(test)] 133 | mod minio_tests { 134 | use super::*; 135 | 136 | use std::ops::Not; 137 | 138 | #[test] 139 | fn cached_tags() { 140 | let filter = ReplicationRuleFilter { 141 | and: Some(ReplicationRuleAndOperator { 142 | tags: Some(vec![ 143 | Tag { 144 | key: Some("key1".to_owned()), 145 | value: Some("value1".to_owned()), 146 | }, 147 | Tag { 148 | key: Some("key2".to_owned()), 149 | value: Some("value2".to_owned()), 150 | }, 151 | ]), 152 | ..default() 153 | }), 154 | tag: Some(Tag { 155 | key: Some("key3".to_owned()), 156 | value: Some("value3".to_owned()), 157 | }), 158 | ..default() 159 | }; 160 | 161 | let object_tags = Map::from_iter(vec![ 162 | ("key1".to_owned(), "value1".to_owned()), 163 | ("key4".to_owned(), "value4".to_owned()), 164 | ("key5".to_owned(), "value5".to_owned()), 165 | ]); 166 | 167 | assert!(filter.test_tags(&object_tags)); 168 | assert!(filter.test_tags(&object_tags)); 169 | assert!(filter.test_tags(&object_tags)); 170 | 171 | let object_tags = Map::from_iter(vec![ 172 | ("key4".to_owned(), "value4".to_owned()), 173 | ("key5".to_owned(), "value5".to_owned()), 174 | ]); 175 | assert!(filter.test_tags(&object_tags).not()); 176 | assert!(filter.test_tags(&object_tags).not()); 177 | 
assert!(filter.test_tags(&object_tags).not()); 178 | } 179 | } 180 | 181 | "#; 182 | g!("{code}"); 183 | } 184 | -------------------------------------------------------------------------------- /codegen/src/v1/mod.rs: -------------------------------------------------------------------------------- 1 | mod rust; 2 | mod smithy; 3 | mod utils; 4 | 5 | mod access; 6 | mod dto; 7 | mod error; 8 | mod headers; 9 | mod minio; 10 | mod ops; 11 | mod s3_trait; 12 | mod sts; 13 | mod xml; 14 | 15 | mod aws_conv; 16 | mod aws_proxy; 17 | 18 | use std::fs::File; 19 | use std::io::BufWriter; 20 | 21 | pub use self::utils::o; 22 | 23 | fn write_file(path: &str, f: impl FnOnce()) { 24 | let mut writer = BufWriter::new(File::create(path).unwrap()); 25 | scoped_writer::scoped(&mut writer, f); 26 | } 27 | 28 | pub fn run() { 29 | let model = { 30 | let mut s3_model = smithy::Model::load_json("data/s3.json").unwrap(); 31 | let mut sts_model = smithy::Model::load_json("data/sts.json").unwrap(); 32 | sts::reduce(&mut sts_model); 33 | s3_model.shapes.append(&mut sts_model.shapes); 34 | minio::patch(&mut s3_model); 35 | s3_model 36 | }; 37 | 38 | let ops = ops::collect_operations(&model); 39 | let rust_types = dto::collect_rust_types(&model, &ops); 40 | 41 | { 42 | let path = "crates/s3s/src/dto/generated.rs"; 43 | write_file(path, || dto::codegen(&rust_types, &ops)); 44 | } 45 | 46 | { 47 | let path = "crates/s3s/src/header/generated.rs"; 48 | write_file(path, || headers::codegen(&model)); 49 | } 50 | 51 | { 52 | let path = "crates/s3s/src/error/generated.rs"; 53 | write_file(path, || error::codegen(&model)); 54 | } 55 | 56 | { 57 | let path = "crates/s3s/src/xml/generated.rs"; 58 | write_file(path, || xml::codegen(&ops, &rust_types)); 59 | } 60 | 61 | { 62 | let path = "crates/s3s/src/s3_trait.rs"; 63 | write_file(path, || s3_trait::codegen(&ops)); 64 | } 65 | 66 | { 67 | let path = "crates/s3s/src/ops/generated.rs"; 68 | write_file(path, || ops::codegen(&ops, &rust_types)); 69 | } 70 | 71 | { 72 | let path = "crates/s3s/src/access/generated.rs"; 73 | write_file(path, || access::codegen(&ops)); 74 | } 75 | 76 | { 77 | let path = "crates/s3s-aws/src/conv/generated.rs"; 78 | write_file(path, || aws_conv::codegen(&ops, &rust_types)); 79 | } 80 | 81 | { 82 | let path = "crates/s3s-aws/src/proxy/generated.rs"; 83 | write_file(path, || aws_proxy::codegen(&ops, &rust_types)); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /codegen/src/v1/rust.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::format as f; 3 | 4 | use scoped_writer::g; 5 | use serde_json::Value; 6 | 7 | #[derive(Debug, Clone)] 8 | pub enum Type { 9 | Alias(Alias), 10 | Provided(Provided), 11 | List(List), 12 | Map(Map), 13 | StrEnum(StrEnum), 14 | Struct(Struct), 15 | StructEnum(StructEnum), 16 | Timestamp(Timestamp), 17 | } 18 | 19 | #[derive(Debug, Clone)] 20 | pub struct Alias { 21 | pub name: String, 22 | pub type_: String, 23 | pub doc: Option, 24 | } 25 | 26 | #[derive(Debug, Clone)] 27 | pub struct Provided { 28 | pub name: String, 29 | } 30 | 31 | #[derive(Debug, Clone)] 32 | pub struct List { 33 | pub name: String, 34 | pub member: ListMember, 35 | pub doc: Option, 36 | } 37 | 38 | #[derive(Debug, Clone)] 39 | pub struct ListMember { 40 | pub type_: String, 41 | pub xml_name: Option, 42 | } 43 | 44 | #[derive(Debug, Clone)] 45 | pub struct Map { 46 | pub name: String, 47 | pub key_type: String, 48 | pub value_type: String, 49 
| pub doc: Option, 50 | } 51 | 52 | #[derive(Debug, Clone)] 53 | pub struct StrEnum { 54 | pub name: String, 55 | pub variants: Vec, 56 | pub doc: Option, 57 | 58 | pub is_custom_extension: bool, 59 | } 60 | 61 | #[derive(Debug, Clone)] 62 | pub struct StrEnumVariant { 63 | pub name: String, 64 | pub value: String, 65 | pub doc: Option, 66 | } 67 | 68 | #[derive(Debug, Clone)] 69 | pub struct Struct { 70 | pub name: String, 71 | pub fields: Vec, 72 | pub doc: Option, 73 | 74 | pub xml_name: Option, 75 | pub is_error_type: bool, 76 | 77 | pub is_custom_extension: bool, 78 | } 79 | 80 | #[allow(clippy::struct_excessive_bools)] 81 | #[derive(Debug, Clone)] 82 | pub struct StructField { 83 | pub name: String, 84 | pub type_: String, 85 | pub doc: Option, 86 | 87 | pub camel_name: String, 88 | 89 | pub option_type: bool, 90 | pub default_value: Option, 91 | pub is_required: bool, 92 | 93 | pub position: String, 94 | 95 | pub http_header: Option, 96 | pub http_query: Option, 97 | 98 | pub xml_name: Option, 99 | pub xml_flattened: bool, 100 | 101 | pub is_custom_extension: bool, 102 | } 103 | 104 | #[derive(Debug, Clone)] 105 | pub struct StructEnum { 106 | pub name: String, 107 | pub variants: Vec, 108 | pub doc: Option, 109 | } 110 | 111 | #[derive(Debug, Clone)] 112 | pub struct StructEnumVariant { 113 | pub name: String, 114 | pub type_: String, 115 | pub doc: Option, 116 | } 117 | 118 | #[derive(Debug, Clone)] 119 | pub struct Timestamp { 120 | pub name: String, 121 | pub format: Option, 122 | pub doc: Option, 123 | } 124 | 125 | impl Type { 126 | pub fn provided(name: &str) -> Self { 127 | Self::Provided(Provided { name: name.to_owned() }) 128 | } 129 | 130 | pub fn alias(name: &str, type_: &str, doc: Option<&str>) -> Self { 131 | Self::Alias(Alias { 132 | name: name.to_owned(), 133 | type_: type_.to_owned(), 134 | doc: doc.map(ToOwned::to_owned), 135 | }) 136 | } 137 | } 138 | 139 | pub fn codegen_doc(doc: Option<&str>) { 140 | let Some(doc) = doc else { return }; 141 | 142 | for line in doc.lines() { 143 | let mut line = line.trim_start().to_owned(); 144 | 145 | let word_fixes_type1 = [ 146 | "Region", 147 | "account-id", 148 | "access-point-name", 149 | "outpost-id", 150 | "key", 151 | "version-id", 152 | "Code", 153 | "Message", 154 | ]; 155 | 156 | for word in word_fixes_type1 { 157 | if line.contains(word) { 158 | line = line.replace(&f!("<{word}>"), &f!("<{word}>")); 159 | } 160 | } 161 | 162 | let word_fixes_type2 = ["Grantee", "BucketLoggingStatus"]; 163 | 164 | for word in word_fixes_type2 { 165 | if line.contains(word) { 166 | line = line.replace(&f!("<{word} xmlns"), &f!("<{word} xmlns")); 167 | } 168 | } 169 | 170 | let word_fixes_type3 = ["NotificationConfiguration", "Grantee"]; 171 | 172 | for word in word_fixes_type3 { 173 | if line.contains(word) { 174 | line = line.replace(&f!("<{word}>"), &f!("<{word}>")); 175 | line = line.replace(&f!(""), &f!("</{word}>")); 176 | } 177 | } 178 | 179 | if line.ends_with("200") { 180 | line = line.replace("200", "200"); 181 | } 182 | if line.starts_with("OK") { 183 | line = line.replace("OK", "OK"); 184 | } 185 | 186 | g!("/// {line}"); 187 | } 188 | } 189 | 190 | pub fn default_value_literal(v: &Value) -> &dyn fmt::Display { 191 | match v { 192 | Value::Bool(x) => x, 193 | Value::Number(x) => x, 194 | _ => unimplemented!(), 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /codegen/src/v1/s3_trait.rs: -------------------------------------------------------------------------------- 1 | 
use super::ops::Operations; 2 | use super::rust::codegen_doc; 3 | 4 | use crate::declare_codegen; 5 | 6 | use heck::ToSnakeCase; 7 | use scoped_writer::g; 8 | 9 | pub fn codegen(ops: &Operations) { 10 | declare_codegen!(); 11 | 12 | g([ 13 | "use crate::dto::*;", 14 | "use crate::error::S3Result;", 15 | "use crate::protocol::S3Request;", 16 | "use crate::protocol::S3Response;", 17 | "", 18 | "/// An async trait which represents the S3 API", 19 | "#[async_trait::async_trait]", 20 | "pub trait S3: Send + Sync + 'static {", 21 | "", 22 | ]); 23 | 24 | for op in ops.values() { 25 | let method_name = op.name.to_snake_case(); 26 | let input = &op.input; 27 | let output = &op.output; 28 | 29 | codegen_doc(op.doc.as_deref()); 30 | g!("async fn {method_name}(&self, _req: S3Request<{input}>) -> S3Result> {{"); 31 | g!("Err(s3_error!(NotImplemented, \"{} is not implemented yet\"))", op.name); 32 | g!("}}"); 33 | g!(); 34 | } 35 | 36 | g!("}}"); 37 | g!(); 38 | } 39 | -------------------------------------------------------------------------------- /codegen/src/v1/smithy.rs: -------------------------------------------------------------------------------- 1 | pub use s3s_model::smithy::*; 2 | 3 | pub trait SmithyTraitsExt { 4 | #[doc(hidden)] 5 | fn base(&self) -> &Traits; 6 | 7 | fn minio(&self) -> bool { 8 | self.base().get("s3s#minio").is_some() 9 | } 10 | 11 | fn sealed(&self) -> bool { 12 | self.base().get("s3s#sealed").is_some() 13 | } 14 | } 15 | 16 | impl SmithyTraitsExt for Traits { 17 | fn base(&self) -> &Traits { 18 | self 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /codegen/src/v1/sts.rs: -------------------------------------------------------------------------------- 1 | use super::smithy; 2 | use super::utils::o; 3 | 4 | use std::mem; 5 | use std::ops::Not; 6 | 7 | use heck::ToUpperCamelCase; 8 | 9 | pub const NAMES: &[&str] = &[ 10 | "AssumeRoleResponse", 11 | "AssumedRoleUser", 12 | "Credentials", 13 | "nonNegativeIntegerType", 14 | "sourceIdentityType", 15 | "arnType", 16 | "accessKeyIdType", 17 | "accessKeySecretType", 18 | "dateType", 19 | "tokenType", 20 | "assumedRoleIdType", 21 | ]; 22 | 23 | pub fn reduce(model: &mut smithy::Model) { 24 | for (shape_name, mut shape) in mem::take(&mut model.shapes) { 25 | let Some((_, name)) = shape_name.split_once('#') else { panic!() }; 26 | if NAMES.contains(&name).not() { 27 | continue; 28 | } 29 | 30 | let Some((_, name)) = shape_name.split_once('#') else { panic!() }; 31 | let new_name = match name { 32 | "AssumeRoleResponse" => o("AssumeRoleOutput"), 33 | _ if name.as_bytes()[0].is_ascii_lowercase() => name.to_upper_camel_case(), 34 | _ => o(name), 35 | }; 36 | 37 | if let smithy::Shape::Structure(ref mut shape) = shape { 38 | for member in shape.members.values_mut() { 39 | let Some((_, name)) = member.target.split_once('#') else { panic!() }; 40 | let new_name = match name { 41 | _ if name.as_bytes()[0].is_ascii_lowercase() => name.to_upper_camel_case(), 42 | _ => continue, 43 | }; 44 | member.target = member.target.replace(name, &new_name); 45 | } 46 | if name == "AssumeRoleResponse" { 47 | shape.traits.set("smithy.api#xmlName", name.into()); 48 | } 49 | } 50 | 51 | let new_shape_name = format!("com.amazonaws.s3#{new_name}"); 52 | assert!(model.shapes.insert(new_shape_name, shape).is_none()); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /codegen/src/v1/utils.rs: 
-------------------------------------------------------------------------------- 1 | pub fn o(x: &T) -> T::Owned { 2 | x.to_owned() 3 | } 4 | 5 | #[macro_export] 6 | macro_rules! function_name { 7 | () => {{ 8 | fn f() {} 9 | fn type_name_of(_: T) -> &'static str { 10 | std::any::type_name::() 11 | } 12 | let name = type_name_of(f); 13 | let name = name.strip_suffix("::f").unwrap(); 14 | name.strip_suffix("::{{closure}}").unwrap_or(name) 15 | }}; 16 | } 17 | 18 | #[macro_export] 19 | macro_rules! declare_codegen { 20 | () => { 21 | g!("//! Auto generated by `{}`", $crate::function_name!()); 22 | g!(); 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /codegen/src/v2/mod.rs: -------------------------------------------------------------------------------- 1 | pub fn run() {} 2 | -------------------------------------------------------------------------------- /crates/s3s-aws/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-aws" 3 | version = "0.12.0-dev" 4 | description = "S3 service adapter integrated with aws-sdk-s3" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | async-trait = "0.1.88" 18 | aws-sdk-s3 = "1.85.0" 19 | aws-smithy-runtime-api = { version = "1.7.4", features = ["client", "http-1x"] } 20 | aws-smithy-types = { version = "1.3.0", features = ["http-body-1-x"] } 21 | aws-smithy-types-convert = { version = "0.60.9", features = ["convert-time"] } 22 | hyper = "1.6.0" 23 | s3s = { version = "0.12.0-dev", path = "../s3s" } 24 | std-next = "0.1.8" 25 | sync_wrapper = "1.0.2" 26 | tracing = "0.1.41" 27 | transform-stream = "0.3.1" 28 | -------------------------------------------------------------------------------- /crates/s3s-aws/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-aws/src/body.rs: -------------------------------------------------------------------------------- 1 | use aws_smithy_types::body::SdkBody; 2 | 3 | pub fn s3s_body_into_sdk_body(body: s3s::Body) -> SdkBody { 4 | SdkBody::from_body_1_x(body) 5 | } 6 | 7 | pub fn sdk_body_into_s3s_body(body: SdkBody) -> s3s::Body { 8 | s3s::Body::http_body(body) 9 | } 10 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/connector.rs: -------------------------------------------------------------------------------- 1 | use crate::body::{s3s_body_into_sdk_body, sdk_body_into_s3s_body}; 2 | 3 | use s3s::service::S3Service; 4 | 5 | use std::ops::Not; 6 | 7 | use aws_sdk_s3::config::RuntimeComponents; 8 | use aws_smithy_runtime_api::client::http::{HttpClient, HttpConnectorSettings, SharedHttpConnector}; 9 | use aws_smithy_runtime_api::client::http::{HttpConnector, HttpConnectorFuture}; 10 | use aws_smithy_runtime_api::client::orchestrator::HttpRequest as AwsHttpRequest; 11 | use aws_smithy_runtime_api::client::orchestrator::HttpResponse as AwsHttpResponse; 12 | use aws_smithy_runtime_api::client::result::ConnectorError; 13 | 14 | use hyper::header::HOST; 15 | use hyper::http; 16 | 17 | #[derive(Debug)] 18 | pub struct Client(S3Service); 19 | 20 | 
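// `Client` adapts an in-process `S3Service` into an SDK `HttpClient`: handing it to the
// `aws-sdk-s3` config builder (e.g. through its `http_client` setter) routes every SDK
// call directly to the service with no real network, which is handy for integration tests.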
impl HttpClient for Client { 21 | fn http_connector(&self, _: &HttpConnectorSettings, _: &RuntimeComponents) -> SharedHttpConnector { 22 | SharedHttpConnector::new(Connector(self.0.clone())) 23 | } 24 | } 25 | 26 | impl From for Client { 27 | fn from(val: S3Service) -> Self { 28 | Self(val) 29 | } 30 | } 31 | 32 | #[derive(Debug, Clone)] 33 | pub struct Connector(S3Service); 34 | 35 | impl From for Connector { 36 | fn from(val: S3Service) -> Self { 37 | Self(val) 38 | } 39 | } 40 | 41 | fn on_err(e: E) -> ConnectorError 42 | where 43 | E: std::error::Error + Send + Sync + 'static, 44 | { 45 | let kind = aws_smithy_runtime_api::client::retries::ErrorKind::ServerError; 46 | ConnectorError::other(Box::new(e), Some(kind)) 47 | } 48 | 49 | impl HttpConnector for Connector { 50 | fn call(&self, req: AwsHttpRequest) -> HttpConnectorFuture { 51 | let service = self.0.clone(); 52 | HttpConnectorFuture::new_boxed(Box::pin(async move { convert_output(service.call(convert_input(req)?).await) })) 53 | } 54 | } 55 | 56 | fn convert_input(req: AwsHttpRequest) -> Result { 57 | let mut req = req.try_into_http1x().map_err(on_err)?; 58 | 59 | if req.headers().contains_key(HOST).not() { 60 | let host = auto_host_header(req.uri()); 61 | req.headers_mut().insert(HOST, host); 62 | } 63 | 64 | Ok(req.map(sdk_body_into_s3s_body)) 65 | } 66 | 67 | fn convert_output(result: Result) -> Result { 68 | match result { 69 | Ok(res) => res.map(s3s_body_into_sdk_body).try_into().map_err(on_err), 70 | Err(e) => { 71 | let kind = aws_smithy_runtime_api::client::retries::ErrorKind::ServerError; 72 | Err(ConnectorError::other(e.into(), Some(kind))) 73 | } 74 | } 75 | } 76 | 77 | // From 78 | fn auto_host_header(uri: &http::Uri) -> http::HeaderValue { 79 | let hostname = uri.host().expect("authority implies host"); 80 | match get_non_default_port(uri) { 81 | Some(port) => http::HeaderValue::try_from(format!("{hostname}:{port}")), 82 | None => http::HeaderValue::from_str(hostname), 83 | } 84 | .expect("uri host is valid header value") 85 | } 86 | 87 | /// From 88 | fn get_non_default_port(uri: &http::Uri) -> Option> { 89 | match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { 90 | (Some(443), true) => None, 91 | (Some(80), false) => None, 92 | _ => uri.port(), 93 | } 94 | } 95 | 96 | fn is_schema_secure(uri: &http::Uri) -> bool { 97 | uri.scheme_str() 98 | .is_some_and(|scheme_str| matches!(scheme_str, "wss" | "https")) 99 | } 100 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/conv/mod.rs: -------------------------------------------------------------------------------- 1 | mod builtin; 2 | mod generated; 3 | 4 | use s3s::s3_error; 5 | use s3s::{S3Error, S3Result}; 6 | 7 | pub trait AwsConversion: Sized { 8 | type Target; 9 | type Error; 10 | 11 | fn try_from_aws(x: Self::Target) -> Result; 12 | 13 | fn try_into_aws(x: Self) -> Result; 14 | } 15 | 16 | pub fn try_from_aws(x: T::Target) -> Result { 17 | T::try_from_aws(x) 18 | } 19 | 20 | pub fn try_into_aws(x: T) -> S3Result { 21 | T::try_into_aws(x) 22 | } 23 | 24 | fn unwrap_from_aws(opt: Option, field_name: &str) -> S3Result 25 | where 26 | S3Error: From, 27 | { 28 | match opt { 29 | Some(x) => T::try_from_aws(x).map_err(Into::into), 30 | None => Err(s3_error!(InternalError, "missing field: {}", field_name)), 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/error.rs: 
-------------------------------------------------------------------------------- 1 | macro_rules! wrap_sdk_error { 2 | ($e:expr) => {{ 3 | use aws_sdk_s3::error::SdkError; 4 | use aws_sdk_s3::operation::RequestId; 5 | use s3s::{S3Error, S3ErrorCode}; 6 | 7 | let mut err = S3Error::new(S3ErrorCode::InternalError); 8 | let source = $e; 9 | tracing::debug!("sdk error: {:?}", source); 10 | 11 | if let SdkError::ServiceError(ref e) = source { 12 | let meta = e.err().meta(); 13 | if let Some(val) = meta.code().and_then(|s| S3ErrorCode::from_bytes(s.as_bytes())) { 14 | err.set_code(val); 15 | } 16 | if let Some(val) = meta.message() { 17 | err.set_message(val.to_owned()); 18 | } 19 | if let Some(val) = meta.request_id() { 20 | err.set_request_id(val); 21 | } 22 | crate::error::SetStatusCode(&mut err, e).call(); 23 | } 24 | err.set_source(Box::new(source)); 25 | 26 | err 27 | }}; 28 | } 29 | 30 | // FIXME: this is actually an overloaded function 31 | 32 | pub struct SetStatusCode<'a, 'b, E, R>( 33 | pub &'a mut s3s::S3Error, 34 | pub &'b aws_smithy_runtime_api::client::result::ServiceError, 35 | ); 36 | 37 | impl SetStatusCode<'_, '_, E, aws_smithy_runtime_api::client::orchestrator::HttpResponse> { 38 | pub fn call(self) { 39 | let Self(err, e) = self; 40 | err.set_status_code(hyper_status_code_from_aws(e.raw().status())); 41 | // TODO: headers? 42 | } 43 | } 44 | 45 | impl SetStatusCode<'_, '_, E, aws_smithy_types::event_stream::RawMessage> { 46 | #[allow(clippy::unused_self)] 47 | pub fn call(self) {} 48 | } 49 | 50 | fn hyper_status_code_from_aws(status_code: aws_smithy_runtime_api::http::StatusCode) -> hyper::StatusCode { 51 | hyper::StatusCode::from_u16(status_code.as_u16()).unwrap() 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/event_stream.rs: -------------------------------------------------------------------------------- 1 | use sync_wrapper::{SyncFuture, SyncWrapper}; 2 | use transform_stream::AsyncStream; 3 | 4 | type AwsSelectObjectContentEventStream = aws_sdk_s3::primitives::event_stream::EventReceiver< 5 | aws_sdk_s3::types::SelectObjectContentEventStream, 6 | aws_sdk_s3::types::error::SelectObjectContentEventStreamError, 7 | >; 8 | 9 | pub fn from_aws(src: AwsSelectObjectContentEventStream) -> s3s::dto::SelectObjectContentEventStream { 10 | let mut src = SyncWrapper::new(src); 11 | s3s::dto::SelectObjectContentEventStream::new(AsyncStream::new(|mut y| async move { 12 | loop { 13 | let recv = SyncFuture::new(src.get_mut().recv()); 14 | let ans = recv.await; 15 | match ans { 16 | Ok(Some(ev)) => y.yield_(crate::conv::try_from_aws(ev)).await, 17 | Ok(None) => break, 18 | Err(err) => y.yield_err(wrap_sdk_error!(err)).await, 19 | } 20 | } 21 | })) 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::module_name_repetitions,// 3 | clippy::match_same_arms, // 4 | clippy::missing_errors_doc, // TODO: docs 5 | clippy::wildcard_imports, // 6 | clippy::let_underscore_untyped, 7 | clippy::multiple_crate_versions, // TODO: check later 8 | )] 9 | 10 | #[macro_use] 11 | mod error; 12 | 13 | mod body; 14 | mod event_stream; 15 | 16 | pub mod conv; 17 | 18 | mod connector; 19 | pub use self::connector::{Client, Connector}; 20 | 21 | mod proxy; 22 | pub use self::proxy::Proxy; 23 | -------------------------------------------------------------------------------- 
/crates/s3s-aws/src/proxy/meta.rs: -------------------------------------------------------------------------------- 1 | use s3s::header::{X_AMZ_ID_2, X_AMZ_REQUEST_ID}; 2 | use s3s::{S3Result, s3_error}; 3 | 4 | use aws_sdk_s3::operation::{RequestId, RequestIdExt}; 5 | use hyper::HeaderMap; 6 | use hyper::header::HeaderValue; 7 | 8 | pub fn build_headers(output: &T) -> S3Result> 9 | where 10 | T: RequestId + RequestIdExt, 11 | { 12 | let mut header = HeaderMap::new(); 13 | if let Some(id) = output.request_id() { 14 | let val = HeaderValue::from_str(id).map_err(|_| s3_error!(InternalError, "invalid request id"))?; 15 | header.insert(X_AMZ_REQUEST_ID, val); 16 | } 17 | if let Some(id) = output.extended_request_id() { 18 | let val = HeaderValue::from_str(id).map_err(|_| s3_error!(InternalError, "invalid extended request id"))?; 19 | header.insert(X_AMZ_ID_2, val); 20 | } 21 | Ok(header) 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/proxy/mod.rs: -------------------------------------------------------------------------------- 1 | mod generated; 2 | mod meta; 3 | 4 | pub struct Proxy(aws_sdk_s3::Client); 5 | 6 | impl From for Proxy { 7 | fn from(value: aws_sdk_s3::Client) -> Self { 8 | Self(value) 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /crates/s3s-e2e/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-e2e" 3 | version = "0.12.0-dev" 4 | description = "s3s test suite" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | s3s-test = { version = "0.12.0-dev", path = "../s3s-test" } 18 | tracing = "0.1.41" 19 | aws-credential-types = "1.2.2" 20 | aws-sdk-s3 = "1.85.0" 21 | aws-sdk-sts = { version = "1.72.0", features = ["behavior-version-latest"] } 22 | 23 | [dependencies.aws-config] 24 | version = "1.6.3" 25 | default-features = false 26 | features = ["behavior-version-latest"] 27 | 28 | [build-dependencies] 29 | s3s-test = { version = "0.12.0-dev", path = "../s3s-test" } 30 | -------------------------------------------------------------------------------- /crates/s3s-e2e/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-e2e/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | s3s_test::build::collect_info(); 3 | } 4 | -------------------------------------------------------------------------------- /crates/s3s-e2e/src/advanced.rs: -------------------------------------------------------------------------------- 1 | use crate::case; 2 | 3 | use s3s_test::Result; 4 | use s3s_test::TestFixture; 5 | use s3s_test::TestSuite; 6 | use s3s_test::tcx::TestContext; 7 | 8 | use std::ops::Not; 9 | use std::sync::Arc; 10 | 11 | use tracing::debug; 12 | 13 | pub fn register(tcx: &mut TestContext) { 14 | case!(tcx, Advanced, STS, test_assume_role); 15 | } 16 | 17 | struct Advanced { 18 | sts: aws_sdk_sts::Client, 19 | } 20 | 21 | impl TestSuite for Advanced { 22 | async fn setup() -> Result { 23 | let sdk_conf = aws_config::from_env().load().await; 24 | 
let sts = aws_sdk_sts::Client::new(&sdk_conf); 25 | 26 | Ok(Self { sts }) 27 | } 28 | } 29 | 30 | #[allow(clippy::upper_case_acronyms)] 31 | struct STS { 32 | sts: aws_sdk_sts::Client, 33 | } 34 | 35 | impl TestFixture for STS { 36 | async fn setup(suite: Arc) -> Result { 37 | Ok(Self { sts: suite.sts.clone() }) 38 | } 39 | } 40 | 41 | impl STS { 42 | async fn test_assume_role(self: Arc) -> Result<()> { 43 | let sts = &self.sts; 44 | 45 | let result = sts.assume_role().role_arn("example").role_session_name("test").send().await; 46 | 47 | let resp = result?; 48 | 49 | let credentials = resp.credentials().unwrap(); 50 | assert!(credentials.access_key_id().is_empty().not(), "Expected non-empty access key ID"); 51 | assert!(credentials.secret_access_key().is_empty().not(), "Expected non-empty secret access key"); 52 | assert!(credentials.session_token().is_empty().not(), "Expected session token in the response"); 53 | 54 | debug!(ak=?credentials.access_key_id()); 55 | debug!(sk=?credentials.secret_access_key()); 56 | debug!(st=?credentials.session_token()); 57 | debug!(exp=?credentials.expiration()); 58 | 59 | Ok(()) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /crates/s3s-e2e/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // TODO 3 | clippy::missing_panics_doc, // TODO 4 | clippy::wildcard_imports, 5 | )] 6 | 7 | mod utils; 8 | 9 | mod advanced; 10 | mod basic; 11 | 12 | use s3s_test::tcx::TestContext; 13 | 14 | fn register(tcx: &mut TestContext) { 15 | basic::register(tcx); 16 | advanced::register(tcx); 17 | } 18 | 19 | s3s_test::main!(register); 20 | -------------------------------------------------------------------------------- /crates/s3s-e2e/src/utils.rs: -------------------------------------------------------------------------------- 1 | use s3s_test::Result; 2 | 3 | use std::fmt; 4 | 5 | use aws_sdk_s3::error::ProvideErrorMetadata; 6 | use aws_sdk_s3::error::SdkError; 7 | use tracing::error; 8 | 9 | #[macro_export] 10 | macro_rules! 
case { 11 | ($tcx: expr, $s:ident, $x:ident, $c:ident) => {{ 12 | let mut suite = $tcx.suite::<$s>(stringify!($s)); 13 | let mut fixture = suite.fixture::<$x>(stringify!($x)); 14 | fixture.case(stringify!($c), $x::$c); 15 | }}; 16 | } 17 | 18 | #[allow(clippy::result_large_err)] 19 | pub fn check(result: Result>, allowed_codes: &[&str]) -> Result, SdkError> 20 | where 21 | E: fmt::Debug + ProvideErrorMetadata, 22 | { 23 | if let Err(SdkError::ServiceError(ref err)) = result { 24 | if let Some(code) = err.err().code() { 25 | if allowed_codes.contains(&code) { 26 | return Ok(None); 27 | } 28 | } 29 | } 30 | if let Err(ref err) = result { 31 | error!(?err); 32 | } 33 | match result { 34 | Ok(val) => Ok(Some(val)), 35 | Err(err) => Err(err), 36 | } 37 | } 38 | 39 | #[tracing::instrument(skip(s3))] 40 | pub async fn create_bucket(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 41 | s3.create_bucket().bucket(bucket).send().await?; 42 | Ok(()) 43 | } 44 | 45 | #[tracing::instrument(skip(s3))] 46 | pub async fn delete_bucket_loose(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 47 | let result = s3.delete_bucket().bucket(bucket).send().await; 48 | check(result, &["NoSuchBucket"])?; 49 | Ok(()) 50 | } 51 | 52 | #[tracing::instrument(skip(s3))] 53 | pub async fn delete_bucket_strict(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 54 | s3.delete_bucket().bucket(bucket).send().await?; 55 | Ok(()) 56 | } 57 | 58 | #[tracing::instrument(skip(s3))] 59 | pub async fn delete_object_loose(s3: &aws_sdk_s3::Client, bucket: &str, key: &str) -> Result { 60 | let result = s3.delete_object().bucket(bucket).key(key).send().await; 61 | check(result, &["NoSuchKey", "NoSuchBucket"])?; 62 | Ok(()) 63 | } 64 | 65 | #[tracing::instrument(skip(s3))] 66 | pub async fn delete_object_strict(s3: &aws_sdk_s3::Client, bucket: &str, key: &str) -> Result { 67 | s3.delete_object().bucket(bucket).key(key).send().await?; 68 | Ok(()) 69 | } 70 | -------------------------------------------------------------------------------- /crates/s3s-fs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-fs" 3 | version = "0.12.0-dev" 4 | description = "An experimental S3 server based on file system" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [[bin]] 17 | name = "s3s-fs" 18 | required-features = ["binary"] 19 | 20 | [features] 21 | binary = ["tokio/full", "dep:clap", "dep:tracing-subscriber", "dep:hyper-util"] 22 | 23 | [dependencies] 24 | async-trait = "0.1.88" 25 | base64-simd = "0.8.0" 26 | bytes = "1.10.1" 27 | chrono = { version = "0.4.41", default-features = false, features = [ 28 | "std", 29 | "clock", 30 | ] } 31 | clap = { version = "4.5.39", optional = true, features = ["derive"] } 32 | crc32c = "0.6.8" 33 | futures = "0.3.31" 34 | hex-simd = "0.8.0" 35 | hyper-util = { version = "0.1.13", optional = true, features = [ 36 | "server-auto", 37 | "server-graceful", 38 | "http1", 39 | "http2", 40 | "tokio", 41 | ] } 42 | mime = "0.3.17" 43 | std-next = "0.1.8" 44 | numeric_cast = "0.3.0" 45 | path-absolutize = "3.1.1" 46 | s3s = { version = "0.12.0-dev", path = "../s3s" } 47 | serde_json = "1.0.140" 48 | thiserror = "2.0.12" 49 | time = "0.3.41" 50 | tokio = { version = "1.45.1", features = ["fs", "io-util"] } 
51 | tokio-util = { version = "0.7.15", features = ["io"] } 52 | tracing = "0.1.41" 53 | tracing-error = "0.2.1" 54 | tracing-subscriber = { version = "0.3.19", optional = true, features = [ 55 | "env-filter", 56 | "time", 57 | ] } 58 | transform-stream = "0.3.1" 59 | uuid = { version = "1.17.0", features = ["v4"] } 60 | 61 | [dev-dependencies] 62 | anyhow = { version = "1.0.98", features = ["backtrace"] } 63 | aws-config = { version = "1.6.3", default-features = false } 64 | aws-credential-types = { version = "1.2.2", features = ["test-util"] } 65 | aws-sdk-s3 = { version = "1.85.0", features = ["behavior-version-latest"] } 66 | once_cell = "1.21.3" 67 | s3s-aws = { version = "0.12.0-dev", path = "../s3s-aws" } 68 | tokio = { version = "1.45.1", features = ["full"] } 69 | tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] } 70 | -------------------------------------------------------------------------------- /crates/s3s-fs/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-fs/src/checksum.rs: -------------------------------------------------------------------------------- 1 | use crate::fs::InternalInfo; 2 | 3 | use stdx::default::default; 4 | 5 | pub fn modify_internal_info(info: &mut serde_json::Map, checksum: &s3s::dto::Checksum) { 6 | if let Some(checksum_crc32) = &checksum.checksum_crc32 { 7 | info.insert("checksum_crc32".to_owned(), serde_json::Value::String(checksum_crc32.clone())); 8 | } 9 | if let Some(checksum_crc32c) = &checksum.checksum_crc32c { 10 | info.insert("checksum_crc32c".to_owned(), serde_json::Value::String(checksum_crc32c.clone())); 11 | } 12 | if let Some(checksum_sha1) = &checksum.checksum_sha1 { 13 | info.insert("checksum_sha1".to_owned(), serde_json::Value::String(checksum_sha1.clone())); 14 | } 15 | if let Some(checksum_sha256) = &checksum.checksum_sha256 { 16 | info.insert("checksum_sha256".to_owned(), serde_json::Value::String(checksum_sha256.clone())); 17 | } 18 | } 19 | 20 | pub fn from_internal_info(info: &InternalInfo) -> s3s::dto::Checksum { 21 | let mut ans: s3s::dto::Checksum = default(); 22 | if let Some(checksum_crc32) = info.get("checksum_crc32") { 23 | ans.checksum_crc32 = Some(checksum_crc32.as_str().unwrap().to_owned()); 24 | } 25 | if let Some(checksum_crc32c) = info.get("checksum_crc32c") { 26 | ans.checksum_crc32c = Some(checksum_crc32c.as_str().unwrap().to_owned()); 27 | } 28 | if let Some(checksum_sha1) = info.get("checksum_sha1") { 29 | ans.checksum_sha1 = Some(checksum_sha1.as_str().unwrap().to_owned()); 30 | } 31 | if let Some(checksum_sha256) = info.get("checksum_sha256") { 32 | ans.checksum_sha256 = Some(checksum_sha256.as_str().unwrap().to_owned()); 33 | } 34 | ans 35 | } 36 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/error.rs: -------------------------------------------------------------------------------- 1 | use s3s::S3Error; 2 | use s3s::S3ErrorCode; 3 | use s3s::StdError; 4 | 5 | use std::panic::Location; 6 | 7 | use tracing::error; 8 | 9 | #[derive(Debug)] 10 | pub struct Error { 11 | source: StdError, 12 | } 13 | 14 | pub type Result = std::result::Result; 15 | 16 | impl Error { 17 | #[must_use] 18 | #[track_caller] 19 | pub fn new(source: StdError) -> Self { 20 | log(&*source); 21 | Self { source } 22 | } 23 | 24 | #[must_use] 25 | #[track_caller] 26 | pub fn from_string(s: impl Into) -> Self { 27 
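// The double `.into()` first turns the argument into a `String`, then turns the
// `String` into the boxed `StdError` via std's `From<String>` impl for boxed errors.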
| Self::new(s.into().into()) 28 | } 29 | } 30 | 31 | impl From for Error 32 | where 33 | E: std::error::Error + Send + Sync + 'static, 34 | { 35 | #[track_caller] 36 | fn from(source: E) -> Self { 37 | Self::new(Box::new(source)) 38 | } 39 | } 40 | 41 | impl From for S3Error { 42 | fn from(e: Error) -> Self { 43 | S3Error::with_source(S3ErrorCode::InternalError, e.source) 44 | } 45 | } 46 | 47 | #[inline] 48 | #[track_caller] 49 | pub(crate) fn log(source: &dyn std::error::Error) { 50 | if cfg!(feature = "binary") { 51 | let location = Location::caller(); 52 | let span_trace = tracing_error::SpanTrace::capture(); 53 | 54 | error!( 55 | target: "s3s_fs_internal_error", 56 | %location, 57 | error=%source, 58 | "span trace:\n{span_trace}" 59 | ); 60 | } 61 | } 62 | 63 | macro_rules! try_ { 64 | ($result:expr) => { 65 | match $result { 66 | Ok(val) => val, 67 | Err(err) => { 68 | $crate::error::log(&err); 69 | return Err(::s3s::S3Error::internal_error(err)); 70 | } 71 | } 72 | }; 73 | } 74 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::wildcard_imports, 3 | clippy::missing_errors_doc, // TODO: docs 4 | clippy::let_underscore_untyped, 5 | clippy::module_name_repetitions, 6 | clippy::multiple_crate_versions, // TODO: check later 7 | )] 8 | 9 | #[macro_use] 10 | mod error; 11 | 12 | mod checksum; 13 | mod fs; 14 | mod s3; 15 | mod utils; 16 | 17 | pub use self::error::*; 18 | pub use self::fs::FileSystem; 19 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/main.rs: -------------------------------------------------------------------------------- 1 | use s3s_fs::FileSystem; 2 | use s3s_fs::Result; 3 | 4 | use s3s::auth::SimpleAuth; 5 | use s3s::host::MultiDomain; 6 | use s3s::service::S3ServiceBuilder; 7 | 8 | use std::io::IsTerminal; 9 | use std::ops::Not; 10 | use std::path::PathBuf; 11 | 12 | use tokio::net::TcpListener; 13 | 14 | use clap::{CommandFactory, Parser}; 15 | use tracing::info; 16 | 17 | use hyper_util::rt::{TokioExecutor, TokioIo}; 18 | use hyper_util::server::conn::auto::Builder as ConnBuilder; 19 | 20 | #[derive(Debug, Parser)] 21 | #[command(version)] 22 | struct Opt { 23 | /// Host name to listen on. 24 | #[arg(long, default_value = "localhost")] 25 | host: String, 26 | 27 | /// Port number to listen on. 28 | #[arg(long, default_value = "8014")] // The original design was finished on 2020-08-14. 29 | port: u16, 30 | 31 | /// Access key used for authentication. 32 | #[arg(long)] 33 | access_key: Option, 34 | 35 | /// Secret key used for authentication. 36 | #[arg(long)] 37 | secret_key: Option, 38 | 39 | /// Domain names used for virtual-hosted-style requests. 40 | #[arg(long)] 41 | domain: Vec, 42 | 43 | /// Root directory of stored data. 44 | root: PathBuf, 45 | } 46 | 47 | fn setup_tracing() { 48 | use tracing_subscriber::EnvFilter; 49 | 50 | let env_filter = EnvFilter::from_default_env(); 51 | let enable_color = std::io::stdout().is_terminal(); 52 | 53 | tracing_subscriber::fmt() 54 | .pretty() 55 | .with_env_filter(env_filter) 56 | .with_ansi(enable_color) 57 | .init(); 58 | } 59 | 60 | fn check_cli_args(opt: &Opt) { 61 | use clap::error::ErrorKind; 62 | 63 | let mut cmd = Opt::command(); 64 | 65 | // TODO: how to specify the requirements with clap derive API? 
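    // One possible answer (untested sketch, not part of this crate): clap's
    // `requires` attribute can express the pairing declaratively, e.g.
    //     #[arg(long, requires = "secret_key")]
    //     access_key: Option<String>,
    //     #[arg(long, requires = "access_key")]
    //     secret_key: Option<String>,
    // which would make clap reject one key without the other before this check runs.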
66 | if let (Some(_), None) | (None, Some(_)) = (&opt.access_key, &opt.secret_key) { 67 | let msg = "access key and secret key must be specified together"; 68 | cmd.error(ErrorKind::MissingRequiredArgument, msg).exit(); 69 | } 70 | 71 | for s in &opt.domain { 72 | if s.contains('/') { 73 | let msg = format!("expected domain name, found URL-like string: {s:?}"); 74 | cmd.error(ErrorKind::InvalidValue, msg).exit(); 75 | } 76 | } 77 | } 78 | 79 | fn main() -> Result { 80 | let opt = Opt::parse(); 81 | check_cli_args(&opt); 82 | 83 | setup_tracing(); 84 | 85 | run(opt) 86 | } 87 | 88 | #[tokio::main] 89 | async fn run(opt: Opt) -> Result { 90 | // Setup S3 provider 91 | let fs = FileSystem::new(opt.root)?; 92 | 93 | // Setup S3 service 94 | let service = { 95 | let mut b = S3ServiceBuilder::new(fs); 96 | 97 | // Enable authentication 98 | if let (Some(ak), Some(sk)) = (opt.access_key, opt.secret_key) { 99 | b.set_auth(SimpleAuth::from_single(ak, sk)); 100 | info!("authentication is enabled"); 101 | } 102 | 103 | // Enable parsing virtual-hosted-style requests 104 | if opt.domain.is_empty().not() { 105 | b.set_host(MultiDomain::new(&opt.domain)?); 106 | info!("virtual-hosted-style requests are enabled"); 107 | } 108 | 109 | b.build() 110 | }; 111 | 112 | // Run server 113 | let listener = TcpListener::bind((opt.host.as_str(), opt.port)).await?; 114 | let local_addr = listener.local_addr()?; 115 | 116 | let http_server = ConnBuilder::new(TokioExecutor::new()); 117 | let graceful = hyper_util::server::graceful::GracefulShutdown::new(); 118 | 119 | let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c()); 120 | 121 | info!("server is running at http://{local_addr}"); 122 | 123 | loop { 124 | let (socket, _) = tokio::select! { 125 | res = listener.accept() => { 126 | match res { 127 | Ok(conn) => conn, 128 | Err(err) => { 129 | tracing::error!("error accepting connection: {err}"); 130 | continue; 131 | } 132 | } 133 | } 134 | _ = ctrl_c.as_mut() => { 135 | break; 136 | } 137 | }; 138 | 139 | let conn = http_server.serve_connection(TokioIo::new(socket), service.clone()); 140 | let conn = graceful.watch(conn.into_owned()); 141 | tokio::spawn(async move { 142 | let _ = conn.await; 143 | }); 144 | } 145 | 146 | tokio::select! 
{ 147 | () = graceful.shutdown() => { 148 | tracing::debug!("Gracefully shutdown!"); 149 | }, 150 | () = tokio::time::sleep(std::time::Duration::from_secs(10)) => { 151 | tracing::debug!("Waited 10 seconds for graceful shutdown, aborting..."); 152 | } 153 | } 154 | 155 | info!("server is stopped"); 156 | Ok(()) 157 | } 158 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | 3 | use s3s::StdError; 4 | 5 | use tokio::io::AsyncWrite; 6 | use tokio::io::AsyncWriteExt; 7 | 8 | use bytes::Bytes; 9 | use futures::pin_mut; 10 | use futures::{Stream, StreamExt}; 11 | use transform_stream::AsyncTryStream; 12 | 13 | pub async fn copy_bytes(mut stream: S, writer: &mut W) -> Result 14 | where 15 | S: Stream> + Unpin, 16 | W: AsyncWrite + Unpin, 17 | { 18 | let mut nwritten: u64 = 0; 19 | while let Some(result) = stream.next().await { 20 | let bytes = match result { 21 | Ok(x) => x, 22 | Err(e) => return Err(Error::new(e)), 23 | }; 24 | writer.write_all(&bytes).await?; 25 | nwritten += bytes.len() as u64; 26 | } 27 | writer.flush().await?; 28 | Ok(nwritten) 29 | } 30 | 31 | pub fn bytes_stream(stream: S, content_length: usize) -> impl Stream> + Send + 'static 32 | where 33 | S: Stream> + Send + 'static, 34 | E: Send + 'static, 35 | { 36 | AsyncTryStream::::new(|mut y| async move { 37 | pin_mut!(stream); 38 | let mut remaining: usize = content_length; 39 | while let Some(result) = stream.next().await { 40 | let mut bytes = result?; 41 | if bytes.len() > remaining { 42 | bytes.truncate(remaining); 43 | } 44 | remaining -= bytes.len(); 45 | y.yield_ok(bytes).await; 46 | } 47 | Ok(()) 48 | }) 49 | } 50 | 51 | pub fn hex(input: impl AsRef<[u8]>) -> String { 52 | hex_simd::encode_to_string(input.as_ref(), hex_simd::AsciiCase::Lower) 53 | } 54 | -------------------------------------------------------------------------------- /crates/s3s-model/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-model" 3 | version = "0.12.0-dev" 4 | description = "S3 Protocol Model" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | anyhow = "1.0.98" 18 | numeric_cast = "0.3.0" 19 | serde = { version = "1.0.219", features = ["derive"] } 20 | serde_json = "1.0.140" 21 | -------------------------------------------------------------------------------- /crates/s3s-model/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // 3 | )] 4 | 5 | pub mod smithy; 6 | -------------------------------------------------------------------------------- /crates/s3s-policy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-policy" 3 | version = "0.12.0-dev" 4 | description = "S3 Policy Language" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | 
[dependencies] 17 | indexmap = { version = "2.9.0", features = ["serde"] } 18 | serde = { version = "1.0.219", features = ["derive"] } 19 | serde_json = "1.0.140" 20 | thiserror = "2.0.12" 21 | -------------------------------------------------------------------------------- /crates/s3s-policy/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-policy/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod model; 2 | pub mod pattern; 3 | 4 | #[cfg(test)] 5 | mod tests; 6 | -------------------------------------------------------------------------------- /crates/s3s-policy/src/pattern.rs: -------------------------------------------------------------------------------- 1 | pub struct PatternSet { 2 | // TODO: rewrite the naive implementation with something like Aho-Corasick 3 | patterns: Vec, 4 | } 5 | 6 | #[derive(Debug, thiserror::Error)] 7 | pub enum PatternError { 8 | #[error("Invalid pattern")] 9 | InvalidPattern, 10 | } 11 | 12 | #[derive(Debug)] 13 | struct Pattern { 14 | bytes: Vec, 15 | } 16 | 17 | impl PatternSet { 18 | /// Create a new matcher from a list of patterns. 19 | /// 20 | /// Patterns can contain 21 | /// + `*` to match any sequence of characters (including empty sequence) 22 | /// + `?` to match any single character 23 | /// + any other character to match itself 24 | /// 25 | /// # Errors 26 | /// Returns an error if any pattern is invalid. 27 | pub fn new<'a>(patterns: impl IntoIterator) -> Result { 28 | let patterns = patterns.into_iter().map(Self::parse_pattern).collect::>()?; 29 | Ok(PatternSet { patterns }) 30 | } 31 | 32 | fn parse_pattern(pattern: &str) -> Result { 33 | if pattern.is_empty() { 34 | return Err(PatternError::InvalidPattern); 35 | } 36 | Ok(Pattern { 37 | bytes: pattern.as_bytes().to_owned(), 38 | }) 39 | } 40 | 41 | /// Check if the input matches any of the patterns. 42 | #[must_use] 43 | pub fn is_match(&self, input: &str) -> bool { 44 | for pattern in &self.patterns { 45 | if Self::match_pattern(&pattern.bytes, input.as_bytes()) { 46 | return true; 47 | } 48 | } 49 | false 50 | } 51 | 52 | /// 53 | fn match_pattern(pattern: &[u8], input: &[u8]) -> bool { 54 | let mut p_idx = 0; 55 | let mut s_idx = 0; 56 | 57 | let mut p_back = usize::MAX - 1; 58 | let mut s_back = usize::MAX - 1; 59 | 60 | loop { 61 | if p_idx < pattern.len() { 62 | let p = pattern[p_idx]; 63 | if p == b'*' { 64 | p_idx += 1; 65 | p_back = p_idx; 66 | s_back = s_idx; 67 | continue; 68 | } 69 | 70 | if s_idx < input.len() { 71 | let c = input[s_idx]; 72 | if p == c || p == b'?' 
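                // literal byte or '?' wildcard: consume one byte from both
                // the pattern and the input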
{ 73 | p_idx += 1; 74 | s_idx += 1; 75 | continue; 76 | } 77 | } 78 | } else if s_idx == input.len() { 79 | return true; 80 | } 81 | 82 | if p_back == pattern.len() { 83 | return true; 84 | } 85 | 86 | if s_back + 1 < input.len() { 87 | s_back += 1; 88 | p_idx = p_back; 89 | s_idx = s_back; 90 | continue; 91 | } 92 | 93 | return false; 94 | } 95 | } 96 | } 97 | 98 | #[cfg(test)] 99 | mod tests { 100 | use super::*; 101 | 102 | #[test] 103 | fn test_match() { 104 | let cases = &[ 105 | ("*", "", true), 106 | ("**", "", true), 107 | ("***", "abc", true), 108 | ("a", "aa", false), 109 | ("***a", "aaaa", true), 110 | ("*abc???def", "abcdefabc123def", true), 111 | ("a*c?b", "acdcb", false), 112 | ("*a*b*c*", "abc", true), 113 | ("a*b*c*", "abc", true), 114 | ("*a*b*c", "abc", true), 115 | ("a*b*c", "abc", true), 116 | ]; 117 | 118 | for &(pattern, input, expected) in cases { 119 | let pattern = PatternSet::parse_pattern(pattern).unwrap(); 120 | let ans = PatternSet::match_pattern(&pattern.bytes, input.as_bytes()); 121 | assert_eq!(ans, expected, "pattern: {pattern:?}, input: {input:?}"); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /crates/s3s-policy/src/tests.rs: -------------------------------------------------------------------------------- 1 | /// 2 | pub(crate) fn example1_json() -> &'static str { 3 | r#" 4 | { 5 | "Version": "2012-10-17", 6 | "Statement": [ 7 | { 8 | "Sid": "FirstStatement", 9 | "Effect": "Allow", 10 | "Action": ["iam:ChangePassword"], 11 | "Resource": "*" 12 | }, 13 | { 14 | "Sid": "SecondStatement", 15 | "Effect": "Allow", 16 | "Action": "s3:ListAllMyBuckets", 17 | "Resource": "*" 18 | }, 19 | { 20 | "Sid": "ThirdStatement", 21 | "Effect": "Allow", 22 | "Action": [ 23 | "s3:List*", 24 | "s3:Get*" 25 | ], 26 | "Resource": [ 27 | "arn:aws:s3:::confidential-data", 28 | "arn:aws:s3:::confidential-data/*" 29 | ], 30 | "Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}} 31 | } 32 | ] 33 | } 34 | "# 35 | } 36 | 37 | /// 38 | pub(crate) fn example2_json() -> &'static str { 39 | r#" 40 | { 41 | "Version": "2012-10-17", 42 | "Statement": { 43 | "Effect": "Allow", 44 | "Action": "s3:ListBucket", 45 | "Resource": "arn:aws:s3:::example_bucket" 46 | } 47 | } 48 | "# 49 | } 50 | 51 | /// 52 | pub(crate) fn example3_json() -> &'static str { 53 | r#" 54 | { 55 | "Version": "2012-10-17", 56 | "Statement": [{ 57 | "Sid": "1", 58 | "Effect": "Allow", 59 | "Principal": {"AWS": ["arn:aws:iam::account-id:root"]}, 60 | "Action": "s3:*", 61 | "Resource": [ 62 | "arn:aws:s3:::mybucket", 63 | "arn:aws:s3:::mybucket/*" 64 | ] 65 | }] 66 | } 67 | "# 68 | } 69 | -------------------------------------------------------------------------------- /crates/s3s-proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-proxy" 3 | version = "0.12.0-dev" 4 | description = "S3 Proxy" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | aws-config = { version = "1.6.3", default-features = false, features = [ 18 | "behavior-version-latest", 19 | ] } 20 | aws-credential-types = "1.2.2" 21 | aws-sdk-s3 = "1.85.0" 22 | clap = { version = "4.5.39", features = ["derive"] } 23 | hyper-util = { version = "0.1.13", 
features = [ 24 | "server-auto", 25 | "server-graceful", 26 | "http1", 27 | "http2", 28 | "tokio", 29 | ] } 30 | s3s = { version = "0.12.0-dev", path = "../s3s" } 31 | s3s-aws = { version = "0.12.0-dev", path = "../s3s-aws" } 32 | tokio = { version = "1.45.1", features = ["full"] } 33 | tracing = "0.1.41" 34 | tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] } 35 | -------------------------------------------------------------------------------- /crates/s3s-proxy/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | use s3s::auth::SimpleAuth; 2 | use s3s::host::SingleDomain; 3 | use s3s::service::S3ServiceBuilder; 4 | use tokio::net::TcpListener; 5 | 6 | use std::error::Error; 7 | use std::io::IsTerminal; 8 | 9 | use aws_credential_types::provider::ProvideCredentials; 10 | 11 | use clap::Parser; 12 | use tracing::info; 13 | 14 | use hyper_util::rt::{TokioExecutor, TokioIo}; 15 | use hyper_util::server::conn::auto::Builder as ConnBuilder; 16 | 17 | #[derive(Debug, Parser)] 18 | struct Opt { 19 | #[clap(long, default_value = "localhost")] 20 | host: String, 21 | 22 | #[clap(long, default_value = "8014")] 23 | port: u16, 24 | 25 | #[clap(long)] 26 | domain: Option, 27 | 28 | #[clap(long)] 29 | endpoint_url: String, 30 | } 31 | 32 | fn setup_tracing() { 33 | use tracing_subscriber::EnvFilter; 34 | 35 | let env_filter = EnvFilter::from_default_env(); 36 | let enable_color = std::io::stdout().is_terminal(); 37 | 38 | tracing_subscriber::fmt() 39 | .pretty() 40 | .with_env_filter(env_filter) 41 | .with_ansi(enable_color) 42 | .init(); 43 | } 44 | 45 | #[tokio::main] 46 | async fn main() -> Result<(), Box> { 47 | setup_tracing(); 48 | let opt = Opt::parse(); 49 | 50 | // Setup S3 provider 51 | let sdk_conf = aws_config::from_env().endpoint_url(&opt.endpoint_url).load().await; 52 | let client = aws_sdk_s3::Client::from_conf(aws_sdk_s3::config::Builder::from(&sdk_conf).force_path_style(true).build()); 53 | let proxy = s3s_aws::Proxy::from(client); 54 | 55 | // Setup S3 service 56 | let service = { 57 | let mut b = S3ServiceBuilder::new(proxy); 58 | 59 | // Enable authentication 60 | if let Some(cred_provider) = sdk_conf.credentials_provider() { 61 | let cred = cred_provider.provide_credentials().await?; 62 | b.set_auth(SimpleAuth::from_single(cred.access_key_id(), cred.secret_access_key())); 63 | } 64 | 65 | // Enable parsing virtual-hosted-style requests 66 | if let Some(domain) = opt.domain { 67 | b.set_host(SingleDomain::new(&domain)?); 68 | } 69 | 70 | b.build() 71 | }; 72 | 73 | // Run server 74 | let listener = TcpListener::bind((opt.host.as_str(), opt.port)).await?; 75 | 76 | let http_server = ConnBuilder::new(TokioExecutor::new()); 77 | let graceful = hyper_util::server::graceful::GracefulShutdown::new(); 78 | 79 | let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c()); 80 | 81 | info!("server is running at http://{}:{}/", opt.host, opt.port); 82 | info!("server is forwarding requests to {}", opt.endpoint_url); 83 | 84 | loop { 85 | let (socket, _) = tokio::select! 
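        // Accept loop: take the next connection, or break out when Ctrl-C fires;
        // each accepted connection is handed to the graceful-shutdown watcher below.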
{ 86 | res = listener.accept() => { 87 | match res { 88 | Ok(conn) => conn, 89 | Err(err) => { 90 | tracing::error!("error accepting connection: {err}"); 91 | continue; 92 | } 93 | } 94 | } 95 | _ = ctrl_c.as_mut() => { 96 | break; 97 | } 98 | }; 99 | 100 | let conn = http_server.serve_connection(TokioIo::new(socket), service.clone()); 101 | let conn = graceful.watch(conn.into_owned()); 102 | tokio::spawn(async move { 103 | let _ = conn.await; 104 | }); 105 | } 106 | 107 | tokio::select! { 108 | () = graceful.shutdown() => { 109 | tracing::debug!("Gracefully shutdown!"); 110 | }, 111 | () = tokio::time::sleep(std::time::Duration::from_secs(10)) => { 112 | tracing::debug!("Waited 10 seconds for graceful shutdown, aborting..."); 113 | } 114 | } 115 | 116 | info!("server is stopped"); 117 | 118 | Ok(()) 119 | } 120 | -------------------------------------------------------------------------------- /crates/s3s-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-test" 3 | version = "0.12.0-dev" 4 | description = "s3s test suite" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | serde = { version = "1.0.219", features = ["derive"] } 18 | tokio = { version = "1.45.1", features = ["full"] } 19 | tracing = "0.1.41" 20 | tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] } 21 | clap = { version = "4.5.39", features = ["derive"] } 22 | dotenvy = "0.15.7" 23 | serde_json = "1.0.140" 24 | indexmap = "2.9.0" 25 | colored = "3.0.0" 26 | regex = "1.11.1" 27 | nugine-rust-utils = "0.3.1" 28 | backtrace = "0.3.75" 29 | const-str = { version = "0.6.2", features = ["std", "proc"] } 30 | -------------------------------------------------------------------------------- /crates/s3s-test/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-test/src/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::process::Command; 3 | 4 | pub fn collect_info() { 5 | if let Some(val) = git_commit() { 6 | println!("cargo:rustc-env=S3S_GIT_COMMIT={val}"); 7 | } 8 | if let Some(branch) = git_branch() { 9 | println!("cargo:rustc-env=S3S_GIT_BRANCH={branch}"); 10 | } 11 | if let Some(tag) = git_tag() { 12 | println!("cargo:rustc-env=S3S_GIT_TAG={tag}"); 13 | } 14 | if let Ok(val) = env::var("PROFILE") { 15 | println!("cargo:rustc-env=S3S_PROFILE={val}"); 16 | } 17 | } 18 | 19 | #[must_use] 20 | fn git(args: &[&str]) -> Option { 21 | let output = Command::new("git").args(args).output().ok()?; 22 | if output.status.success() { 23 | Some(String::from_utf8_lossy(&output.stdout).trim().to_string()) 24 | } else { 25 | None 26 | } 27 | } 28 | 29 | #[must_use] 30 | pub fn git_commit() -> Option { 31 | git(&["rev-parse", "HEAD"]) 32 | } 33 | 34 | #[must_use] 35 | pub fn git_branch() -> Option { 36 | git(&["rev-parse", "--abbrev-ref", "HEAD"]) 37 | } 38 | 39 | #[must_use] 40 | pub fn git_tag() -> Option { 41 | git(&["describe", "--tags", "--exact-match"]) 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::*; 47 | 48 | #[test] 49 | fn test_collect_info() { 
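        // Smoke test: collect_info() should not panic even when `git` is
        // unavailable, since every lookup degrades to `None`.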
50 | collect_info(); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s-test/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::fmt; 3 | 4 | pub type Result = std::result::Result; 5 | 6 | #[derive(Debug)] 7 | pub struct Failed { 8 | source: Option>, 9 | } 10 | 11 | impl From for Failed 12 | where 13 | E: std::error::Error + Send + Sync + 'static, 14 | { 15 | fn from(source: E) -> Self { 16 | if env::var("RUST_BACKTRACE").is_ok() { 17 | eprintln!("Failed: {source:#?}\n"); 18 | eprintln!("Backtrace:"); 19 | backtrace::trace(|frame| { 20 | backtrace::resolve_frame(frame, |symbol| { 21 | if let (Some(name), Some(filename), Some(colno)) = (symbol.name(), symbol.filename(), symbol.colno()) { 22 | if filename.components().any(|c| c.as_os_str().to_str() == Some("s3s")) { 23 | eprintln!("{name}\n at {}:{colno}\n", filename.display()); 24 | } 25 | } 26 | }); 27 | true 28 | }); 29 | } 30 | Self { 31 | source: Some(Box::new(source)), 32 | } 33 | } 34 | } 35 | 36 | impl fmt::Display for Failed { 37 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 38 | if let Some(source) = &self.source { 39 | write!(f, "Failed: {source}") 40 | } else { 41 | write!(f, "Failed") 42 | } 43 | } 44 | } 45 | 46 | impl Failed { 47 | pub fn from_string(s: impl Into) -> Self { 48 | Self { 49 | source: Some(s.into().into()), 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s-test/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // TODO 3 | clippy::missing_panics_doc, // TODO 4 | )] 5 | 6 | mod error; 7 | mod runner; 8 | mod traits; 9 | 10 | pub mod build; 11 | pub mod cli; 12 | pub mod report; 13 | pub mod tcx; 14 | 15 | pub use self::error::{Failed, Result}; 16 | pub use self::traits::*; 17 | -------------------------------------------------------------------------------- /crates/s3s-test/src/report.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Serialize, Deserialize)] 4 | pub struct Report { 5 | pub suite_count: CountSummary, 6 | pub duration_ns: u64, 7 | pub duration_ms: f64, 8 | 9 | pub suites: Vec, 10 | } 11 | 12 | #[derive(Serialize, Deserialize)] 13 | pub struct SuiteReport { 14 | pub name: String, 15 | 16 | pub fixture_count: CountSummary, 17 | pub duration_ns: u64, 18 | pub duration_ms: f64, 19 | 20 | pub setup: Option, 21 | pub teardown: Option, 22 | pub fixtures: Vec, 23 | } 24 | 25 | #[derive(Serialize, Deserialize)] 26 | pub struct FixtureReport { 27 | pub name: String, 28 | 29 | pub case_count: CountSummary, 30 | pub duration_ns: u64, 31 | pub duration_ms: f64, 32 | 33 | pub setup: Option, 34 | pub teardown: Option, 35 | pub cases: Vec, 36 | } 37 | 38 | #[derive(Serialize, Deserialize)] 39 | pub struct CaseReport { 40 | pub name: String, 41 | 42 | pub passed: bool, 43 | pub duration_ns: u64, 44 | pub duration_ms: f64, 45 | 46 | pub run: Option, 47 | } 48 | 49 | #[derive(Debug, Serialize, Deserialize)] 50 | pub struct FnSummary { 51 | pub result: FnResult, 52 | pub duration_ns: u64, 53 | pub duration_ms: f64, 54 | } 55 | 56 | #[derive(Debug, Serialize, Deserialize)] 57 | pub struct CountSummary { 58 | pub total: u64, 59 | pub passed: u64, 60 | pub failed: u64, 61 | } 62 | 63 | impl CountSummary { 64 | #[must_use] 65 | 
pub fn all_passed(&self) -> bool { 66 | self.passed == self.total 67 | } 68 | } 69 | 70 | #[derive(Debug, Serialize, Deserialize)] 71 | pub enum FnResult { 72 | Ok, 73 | Err(String), 74 | Panicked, 75 | } 76 | 77 | impl FnResult { 78 | #[must_use] 79 | pub fn is_ok(&self) -> bool { 80 | matches!(self, FnResult::Ok) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /crates/s3s-test/src/tcx.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Failed; 2 | use crate::error::Result; 3 | use crate::traits::TestCase; 4 | use crate::traits::TestFixture; 5 | use crate::traits::TestSuite; 6 | 7 | use std::any::type_name; 8 | use std::future::Future; 9 | use std::marker::PhantomData; 10 | use std::ops::Not; 11 | use std::pin::Pin; 12 | use std::sync::Arc; 13 | 14 | use indexmap::IndexMap; 15 | use regex::RegexSet; 16 | 17 | pub(crate) type ArcAny = Arc; 18 | type BoxFuture<'a, T> = Pin + Send + 'a>>; 19 | 20 | type SuiteSetupFn = Box BoxFuture<'static, Result>>; 21 | type SuiteTeardownFn = Box BoxFuture<'static, Result>>; 22 | 23 | type FixtureSetupFn = Box BoxFuture<'static, Result>>; 24 | type FixtureTeardownFn = Box BoxFuture<'static, Result>>; 25 | 26 | type CaseRunFn = Box BoxFuture<'static, Result>>; 27 | 28 | pub struct TestContext { 29 | pub(crate) suites: IndexMap, 30 | } 31 | 32 | pub(crate) struct SuiteInfo { 33 | pub(crate) name: String, 34 | // pub(crate) type_id: TypeId, 35 | pub(crate) setup: SuiteSetupFn, 36 | pub(crate) teardown: SuiteTeardownFn, 37 | pub(crate) fixtures: IndexMap, 38 | } 39 | 40 | pub(crate) struct FixtureInfo { 41 | pub(crate) name: String, 42 | // pub(crate) type_id: TypeId, 43 | pub(crate) setup: FixtureSetupFn, 44 | pub(crate) teardown: FixtureTeardownFn, 45 | pub(crate) cases: IndexMap, 46 | } 47 | 48 | pub(crate) struct CaseInfo { 49 | pub(crate) name: String, 50 | pub(crate) run: CaseRunFn, 51 | pub(crate) tags: Vec, 52 | } 53 | 54 | #[derive(Debug, Clone, PartialEq, Eq)] 55 | pub enum CaseTag { 56 | Ignored, 57 | ShouldPanic, 58 | } 59 | 60 | fn wrap(x: T) -> ArcAny { 61 | Arc::new(x) 62 | } 63 | 64 | fn downcast(any: ArcAny) -> Arc { 65 | Arc::downcast(any).unwrap() 66 | } 67 | 68 | fn unwrap(any: ArcAny) -> Result { 69 | match Arc::try_unwrap(downcast::(any)) { 70 | Ok(x) => Ok(x), 71 | Err(_) => Err(Failed::from_string(format!("Arc<{}> is leaked", type_name::()))), 72 | } 73 | } 74 | 75 | impl TestContext { 76 | pub(crate) fn new() -> Self { 77 | Self { suites: IndexMap::new() } 78 | } 79 | 80 | pub fn suite(&mut self, name: impl Into) -> SuiteBuilder<'_, S> { 81 | let name = name.into(); 82 | if !self.suites.contains_key(&name) { 83 | self.suites.insert( 84 | name.clone(), 85 | SuiteInfo { 86 | name: name.clone(), 87 | // type_id: TypeId::of::(), 88 | setup: Box::new(|| Box::pin(async { S::setup().await.map(wrap) })), 89 | teardown: Box::new(|any| Box::pin(async move { S::teardown(unwrap(any)?).await })), 90 | fixtures: IndexMap::new(), 91 | }, 92 | ); 93 | } 94 | SuiteBuilder { 95 | suite: &mut self.suites[&name], 96 | _marker: PhantomData, 97 | } 98 | } 99 | 100 | pub fn filter(&mut self, filter_set: &RegexSet) { 101 | self.suites.retain(|_, suite| { 102 | suite.fixtures.retain(|_, fixture| { 103 | fixture.cases.retain(|_, case| { 104 | let id = format!("{}/{}/{}", suite.name, fixture.name, case.name); 105 | filter_set.is_match(&id) 106 | }); 107 | fixture.cases.is_empty().not() 108 | }); 109 | suite.fixtures.is_empty().not() 110 | }); 111 | } 112 | 
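    // Note: filter() matches case ids of the form "suite/fixture/case" against the
    // regex set, then prunes fixtures and suites that end up empty.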
} 113 | 114 | pub struct SuiteBuilder<'a, S> { 115 | suite: &'a mut SuiteInfo, 116 | _marker: PhantomData, 117 | } 118 | 119 | impl SuiteBuilder<'_, S> { 120 | pub fn fixture>(&mut self, name: impl Into) -> FixtureBuilder<'_, X, S> { 121 | let name = name.into(); 122 | if !self.suite.fixtures.contains_key(&name) { 123 | self.suite.fixtures.insert( 124 | name.clone(), 125 | FixtureInfo { 126 | name: name.clone(), 127 | // type_id: TypeId::of::(), 128 | setup: Box::new(|any| Box::pin(async move { X::setup(downcast(any)).await.map(wrap) })), 129 | teardown: Box::new(|any| Box::pin(async move { X::teardown(unwrap(any)?).await })), 130 | cases: IndexMap::new(), 131 | }, 132 | ); 133 | } 134 | FixtureBuilder { 135 | fixture: &mut self.suite.fixtures[&name], 136 | _marker: PhantomData, 137 | } 138 | } 139 | } 140 | 141 | pub struct FixtureBuilder<'a, X, S> { 142 | fixture: &'a mut FixtureInfo, 143 | _marker: PhantomData<(X, S)>, 144 | } 145 | 146 | impl FixtureBuilder<'_, X, S> 147 | where 148 | X: TestFixture, 149 | S: TestSuite, 150 | { 151 | pub fn case>(&mut self, name: impl Into, case: C) -> CaseBuilder<'_, C, X, S> { 152 | let name = name.into(); 153 | self.fixture.cases.insert( 154 | name.clone(), 155 | CaseInfo { 156 | name: name.clone(), 157 | run: Box::new(move |any| Box::pin(case.run(downcast(any)))), 158 | tags: Vec::new(), 159 | }, 160 | ); 161 | CaseBuilder { 162 | case: &mut self.fixture.cases[&name], 163 | _marker: PhantomData, 164 | } 165 | } 166 | } 167 | 168 | pub struct CaseBuilder<'a, C, X, S> { 169 | case: &'a mut CaseInfo, 170 | _marker: PhantomData<(C, X, S)>, 171 | } 172 | 173 | impl CaseBuilder<'_, C, X, S> { 174 | pub fn tag(&mut self, tag: CaseTag) -> &mut Self { 175 | self.case.tags.push(tag); 176 | self 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /crates/s3s-test/src/traits.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Result; 2 | 3 | use std::future::Future; 4 | use std::sync::Arc; 5 | 6 | pub trait TestSuite: Sized + Send + Sync + 'static { 7 | fn setup() -> impl Future> + Send + 'static; 8 | 9 | fn teardown(self) -> impl Future + Send + 'static { 10 | async { Ok(()) } 11 | } 12 | } 13 | 14 | pub trait TestFixture: Sized + Send + Sync + 'static { 15 | fn setup(suite: Arc) -> impl Future> + Send + 'static; 16 | 17 | fn teardown(self) -> impl Future + Send + 'static { 18 | async { Ok(()) } 19 | } 20 | } 21 | 22 | pub trait TestCase: Sized + Send + Sync + 'static 23 | where 24 | Self: Sized + Send + Sync + 'static, 25 | X: TestFixture, 26 | S: TestSuite, 27 | { 28 | fn run(&self, fixture: Arc) -> impl Future + Send + 'static; 29 | } 30 | 31 | trait AsyncFn<'a, A> { 32 | type Output; 33 | type Future: Future + Send + 'a; 34 | 35 | fn call(&self, args: A) -> Self::Future; 36 | } 37 | 38 | impl<'a, F, U, O, A> AsyncFn<'a, (A,)> for F 39 | where 40 | F: Fn(A) -> U, 41 | U: Future + Send + 'a, 42 | { 43 | type Output = O; 44 | 45 | type Future = U; 46 | 47 | fn call(&self, args: (A,)) -> Self::Future { 48 | (self)(args.0) 49 | } 50 | } 51 | 52 | impl TestCase for C 53 | where 54 | C: for<'a> AsyncFn<'a, (Arc,), Output = Result>, 55 | C: Send + Sync + 'static, 56 | X: TestFixture, 57 | S: TestSuite, 58 | { 59 | fn run(&self, fixture: Arc) -> impl Future + Send + 'static { 60 | AsyncFn::call(self, (fixture,)) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /crates/s3s/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s" 3 | version = "0.12.0-dev" 4 | description = "S3 Service Adapter" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [package.metadata.docs.rs] 17 | all-features = true 18 | rustdoc-args = ["--cfg", "docsrs"] 19 | 20 | [features] 21 | openssl = ["dep:openssl"] 22 | 23 | [target.'cfg(not(windows))'.dependencies] 24 | openssl = { version = "0.10.73", optional = true } 25 | 26 | [dependencies] 27 | arrayvec = "0.7.6" 28 | async-trait = "0.1.88" 29 | atoi = { version = "2.0.0", default-features = false } 30 | base64-simd = "0.8.0" 31 | bytes = "1.10.1" 32 | bytestring = "1.4.0" 33 | chrono = { version = "0.4.41", default-features = false } 34 | crc32c = "0.6.8" 35 | crc32fast = "1.4.2" 36 | futures = { version = "0.3.31", default-features = false, features = ["std"] } 37 | hex-simd = "0.8.0" 38 | hmac = "=0.13.0-pre.5" 39 | http-body = "1.0.1" 40 | http-body-util = "0.1.3" 41 | httparse = "1.10.1" 42 | hyper = { version = "1.6.0", features = ["http1", "server"] } 43 | itoa = "1.0.15" 44 | md-5 = "=0.10.6" 45 | memchr = "2.7.4" 46 | mime = "0.3.17" 47 | nom = "7.1.3" 48 | nugine-rust-utils = "0.3.1" 49 | numeric_cast = "0.3.0" 50 | pin-project-lite = "0.2.16" 51 | quick-xml = { version = "0.37.5", features = ["serialize"] } 52 | serde = { version = "1.0.219", features = ["derive"] } 53 | serde_urlencoded = "0.7.1" 54 | sha1 = "=0.11.0-pre.5" 55 | sha2 = "=0.11.0-pre.5" 56 | smallvec = "1.15.0" 57 | thiserror = "2.0.12" 58 | time = { version = "0.3.41", features = ["formatting", "parsing", "macros"] } 59 | tower = { version = "0.5.2", default-features = false } 60 | tracing = "0.1.41" 61 | transform-stream = "0.3.1" 62 | urlencoding = "2.1.3" 63 | zeroize = "1.8.1" 64 | std-next = "0.1.8" 65 | sync_wrapper = { version = "1.0.2", default-features = false } 66 | tokio = { version = "1.45.1", features = ["time"] } 67 | crc64fast-nvme = "1.2.0" 68 | const-str = "0.6.2" 69 | http = "1.3.1" 70 | 71 | [dev-dependencies] 72 | axum = "0.8.4" 73 | serde_json = "1.0.140" 74 | tokio = { version = "1.45.1", features = ["full"] } 75 | tokio-util = { version = "0.7.15", features = ["io"] } 76 | -------------------------------------------------------------------------------- /crates/s3s/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s/examples/axum.rs: -------------------------------------------------------------------------------- 1 | use s3s::route::S3Route; 2 | use s3s::{Body, S3Request, S3Response, S3Result}; 3 | 4 | use axum::http; 5 | use http::{Extensions, HeaderMap, Method, Uri}; 6 | use tower::Service; 7 | 8 | pub struct CustomRoute { 9 | router: axum::Router, 10 | } 11 | 12 | impl CustomRoute { 13 | #[must_use] 14 | pub fn build() -> Self { 15 | Self { 16 | router: self::handlers::register(), 17 | } 18 | } 19 | } 20 | 21 | #[derive(Debug, Clone)] 22 | pub struct Extra { 23 | pub credentials: Option, 24 | pub region: Option, 25 | pub service: Option, 26 | } 27 | 28 | fn convert_request(req: S3Request) -> http::Request { 29 | let (mut parts, _) = http::Request::new(Body::empty()).into_parts(); 30 | 
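    // Copy method, URI, headers and extensions over to a plain http::Request, and
    // stash the S3 auth info in an `Extra` extension so axum handlers can read it
    // back later (e.g. via `Extension<Extra>` or `req.extensions()`).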
parts.method = req.method; 31 | parts.uri = req.uri; 32 | parts.headers = req.headers; 33 | parts.extensions = req.extensions; 34 | parts.extensions.insert(Extra { 35 | credentials: req.credentials, 36 | region: req.region, 37 | service: req.service, 38 | }); 39 | http::Request::from_parts(parts, req.input) 40 | } 41 | 42 | fn convert_response(resp: http::Response) -> S3Response { 43 | let (parts, body) = resp.into_parts(); 44 | let mut s3_resp = S3Response::new(Body::http_body_unsync(body)); 45 | s3_resp.status = Some(parts.status); 46 | s3_resp.headers = parts.headers; 47 | s3_resp.extensions = parts.extensions; 48 | s3_resp 49 | } 50 | 51 | #[async_trait::async_trait] 52 | impl S3Route for CustomRoute { 53 | fn is_match(&self, _method: &Method, uri: &Uri, _headers: &HeaderMap, _extensions: &mut Extensions) -> bool { 54 | let path = uri.path(); 55 | let prefix = const_str::concat!(self::handlers::PREFIX, "/"); 56 | path.starts_with(prefix) 57 | } 58 | 59 | async fn check_access(&self, req: &mut S3Request) -> S3Result<()> { 60 | if req.credentials.is_none() { 61 | tracing::debug!("anonymous access"); 62 | } 63 | Ok(()) // allow all requests 64 | } 65 | 66 | async fn call(&self, req: S3Request) -> S3Result> { 67 | let mut service = self.router.clone().into_service::(); 68 | let req = convert_request(req); 69 | let result = service.call(req).await; 70 | match result { 71 | Ok(resp) => Ok(convert_response(resp)), 72 | Err(e) => match e {}, 73 | } 74 | } 75 | } 76 | 77 | mod handlers { 78 | use std::collections::HashMap; 79 | 80 | use axum::Json; 81 | use axum::Router; 82 | use axum::body::Body; 83 | use axum::extract::Path; 84 | use axum::extract::Query; 85 | use axum::extract::Request; 86 | use axum::http::Response; 87 | use axum::response; 88 | use axum::routing::get; 89 | use axum::routing::post; 90 | 91 | pub async fn echo(req: Request) -> Response { 92 | Response::new(req.into_body()) 93 | } 94 | 95 | pub async fn hello() -> &'static str { 96 | "Hello, World!" 
97 | } 98 | 99 | pub async fn show_path(Path(path): Path) -> String { 100 | path 101 | } 102 | 103 | pub async fn show_query(Query(query): Query>) -> String { 104 | format!("{query:?}") 105 | } 106 | 107 | pub async fn show_json(Json(json): Json) -> response::Json { 108 | tracing::debug!(?json); 109 | response::Json(json) 110 | } 111 | 112 | pub const PREFIX: &str = "/custom"; 113 | 114 | pub fn register() -> Router { 115 | let router = Router::new() 116 | .route("/echo", post(echo)) 117 | .route("/hello", get(hello)) 118 | .route("/show_path/{*path}", get(show_path)) 119 | .route("/show_query", get(show_query)) 120 | .route("/show_json", post(show_json)); 121 | 122 | Router::new().nest(PREFIX, router) 123 | } 124 | } 125 | 126 | fn main() {} 127 | -------------------------------------------------------------------------------- /crates/s3s/examples/tokio_util.rs: -------------------------------------------------------------------------------- 1 | use futures::TryStreamExt; 2 | use tokio::io::AsyncBufRead; 3 | use tokio_util::io::StreamReader; 4 | 5 | pub fn convert_body(body: s3s::Body) -> impl AsyncBufRead + Send + Sync + 'static { 6 | StreamReader::new(body.into_stream().map_err(std::io::Error::other)) 7 | } 8 | 9 | pub fn convert_streaming_blob(blob: s3s::dto::StreamingBlob) -> impl AsyncBufRead + Send + Sync + 'static { 10 | StreamReader::new(blob.into_stream().map_err(std::io::Error::other)) 11 | } 12 | 13 | fn main() {} 14 | -------------------------------------------------------------------------------- /crates/s3s/src/access/context.rs: -------------------------------------------------------------------------------- 1 | use crate::S3Operation; 2 | use crate::auth::Credentials; 3 | use crate::path::S3Path; 4 | 5 | use hyper::HeaderMap; 6 | use hyper::Method; 7 | use hyper::Uri; 8 | use hyper::http::Extensions; 9 | 10 | pub struct S3AccessContext<'a> { 11 | pub(crate) credentials: Option<&'a Credentials>, 12 | pub(crate) s3_path: &'a S3Path, 13 | pub(crate) s3_op: &'a S3Operation, 14 | 15 | pub(crate) method: &'a Method, 16 | pub(crate) uri: &'a Uri, 17 | pub(crate) headers: &'a HeaderMap, 18 | 19 | pub(crate) extensions: &'a mut Extensions, 20 | } 21 | 22 | impl S3AccessContext<'_> { 23 | /// Returns the credentials of current request. 24 | /// 25 | /// `None` means anonymous request. 26 | #[must_use] 27 | pub fn credentials(&self) -> Option<&Credentials> { 28 | self.credentials 29 | } 30 | 31 | /// Returns the S3 path of current request. 32 | /// 33 | /// An S3 path can be root, bucket, or object. 34 | #[must_use] 35 | pub fn s3_path(&self) -> &S3Path { 36 | self.s3_path 37 | } 38 | 39 | /// Returns the S3 operation of current request. 40 | #[must_use] 41 | pub fn s3_op(&self) -> &S3Operation { 42 | self.s3_op 43 | } 44 | 45 | #[must_use] 46 | pub fn method(&self) -> &Method { 47 | self.method 48 | } 49 | 50 | #[must_use] 51 | pub fn uri(&self) -> &Uri { 52 | self.uri 53 | } 54 | 55 | #[must_use] 56 | pub fn headers(&self) -> &HeaderMap { 57 | self.headers 58 | } 59 | 60 | /// Returns the extensions of current request. 61 | /// 62 | /// It is used to pass custom data between middlewares. 
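    ///
    /// For example, an access-control hook could record a value here that a later
    /// layer reads back (using a hypothetical `TenantId(String)` type):
    /// `cx.extensions_mut().insert(TenantId("tenant-1".into()));`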
63 | #[must_use] 64 | pub fn extensions_mut(&mut self) -> &mut Extensions { 65 | self.extensions 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crates/s3s/src/access/mod.rs: -------------------------------------------------------------------------------- 1 | mod generated; 2 | pub use self::generated::S3Access; 3 | 4 | mod context; 5 | pub use self::context::S3AccessContext; 6 | 7 | use crate::error::S3Result; 8 | 9 | pub(crate) fn default_check(cx: &mut S3AccessContext<'_>) -> S3Result<()> { 10 | match cx.credentials() { 11 | Some(_) => Ok(()), 12 | None => Err(s3_error!(AccessDenied, "Signature is required")), 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /crates/s3s/src/auth/mod.rs: -------------------------------------------------------------------------------- 1 | //! S3 Authentication 2 | 3 | mod secret_key; 4 | pub use self::secret_key::{Credentials, SecretKey}; 5 | 6 | mod simple_auth; 7 | pub use self::simple_auth::SimpleAuth; 8 | 9 | use crate::error::S3Result; 10 | 11 | /// S3 Authentication Provider 12 | #[async_trait::async_trait] 13 | pub trait S3Auth: Send + Sync + 'static { 14 | /// Gets the corresponding secret key of the access key. 15 | /// 16 | /// This method is usually implemented as a database query. 17 | async fn get_secret_key(&self, access_key: &str) -> S3Result; 18 | } 19 | -------------------------------------------------------------------------------- /crates/s3s/src/auth/secret_key.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use serde::Deserialize; 4 | use serde::Serialize; 5 | use zeroize::Zeroize; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub struct Credentials { 9 | pub access_key: String, 10 | pub secret_key: SecretKey, 11 | } 12 | 13 | #[derive(Clone, PartialEq, Eq)] 14 | pub struct SecretKey(Box); 15 | 16 | impl SecretKey { 17 | fn new(s: impl Into>) -> Self { 18 | Self(s.into()) 19 | } 20 | 21 | #[must_use] 22 | pub fn expose(&self) -> &str { 23 | &self.0 24 | } 25 | } 26 | 27 | impl Zeroize for SecretKey { 28 | fn zeroize(&mut self) { 29 | self.0.zeroize(); 30 | } 31 | } 32 | 33 | impl Drop for SecretKey { 34 | fn drop(&mut self) { 35 | self.zeroize(); 36 | } 37 | } 38 | 39 | impl From for SecretKey { 40 | fn from(value: String) -> Self { 41 | Self::new(value) 42 | } 43 | } 44 | 45 | impl From> for SecretKey { 46 | fn from(value: Box) -> Self { 47 | Self::new(value) 48 | } 49 | } 50 | 51 | impl From<&str> for SecretKey { 52 | fn from(value: &str) -> Self { 53 | Self::new(value) 54 | } 55 | } 56 | 57 | const PLACEHOLDER: &str = "[SENSITIVE-SECRET-KEY]"; 58 | 59 | impl fmt::Debug for SecretKey { 60 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 61 | f.debug_tuple("SecretKey").field(&PLACEHOLDER).finish() 62 | } 63 | } 64 | 65 | impl<'de> Deserialize<'de> for SecretKey { 66 | fn deserialize(deserializer: D) -> Result 67 | where 68 | D: serde::Deserializer<'de>, 69 | { 70 | ::deserialize(deserializer).map(SecretKey::from) 71 | } 72 | } 73 | 74 | impl Serialize for SecretKey { 75 | fn serialize(&self, serializer: S) -> Result 76 | where 77 | S: serde::Serializer, 78 | { 79 | ::serialize(PLACEHOLDER, serializer) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /crates/s3s/src/auth/simple_auth.rs: -------------------------------------------------------------------------------- 1 | use super::S3Auth; 2 | 3 | use 
crate::auth::SecretKey; 4 | use crate::error::S3Result; 5 | 6 | use std::collections::HashMap; 7 | 8 | /// A simple authentication provider 9 | #[derive(Debug, Default)] 10 | pub struct SimpleAuth { 11 | /// key map 12 | map: HashMap, 13 | } 14 | 15 | impl SimpleAuth { 16 | /// Constructs a new `SimpleAuth` 17 | #[must_use] 18 | pub fn new() -> Self { 19 | Self { map: HashMap::new() } 20 | } 21 | 22 | #[must_use] 23 | pub fn from_single(access_key: impl Into, secret_key: impl Into) -> Self { 24 | let access_key = access_key.into(); 25 | let secret_key = secret_key.into(); 26 | let map = [(access_key, secret_key)].into_iter().collect(); 27 | Self { map } 28 | } 29 | 30 | /// register a pair of keys 31 | pub fn register(&mut self, access_key: String, secret_key: SecretKey) -> Option { 32 | self.map.insert(access_key, secret_key) 33 | } 34 | 35 | /// lookup a secret key 36 | #[must_use] 37 | pub fn lookup(&self, access_key: &str) -> Option<&SecretKey> { 38 | self.map.get(access_key) 39 | } 40 | } 41 | 42 | #[async_trait::async_trait] 43 | impl S3Auth for SimpleAuth { 44 | async fn get_secret_key(&self, access_key: &str) -> S3Result { 45 | match self.lookup(access_key) { 46 | None => Err(s3_error!(NotSignedUp, "Your account is not signed up")), 47 | Some(s) => Ok(s.clone()), 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /crates/s3s/src/checksum.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto::Checksum as _; 2 | use crate::crypto::Crc32; 3 | use crate::crypto::Crc32c; 4 | use crate::crypto::Crc64Nvme; 5 | use crate::crypto::Sha1; 6 | use crate::crypto::Sha256; 7 | use crate::dto::Checksum; 8 | 9 | use stdx::default::default; 10 | 11 | #[derive(Default)] 12 | pub struct ChecksumHasher { 13 | pub crc32: Option, 14 | pub crc32c: Option, 15 | pub sha1: Option, 16 | pub sha256: Option, 17 | pub crc64nvme: Option, 18 | } 19 | 20 | impl ChecksumHasher { 21 | pub fn update(&mut self, data: &[u8]) { 22 | if let Some(crc32) = &mut self.crc32 { 23 | crc32.update(data); 24 | } 25 | if let Some(crc32c) = &mut self.crc32c { 26 | crc32c.update(data); 27 | } 28 | if let Some(sha1) = &mut self.sha1 { 29 | sha1.update(data); 30 | } 31 | if let Some(sha256) = &mut self.sha256 { 32 | sha256.update(data); 33 | } 34 | if let Some(crc64nvme) = &mut self.crc64nvme { 35 | crc64nvme.update(data); 36 | } 37 | } 38 | 39 | #[must_use] 40 | pub fn finalize(self) -> Checksum { 41 | let mut ans: Checksum = default(); 42 | if let Some(crc32) = self.crc32 { 43 | let sum = crc32.finalize(); 44 | ans.checksum_crc32 = Some(Self::base64(&sum)); 45 | } 46 | if let Some(crc32c) = self.crc32c { 47 | let sum = crc32c.finalize(); 48 | ans.checksum_crc32c = Some(Self::base64(&sum)); 49 | } 50 | if let Some(sha1) = self.sha1 { 51 | let sum = sha1.finalize(); 52 | ans.checksum_sha1 = Some(Self::base64(sum.as_ref())); 53 | } 54 | if let Some(sha256) = self.sha256 { 55 | let sum = sha256.finalize(); 56 | ans.checksum_sha256 = Some(Self::base64(sum.as_ref())); 57 | } 58 | if let Some(crc64nvme) = self.crc64nvme { 59 | let sum = crc64nvme.finalize(); 60 | ans.checksum_crc64nvme = Some(Self::base64(&sum)); 61 | } 62 | ans 63 | } 64 | 65 | fn base64(input: &[u8]) -> String { 66 | base64_simd::STANDARD.encode_to_string(input) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /crates/s3s/src/crypto.rs: 
-------------------------------------------------------------------------------- 1 | pub trait Checksum { 2 | type Output: AsRef<[u8]>; 3 | 4 | #[must_use] 5 | fn new() -> Self; 6 | 7 | fn update(&mut self, data: &[u8]); 8 | 9 | #[must_use] 10 | fn finalize(self) -> Self::Output; 11 | 12 | #[must_use] 13 | fn checksum(data: &[u8]) -> Self::Output 14 | where 15 | Self: Sized, 16 | { 17 | let mut hasher = Self::new(); 18 | hasher.update(data); 19 | hasher.finalize() 20 | } 21 | } 22 | 23 | #[derive(Default)] 24 | pub struct Crc32(crc32fast::Hasher); 25 | 26 | impl Checksum for Crc32 { 27 | type Output = [u8; 4]; 28 | 29 | fn new() -> Self { 30 | Self::default() 31 | } 32 | 33 | fn update(&mut self, data: &[u8]) { 34 | self.0.update(data); 35 | } 36 | 37 | fn finalize(self) -> Self::Output { 38 | self.0.finalize().to_be_bytes() 39 | } 40 | } 41 | 42 | #[derive(Default)] 43 | pub struct Crc32c(u32); 44 | 45 | impl Checksum for Crc32c { 46 | type Output = [u8; 4]; 47 | 48 | fn new() -> Self { 49 | Self::default() 50 | } 51 | 52 | fn update(&mut self, data: &[u8]) { 53 | self.0 = crc32c::crc32c_append(self.0, data); 54 | } 55 | 56 | fn finalize(self) -> Self::Output { 57 | self.0.to_be_bytes() 58 | } 59 | } 60 | 61 | #[derive(Default)] 62 | pub struct Crc64Nvme(crc64fast_nvme::Digest); 63 | 64 | impl Checksum for Crc64Nvme { 65 | type Output = [u8; 8]; 66 | 67 | fn new() -> Self { 68 | Self::default() 69 | } 70 | 71 | fn update(&mut self, data: &[u8]) { 72 | self.0.write(data); 73 | } 74 | 75 | fn finalize(self) -> Self::Output { 76 | self.0.sum64().to_be_bytes() 77 | } 78 | } 79 | 80 | #[derive(Default)] 81 | pub struct Sha1(sha1::Sha1); 82 | 83 | impl Checksum for Sha1 { 84 | type Output = [u8; 20]; 85 | 86 | fn new() -> Self { 87 | Self::default() 88 | } 89 | 90 | fn update(&mut self, data: &[u8]) { 91 | use sha1::Digest as _; 92 | self.0.update(data); 93 | } 94 | 95 | fn finalize(self) -> Self::Output { 96 | use sha1::Digest as _; 97 | self.0.finalize().into() 98 | } 99 | } 100 | 101 | #[derive(Default)] 102 | pub struct Sha256(sha2::Sha256); 103 | 104 | impl Checksum for Sha256 { 105 | type Output = [u8; 32]; 106 | 107 | fn new() -> Self { 108 | Self::default() 109 | } 110 | 111 | fn update(&mut self, data: &[u8]) { 112 | use sha2::Digest as _; 113 | self.0.update(data); 114 | } 115 | 116 | fn finalize(self) -> Self::Output { 117 | use sha2::Digest as _; 118 | self.0.finalize().into() 119 | } 120 | } 121 | 122 | #[derive(Default)] 123 | pub struct Md5(md5::Md5); 124 | 125 | impl Checksum for Md5 { 126 | type Output = [u8; 16]; 127 | 128 | fn new() -> Self { 129 | Self::default() 130 | } 131 | 132 | fn update(&mut self, data: &[u8]) { 133 | use md5::Digest as _; 134 | self.0.update(data); 135 | } 136 | 137 | fn finalize(self) -> Self::Output { 138 | use md5::Digest as _; 139 | self.0.finalize().into() 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/build_error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, thiserror::Error)] 2 | #[error(transparent)] 3 | pub struct BuildError { 4 | #[from] 5 | kind: BuildErrorKind, 6 | } 7 | 8 | #[derive(Debug, thiserror::Error)] 9 | enum BuildErrorKind { 10 | #[error("Missing field: {field:?}")] 11 | MissingField { field: &'static str }, 12 | // #[error("BuildError: {source}")] 13 | // Other { source: StdError }, 14 | } 15 | 16 | impl BuildError { 17 | pub(crate) fn missing_field(field: &'static str) -> Self { 18 | Self { 19 | 
kind: BuildErrorKind::MissingField { field }, 20 | } 21 | } 22 | 23 | // pub(crate) fn other(source: StdError) -> Self { 24 | // Self { 25 | // kind: BuildErrorKind::Other { source }, 26 | // } 27 | // } 28 | } 29 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/content_type.rs: -------------------------------------------------------------------------------- 1 | use crate::http; 2 | 3 | use hyper::header::InvalidHeaderValue; 4 | 5 | pub type ContentType = mime::Mime; 6 | 7 | #[derive(Debug, thiserror::Error)] 8 | pub enum ParseContentTypeError { 9 | #[error("Expected UTF-8")] 10 | ExpectedUtf8, 11 | #[error("Mime: {0}")] 12 | Mime(mime::FromStrError), 13 | } 14 | 15 | impl http::TryFromHeaderValue for ContentType { 16 | type Error = ParseContentTypeError; 17 | 18 | fn try_from_header_value(val: &http::HeaderValue) -> Result { 19 | let val = val.to_str().map_err(|_| ParseContentTypeError::ExpectedUtf8)?; 20 | val.parse().map_err(ParseContentTypeError::Mime) 21 | } 22 | } 23 | 24 | impl http::TryIntoHeaderValue for ContentType { 25 | type Error = InvalidHeaderValue; 26 | 27 | fn try_into_header_value(self) -> Result { 28 | http::HeaderValue::from_str(self.as_ref()) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/copy_source.rs: -------------------------------------------------------------------------------- 1 | //! x-amz-copy-source 2 | 3 | use crate::http; 4 | use crate::path; 5 | 6 | use std::fmt::Write; 7 | 8 | /// x-amz-copy-source 9 | #[derive(Debug, Clone, PartialEq)] 10 | pub enum CopySource { 11 | /// bucket repr 12 | Bucket { 13 | /// bucket 14 | bucket: Box, 15 | /// key 16 | key: Box, 17 | /// version id 18 | version_id: Option>, 19 | }, 20 | /// access point repr 21 | AccessPoint { 22 | /// region 23 | region: Box, 24 | /// account id 25 | account_id: Box, 26 | /// access point name 27 | access_point_name: Box, 28 | /// key 29 | key: Box, 30 | }, 31 | } 32 | 33 | /// [`CopySource`] 34 | #[derive(Debug, thiserror::Error)] 35 | pub enum ParseCopySourceError { 36 | /// pattern mismatch 37 | #[error("ParseAmzCopySourceError: PatternMismatch")] 38 | PatternMismatch, 39 | 40 | /// invalid bucket name 41 | #[error("ParseAmzCopySourceError: InvalidBucketName")] 42 | InvalidBucketName, 43 | 44 | /// invalid key 45 | #[error("ParseAmzCopySourceError: InvalidKey")] 46 | InvalidKey, 47 | 48 | #[error("ParseAmzCopySourceError: InvalidEncoding")] 49 | InvalidEncoding, 50 | } 51 | 52 | impl CopySource { 53 | /// Parses [`CopySource`] from header 54 | /// # Errors 55 | /// Returns an error if the header is invalid 56 | pub fn parse(header: &str) -> Result { 57 | let header = urlencoding::decode(header).map_err(|_| ParseCopySourceError::InvalidEncoding)?; 58 | let header = header.strip_prefix('/').unwrap_or(&header); 59 | 60 | // FIXME: support access point 61 | match header.split_once('/') { 62 | None => Err(ParseCopySourceError::PatternMismatch), 63 | Some((bucket, remaining)) => { 64 | let (key, version_id) = match remaining.split_once('?') { 65 | Some((key, remaining)) => { 66 | let version_id = remaining 67 | .split_once('=') 68 | .and_then(|(name, val)| (name == "versionId").then_some(val)); 69 | (key, version_id) 70 | } 71 | None => (remaining, None), 72 | }; 73 | 74 | if !path::check_bucket_name(bucket) { 75 | return Err(ParseCopySourceError::InvalidBucketName); 76 | } 77 | 78 | if !path::check_key(key) { 79 | return Err(ParseCopySourceError::InvalidKey); 80 | } 81 
| 82 | Ok(Self::Bucket { 83 | bucket: bucket.into(), 84 | key: key.into(), 85 | version_id: version_id.map(Into::into), 86 | }) 87 | } 88 | } 89 | } 90 | 91 | #[must_use] 92 | pub fn format_to_string(&self) -> String { 93 | let mut buf = String::new(); 94 | match self { 95 | CopySource::Bucket { bucket, key, version_id } => { 96 | write!(&mut buf, "{bucket}/{key}").unwrap(); 97 | if let Some(version_id) = version_id { 98 | write!(&mut buf, "?versionId={version_id}").unwrap(); 99 | } 100 | } 101 | CopySource::AccessPoint { .. } => { 102 | unimplemented!() 103 | } 104 | } 105 | buf 106 | } 107 | } 108 | 109 | impl http::TryFromHeaderValue for CopySource { 110 | type Error = ParseCopySourceError; 111 | 112 | fn try_from_header_value(val: &http::HeaderValue) -> Result { 113 | let header = val.to_str().map_err(|_| ParseCopySourceError::InvalidEncoding)?; 114 | Self::parse(header) 115 | } 116 | } 117 | 118 | #[cfg(test)] 119 | mod tests { 120 | use super::*; 121 | 122 | #[test] 123 | fn path_style() { 124 | { 125 | let header = "awsexamplebucket/reports/january.pdf"; 126 | let val = CopySource::parse(header).unwrap(); 127 | match val { 128 | CopySource::Bucket { bucket, key, version_id } => { 129 | assert_eq!(&*bucket, "awsexamplebucket"); 130 | assert_eq!(&*key, "reports/january.pdf"); 131 | assert!(version_id.is_none()); 132 | } 133 | CopySource::AccessPoint { .. } => panic!(), 134 | } 135 | } 136 | 137 | { 138 | let header = "awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"; 139 | let val = CopySource::parse(header).unwrap(); 140 | match val { 141 | CopySource::Bucket { bucket, key, version_id } => { 142 | assert_eq!(&*bucket, "awsexamplebucket"); 143 | assert_eq!(&*key, "reports/january.pdf"); 144 | assert_eq!(version_id.as_deref().unwrap(), "QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"); 145 | } 146 | CopySource::AccessPoint { .. 
} => panic!(), 147 | } 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/event.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Clone, PartialEq)] 2 | pub struct Event(String); 3 | 4 | impl From for Event { 5 | fn from(value: String) -> Self { 6 | Self(value) 7 | } 8 | } 9 | 10 | impl AsRef for Event { 11 | fn as_ref(&self) -> &str { 12 | self.0.as_ref() 13 | } 14 | } 15 | 16 | impl From for String { 17 | fn from(value: Event) -> Self { 18 | value.0 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/mod.rs: -------------------------------------------------------------------------------- 1 | mod build_error; 2 | 3 | mod generated; 4 | pub use self::generated::*; 5 | 6 | mod streaming_blob; 7 | pub use self::streaming_blob::*; 8 | 9 | mod timestamp; 10 | pub use self::timestamp::*; 11 | 12 | mod copy_source; 13 | pub use self::copy_source::*; 14 | 15 | mod range; 16 | pub use self::range::Range; 17 | 18 | mod content_type; 19 | pub use self::content_type::*; 20 | 21 | mod event; 22 | pub use self::event::Event; 23 | 24 | mod event_stream; 25 | pub use self::event_stream::*; 26 | 27 | pub type List = Vec; 28 | pub type Map = std::collections::HashMap; 29 | 30 | pub type Body = hyper::body::Bytes; 31 | 32 | pub type Unit = (); 33 | 34 | impl From for ListObjectsV2Input { 35 | fn from(v1: ListObjectsInput) -> Self { 36 | let ListObjectsInput { 37 | bucket, 38 | delimiter, 39 | encoding_type, 40 | expected_bucket_owner, 41 | marker, 42 | max_keys, 43 | prefix, 44 | request_payer, 45 | optional_object_attributes, 46 | } = v1; 47 | 48 | Self { 49 | bucket, 50 | continuation_token: None, 51 | delimiter, 52 | encoding_type, 53 | expected_bucket_owner, 54 | fetch_owner: None, 55 | max_keys, 56 | prefix, 57 | request_payer, 58 | start_after: marker, 59 | optional_object_attributes, 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/streaming_blob.rs: -------------------------------------------------------------------------------- 1 | //! 
Streaming blob 2 | 3 | use crate::error::StdError; 4 | use crate::http::Body; 5 | use crate::stream::*; 6 | 7 | use std::fmt; 8 | use std::pin::Pin; 9 | use std::task::{Context, Poll}; 10 | 11 | use futures::Stream; 12 | use hyper::body::Bytes; 13 | 14 | pub struct StreamingBlob { 15 | inner: DynByteStream, 16 | } 17 | 18 | impl StreamingBlob { 19 | pub fn new(stream: S) -> Self 20 | where 21 | S: ByteStream> + Send + Sync + 'static, 22 | { 23 | Self { inner: Box::pin(stream) } 24 | } 25 | 26 | pub fn wrap(stream: S) -> Self 27 | where 28 | S: Stream> + Send + Sync + 'static, 29 | E: std::error::Error + Send + Sync + 'static, 30 | { 31 | Self { inner: wrap(stream) } 32 | } 33 | 34 | fn into_inner(self) -> DynByteStream { 35 | self.inner 36 | } 37 | } 38 | 39 | impl fmt::Debug for StreamingBlob { 40 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 41 | f.debug_struct("StreamingBlob") 42 | .field("remaining_length", &self.remaining_length()) 43 | .finish_non_exhaustive() 44 | } 45 | } 46 | 47 | impl Stream for StreamingBlob { 48 | type Item = Result; 49 | 50 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 51 | Pin::new(&mut self.inner).poll_next(cx) 52 | } 53 | 54 | fn size_hint(&self) -> (usize, Option) { 55 | self.inner.size_hint() 56 | } 57 | } 58 | 59 | impl ByteStream for StreamingBlob { 60 | fn remaining_length(&self) -> RemainingLength { 61 | self.inner.remaining_length() 62 | } 63 | } 64 | 65 | impl From for DynByteStream { 66 | fn from(value: StreamingBlob) -> Self { 67 | value.into_inner() 68 | } 69 | } 70 | 71 | impl From for StreamingBlob { 72 | fn from(value: DynByteStream) -> Self { 73 | Self { inner: value } 74 | } 75 | } 76 | 77 | impl From for Body { 78 | fn from(value: StreamingBlob) -> Self { 79 | Body::from(value.into_inner()) 80 | } 81 | } 82 | 83 | impl From for StreamingBlob { 84 | fn from(value: Body) -> Self { 85 | Self::new(value) 86 | } 87 | } 88 | 89 | pin_project_lite::pin_project! { 90 | pub(crate) struct StreamWrapper { 91 | #[pin] 92 | inner: S 93 | } 94 | } 95 | 96 | impl Stream for StreamWrapper 97 | where 98 | S: Stream> + Send + Sync + 'static, 99 | E: std::error::Error + Send + Sync + 'static, 100 | { 101 | type Item = Result; 102 | 103 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 104 | let this = self.project(); 105 | this.inner.poll_next(cx).map_err(|e| Box::new(e) as StdError) 106 | } 107 | 108 | fn size_hint(&self) -> (usize, Option) { 109 | self.inner.size_hint() 110 | } 111 | } 112 | 113 | impl ByteStream for StreamWrapper 114 | where 115 | StreamWrapper: Stream>, 116 | { 117 | fn remaining_length(&self) -> RemainingLength { 118 | RemainingLength::unknown() 119 | } 120 | } 121 | 122 | fn wrap(inner: S) -> DynByteStream 123 | where 124 | StreamWrapper: ByteStream> + Send + Sync + 'static, 125 | { 126 | Box::pin(StreamWrapper { inner }) 127 | } 128 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/timestamp.rs: -------------------------------------------------------------------------------- 1 | //! 
timestamp 2 | 3 | use std::io; 4 | use std::num::ParseIntError; 5 | use std::time::SystemTime; 6 | 7 | use time::format_description::FormatItem; 8 | use time::format_description::well_known::Rfc3339; 9 | use time::macros::format_description; 10 | 11 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 12 | pub struct Timestamp(time::OffsetDateTime); 13 | 14 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 15 | pub enum TimestampFormat { 16 | DateTime, 17 | HttpDate, 18 | EpochSeconds, 19 | } 20 | 21 | impl From for Timestamp { 22 | fn from(value: time::OffsetDateTime) -> Self { 23 | Self(value) 24 | } 25 | } 26 | 27 | impl From for time::OffsetDateTime { 28 | fn from(value: Timestamp) -> Self { 29 | value.0 30 | } 31 | } 32 | 33 | impl From for Timestamp { 34 | fn from(value: SystemTime) -> Self { 35 | Self(time::OffsetDateTime::from(value)) 36 | } 37 | } 38 | 39 | #[derive(Debug, thiserror::Error)] 40 | pub enum ParseTimestampError { 41 | #[error("time: {0}")] 42 | Time(#[from] time::error::Parse), 43 | #[error("int: {0}")] 44 | Int(#[from] ParseIntError), 45 | #[error("time overflow")] 46 | Overflow, 47 | #[error("component range: {0}")] 48 | ComponentRange(#[from] time::error::ComponentRange), 49 | } 50 | 51 | #[derive(Debug, thiserror::Error)] 52 | pub enum FormatTimestampError { 53 | #[error("time: {0}")] 54 | Time(#[from] time::error::Format), 55 | #[error("io: {0}")] 56 | Io(#[from] io::Error), 57 | } 58 | 59 | /// See 60 | const RFC1123: &[FormatItem<'_>] = 61 | format_description!("[weekday repr:short], [day] [month repr:short] [year] [hour]:[minute]:[second] GMT"); 62 | 63 | /// See 64 | const RFC3339: &[FormatItem<'_>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z"); 65 | 66 | impl Timestamp { 67 | /// Parses `Timestamp` from string 68 | /// 69 | /// # Errors 70 | /// Returns an error if the string is invalid 71 | pub fn parse(format: TimestampFormat, s: &str) -> Result { 72 | let ans = match format { 73 | TimestampFormat::DateTime => time::OffsetDateTime::parse(s, &Rfc3339)?, 74 | TimestampFormat::HttpDate => time::PrimitiveDateTime::parse(s, RFC1123)?.assume_utc(), 75 | TimestampFormat::EpochSeconds => match s.split_once('.') { 76 | Some((secs, frac)) => { 77 | let secs: i64 = secs.parse::()?.try_into().map_err(|_| ParseTimestampError::Overflow)?; 78 | let val: u32 = frac.parse::()?; 79 | let mul: u32 = match frac.len() { 80 | 1 => 100_000_000, 81 | 2 => 10_000_000, 82 | 3 => 1_000_000, 83 | 4 => 100_000, 84 | 5 => 10000, 85 | 6 => 1000, 86 | 7 => 100, 87 | 8 => 10, 88 | 9 => 1, 89 | _ => return Err(ParseTimestampError::Overflow), 90 | }; 91 | let nanos = i128::from(secs) * 1_000_000_000 + i128::from(val * mul); 92 | time::OffsetDateTime::from_unix_timestamp_nanos(nanos)? 93 | } 94 | None => { 95 | let secs: i64 = s.parse::()?.try_into().map_err(|_| ParseTimestampError::Overflow)?; 96 | time::OffsetDateTime::from_unix_timestamp(secs)? 
97 | } 98 | }, 99 | }; 100 | Ok(Self(ans)) 101 | } 102 | 103 | /// Formats `Timestamp` into a writer 104 | /// 105 | /// # Errors 106 | /// Returns an error if the formatting fails 107 | pub fn format(&self, format: TimestampFormat, w: &mut impl io::Write) -> Result<(), FormatTimestampError> { 108 | match format { 109 | TimestampFormat::DateTime => { 110 | self.0.format_into(w, RFC3339)?; 111 | } 112 | TimestampFormat::HttpDate => { 113 | self.0.format_into(w, RFC1123)?; 114 | } 115 | TimestampFormat::EpochSeconds => { 116 | let val = self.0.unix_timestamp_nanos(); 117 | 118 | #[allow(clippy::cast_precision_loss)] // FIXME: accurate conversion? 119 | { 120 | let secs = (val / 1_000_000_000) as f64; 121 | let nanos = (val % 1_000_000_000) as f64 / 1_000_000_000.0; 122 | let ts = secs + nanos; 123 | write!(w, "{ts}")?; 124 | } 125 | } 126 | } 127 | Ok(()) 128 | } 129 | } 130 | 131 | #[cfg(test)] 132 | mod tests { 133 | use super::*; 134 | 135 | #[test] 136 | fn text_repr() { 137 | let cases = [ 138 | (TimestampFormat::DateTime, "1985-04-12T23:20:50.520Z"), 139 | (TimestampFormat::HttpDate, "Tue, 29 Apr 2014 18:30:38 GMT"), 140 | (TimestampFormat::HttpDate, "Wed, 21 Oct 2015 07:28:00 GMT"), 141 | // (TimestampFormat::HttpDate, "Sun, 02 Jan 2000 20:34:56.000 GMT"), // FIXME: optional fractional seconds 142 | (TimestampFormat::EpochSeconds, "1515531081.1234"), 143 | ]; 144 | 145 | for (fmt, expected) in cases { 146 | let time = Timestamp::parse(fmt, expected).unwrap(); 147 | 148 | let mut buf = Vec::new(); 149 | time.format(fmt, &mut buf).unwrap(); 150 | let text = String::from_utf8(buf).unwrap(); 151 | 152 | assert_eq!(expected, text); 153 | } 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /crates/s3s/src/header/mod.rs: -------------------------------------------------------------------------------- 1 | mod generated; 2 | pub use self::generated::*; 3 | -------------------------------------------------------------------------------- /crates/s3s/src/http/keep_alive_body.rs: -------------------------------------------------------------------------------- 1 | use crate::{StdError, http::Response}; 2 | 3 | use std::future::Future; 4 | use std::pin::Pin; 5 | use std::task::{Context, Poll}; 6 | use std::time::Duration; 7 | 8 | use bytes::Bytes; 9 | use http_body::{Body, Frame}; 10 | use tokio::time::Interval; 11 | 12 | // sends whitespace while the future is pending 13 | pin_project_lite::pin_project! 
{ 14 | 15 | pub struct KeepAliveBody { 16 | #[pin] 17 | inner: F, 18 | initial_body: Option, 19 | response: Option, 20 | interval: Interval, 21 | done: bool, 22 | } 23 | } 24 | 25 | impl KeepAliveBody { 26 | pub fn new(inner: F, interval: Duration, initial_body: Option) -> Self { 27 | Self { 28 | inner, 29 | initial_body, 30 | response: None, 31 | interval: tokio::time::interval(interval), 32 | done: false, 33 | } 34 | } 35 | } 36 | 37 | impl Body for KeepAliveBody 38 | where 39 | F: Future>, 40 | { 41 | type Data = Bytes; 42 | 43 | type Error = StdError; 44 | 45 | fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { 46 | if self.done { 47 | return Poll::Ready(None); 48 | } 49 | let mut this = self.project(); 50 | if let Some(initial_body) = this.initial_body.take() { 51 | cx.waker().wake_by_ref(); 52 | return Poll::Ready(Some(Ok(Frame::data(initial_body)))); 53 | } 54 | loop { 55 | if let Some(response) = &mut this.response { 56 | let frame = std::task::ready!(Pin::new(&mut response.body).poll_frame(cx)?); 57 | if let Some(frame) = frame { 58 | return Poll::Ready(Some(Ok(frame))); 59 | } 60 | *this.done = true; 61 | return Poll::Ready(Some(Ok(Frame::trailers(std::mem::take(&mut response.headers))))); 62 | } 63 | match this.inner.as_mut().poll(cx) { 64 | Poll::Ready(response) => match response { 65 | Ok(response) => { 66 | *this.response = Some(response); 67 | } 68 | Err(e) => { 69 | *this.done = true; 70 | return Poll::Ready(Some(Err(e))); 71 | } 72 | }, 73 | Poll::Pending => match this.interval.poll_tick(cx) { 74 | Poll::Ready(_) => return Poll::Ready(Some(Ok(Frame::data(Bytes::from_static(b" "))))), 75 | Poll::Pending => return Poll::Pending, 76 | }, 77 | } 78 | } 79 | } 80 | 81 | fn is_end_stream(&self) -> bool { 82 | self.done 83 | } 84 | } 85 | 86 | #[cfg(test)] 87 | mod tests { 88 | use http_body_util::BodyExt; 89 | use hyper::{StatusCode, header::HeaderValue}; 90 | 91 | use super::*; 92 | 93 | #[tokio::test] 94 | async fn keep_alive_body() { 95 | let body = KeepAliveBody::new( 96 | async { 97 | let mut res = Response::with_status(StatusCode::OK); 98 | res.body = Bytes::from_static(b" world").into(); 99 | res.headers.insert("key", HeaderValue::from_static("value")); 100 | Ok(res) 101 | }, 102 | Duration::from_secs(1), 103 | Some(Bytes::from_static(b"hello")), 104 | ); 105 | 106 | let aggregated = body.collect().await.unwrap(); 107 | 108 | assert_eq!(aggregated.trailers().unwrap().get("key").unwrap(), "value"); 109 | 110 | let buf = aggregated.to_bytes(); 111 | 112 | assert_eq!(buf, b"hello world".as_slice()); 113 | } 114 | 115 | #[tokio::test] 116 | async fn keep_alive_body_no_initial() { 117 | let body = KeepAliveBody::new( 118 | async { 119 | let mut res = Response::with_status(StatusCode::OK); 120 | res.body = Bytes::from_static(b"hello world").into(); 121 | Ok(res) 122 | }, 123 | Duration::from_secs(1), 124 | None, 125 | ); 126 | 127 | let aggregated = body.collect().await.unwrap(); 128 | 129 | let buf = aggregated.to_bytes(); 130 | 131 | assert_eq!(buf, b"hello world".as_slice()); 132 | } 133 | 134 | #[tokio::test] 135 | async fn keep_alive_body_fill_withespace() { 136 | let body = KeepAliveBody::new( 137 | async { 138 | tokio::time::sleep(Duration::from_millis(450)).await; 139 | 140 | let mut res = Response::with_status(StatusCode::OK); 141 | res.body = Bytes::from_static(b"hello world").into(); 142 | Ok(res) 143 | }, 144 | Duration::from_millis(100), 145 | None, 146 | ); 147 | 148 | let aggregated = body.collect().await.unwrap(); 149 | 150 | let 
buf = aggregated.to_bytes(); 151 | 152 | let ans1 = b" hello world"; 153 | let ans2 = b" hello world"; 154 | 155 | assert!(buf.as_ref() == ans1 || buf.as_ref() == ans2, "buf: {buf:?}"); 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /crates/s3s/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | mod ser; 2 | pub use self::ser::*; 3 | 4 | mod de; 5 | pub use self::de::*; 6 | 7 | mod ordered_qs; 8 | pub use self::ordered_qs::*; 9 | 10 | mod ordered_headers; 11 | pub use self::ordered_headers::*; 12 | 13 | mod aws_chunked_stream; 14 | pub use self::aws_chunked_stream::*; 15 | 16 | mod multipart; 17 | pub use self::multipart::*; 18 | 19 | mod body; 20 | pub use self::body::*; 21 | 22 | mod keep_alive_body; 23 | pub use self::keep_alive_body::KeepAliveBody; 24 | 25 | mod request; 26 | pub use self::request::Request; 27 | 28 | mod response; 29 | pub use self::response::Response; 30 | 31 | pub use hyper::header::{HeaderName, HeaderValue, InvalidHeaderValue}; 32 | pub use hyper::http::StatusCode; 33 | -------------------------------------------------------------------------------- /crates/s3s/src/http/ordered_headers.rs: -------------------------------------------------------------------------------- 1 | //! Ordered headers 2 | 3 | use hyper::HeaderMap; 4 | use hyper::header::ToStrError; 5 | use hyper::http::HeaderValue; 6 | 7 | use crate::utils::stable_sort_by_first; 8 | 9 | /// Immutable http header container 10 | #[derive(Debug, Default)] 11 | pub struct OrderedHeaders<'a> { 12 | /// Ascending headers (header names are lowercase) 13 | headers: Vec<(&'a str, &'a str)>, 14 | } 15 | 16 | impl<'a> OrderedHeaders<'a> { 17 | /// Constructs [`OrderedHeaders`] from slice 18 | /// 19 | /// + header names must be lowercase 20 | /// + header values must be valid 21 | #[cfg(test)] 22 | #[must_use] 23 | pub fn from_slice_unchecked(slice: &[(&'a str, &'a str)]) -> Self { 24 | for (name, _) in slice { 25 | let is_valid = |c: u8| c == b'-' || c.is_ascii_lowercase() || c.is_ascii_digit(); 26 | assert!(name.as_bytes().iter().copied().all(is_valid)); 27 | } 28 | let mut headers = Vec::new(); 29 | headers.extend_from_slice(slice); 30 | stable_sort_by_first(&mut headers); 31 | Self { headers } 32 | } 33 | 34 | /// Constructs [`OrderedHeaders`] from a header map 35 | /// 36 | /// # Errors 37 | /// Returns [`ToStrError`] if header value cannot be converted to string slice 38 | pub fn from_headers(map: &'a HeaderMap) -> Result { 39 | let mut headers: Vec<(&'a str, &'a str)> = Vec::with_capacity(map.len()); 40 | 41 | for (name, value) in map { 42 | headers.push((name.as_str(), value.to_str()?)); 43 | } 44 | stable_sort_by_first(&mut headers); 45 | 46 | Ok(Self { headers }) 47 | } 48 | 49 | fn get_all_pairs(&self, name: &str) -> impl Iterator + '_ + use<'a, '_> { 50 | let slice = self.headers.as_slice(); 51 | 52 | let lower_bound = slice.partition_point(|x| x.0 < name); 53 | let upper_bound = slice.partition_point(|x| x.0 <= name); 54 | 55 | slice[lower_bound..upper_bound].iter().copied() 56 | } 57 | 58 | pub fn get_all(&self, name: impl AsRef) -> impl Iterator + '_ { 59 | self.get_all_pairs(name.as_ref()).map(|x| x.1) 60 | } 61 | 62 | fn get_unique_pair(&self, name: &'_ str) -> Option<(&'a str, &'a str)> { 63 | let slice = self.headers.as_slice(); 64 | let lower_bound = slice.partition_point(|x| x.0 < name); 65 | 66 | let mut iter = slice[lower_bound..].iter().copied(); 67 | let pair = iter.next()?; 68 | 69 | if 
let Some(following) = iter.next() { 70 | if following.0 == name { 71 | return None; 72 | } 73 | } 74 | 75 | (pair.0 == name).then_some(pair) 76 | } 77 | 78 | /// Gets header value by name. Time `O(logn)` 79 | pub fn get_unique(&self, name: impl AsRef) -> Option<&'a str> { 80 | self.get_unique_pair(name.as_ref()).map(|(_, v)| v) 81 | } 82 | 83 | // /// Finds headers by names. Time `O(mlogn)` 84 | // #[must_use] 85 | // pub fn find_multiple(&self, names: &[impl AsRef]) -> Self { 86 | // let mut headers: Vec<(&'a str, &'a str)> = Vec::new(); 87 | // for name in names { 88 | // for pair in self.get_all_pairs(name.as_ref()) { 89 | // headers.push(pair); 90 | // } 91 | // } 92 | // Self { headers } 93 | // } 94 | 95 | /// Finds headers by names. Time `O(mlogn)` 96 | #[must_use] 97 | pub fn find_multiple_with_on_missing( 98 | &self, 99 | names: &'a [impl AsRef], 100 | on_missing: impl Fn(&'a str) -> Option<&'a str>, 101 | ) -> Self { 102 | let mut headers: Vec<(&'a str, &'a str)> = Vec::new(); 103 | for name in names { 104 | let mut has_value = false; 105 | for pair in self.get_all_pairs(name.as_ref()) { 106 | headers.push(pair); 107 | has_value = true; 108 | } 109 | if !has_value { 110 | if let Some(value) = on_missing(name.as_ref()) { 111 | headers.push((name.as_ref(), value)); 112 | } 113 | } 114 | } 115 | Self { headers } 116 | } 117 | } 118 | 119 | impl<'a> AsRef<[(&'a str, &'a str)]> for OrderedHeaders<'a> { 120 | fn as_ref(&self) -> &[(&'a str, &'a str)] { 121 | self.headers.as_ref() 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /crates/s3s/src/http/ordered_qs.rs: -------------------------------------------------------------------------------- 1 | //! Ordered query strings 2 | 3 | use crate::utils::stable_sort_by_first; 4 | 5 | /// Immutable query string container 6 | #[derive(Debug, Default, Clone)] 7 | pub struct OrderedQs { 8 | /// Ascending query strings 9 | qs: Vec<(String, String)>, 10 | } 11 | 12 | /// [`OrderedQs`] 13 | #[derive(Debug, thiserror::Error)] 14 | #[error("ParseOrderedQsError: {inner}")] 15 | pub struct ParseOrderedQsError { 16 | /// url decode error 17 | inner: serde_urlencoded::de::Error, 18 | } 19 | 20 | impl OrderedQs { 21 | /// Constructs [`OrderedQs`] from vec 22 | /// 23 | /// + strings must be url-decoded 24 | #[cfg(test)] 25 | #[must_use] 26 | pub fn from_vec_unchecked(mut v: Vec<(String, String)>) -> Self { 27 | stable_sort_by_first(&mut v); 28 | Self { qs: v } 29 | } 30 | 31 | /// Parses [`OrderedQs`] from query 32 | /// 33 | /// # Errors 34 | /// Returns [`ParseOrderedQsError`] if query cannot be decoded 35 | pub fn parse(query: &str) -> Result { 36 | let result = serde_urlencoded::from_str::>(query); 37 | let mut v = result.map_err(|e| ParseOrderedQsError { inner: e })?; 38 | stable_sort_by_first(&mut v); 39 | Ok(Self { qs: v }) 40 | } 41 | 42 | #[must_use] 43 | pub fn has(&self, name: &str) -> bool { 44 | self.qs.binary_search_by_key(&name, |x| x.0.as_str()).is_ok() 45 | } 46 | 47 | /// Gets query values by name. 
Time `O(logn)` 48 | pub fn get_all(&self, name: &str) -> impl Iterator + use<'_> { 49 | let qs = self.qs.as_slice(); 50 | 51 | let lower_bound = qs.partition_point(|x| x.0.as_str() < name); 52 | let upper_bound = qs.partition_point(|x| x.0.as_str() <= name); 53 | 54 | qs[lower_bound..upper_bound].iter().map(|x| x.1.as_str()) 55 | } 56 | 57 | pub fn get_unique(&self, name: &str) -> Option<&str> { 58 | let qs = self.qs.as_slice(); 59 | let lower_bound = qs.partition_point(|x| x.0.as_str() < name); 60 | 61 | let mut iter = qs[lower_bound..].iter(); 62 | let pair = iter.next()?; 63 | 64 | if let Some(following) = iter.next() { 65 | if following.0 == name { 66 | return None; 67 | } 68 | } 69 | 70 | (pair.0.as_str() == name).then_some(pair.1.as_str()) 71 | } 72 | } 73 | 74 | impl AsRef<[(String, String)]> for OrderedQs { 75 | fn as_ref(&self) -> &[(String, String)] { 76 | self.qs.as_ref() 77 | } 78 | } 79 | 80 | #[cfg(test)] 81 | mod tests { 82 | use super::*; 83 | 84 | #[test] 85 | fn tag() { 86 | { 87 | let query = "tagging"; 88 | let qs = OrderedQs::parse(query).unwrap(); 89 | assert_eq!(qs.as_ref(), &[("tagging".to_owned(), String::new())]); 90 | 91 | assert_eq!(qs.get_unique("taggin"), None); 92 | assert_eq!(qs.get_unique("tagging"), Some("")); 93 | assert_eq!(qs.get_unique("taggingg"), None); 94 | } 95 | 96 | { 97 | let query = "tagging&tagging"; 98 | let qs = OrderedQs::parse(query).unwrap(); 99 | assert_eq!( 100 | qs.as_ref(), 101 | &[("tagging".to_owned(), String::new()), ("tagging".to_owned(), String::new())] 102 | ); 103 | 104 | assert_eq!(qs.get_unique("taggin"), None); 105 | assert_eq!(qs.get_unique("tagging"), None); 106 | assert_eq!(qs.get_unique("taggingg"), None); 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /crates/s3s/src/http/request.rs: -------------------------------------------------------------------------------- 1 | use super::Body; 2 | use super::Multipart; 3 | use super::OrderedQs; 4 | 5 | use crate::HttpRequest; 6 | use crate::auth::Credentials; 7 | use crate::path::S3Path; 8 | use crate::stream::VecByteStream; 9 | 10 | use hyper::HeaderMap; 11 | use hyper::Method; 12 | use hyper::Uri; 13 | use hyper::http::Extensions; 14 | use hyper::http::HeaderValue; 15 | 16 | pub struct Request { 17 | pub version: http::Version, 18 | pub method: Method, 19 | pub uri: Uri, 20 | pub headers: HeaderMap, 21 | pub extensions: Extensions, 22 | pub body: Body, 23 | pub(crate) s3ext: S3Extensions, 24 | } 25 | 26 | #[derive(Default)] 27 | pub(crate) struct S3Extensions { 28 | pub s3_path: Option, 29 | pub qs: Option, 30 | 31 | pub multipart: Option, 32 | pub vec_stream: Option, 33 | 34 | pub credentials: Option, 35 | pub region: Option, 36 | pub service: Option, 37 | } 38 | 39 | impl From for Request { 40 | fn from(req: HttpRequest) -> Self { 41 | let (parts, body) = req.into_parts(); 42 | Self { 43 | version: parts.version, 44 | method: parts.method, 45 | uri: parts.uri, 46 | headers: parts.headers, 47 | extensions: parts.extensions, 48 | body, 49 | s3ext: S3Extensions::default(), 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s/src/http/response.rs: -------------------------------------------------------------------------------- 1 | use crate::HttpResponse; 2 | 3 | use super::Body; 4 | 5 | use hyper::HeaderMap; 6 | use hyper::StatusCode; 7 | use hyper::http::Extensions; 8 | use hyper::http::HeaderValue; 9 | 10 | #[derive(Default)] 11 | pub struct 
Response { 12 | pub status: StatusCode, 13 | pub headers: HeaderMap, 14 | pub body: Body, 15 | pub extensions: Extensions, 16 | } 17 | 18 | impl From for HttpResponse { 19 | fn from(res: Response) -> Self { 20 | let mut ans = HttpResponse::default(); 21 | *ans.status_mut() = res.status; 22 | *ans.headers_mut() = res.headers; 23 | *ans.body_mut() = res.body; 24 | *ans.extensions_mut() = res.extensions; 25 | ans 26 | } 27 | } 28 | 29 | impl Response { 30 | #[must_use] 31 | pub fn with_status(status: StatusCode) -> Self { 32 | Self { 33 | status, 34 | ..Default::default() 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /crates/s3s/src/http/ser.rs: -------------------------------------------------------------------------------- 1 | use super::Body; 2 | use super::Response; 3 | 4 | use crate::StdError; 5 | use crate::dto::SelectObjectContentEventStream; 6 | use crate::dto::{Metadata, StreamingBlob, Timestamp, TimestampFormat}; 7 | use crate::error::{S3Error, S3Result}; 8 | use crate::http::KeepAliveBody; 9 | use crate::http::{HeaderName, HeaderValue}; 10 | use crate::utils::format::fmt_timestamp; 11 | use crate::xml; 12 | 13 | use std::convert::Infallible; 14 | use std::fmt::Write as _; 15 | 16 | use hyper::header::{IntoHeaderName, InvalidHeaderValue}; 17 | 18 | // pub fn add_header(res: &mut Response, name: N, value: V) -> S3Result 19 | // where 20 | // N: IntoHeaderName, 21 | // V: TryIntoHeaderValue, 22 | // V::Error: std::error::Error + Send + Sync + 'static, 23 | // { 24 | // let val = value.try_into_header_value().map_err(S3Error::internal_error)?; 25 | // res.headers.insert(name, val); 26 | // Ok(()) 27 | // } 28 | 29 | pub fn add_opt_header(res: &mut Response, name: N, value: Option) -> S3Result 30 | where 31 | N: IntoHeaderName, 32 | V: TryIntoHeaderValue, 33 | V::Error: std::error::Error + Send + Sync + 'static, 34 | { 35 | if let Some(value) = value { 36 | let val = value.try_into_header_value().map_err(S3Error::internal_error)?; 37 | res.headers.insert(name, val); 38 | } 39 | Ok(()) 40 | } 41 | 42 | pub fn add_opt_header_timestamp(res: &mut Response, name: N, value: Option, fmt: TimestampFormat) -> S3Result 43 | where 44 | N: IntoHeaderName, 45 | { 46 | if let Some(value) = value { 47 | let val = fmt_timestamp(&value, fmt, HeaderValue::from_bytes).map_err(S3Error::internal_error)?; 48 | res.headers.insert(name, val); 49 | } 50 | Ok(()) 51 | } 52 | 53 | pub trait TryIntoHeaderValue { 54 | type Error; 55 | fn try_into_header_value(self) -> Result; 56 | } 57 | 58 | impl TryIntoHeaderValue for bool { 59 | type Error = Infallible; 60 | 61 | #[allow(clippy::declare_interior_mutable_const)] 62 | fn try_into_header_value(self) -> Result { 63 | const TRUE: HeaderValue = HeaderValue::from_static("true"); 64 | const FALSE: HeaderValue = HeaderValue::from_static("false"); 65 | Ok(if self { TRUE } else { FALSE }) 66 | } 67 | } 68 | 69 | impl TryIntoHeaderValue for i32 { 70 | type Error = Infallible; 71 | 72 | fn try_into_header_value(self) -> Result { 73 | Ok(HeaderValue::from(self)) 74 | } 75 | } 76 | 77 | impl TryIntoHeaderValue for i64 { 78 | type Error = Infallible; 79 | 80 | fn try_into_header_value(self) -> Result { 81 | Ok(HeaderValue::from(self)) 82 | } 83 | } 84 | 85 | impl TryIntoHeaderValue for String { 86 | type Error = InvalidHeaderValue; 87 | 88 | fn try_into_header_value(self) -> Result { 89 | HeaderValue::try_from(self) 90 | } 91 | } 92 | 93 | /// See 94 | #[allow(clippy::declare_interior_mutable_const)] 95 | const 
APPLICATION_XML: HeaderValue = HeaderValue::from_static("application/xml"); 96 | 97 | pub fn set_xml_body(res: &mut Response, val: &T) -> S3Result { 98 | let mut buf = Vec::with_capacity(256); 99 | { 100 | let mut ser = xml::Serializer::new(&mut buf); 101 | ser.decl() 102 | .and_then(|()| val.serialize(&mut ser)) 103 | .map_err(S3Error::internal_error)?; 104 | } 105 | res.body = Body::from(buf); 106 | res.headers.insert(hyper::header::CONTENT_TYPE, APPLICATION_XML); 107 | Ok(()) 108 | } 109 | 110 | #[allow(clippy::declare_interior_mutable_const)] 111 | const TRANSFER_ENCODING_CHUNKED: HeaderValue = HeaderValue::from_static("chunked"); 112 | 113 | pub fn set_keep_alive_xml_body( 114 | res: &mut Response, 115 | fut: impl std::future::Future> + Send + Sync + 'static, 116 | duration: std::time::Duration, 117 | ) -> S3Result { 118 | let mut buf = Vec::with_capacity(40); 119 | let mut ser = xml::Serializer::new(&mut buf); 120 | ser.decl().map_err(S3Error::internal_error)?; 121 | 122 | res.body = Body::http_body(KeepAliveBody::new(fut, duration, Some(buf.into()))); 123 | res.headers.insert(hyper::header::CONTENT_TYPE, APPLICATION_XML); 124 | res.headers 125 | .insert(hyper::header::TRANSFER_ENCODING, TRANSFER_ENCODING_CHUNKED); 126 | Ok(()) 127 | } 128 | 129 | pub fn set_xml_body_no_decl(res: &mut Response, val: &T) -> S3Result { 130 | let mut buf = Vec::with_capacity(256); 131 | let mut ser = xml::Serializer::new(&mut buf); 132 | val.serialize(&mut ser).map_err(S3Error::internal_error)?; 133 | res.body = Body::from(buf); 134 | res.headers.insert(hyper::header::CONTENT_TYPE, APPLICATION_XML); 135 | Ok(()) 136 | } 137 | 138 | pub fn set_stream_body(res: &mut Response, stream: StreamingBlob) { 139 | res.body = Body::from(stream); 140 | } 141 | 142 | pub fn set_event_stream_body(res: &mut Response, stream: SelectObjectContentEventStream) { 143 | res.body = Body::from(stream.into_byte_stream()); 144 | res.headers 145 | .insert(hyper::header::TRANSFER_ENCODING, HeaderValue::from_static("chunked")); 146 | } 147 | 148 | pub fn add_opt_metadata(res: &mut Response, metadata: Option) -> S3Result { 149 | if let Some(map) = metadata { 150 | let mut buf = String::new(); 151 | for (key, val) in map { 152 | write!(&mut buf, "x-amz-meta-{key}").unwrap(); 153 | let name = HeaderName::from_bytes(buf.as_bytes()).map_err(S3Error::internal_error)?; 154 | let value = HeaderValue::try_from(val).map_err(S3Error::internal_error)?; 155 | res.headers.insert(name, value); 156 | buf.clear(); 157 | } 158 | } 159 | Ok(()) 160 | } 161 | -------------------------------------------------------------------------------- /crates/s3s/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, feature(doc_cfg))] 2 | #![allow( 3 | clippy::bool_assert_comparison, // I don't like `assert!(!expression)`. It's very misleading. 
4 | clippy::multiple_crate_versions, // Sometimes not fixable 5 | clippy::module_name_repetitions, 6 | clippy::single_match_else, 7 | clippy::wildcard_imports, 8 | clippy::let_underscore_untyped, 9 | clippy::inline_always, 10 | clippy::needless_continue, 11 | )] 12 | 13 | #[macro_use] 14 | mod utils; 15 | 16 | #[macro_use] 17 | mod error; 18 | 19 | mod http; 20 | mod ops; 21 | mod protocol; 22 | mod s3_op; 23 | mod s3_trait; 24 | mod sig_v2; 25 | mod sig_v4; 26 | 27 | pub mod access; 28 | pub mod auth; 29 | pub mod checksum; 30 | pub mod crypto; 31 | pub mod dto; 32 | pub mod header; 33 | pub mod host; 34 | pub mod path; 35 | pub mod route; 36 | pub mod service; 37 | pub mod stream; 38 | pub mod xml; 39 | 40 | pub use self::error::*; 41 | pub use self::http::Body; 42 | pub use self::s3_op::S3Operation; 43 | pub use self::s3_trait::S3; 44 | 45 | pub use self::protocol::HttpError; 46 | pub use self::protocol::HttpRequest; 47 | pub use self::protocol::HttpResponse; 48 | pub use self::protocol::S3Request; 49 | pub use self::protocol::S3Response; 50 | -------------------------------------------------------------------------------- /crates/s3s/src/ops/get_object.rs: -------------------------------------------------------------------------------- 1 | use crate::S3Request; 2 | use crate::S3Result; 3 | use crate::dto::GetObjectInput; 4 | use crate::dto::Timestamp; 5 | use crate::dto::TimestampFormat; 6 | use crate::header; 7 | use crate::http::Response; 8 | use crate::utils::format::fmt_timestamp; 9 | 10 | use hyper::HeaderMap; 11 | use hyper::header::CONTENT_LENGTH; 12 | use hyper::header::TRANSFER_ENCODING; 13 | use hyper::http::HeaderName; 14 | use hyper::http::HeaderValue; 15 | 16 | use stdx::default::default; 17 | 18 | pub fn extract_overridden_response_headers(req: &S3Request) -> S3Result> { 19 | let mut map: HeaderMap = default(); 20 | 21 | add(&mut map, header::CONTENT_TYPE, req.input.response_content_type.as_deref())?; 22 | add(&mut map, header::CONTENT_LANGUAGE, req.input.response_content_language.as_deref())?; 23 | add_ts(&mut map, header::EXPIRES, req.input.response_expires.as_ref())?; 24 | add(&mut map, header::CACHE_CONTROL, req.input.response_cache_control.as_deref())?; 25 | add(&mut map, header::CONTENT_DISPOSITION, req.input.response_content_disposition.as_deref())?; 26 | add(&mut map, header::CONTENT_ENCODING, req.input.response_content_encoding.as_deref())?; 27 | 28 | Ok(map) 29 | } 30 | 31 | fn add(map: &mut HeaderMap, name: HeaderName, value: Option<&str>) -> S3Result<()> { 32 | let error = |e| invalid_request!(e, "invalid overridden header: {name}: {value:?}"); 33 | if let Some(value) = value { 34 | let value = value.parse().map_err(error)?; 35 | map.insert(name, value); 36 | } 37 | Ok(()) 38 | } 39 | 40 | fn add_ts(map: &mut HeaderMap, name: HeaderName, value: Option<&Timestamp>) -> S3Result<()> { 41 | let error = |e| invalid_request!(e, "invalid overridden header: {name}: {value:?}"); 42 | if let Some(value) = value { 43 | let value = fmt_timestamp(value, TimestampFormat::HttpDate, HeaderValue::from_bytes).map_err(error)?; 44 | map.insert(name, value); 45 | } 46 | Ok(()) 47 | } 48 | 49 | pub fn merge_custom_headers(resp: &mut Response, headers: HeaderMap) { 50 | resp.headers.extend(headers); 51 | 52 | // special case for https://github.com/Nugine/s3s/issues/80 53 | if let Some(val) = resp.headers.get(TRANSFER_ENCODING) { 54 | if val.as_bytes() == b"chunked" { 55 | resp.headers.remove(CONTENT_LENGTH); 56 | } 57 | } 58 | } 59 | 
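The two helpers in `ops/get_object.rs` above are meant to be used together when serving GetObject: `extract_overridden_response_headers` collects the `response-*` overrides from the request input (content type, content language, expires, cache control, content disposition, content encoding) into a `HeaderMap`, and `merge_custom_headers` applies that map on top of the response, dropping `Content-Length` whenever the transfer encoding is chunked. The following is a minimal illustrative sketch of a call site; it is not part of the crate, the function name `finish_get_object` is hypothetical, and it assumes the reconstructed signatures `&S3Request<GetObjectInput> -> S3Result<HeaderMap>` and `(&mut Response, HeaderMap)` shown above.

// Hypothetical glue code, written as if it lived next to the helpers in the same module.
fn finish_get_object(req: &S3Request<GetObjectInput>, mut resp: Response) -> S3Result<Response> {
    // Collect response-content-type, response-cache-control, response-expires, ...
    let overridden = extract_overridden_response_headers(req)?;
    // Apply them last so they win over the headers already implied by the output.
    merge_custom_headers(&mut resp, overridden);
    Ok(resp)
}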
-------------------------------------------------------------------------------- /crates/s3s/src/ops/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | // use crate::service::S3Service; 4 | 5 | // use stdx::mem::output_size; 6 | 7 | // #[test] 8 | // #[ignore] 9 | // fn track_future_size() { 10 | // macro_rules! future_size { 11 | // ($f:path, $v:expr) => { 12 | // (stringify!($f), output_size(&$f), $v) 13 | // }; 14 | // } 15 | 16 | // #[rustfmt::skip] 17 | // let sizes = [ 18 | // future_size!(S3Service::call, 2704), 19 | // future_size!(call, 1512), 20 | // future_size!(prepare, 1440), 21 | // future_size!(SignatureContext::check, 776), 22 | // future_size!(SignatureContext::v2_check, 296), 23 | // future_size!(SignatureContext::v2_check_presigned_url, 168), 24 | // future_size!(SignatureContext::v2_check_header_auth, 184), 25 | // future_size!(SignatureContext::v4_check, 752), 26 | // future_size!(SignatureContext::v4_check_post_signature, 368), 27 | // future_size!(SignatureContext::v4_check_presigned_url, 456), 28 | // future_size!(SignatureContext::v4_check_header_auth, 640), 29 | // ]; 30 | 31 | // println!("{sizes:#?}"); 32 | // for (name, size, expected) in sizes { 33 | // assert_eq!(size, expected, "{name:?} size changed: prev {expected}, now {size}"); 34 | // } 35 | // } 36 | 37 | #[test] 38 | fn error_custom_headers() { 39 | fn redirect307(location: &str) -> S3Error { 40 | let mut err = S3Error::new(S3ErrorCode::TemporaryRedirect); 41 | 42 | err.set_headers({ 43 | let mut headers = HeaderMap::new(); 44 | headers.insert(crate::header::LOCATION, location.parse().unwrap()); 45 | headers 46 | }); 47 | 48 | err 49 | } 50 | 51 | let res = serialize_error(redirect307("http://example.com"), false).unwrap(); 52 | assert_eq!(res.status, StatusCode::TEMPORARY_REDIRECT); 53 | assert_eq!(res.headers.get("location").unwrap(), "http://example.com"); 54 | 55 | let body = res.body.bytes().unwrap(); 56 | let body = std::str::from_utf8(&body).unwrap(); 57 | assert_eq!( 58 | body, 59 | concat!( 60 | "", 61 | "TemporaryRedirect" 62 | ) 63 | ); 64 | } 65 | -------------------------------------------------------------------------------- /crates/s3s/src/protocol.rs: -------------------------------------------------------------------------------- 1 | use crate::Body; 2 | use crate::StdError; 3 | use crate::auth::Credentials; 4 | 5 | use http::Extensions; 6 | use http::HeaderMap; 7 | use http::Method; 8 | use http::StatusCode; 9 | use http::Uri; 10 | 11 | use stdx::default::default; 12 | 13 | pub type HttpRequest = http::Request; 14 | pub type HttpResponse = http::Response; 15 | 16 | /// An error that indicates a failure of an HTTP request. 17 | /// Passing this error to `hyper` will cause it to abort the connection. 18 | #[derive(Debug)] 19 | pub struct HttpError(StdError); 20 | 21 | impl HttpError { 22 | #[must_use] 23 | pub fn new(err: StdError) -> Self { 24 | Self(err) 25 | } 26 | } 27 | 28 | impl From for StdError { 29 | fn from(val: HttpError) -> Self { 30 | val.0 31 | } 32 | } 33 | 34 | /// S3 request 35 | #[derive(Debug, Clone)] 36 | pub struct S3Request { 37 | /// S3 operation input 38 | pub input: T, 39 | 40 | /// HTTP method 41 | pub method: Method, 42 | 43 | /// HTTP URI 44 | pub uri: Uri, 45 | 46 | /// HTTP headers 47 | pub headers: HeaderMap, 48 | 49 | /// Request extensions. 50 | /// This field is used to pass custom data between middlewares. 51 | pub extensions: Extensions, 52 | 53 | /// S3 identity information. 
54 | /// `None` means anonymous request. 55 | pub credentials: Option, 56 | 57 | /// S3 requested region. 58 | pub region: Option, 59 | 60 | /// S3 requested service. 61 | pub service: Option, 62 | } 63 | 64 | impl S3Request { 65 | /// Map the input of the request to a new type. 66 | pub fn map_input(self, f: impl FnOnce(T) -> U) -> S3Request { 67 | S3Request { 68 | input: f(self.input), 69 | method: self.method, 70 | uri: self.uri, 71 | headers: self.headers, 72 | extensions: self.extensions, 73 | credentials: self.credentials, 74 | region: self.region, 75 | service: self.service, 76 | } 77 | } 78 | } 79 | 80 | /// S3 response 81 | #[derive(Debug, Clone)] 82 | pub struct S3Response { 83 | /// S3 operation output 84 | pub output: T, 85 | 86 | /// HTTP status code. 87 | /// This field overrides the status code implied by the output. 88 | pub status: Option, 89 | 90 | /// HTTP headers. 91 | /// This field overrides the headers implied by the output. 92 | pub headers: HeaderMap, 93 | 94 | /// Response extensions. 95 | /// This is used to pass custom data between middlewares. 96 | pub extensions: Extensions, 97 | } 98 | 99 | impl S3Response { 100 | /// Create a new S3 response with the given output. 101 | pub fn new(output: T) -> Self { 102 | Self { 103 | output, 104 | status: default(), 105 | headers: default(), 106 | extensions: default(), 107 | } 108 | } 109 | 110 | /// Create a new S3 response with the given output and status code. 111 | pub fn with_status(output: T, status: StatusCode) -> Self { 112 | Self { 113 | output, 114 | status: Some(status), 115 | headers: default(), 116 | extensions: default(), 117 | } 118 | } 119 | 120 | /// Create a new S3 response with the given output and headers. 121 | pub fn with_headers(output: T, headers: HeaderMap) -> Self { 122 | Self { 123 | output, 124 | status: default(), 125 | headers, 126 | extensions: default(), 127 | } 128 | } 129 | 130 | /// Map the output of the response to a new type. 
131 | pub fn map_output(self, f: impl FnOnce(T) -> U) -> S3Response { 132 | S3Response { 133 | output: f(self.output), 134 | status: self.status, 135 | headers: self.headers, 136 | extensions: self.extensions, 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /crates/s3s/src/route.rs: -------------------------------------------------------------------------------- 1 | use crate::Body; 2 | use crate::S3Request; 3 | use crate::S3Response; 4 | use crate::S3Result; 5 | 6 | use hyper::HeaderMap; 7 | use hyper::Method; 8 | use hyper::Uri; 9 | use hyper::http::Extensions; 10 | 11 | #[async_trait::async_trait] 12 | pub trait S3Route: Send + Sync + 'static { 13 | fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, extensions: &mut Extensions) -> bool; 14 | 15 | async fn check_access(&self, req: &mut S3Request) -> S3Result<()> { 16 | match req.credentials { 17 | Some(_) => Ok(()), 18 | None => Err(s3_error!(AccessDenied, "Signature is required")), 19 | } 20 | } 21 | 22 | async fn call(&self, req: S3Request) -> S3Result>; 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | use super::*; 28 | 29 | use crate::header; 30 | 31 | #[allow(dead_code)] 32 | pub struct AssumeRole {} 33 | 34 | #[async_trait::async_trait] 35 | impl S3Route for AssumeRole { 36 | fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool { 37 | if method == Method::POST && uri.path() == "/" { 38 | if let Some(val) = headers.get(header::CONTENT_TYPE) { 39 | if val.as_bytes() == b"application/x-www-form-urlencoded" { 40 | return true; 41 | } 42 | } 43 | } 44 | false 45 | } 46 | 47 | async fn call(&self, _: S3Request) -> S3Result> { 48 | tracing::debug!("call AssumeRole"); 49 | return Err(s3_error!(NotImplemented)); 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s/src/s3_op.rs: -------------------------------------------------------------------------------- 1 | pub struct S3Operation { 2 | pub(crate) name: &'static str, 3 | } 4 | 5 | impl S3Operation { 6 | /// Returns the name of the operation. 7 | /// 8 | /// # Example 9 | /// ``` 10 | /// use s3s::S3Operation; 11 | /// fn is_basic_list_op(op: &S3Operation) -> bool { 12 | /// matches!(op.name(), "ListBuckets" | "ListObjects" | "ListObjectsV2") 13 | /// } 14 | /// ``` 15 | #[must_use] 16 | pub fn name(&self) -> &str { 17 | self.name 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/authorization_v2.rs: -------------------------------------------------------------------------------- 1 | //! Authorization V2 2 | //! 3 | //! 4 | //! 
5 | 6 | pub struct AuthorizationV2<'a> { 7 | pub access_key: &'a str, 8 | pub signature: &'a str, 9 | } 10 | 11 | /// [`AuthorizationV2`] 12 | #[derive(Debug, thiserror::Error)] 13 | #[error("ParseAuthorizationError")] 14 | pub struct ParseAuthorizationV2Error { 15 | /// priv place holder 16 | _priv: (), 17 | } 18 | 19 | impl<'a> AuthorizationV2<'a> { 20 | pub fn parse(mut input: &'a str) -> Result { 21 | let err = || ParseAuthorizationV2Error { _priv: () }; 22 | 23 | input = input.strip_prefix("AWS ").ok_or_else(err)?; 24 | 25 | let (access_key, signature) = input.split_once(':').ok_or_else(err)?; 26 | 27 | Ok(Self { access_key, signature }) 28 | } 29 | } 30 | 31 | #[cfg(test)] 32 | mod tests { 33 | use super::*; 34 | 35 | #[test] 36 | fn example() { 37 | let input = "AWS AKIAIOSFODNN7EXAMPLE:qgk2+6Sv9/oM7G3qLEjTH1a1l1g="; 38 | let ans = AuthorizationV2::parse(input).unwrap(); 39 | assert_eq!(ans.access_key, "AKIAIOSFODNN7EXAMPLE"); 40 | assert_eq!(ans.signature, "qgk2+6Sv9/oM7G3qLEjTH1a1l1g="); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/mod.rs: -------------------------------------------------------------------------------- 1 | //! AWS Signature Version 2 2 | //! 3 | //! 4 | //! 5 | 6 | mod authorization_v2; 7 | pub use self::authorization_v2::*; 8 | 9 | mod presigned_url_v2; 10 | pub use self::presigned_url_v2::*; 11 | 12 | mod methods; 13 | pub use self::methods::*; 14 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/presigned_url_v2.rs: -------------------------------------------------------------------------------- 1 | use crate::http::OrderedQs; 2 | 3 | use std::borrow::Cow; 4 | 5 | use time::OffsetDateTime; 6 | 7 | pub struct PresignedUrlV2<'a> { 8 | pub access_key: &'a str, 9 | pub expires_time: OffsetDateTime, 10 | pub signature: Cow<'a, str>, 11 | } 12 | 13 | /// [`PresignedUrlV2`] 14 | #[derive(Debug, thiserror::Error)] 15 | #[error("ParsePresignedUrlError")] 16 | pub struct ParsePresignedUrlError { 17 | /// priv place holder 18 | _priv: (), 19 | } 20 | 21 | impl<'a> PresignedUrlV2<'a> { 22 | pub fn parse(qs: &'a OrderedQs) -> Result { 23 | let err = || ParsePresignedUrlError { _priv: () }; 24 | 25 | let access_key = qs.get_unique("AWSAccessKeyId").ok_or_else(err)?; 26 | let expires_str = qs.get_unique("Expires").ok_or_else(err)?; 27 | let signature = qs.get_unique("Signature").ok_or_else(err)?; 28 | 29 | let expires_time = parse_unix_timestamp(expires_str).ok_or_else(err)?; 30 | let signature = urlencoding::decode(signature).map_err(|_| err())?; 31 | 32 | Ok(Self { 33 | access_key, 34 | expires_time, 35 | signature, 36 | }) 37 | } 38 | } 39 | 40 | fn parse_unix_timestamp(s: &str) -> Option { 41 | let ts = s.parse::().ok().filter(|&x| x >= 0)?; 42 | OffsetDateTime::from_unix_timestamp(ts).ok() 43 | } 44 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/amz_content_sha256.rs: -------------------------------------------------------------------------------- 1 | //! 
x-amz-content-sha256 2 | 3 | use crate::utils::crypto::is_sha256_checksum; 4 | 5 | /// x-amz-content-sha256 6 | /// 7 | /// See [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) 8 | #[derive(Debug)] 9 | pub enum AmzContentSha256<'a> { 10 | /// `STREAMING-AWS4-HMAC-SHA256-PAYLOAD` 11 | MultipleChunks, 12 | /// single chunk 13 | SingleChunk { 14 | /// the checksum of single chunk payload 15 | #[allow(dead_code)] // TODO: check this field when calculating the payload checksum 16 | payload_checksum: &'a str, 17 | }, 18 | /// `UNSIGNED-PAYLOAD` 19 | UnsignedPayload, 20 | } 21 | 22 | /// [`AmzContentSha256`] 23 | #[derive(Debug, Clone, Copy, thiserror::Error)] 24 | pub enum ParseAmzContentSha256Error { 25 | /// invalid checksum 26 | #[error("ParseAmzContentSha256Error: InvalidChecksum")] 27 | InvalidChecksum, 28 | } 29 | 30 | impl<'a> AmzContentSha256<'a> { 31 | /// Parses `AmzContentSha256` from `x-amz-content-sha256` header 32 | /// 33 | /// # Errors 34 | /// Returns an `Err` if the header is invalid 35 | pub fn parse(header: &'a str) -> Result { 36 | match header { 37 | "UNSIGNED-PAYLOAD" => Ok(Self::UnsignedPayload), 38 | "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => Ok(Self::MultipleChunks), 39 | payload_checksum => { 40 | if !is_sha256_checksum(payload_checksum) { 41 | return Err(ParseAmzContentSha256Error::InvalidChecksum); 42 | } 43 | Ok(Self::SingleChunk { payload_checksum }) 44 | } 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/amz_date.rs: -------------------------------------------------------------------------------- 1 | //! x-amz-date 2 | 3 | use std::fmt::Write as _; 4 | 5 | use arrayvec::ArrayString; 6 | 7 | /// x-amz-date 8 | #[derive(Debug, Clone)] 9 | pub struct AmzDate { 10 | /// year 11 | year: u16, 12 | /// month 13 | month: u8, 14 | /// day 15 | day: u8, 16 | /// hour 17 | hour: u8, 18 | /// minute 19 | minute: u8, 20 | /// second 21 | second: u8, 22 | } 23 | 24 | /// [`AmzDate`] 25 | #[derive(Debug, thiserror::Error)] 26 | #[error("ParseAmzDateError")] 27 | pub struct ParseAmzDateError(()); 28 | 29 | impl AmzDate { 30 | /// Parses `AmzDate` from header 31 | /// # Errors 32 | /// Returns an error if the header is invalid 33 | pub fn parse(header: &str) -> Result { 34 | self::parser::parse(header).map_err(|_| ParseAmzDateError(())) 35 | } 36 | 37 | /// `{YYYY}{MM}{DD}T{HH}{MM}{SS}Z` 38 | #[must_use] 39 | pub fn fmt_iso8601(&self) -> ArrayString<16> { 40 | let mut buf = >::new(); 41 | let (y, m, d, hh, mm, ss) = (self.year, self.month, self.day, self.hour, self.minute, self.second); 42 | write!(&mut buf, "{y:04}{m:02}{d:02}T{hh:02}{mm:02}{ss:02}Z").unwrap(); 43 | buf 44 | } 45 | 46 | /// `{YYYY}{MM}{DD}` 47 | #[must_use] 48 | pub fn fmt_date(&self) -> ArrayString<8> { 49 | let mut buf = >::new(); 50 | write!(&mut buf, "{:04}{:02}{:02}", self.year, self.month, self.day).unwrap(); 51 | buf 52 | } 53 | 54 | pub fn to_time(&self) -> Option { 55 | let y = i32::from(self.year); 56 | let m: time::Month = self.month.try_into().ok()?; 57 | let d = self.day; 58 | 59 | let t = time::Date::from_calendar_date(y, m, d).ok()?; 60 | let t = t.with_hms(self.hour, self.minute, self.second).ok()?; 61 | Some(t.assume_utc()) 62 | } 63 | } 64 | 65 | mod parser { 66 | use super::*; 67 | 68 | use crate::utils::parser::{Error, digit2, digit4}; 69 | 70 | macro_rules! 
/crates/s3s/src/sig_v4/amz_date.rs: -------------------------------------------------------------------------------- 1 | //! x-amz-date 2 | 3 | use std::fmt::Write as _; 4 | 5 | use arrayvec::ArrayString; 6 | 7 | /// x-amz-date 8 | #[derive(Debug, Clone)] 9 | pub struct AmzDate { 10 | /// year 11 | year: u16, 12 | /// month 13 | month: u8, 14 | /// day 15 | day: u8, 16 | /// hour 17 | hour: u8, 18 | /// minute 19 | minute: u8, 20 | /// second 21 | second: u8, 22 | } 23 | 24 | /// [`AmzDate`] 25 | #[derive(Debug, thiserror::Error)] 26 | #[error("ParseAmzDateError")] 27 | pub struct ParseAmzDateError(()); 28 | 29 | impl AmzDate { 30 | /// Parses `AmzDate` from header 31 | /// # Errors 32 | /// Returns an error if the header is invalid 33 | pub fn parse(header: &str) -> Result<Self, ParseAmzDateError> { 34 | self::parser::parse(header).map_err(|_| ParseAmzDateError(())) 35 | } 36 | 37 | /// `{YYYY}{MM}{DD}T{HH}{MM}{SS}Z` 38 | #[must_use] 39 | pub fn fmt_iso8601(&self) -> ArrayString<16> { 40 | let mut buf = <ArrayString<16>>::new(); 41 | let (y, m, d, hh, mm, ss) = (self.year, self.month, self.day, self.hour, self.minute, self.second); 42 | write!(&mut buf, "{y:04}{m:02}{d:02}T{hh:02}{mm:02}{ss:02}Z").unwrap(); 43 | buf 44 | } 45 | 46 | /// `{YYYY}{MM}{DD}` 47 | #[must_use] 48 | pub fn fmt_date(&self) -> ArrayString<8> { 49 | let mut buf = <ArrayString<8>>::new(); 50 | write!(&mut buf, "{:04}{:02}{:02}", self.year, self.month, self.day).unwrap(); 51 | buf 52 | } 53 | 54 | pub fn to_time(&self) -> Option<time::OffsetDateTime> { 55 | let y = i32::from(self.year); 56 | let m: time::Month = self.month.try_into().ok()?; 57 | let d = self.day; 58 | 59 | let t = time::Date::from_calendar_date(y, m, d).ok()?; 60 | let t = t.with_hms(self.hour, self.minute, self.second).ok()?; 61 | Some(t.assume_utc()) 62 | } 63 | } 64 | 65 | mod parser { 66 | use super::*; 67 | 68 | use crate::utils::parser::{Error, digit2, digit4}; 69 | 70 | macro_rules! ensure { 71 | ($cond:expr) => { 72 | if !$cond { 73 | return Err(Error); 74 | } 75 | }; 76 | } 77 | 78 | pub fn parse(input: &str) -> Result<AmzDate, Error> { 79 | let x = input.as_bytes(); 80 | ensure!(x.len() == 16); 81 | 82 | let year = digit4([x[0], x[1], x[2], x[3]])?; 83 | let month = digit2([x[4], x[5]])?; 84 | let day = digit2([x[6], x[7]])?; 85 | ensure!(x[8] == b'T'); 86 | 87 | let hour = digit2([x[9], x[10]])?; 88 | let minute = digit2([x[11], x[12]])?; 89 | let second = digit2([x[13], x[14]])?; 90 | ensure!(x[15] == b'Z'); 91 | 92 | Ok(AmzDate { 93 | year, 94 | month, 95 | day, 96 | hour, 97 | minute, 98 | second, 99 | }) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/mod.rs: -------------------------------------------------------------------------------- 1 | //! AWS Signature Version 4 2 | //! 3 | //! See 4 | //! 5 | //! See 6 | //! 7 | 8 | mod presigned_url_v4; 9 | pub use self::presigned_url_v4::*; 10 | 11 | mod authorization_v4; 12 | pub use self::authorization_v4::*; 13 | 14 | mod amz_content_sha256; 15 | pub use self::amz_content_sha256::*; 16 | 17 | mod amz_date; 18 | pub use self::amz_date::*; 19 | 20 | mod post_signature; 21 | pub use self::post_signature::*; 22 | 23 | mod methods; 24 | pub use self::methods::*; 25 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/post_signature.rs: -------------------------------------------------------------------------------- 1 | use crate::http::Multipart; 2 | 3 | pub struct PostSignatureInfo<'a> { 4 | pub policy: &'a str, 5 | pub x_amz_algorithm: &'a str, 6 | pub x_amz_credential: &'a str, 7 | pub x_amz_date: &'a str, 8 | pub x_amz_signature: &'a str, 9 | } 10 | 11 | impl<'a> PostSignatureInfo<'a> { 12 | pub fn extract(m: &'a Multipart) -> Option<Self> { 13 | let policy = m.find_field_value("policy")?; 14 | let x_amz_algorithm = m.find_field_value("x-amz-algorithm")?; 15 | let x_amz_credential = m.find_field_value("x-amz-credential")?; 16 | let x_amz_date = m.find_field_value("x-amz-date")?; 17 | let x_amz_signature = m.find_field_value("x-amz-signature")?; 18 | Some(Self { 19 | policy, 20 | x_amz_algorithm, 21 | x_amz_credential, 22 | x_amz_date, 23 | x_amz_signature, 24 | }) 25 | } 26 | } 27 | --------------------------------------------------------------------------------
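A round-trip sketch for `AmzDate` (illustrative only, not part of the crate; `demo` is a made-up test-style function):

    use crate::sig_v4::AmzDate;

    fn demo() {
        // "20130524T000000Z" follows the fixed 16-byte `{YYYY}{MM}{DD}T{HH}{MM}{SS}Z` layout.
        let date = AmzDate::parse("20130524T000000Z").unwrap();
        assert_eq!(date.fmt_iso8601().as_str(), "20130524T000000Z");
        assert_eq!(date.fmt_date().as_str(), "20130524");
        assert!(date.to_time().is_some());
    }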
/crates/s3s/src/sig_v4/presigned_url_v4.rs: -------------------------------------------------------------------------------- 1 | //! presigned url information 2 | 3 | use super::AmzDate; 4 | use super::CredentialV4; 5 | 6 | use crate::http::OrderedQs; 7 | use crate::utils::crypto::is_sha256_checksum; 8 | 9 | use smallvec::SmallVec; 10 | 11 | /// Presigned url information 12 | #[derive(Debug)] 13 | pub struct PresignedUrlV4<'a> { 14 | /// algorithm 15 | pub algorithm: &'a str, 16 | /// credential 17 | pub credential: CredentialV4<'a>, 18 | /// amz date 19 | pub amz_date: AmzDate, 20 | /// expires 21 | pub expires: time::Duration, 22 | /// signed headers 23 | pub signed_headers: SmallVec<[&'a str; 16]>, 24 | /// signature 25 | pub signature: &'a str, 26 | } 27 | 28 | /// [`PresignedUrlV4`] 29 | #[derive(Debug, thiserror::Error)] 30 | #[error("ParsePresignedUrlError")] 31 | pub struct ParsePresignedUrlError { 32 | /// priv place holder 33 | _priv: (), 34 | } 35 | 36 | /// query strings of a presigned url 37 | struct PresignedQs<'a> { 38 | /// X-Amz-Algorithm 39 | algorithm: &'a str, 40 | /// X-Amz-Credential 41 | credential: &'a str, 42 | /// X-Amz-Date 43 | date: &'a str, 44 | /// X-Amz-Expires 45 | expires: &'a str, 46 | /// X-Amz-SignedHeaders 47 | signed_headers: &'a str, 48 | /// X-Amz-Signature 49 | signature: &'a str, 50 | } 51 | 52 | impl<'a> PresignedQs<'a> { 53 | /// Creates `PresignedQs` from `OrderedQs` 54 | fn from_ordered_qs(qs: &'a OrderedQs) -> Option<Self> { 55 | Some(PresignedQs { 56 | algorithm: qs.get_unique("X-Amz-Algorithm")?, 57 | credential: qs.get_unique("X-Amz-Credential")?, 58 | date: qs.get_unique("X-Amz-Date")?, 59 | expires: qs.get_unique("X-Amz-Expires")?, 60 | signed_headers: qs.get_unique("X-Amz-SignedHeaders")?, 61 | signature: qs.get_unique("X-Amz-Signature")?, 62 | }) 63 | } 64 | } 65 | 66 | impl<'a> PresignedUrlV4<'a> { 67 | /// Parses `PresignedUrl` from query 68 | /// 69 | /// # Errors 70 | /// Returns `ParsePresignedUrlError` if it failed to parse 71 | pub fn parse(qs: &'a OrderedQs) -> Result<Self, ParsePresignedUrlError> { 72 | let err = || ParsePresignedUrlError { _priv: () }; 73 | 74 | let info = PresignedQs::from_ordered_qs(qs).ok_or_else(err)?; 75 | 76 | let algorithm = info.algorithm; 77 | 78 | let credential = CredentialV4::parse(info.credential).map_err(|_e| err())?; 79 | 80 | let amz_date = AmzDate::parse(info.date).map_err(|_e| err())?; 81 | 82 | let expires = parse_expires(info.expires).ok_or_else(err)?; 83 | 84 | if !info.signed_headers.is_ascii() { 85 | return Err(err()); 86 | } 87 | let signed_headers = info.signed_headers.split(';').collect(); 88 | 89 | if !is_sha256_checksum(info.signature) { 90 | return Err(err()); 91 | } 92 | let signature = info.signature; 93 | 94 | Ok(Self { 95 | algorithm, 96 | credential, 97 | amz_date, 98 | expires, 99 | signed_headers, 100 | signature, 101 | }) 102 | } 103 | } 104 | 105 | fn parse_expires(s: &str) -> Option<time::Duration> { 106 | let x = s.parse::<u32>().ok().filter(|&x| x > 0)?; 107 | Some(time::Duration::new(i64::from(x), 0)) 108 | } 109 | -------------------------------------------------------------------------------- /crates/s3s/src/stream.rs: -------------------------------------------------------------------------------- 1 | use crate::error::StdError; 2 | 3 | use std::collections::VecDeque; 4 | use std::fmt; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | 8 | use bytes::Bytes; 9 | use futures::{Stream, StreamExt, pin_mut}; 10 | 11 | pub trait ByteStream: Stream { 12 | fn remaining_length(&self) -> RemainingLength { 13 | RemainingLength::unknown() 14 | } 15 | } 16 | 17 | pub type DynByteStream = Pin<Box<dyn ByteStream<Item = Result<Bytes, StdError>> + Send + Sync + 'static>>; 18 | 19 | pub struct 
RemainingLength { 20 | lower: usize, 21 | upper: Option<usize>, 22 | } 23 | 24 | impl RemainingLength { 25 | /// Creates a new `RemainingLength` with the given lower and upper bounds. 26 | /// 27 | /// # Panics 28 | /// This function asserts that `lower <= upper`. 29 | #[must_use] 30 | pub fn new(lower: usize, upper: Option<usize>) -> Self { 31 | if let Some(upper) = upper { 32 | assert!(lower <= upper); 33 | } 34 | Self { lower, upper } 35 | } 36 | 37 | #[must_use] 38 | pub fn unknown() -> Self { 39 | Self { lower: 0, upper: None } 40 | } 41 | 42 | #[must_use] 43 | pub fn new_exact(n: usize) -> Self { 44 | Self { 45 | lower: n, 46 | upper: Some(n), 47 | } 48 | } 49 | 50 | #[must_use] 51 | pub fn exact(&self) -> Option<usize> { 52 | self.upper.filter(|&upper| upper == self.lower) 53 | } 54 | 55 | #[must_use] 56 | fn into_size_hint(self) -> http_body::SizeHint { 57 | let mut sz = http_body::SizeHint::new(); 58 | sz.set_lower(self.lower as u64); 59 | if let Some(upper) = self.upper { 60 | sz.set_upper(upper as u64); 61 | } 62 | sz 63 | } 64 | 65 | #[must_use] 66 | fn from_size_hint(sz: &http_body::SizeHint) -> Self { 67 | // inaccurate conversion on 32-bit platforms 68 | let lower = usize::try_from(sz.lower()).unwrap_or(usize::MAX); 69 | let upper = sz.upper().and_then(|x| usize::try_from(x).ok()); 70 | Self { lower, upper } 71 | } 72 | } 73 | 74 | impl From<RemainingLength> for http_body::SizeHint { 75 | fn from(value: RemainingLength) -> Self { 76 | value.into_size_hint() 77 | } 78 | } 79 | 80 | impl From<http_body::SizeHint> for RemainingLength { 81 | fn from(value: http_body::SizeHint) -> Self { 82 | Self::from_size_hint(&value) 83 | } 84 | } 85 | 86 | impl fmt::Debug for RemainingLength { 87 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 88 | if let Some(exact) = self.exact() { 89 | return write!(f, "{exact}"); 90 | } 91 | match self.upper { 92 | Some(upper) => write!(f, "({}..={})", self.lower, upper), 93 | None => write!(f, "({}..)", self.lower), 94 | } 95 | } 96 | } 97 | 98 | pub(crate) fn into_dyn<S, E>(s: S) -> DynByteStream 99 | where 100 | S: ByteStream<Item = Result<Bytes, E>> + Send + Sync + Unpin + 'static, 101 | E: std::error::Error + Send + Sync + 'static, 102 | { 103 | Box::pin(Wrapper(s)) 104 | } 105 | 106 | struct Wrapper<S>(S); 107 | 108 | impl<S, E> Stream for Wrapper<S> 109 | where 110 | S: ByteStream<Item = Result<Bytes, E>> + Send + Sync + Unpin + 'static, 111 | E: std::error::Error + Send + Sync + 'static, 112 | { 113 | type Item = Result<Bytes, StdError>; 114 | 115 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { 116 | let this = Pin::new(&mut self.0); 117 | this.poll_next(cx).map_err(|e| Box::new(e) as StdError) 118 | } 119 | 120 | fn size_hint(&self) -> (usize, Option<usize>) { 121 | self.0.size_hint() 122 | } 123 | } 124 | 125 | impl<S, E> ByteStream for Wrapper<S> 126 | where 127 | S: ByteStream<Item = Result<Bytes, E>> + Send + Sync + Unpin + 'static, 128 | E: std::error::Error + Send + Sync + 'static, 129 | { 130 | fn remaining_length(&self) -> RemainingLength { 131 | self.0.remaining_length() 132 | } 133 | } 134 | 135 | // FIXME: unbounded memory allocation 136 | pub(crate) async fn aggregate_unlimited<S, E>(stream: S) -> Result<Vec<Bytes>, E> 137 | where 138 | S: ByteStream<Item = Result<Bytes, E>>, 139 | { 140 | let mut vec = Vec::new(); 141 | pin_mut!(stream); 142 | while let Some(result) = stream.next().await { 143 | vec.push(result?); 144 | } 145 | Ok(vec) 146 | } 147 | 148 | pub(crate) struct VecByteStream { 149 | queue: VecDeque<Bytes>, 150 | remaining_bytes: usize, 151 | } 152 | 153 | impl VecByteStream { 154 | pub fn new(v: Vec<Bytes>) -> Self { 155 | let total = v 156 | .iter() 157 | .map(Bytes::len) 158 | .try_fold(0, usize::checked_add) 159 
| .expect("length overflow"); 160 | 161 | Self { 162 | queue: v.into(), 163 | remaining_bytes: total, 164 | } 165 | } 166 | 167 | pub fn exact_remaining_length(&self) -> usize { 168 | self.remaining_bytes 169 | } 170 | } 171 | 172 | impl Stream for VecByteStream { 173 | type Item = Result<Bytes, StdError>; 174 | 175 | fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> { 176 | let this = Pin::into_inner(self); 177 | match this.queue.pop_front() { 178 | Some(b) => { 179 | this.remaining_bytes -= b.len(); 180 | Poll::Ready(Some(Ok(b))) 181 | } 182 | None => Poll::Ready(None), 183 | } 184 | } 185 | 186 | fn size_hint(&self) -> (usize, Option<usize>) { 187 | let cnt = self.queue.len(); 188 | (cnt, Some(cnt)) 189 | } 190 | } 191 | 192 | impl ByteStream for VecByteStream { 193 | fn remaining_length(&self) -> RemainingLength { 194 | RemainingLength::new_exact(self.remaining_bytes) 195 | } 196 | } 197 | --------------------------------------------------------------------------------
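A short sketch of how `RemainingLength` reports exact sizes (illustrative only; assumes the module is reachable as `crate::stream` inside the crate):

    use crate::stream::RemainingLength;

    fn demo() {
        let hint = RemainingLength::new_exact(5);
        assert_eq!(hint.exact(), Some(5));
        // the Debug impl prints just the exact value when lower == upper
        assert_eq!(format!("{hint:?}"), "5");
    }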
/crates/s3s/src/utils/crypto.rs: -------------------------------------------------------------------------------- 1 | use std::mem::MaybeUninit; 2 | 3 | use hex_simd::{AsOut, AsciiCase}; 4 | use hyper::body::Bytes; 5 | 6 | /// verify sha256 checksum string 7 | pub fn is_sha256_checksum(s: &str) -> bool { 8 | // TODO: optimize 9 | let is_lowercase_hex = |c: u8| matches!(c, b'0'..=b'9' | b'a'..=b'f'); 10 | s.len() == 64 && s.as_bytes().iter().copied().all(is_lowercase_hex) 11 | } 12 | 13 | /// `hmac_sha1(key, data)` 14 | pub fn hmac_sha1(key: impl AsRef<[u8]>, data: impl AsRef<[u8]>) -> [u8; 20] { 15 | use hmac::{Hmac, KeyInit, Mac}; 16 | use sha1::Sha1; 17 | 18 | let mut m = <Hmac<Sha1>>::new_from_slice(key.as_ref()).unwrap(); 19 | m.update(data.as_ref()); 20 | m.finalize().into_bytes().into() 21 | } 22 | 23 | /// `hmac_sha256(key, data)` 24 | pub fn hmac_sha256(key: impl AsRef<[u8]>, data: impl AsRef<[u8]>) -> [u8; 32] { 25 | use hmac::{Hmac, KeyInit, Mac}; 26 | use sha2::Sha256; 27 | 28 | let mut m = <Hmac<Sha256>>::new_from_slice(key.as_ref()).unwrap(); 29 | m.update(data.as_ref()); 30 | m.finalize().into_bytes().into() 31 | } 32 | 33 | pub fn hex(data: impl AsRef<[u8]>) -> String { 34 | hex_simd::encode_to_string(data, hex_simd::AsciiCase::Lower) 35 | } 36 | 37 | /// `f(hex(src))` 38 | fn hex_bytes32<R>(src: impl AsRef<[u8]>, f: impl FnOnce(&str) -> R) -> R { 39 | let buf: &mut [_] = &mut [MaybeUninit::uninit(); 64]; 40 | let ans = hex_simd::encode_as_str(src.as_ref(), buf.as_out(), AsciiCase::Lower); 41 | f(ans) 42 | } 43 | 44 | #[cfg(not(all(feature = "openssl", not(windows))))] 45 | fn sha256(data: &[u8]) -> impl AsRef<[u8; 32]> + use<> { 46 | use sha2::{Digest, Sha256}; 47 | <Sha256 as Digest>::digest(data) 48 | } 49 | 50 | #[cfg(all(feature = "openssl", not(windows)))] 51 | fn sha256(data: &[u8]) -> impl AsRef<[u8]> { 52 | use openssl::hash::{Hasher, MessageDigest}; 53 | let mut h = Hasher::new(MessageDigest::sha256()).unwrap(); 54 | h.update(data).unwrap(); 55 | h.finish().unwrap() 56 | } 57 | 58 | #[cfg(not(all(feature = "openssl", not(windows))))] 59 | fn sha256_chunk(chunk: &[Bytes]) -> impl AsRef<[u8; 32]> + use<> { 60 | use sha2::{Digest, Sha256}; 61 | let mut h = <Sha256 as Digest>::new(); 62 | chunk.iter().for_each(|data| h.update(data)); 63 | h.finalize() 64 | } 65 | 66 | #[cfg(all(feature = "openssl", not(windows)))] 67 | fn sha256_chunk(chunk: &[Bytes]) -> impl AsRef<[u8]> { 68 | use openssl::hash::{Hasher, MessageDigest}; 69 | let mut h = Hasher::new(MessageDigest::sha256()).unwrap(); 70 | chunk.iter().for_each(|data| h.update(data).unwrap()); 71 | h.finish().unwrap() 72 | } 73 | 74 | /// `f(hex(sha256(data)))` 75 | pub fn hex_sha256<R>(data: &[u8], f: impl FnOnce(&str) -> R) -> R { 76 | hex_bytes32(sha256(data).as_ref(), f) 77 | } 78 | 79 | /// `f(hex(sha256(chunk)))` 80 | pub fn hex_sha256_chunk<R>(chunk: &[Bytes], f: impl FnOnce(&str) -> R) -> R { 81 | hex_bytes32(sha256_chunk(chunk).as_ref(), f) 82 | } 83 | --------------------------------------------------------------------------------
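A small sketch tying the hashing helpers above together (illustrative only; `demo` is a made-up function and the path assumes crate-internal access to `crate::utils::crypto`):

    use crate::utils::crypto::{hex, hex_sha256, hmac_sha256, is_sha256_checksum};

    fn demo() {
        let tag = hmac_sha256(b"secret", b"payload"); // [u8; 32]
        let tag_hex = hex(tag);
        assert!(is_sha256_checksum(&tag_hex)); // 64 lowercase hex chars
        // the callback style avoids allocating a String for the digest
        hex_sha256(b"payload", |digest_hex| assert_eq!(digest_hex.len(), 64));
    }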
/crates/s3s/src/utils/format.rs: -------------------------------------------------------------------------------- 1 | use crate::dto::{Timestamp, TimestampFormat}; 2 | 3 | use arrayvec::ArrayVec; 4 | 5 | pub const fn fmt_boolean(val: bool) -> &'static str { 6 | if val { "true" } else { "false" } 7 | } 8 | 9 | pub fn fmt_integer<T>(val: i32, f: impl FnOnce(&str) -> T) -> T { 10 | let mut buf = itoa::Buffer::new(); 11 | f(buf.format(val)) 12 | } 13 | 14 | pub fn fmt_long<T>(val: i64, f: impl FnOnce(&str) -> T) -> T { 15 | let mut buf = itoa::Buffer::new(); 16 | f(buf.format(val)) 17 | } 18 | 19 | pub fn fmt_usize<T>(val: usize, f: impl FnOnce(&str) -> T) -> T { 20 | let mut buf = itoa::Buffer::new(); 21 | f(buf.format(val)) 22 | } 23 | 24 | pub fn fmt_timestamp<T>(val: &Timestamp, fmt: TimestampFormat, f: impl FnOnce(&[u8]) -> T) -> T { 25 | let mut buf = ArrayVec::<u8, 32>::new(); 26 | val.format(fmt, &mut buf).unwrap(); 27 | f(&buf) 28 | } 29 | -------------------------------------------------------------------------------- /crates/s3s/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod crypto; 2 | pub mod parser; 3 | 4 | pub mod format; 5 | 6 | use std::future::Future; 7 | use std::pin::Pin; 8 | 9 | /// `Pin<Box<dyn Future<Output = T> + Send + Sync + 'a>>` 10 | pub type SyncBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + Sync + 'a>>; 11 | 12 | pub fn stable_sort_by_first<T>(v: &mut [(T, T)]) 13 | where 14 | T: Ord, 15 | { 16 | v.sort_by(|lhs, rhs| lhs.0.cmp(&rhs.0)); 17 | } 18 | 19 | pub fn is_base64_encoded(bytes: &[u8]) -> bool { 20 | base64_simd::STANDARD.check(bytes).is_ok() 21 | } 22 | 23 | macro_rules! invalid_request { 24 | ($msg:literal) => { 25 | s3_error!(InvalidRequest, $msg) 26 | }; 27 | ($fmt:literal, $($arg:tt)+) => { 28 | s3_error!(InvalidRequest, $fmt, $($arg)+) 29 | }; 30 | ($source:expr, $($arg:tt)+) => {{ 31 | let mut err = invalid_request!($($arg)+); 32 | err.set_source(Box::new($source)); 33 | err 34 | }}; 35 | } 36 | -------------------------------------------------------------------------------- /crates/s3s/src/utils/parser.rs: -------------------------------------------------------------------------------- 1 | pub struct Error; 2 | 3 | #[inline(always)] 4 | fn digit(c: u8) -> Result<u8, Error> { 5 | c.is_ascii_digit().then_some(c - b'0').ok_or(Error) 6 | } 7 | 8 | #[inline(always)] 9 | pub fn digit2(x: [u8; 2]) -> Result<u8, Error> { 10 | let x0 = digit(x[0])?; 11 | let x1 = digit(x[1])?; 12 | Ok(x0 * 10 + x1) 13 | } 14 | 15 | #[inline(always)] 16 | pub fn digit4(x: [u8; 4]) -> Result<u16, Error> { 17 | let x0 = u16::from(digit2([x[0], x[1]])?); 18 | let x1 = u16::from(digit2([x[2], x[3]])?); 19 | Ok(x0 * 100 + x1) 20 | } 21 | 22 | pub fn consume<I, O, F>(input: &mut I, f: F) -> Result<O, nom::Err<nom::error::Error<I>>> 23 | where 24 | F: FnOnce(I) -> nom::IResult<I, O>, 25 | I: Copy, 26 | { 27 | let (remaining, output) = f(*input)?; 28 | *input = remaining; 29 | Ok(output) 30 | } 31 | -------------------------------------------------------------------------------- /crates/s3s/src/xml/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::missing_errors_doc)] // TODO 2 | 3 | mod de; 4 | pub use self::de::*; 5 | 6 | mod ser; 7 | pub use self::ser::*; 8 | 9 | mod generated; 10 | 11 | mod manually { 12 | use super::*; 13 | 14 | use crate::dto::BucketLocationConstraint; 15 | use crate::dto::GetBucketLocationOutput; 16 | 17 | impl Serialize for GetBucketLocationOutput { 18 | fn serialize(&self, s: &mut Serializer) -> SerResult { 19 | let xmlns = "http://s3.amazonaws.com/doc/2006-03-01/"; 20 | if let Some(location_constraint) = &self.location_constraint { 21 | s.content_with_ns("LocationConstraint", xmlns, location_constraint)?; 22 | } else { 23 | s.content_with_ns("LocationConstraint", xmlns, "")?; 24 | } 25 | Ok(()) 26 | } 27 | } 28 | 29 | impl<'xml> Deserialize<'xml> for GetBucketLocationOutput { 30 | fn deserialize(d: &mut Deserializer<'xml>) -> DeResult<Self> { 31 | let mut location_constraint: Option<BucketLocationConstraint> = None; 32 | d.for_each_element(|d, x| match x { 33 | b"LocationConstraint" => { 34 | if location_constraint.is_some() { 35 | return Err(DeError::DuplicateField); 36 | } 37 | let val: BucketLocationConstraint = d.content()?; 38 | if !val.as_str().is_empty() { 39 | location_constraint = Some(val); 40 | } 41 | Ok(()) 42 | } 43 | _ => Err(DeError::UnexpectedTagName), 44 | })?; 45 | Ok(Self { location_constraint }) 46 | } 47 | } 48 | 49 | use crate::dto::AssumeRoleOutput; 50 | 51 | impl Serialize for AssumeRoleOutput { 52 | fn serialize(&self, s: &mut Serializer) -> SerResult { 53 | let xmlns = "https://sts.amazonaws.com/doc/2011-06-15/"; 54 | s.element_with_ns("AssumeRoleResponse", xmlns, |s| { 55 | s.content("AssumeRoleResult", self) // 56 | })?; 57 | Ok(()) 58 | } 59 | } 60 | 61 | impl<'xml> Deserialize<'xml> for AssumeRoleOutput { 62 | fn deserialize(d: &mut Deserializer<'xml>) -> DeResult<Self> { 63 | d.named_element("AssumeRoleResponse", |d| { 64 | d.named_element("AssumeRoleResult", Self::deserialize_content) // 65 | }) 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /crates/s3s/tests/dto.rs: 
-------------------------------------------------------------------------------- 1 | use s3s::dto::GetObjectInput; 2 | 3 | #[test] 4 | fn builder() { 5 | let input = { 6 | let mut b = GetObjectInput::builder(); 7 | b.set_bucket("hello".to_owned()); 8 | b.set_key("world".to_owned()); 9 | b.build().unwrap() 10 | }; 11 | 12 | assert_eq!(input.bucket, "hello"); 13 | assert_eq!(input.key, "world"); 14 | } 15 | -------------------------------------------------------------------------------- /data/crawl.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from pprint import pprint # noqa: F401 3 | import re 4 | import json 5 | 6 | from bs4 import BeautifulSoup 7 | import requests 8 | import typer 9 | 10 | cli = typer.Typer(pretty_exceptions_show_locals=False) 11 | 12 | model_dir = Path(__file__).parent 13 | 14 | 15 | def save_json(path, data): 16 | with open(path, "w") as f: 17 | json.dump(data, f, indent=4) 18 | 19 | 20 | def download_aws_sdk(service: str, *, commit: str): 21 | url = f"https://github.com/awslabs/aws-sdk-rust/raw/{commit}/aws-models/{service}.json" 22 | resp = requests.get(url) 23 | assert resp.status_code == 200 24 | assert resp.json() 25 | with open(model_dir / f"{service}.json", "w") as f: 26 | f.write(resp.text) 27 | 28 | 29 | @cli.command() 30 | def download_s3_model(): 31 | # https://github.com/awslabs/aws-sdk-rust/commits/main/aws-models/s3.json 32 | download_aws_sdk("s3", commit="2c2a06e583392266669e075d4a47489d6da1e055") 33 | 34 | 35 | @cli.command() 36 | def download_sts_model(): 37 | # https://github.com/awslabs/aws-sdk-rust/commits/main/aws-models/sts.json 38 | download_aws_sdk("sts", commit="13eb310a6cbb4912f0a44db2fb2fca0b2bfee5d1") 39 | 40 | 41 | @cli.command() 42 | def crawl_error_codes(): 43 | url = "https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html" 44 | 45 | html = requests.get(url).text 46 | 47 | soup = BeautifulSoup(html, "lxml") 48 | 49 | kinds = [ 50 | ("S3", "ErrorCodeList"), 51 | ("Replication", "ReplicationErrorCodeList"), 52 | ("Tagging", "S3TaggingErrorCodeList"), 53 | ("SelectObjectContent", "SelectObjectContentErrorCodeList"), 54 | ] 55 | 56 | data = {} 57 | 58 | for kind, h2_id in kinds: 59 | h2 = soup.css.select(f"#{h2_id}")[0] # type:ignore 60 | 61 | # find the next table 62 | table = None 63 | for e in h2.next_elements: 64 | if e.name == "table": # type:ignore 65 | table = e 66 | break 67 | assert table is not None 68 | 69 | th_list = table.css.select("th") # type:ignore 70 | assert th_list[0].text in ("Error code", "Error Code") 71 | assert th_list[1].text == "Description" 72 | assert th_list[2].text in ("HTTP status code", "HTTP Status Code") 73 | 74 | tr_list = table.css.select("tr")[1:] # type:ignore 75 | tr_list = [[e for e in tr.children if e.name == "td"] for tr in tr_list] 76 | 77 | ans = [] 78 | for td_list in tr_list: 79 | t0 = td_list[0].css.select("code")[0].text.strip() 80 | t1 = td_list[1].text.strip() 81 | t2 = td_list[2].text.strip() 82 | 83 | error_code = t0 84 | 85 | description = re.sub(r"\n\t+", " ", t1).strip() 86 | 87 | if t2 == "N/A": 88 | http_status_code = None 89 | else: 90 | m = re.match(r"(\d{3})[\s\S]*", t2) 91 | assert m is not None, f"t2: {repr(t2)}" 92 | http_status_code = int(m.group(1)) 93 | 94 | ans.append( 95 | { 96 | "code": error_code, 97 | "description": description, 98 | "http_status_code": http_status_code, 99 | } 100 | ) 101 | 102 | ans.sort(key=lambda x: x["code"]) 103 | data[kind] = ans 104 | 105 | save_json(model_dir / 
"s3_error_codes.json", data) 106 | 107 | 108 | @cli.command() 109 | def update(): 110 | download_s3_model() 111 | download_sts_model() 112 | crawl_error_codes() 113 | 114 | 115 | if __name__ == "__main__": 116 | cli() 117 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | dev: 2 | just fetch 3 | just fmt 4 | just codegen 5 | just lint 6 | just test 7 | 8 | fetch: 9 | uv sync 10 | cargo fetch 11 | 12 | fmt: 13 | uvx ruff format 14 | cargo fmt 15 | 16 | lint: 17 | uvx ruff check 18 | cargo clippy --workspace --all-features --all-targets 19 | 20 | test: 21 | cargo test --workspace --all-features --all-targets 22 | 23 | doc: 24 | RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --open --no-deps --all-features 25 | 26 | crawl: 27 | uv run data/crawl.py update 28 | 29 | codegen: 30 | cargo run -p s3s-codegen 31 | cargo fmt 32 | cargo check 33 | 34 | install name *ARGS: 35 | uv run ./scripts/install.py {{name}} {{ARGS}} 36 | 37 | # ------------------------------------------------ 38 | 39 | sync-version: 40 | cargo set-version -p s3s 0.12.0-dev 41 | cargo set-version -p s3s-aws 0.12.0-dev 42 | cargo set-version -p s3s-model 0.12.0-dev 43 | cargo set-version -p s3s-policy 0.12.0-dev 44 | cargo set-version -p s3s-test 0.12.0-dev 45 | cargo set-version -p s3s-proxy 0.12.0-dev 46 | cargo set-version -p s3s-fs 0.12.0-dev 47 | cargo set-version -p s3s-e2e 0.12.0-dev 48 | 49 | # ------------------------------------------------ 50 | 51 | assert_unchanged: 52 | #!/bin/bash -ex 53 | [[ -z "$(git status -s)" ]] # https://stackoverflow.com/a/9393642 54 | 55 | ci-rust: 56 | cargo fmt --all --check 57 | cargo clippy --workspace --all-features --all-targets -- -D warnings 58 | just test 59 | just codegen 60 | just assert_unchanged 61 | 62 | ci-python: 63 | uvx ruff format --check 64 | uvx ruff check 65 | just crawl 66 | just assert_unchanged 67 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "s3s" 3 | version = "0.0.0" 4 | requires-python = ">=3.13" 5 | dependencies = [ 6 | "beautifulsoup4>=4.12.3", 7 | "lxml>=5.3.0", 8 | "requests>=2.32.3", 9 | "typer>=0.12.5", 10 | ] 11 | 12 | [tool.ruff] 13 | cache-dir = ".cache/ruff" 14 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 130 2 | fn_call_width = 90 3 | single_line_let_else_max_width = 100 4 | -------------------------------------------------------------------------------- /scripts/e2e-fs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | DATA_DIR="/tmp/s3s-e2e" 4 | mkdir -p "$DATA_DIR" 5 | 6 | if [ -z "$RUST_LOG" ]; then 7 | export RUST_LOG="s3s_fs=debug,s3s=debug" 8 | fi 9 | 10 | killall s3s-fs || echo 11 | 12 | s3s-fs \ 13 | --access-key AKEXAMPLES3S \ 14 | --secret-key SKEXAMPLES3S \ 15 | --host localhost \ 16 | --port 8014 \ 17 | --domain localhost:8014 \ 18 | --domain localhost \ 19 | "$DATA_DIR" | tee target/s3s-fs.log & 20 | 21 | sleep 1s 22 | 23 | export AWS_ACCESS_KEY_ID=AKEXAMPLES3S 24 | export AWS_SECRET_ACCESS_KEY=SKEXAMPLES3S 25 | export AWS_REGION=us-east-1 26 | export AWS_ENDPOINT_URL=http://localhost:8014 27 | 28 | if [ -z "$RUST_LOG" ]; then 29 | export 
RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 30 | fi 31 | export RUST_BACKTRACE=full 32 | 33 | s3s-e2e "$@" 34 | -------------------------------------------------------------------------------- /scripts/e2e-minio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker stop e2e-minio || echo 5 | docker container rm e2e-minio || echo 6 | docker run \ 7 | --name e2e-minio \ 8 | -p 9000:9000 -p 9001:9001 \ 9 | -e "MINIO_DOMAIN=localhost:9000" \ 10 | -e "MINIO_HTTP_TRACE=1" \ 11 | -v /tmp/minio:/data \ 12 | minio/minio:latest server /data --console-address ":9001" & 13 | 14 | sleep 1s 15 | 16 | export AWS_ACCESS_KEY_ID=minioadmin 17 | export AWS_SECRET_ACCESS_KEY=minioadmin 18 | export AWS_REGION=us-east-1 19 | export AWS_ENDPOINT_URL=http://localhost:9000 20 | 21 | if [ -z "$RUST_LOG" ]; then 22 | export RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 23 | fi 24 | export RUST_BACKTRACE=full 25 | 26 | s3s-e2e "$@" 27 | -------------------------------------------------------------------------------- /scripts/e2e-mint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p target 3 | ./scripts/s3s-proxy.sh > target/s3s-proxy.log & 4 | sleep 3s 5 | ./scripts/mint.sh | tee target/mint.log 6 | -------------------------------------------------------------------------------- /scripts/install.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from dataclasses import dataclass 3 | import argparse 4 | import subprocess 5 | 6 | INSTALLERS = {} 7 | 8 | 9 | def installer(name): 10 | def decorator(f): 11 | INSTALLERS[name] = f 12 | return f 13 | 14 | return decorator 15 | 16 | 17 | @dataclass(kw_only=True) 18 | class CliArgs: 19 | name: str 20 | 21 | offline: bool = False 22 | 23 | @staticmethod 24 | def parse(): 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument("name", type=str, choices=list(INSTALLERS.keys())) 27 | parser.add_argument("--offline", action="store_true") 28 | args = parser.parse_args() 29 | return CliArgs(**args.__dict__) 30 | 31 | 32 | def sh(cmd): 33 | print(cmd) 34 | subprocess.run(cmd, shell=True, check=True, stdin=subprocess.DEVNULL) 35 | 36 | 37 | def cargo_install( 38 | args: CliArgs, 39 | package: str, 40 | *, 41 | features: list[str] | None = None, 42 | bin: str | None = None, 43 | force: bool = True, 44 | ): 45 | opt_offline = "--offline" if args.offline else "" 46 | opt_features = f"--features {','.join(features)}" if features else "" 47 | opt_bin = f"--bin {bin}" if bin else "" 48 | opt_force = "--force" if force else "" 49 | sh( 50 | f"cargo install --path crates/{package} --locked" # 51 | f"{opt_offline} {opt_features} {opt_bin} {opt_force}" 52 | ) 53 | 54 | 55 | @installer("s3s-fs") 56 | def install_s3s_fs(args: CliArgs): 57 | cargo_install(args, "s3s-fs", features=["binary"]) 58 | 59 | 60 | @installer("s3s-proxy") 61 | def install_s3s_proxy(args: CliArgs): 62 | cargo_install(args, "s3s-proxy") 63 | 64 | 65 | @installer("s3s-e2e") 66 | def install_s3s_e2e(args: CliArgs): 67 | sh("touch crates/s3s-e2e/build.rs") 68 | cargo_install(args, "s3s-e2e") 69 | 70 | 71 | @installer("all") 72 | def install_all(args: CliArgs): 73 | if not args.offline: 74 | sh("cargo fetch") 75 | args.offline = True 76 | 77 | for name, f in INSTALLERS.items(): 78 | if name != "all": 79 | f(args) 80 | 81 | 82 | def main(args: CliArgs): 83 | INSTALLERS[args.name](args) 84 | 85 | 86 
| if __name__ == "__main__": 87 | main(CliArgs.parse()) 88 | -------------------------------------------------------------------------------- /scripts/license.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from pathlib import Path 3 | 4 | 5 | def main(): 6 | crates = Path("crates") 7 | for crate in crates.iterdir(): 8 | license_file = crate / "LICENSE" 9 | if not license_file.exists(): 10 | license_file.symlink_to("../../LICENSE") 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /scripts/minio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker run \ 5 | -p 9000:9000 -p 9001:9001 \ 6 | -e "MINIO_DOMAIN=localhost:9000" \ 7 | -e "MINIO_HTTP_TRACE=1" \ 8 | -v /tmp/minio:/data \ 9 | minio/minio:latest server /data --console-address ":9001" & 10 | -------------------------------------------------------------------------------- /scripts/mint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p /tmp/mint 3 | docker run \ 4 | -e "SERVER_ENDPOINT=localhost:8014" \ 5 | -e "ACCESS_KEY=minioadmin" \ 6 | -e "SECRET_KEY=minioadmin" \ 7 | --network host \ 8 | -v /tmp/mint:/mint/log \ 9 | minio/mint:edge 10 | 11 | ./scripts/report-mint.py /tmp/mint/log.json 12 | -------------------------------------------------------------------------------- /scripts/report-mint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from dataclasses import dataclass 3 | from typing import Any, Dict, Optional 4 | import json 5 | import sys 6 | from pprint import pprint # noqa: F401 7 | from itertools import groupby 8 | 9 | 10 | # https://github.com/minio/mint#mint-log-format 11 | @dataclass 12 | class MintLog: 13 | name: str 14 | function: Optional[str] 15 | args: Optional[Dict[str, Any]] 16 | duration: int 17 | status: str 18 | alert: Optional[str] 19 | message: Optional[str] 20 | error: Optional[str] 21 | 22 | 23 | def from_json(x: Any) -> MintLog: 24 | return MintLog( 25 | name=x["name"], 26 | function=x.get("function"), 27 | args=x.get("args"), 28 | duration=x["duration"], 29 | status=x["status"], 30 | alert=x.get("alert"), 31 | message=x.get("message"), 32 | error=x.get("error"), 33 | ) 34 | 35 | 36 | if __name__ == "__main__": 37 | log_path = sys.argv[1] 38 | logs = [] 39 | with open(log_path) as f: 40 | for line in f.readlines(): 41 | line = line.strip() 42 | if len(line) == 0: 43 | continue 44 | 45 | json_str = line 46 | if json_str.find("{") != 0: 47 | json_str = json_str[json_str.find("{") :] 48 | 49 | try: 50 | json_value = json.loads(json_str) 51 | except Exception: 52 | print(f"error parsing log line: {line}") 53 | continue 54 | 55 | logs.append(from_json(json_value)) 56 | 57 | for x in logs: 58 | if ":" in x.name: 59 | name, function = x.name.split(":") 60 | x.name = name.strip() 61 | x.function = function.strip() 62 | 63 | groups = {k: list(v) for k, v in groupby(logs, lambda x: x.name)} 64 | counts = {} 65 | 66 | for name, group in groups.items(): 67 | pass_count = len(list(x for x in group if x.status == "PASS")) 68 | fail_count = len(list(x for x in group if x.status == "FAIL")) 69 | na_count = len(list(x for x in group if x.status == "NA")) 70 | counts[name] = {"pass": pass_count, "fail": fail_count, "na": na_count} 71 | 72 | print( 73 | 
f"{name:<20} " 74 | f"passed {pass_count:>3}, " 75 | f"failed {fail_count:>3}, " 76 | f"na {na_count:>3}" 77 | ) 78 | print() 79 | 80 | total_pass_count = sum(c["pass"] for c in counts.values()) 81 | total_fail_count = sum(c["fail"] for c in counts.values()) 82 | total_na_count = sum(c["na"] for c in counts.values()) 83 | name = "summary" 84 | print( 85 | f"{name:<20} " 86 | f"passed {total_pass_count:>3}, " 87 | f"failed {total_fail_count:>3}, " 88 | f"na {total_na_count:>3}" 89 | ) 90 | 91 | passed_groups = [ 92 | "aws-sdk-go", 93 | "aws-sdk-ruby", 94 | "awscli", 95 | "minio-go", 96 | "s3cmd", 97 | ] 98 | 99 | for group in passed_groups: 100 | assert counts[group]["fail"] == 0, f'group "{group}" failed' 101 | 102 | # FIXME: E2E tests 103 | # https://github.com/Nugine/s3s/issues/4 104 | # https://github.com/Nugine/s3s/pull/141#issuecomment-2142662531 105 | 106 | assert "minio-dotnet" not in counts 107 | assert counts["minio-js"]["pass"] >= 203 108 | assert counts["versioning"]["pass"] >= 4 109 | assert counts["minio-java"]["pass"] >= 17 110 | 111 | assert counts["aws-sdk-php"]["pass"] >= 10 112 | assert counts["minio-py"]["pass"] >= 2 113 | assert counts["mc"]["pass"] >= 2 114 | -------------------------------------------------------------------------------- /scripts/s3s-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | cargo build -p s3s-e2e --release 4 | 5 | export AWS_ACCESS_KEY_ID=minioadmin 6 | export AWS_SECRET_ACCESS_KEY=minioadmin 7 | export AWS_REGION=us-east-1 8 | export AWS_ENDPOINT_URL=http://localhost:9000 9 | 10 | if [ -z "$RUST_LOG" ]; then 11 | export RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 12 | fi 13 | export RUST_BACKTRACE=full 14 | 15 | ./target/release/s3s-e2e "$@" 16 | -------------------------------------------------------------------------------- /scripts/s3s-fs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DATA_DIR="/tmp" 3 | 4 | if [ -n "$1" ]; then 5 | DATA_DIR="$1" 6 | fi 7 | 8 | if [ -z "$RUST_LOG" ]; then 9 | export RUST_LOG="s3s_fs=debug,s3s=debug" 10 | fi 11 | 12 | s3s-fs \ 13 | --access-key AKEXAMPLES3S \ 14 | --secret-key SKEXAMPLES3S \ 15 | --host localhost \ 16 | --port 8014 \ 17 | --domain localhost:8014 \ 18 | --domain localhost \ 19 | "$DATA_DIR" 20 | -------------------------------------------------------------------------------- /scripts/s3s-proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker run -p 9000:9000 -p 9001:9001 \ 5 | -e "MINIO_DOMAIN=localhost:9000" \ 6 | -e "MINIO_HTTP_TRACE=1" \ 7 | -v /tmp/minio:/data \ 8 | minio/minio:latest server /data --console-address ":9001" & 9 | 10 | sleep 1s 11 | 12 | export AWS_ACCESS_KEY_ID=minioadmin 13 | export AWS_SECRET_ACCESS_KEY=minioadmin 14 | export AWS_REGION=us-east-1 15 | 16 | if [ -z "$RUST_LOG" ]; then 17 | export RUST_LOG="s3s_proxy=debug,s3s_aws=debug,s3s=debug" 18 | fi 19 | export RUST_BACKTRACE=full 20 | 21 | s3s-proxy \ 22 | --host localhost \ 23 | --port 8014 \ 24 | --domain localhost:8014 \ 25 | --endpoint-url http://localhost:9000 26 | --------------------------------------------------------------------------------