├── .python-version ├── crates ├── s3s │ ├── LICENSE │ ├── src │ │ ├── dto │ │ │ ├── content_type.rs │ │ │ ├── event.rs │ │ │ ├── build_error.rs │ │ │ ├── mod.rs │ │ │ ├── streaming_blob.rs │ │ │ └── copy_source.rs │ │ ├── header │ │ │ └── mod.rs │ │ ├── sig_v2 │ │ │ ├── mod.rs │ │ │ ├── post_signature_v2.rs │ │ │ ├── authorization_v2.rs │ │ │ └── presigned_url_v2.rs │ │ ├── s3_op.rs │ │ ├── auth │ │ │ ├── mod.rs │ │ │ ├── simple_auth.rs │ │ │ └── secret_key.rs │ │ ├── access │ │ │ ├── mod.rs │ │ │ └── context.rs │ │ ├── sig_v4 │ │ │ ├── mod.rs │ │ │ ├── post_signature_v4.rs │ │ │ ├── amz_date.rs │ │ │ ├── presigned_url_v4.rs │ │ │ └── amz_content_sha256.rs │ │ ├── http │ │ │ ├── mod.rs │ │ │ ├── response.rs │ │ │ ├── etag.rs │ │ │ ├── request.rs │ │ │ ├── ordered_qs.rs │ │ │ └── ordered_headers.rs │ │ ├── utils │ │ │ ├── parser.rs │ │ │ ├── format.rs │ │ │ ├── mod.rs │ │ │ └── crypto.rs │ │ ├── time.rs │ │ ├── lib.rs │ │ ├── route.rs │ │ ├── ops │ │ │ ├── multipart.rs │ │ │ ├── get_object.rs │ │ │ └── tests.rs │ │ ├── checksum.rs │ │ ├── validation.rs │ │ ├── crypto.rs │ │ └── xml │ │ │ └── mod.rs │ ├── examples │ │ ├── tokio_util.rs │ │ └── axum.rs │ ├── tests │ │ └── dto.rs │ └── Cargo.toml ├── s3s-aws │ ├── LICENSE │ ├── src │ │ ├── body.rs │ │ ├── proxy │ │ │ ├── mod.rs │ │ │ └── meta.rs │ │ ├── lib.rs │ │ ├── event_stream.rs │ │ ├── conv │ │ │ └── mod.rs │ │ ├── error.rs │ │ └── connector.rs │ └── Cargo.toml ├── s3s-e2e │ ├── LICENSE │ ├── build.rs │ ├── src │ │ ├── main.rs │ │ └── utils.rs │ └── Cargo.toml ├── s3s-fs │ ├── LICENSE │ ├── src │ │ ├── lib.rs │ │ ├── utils.rs │ │ ├── error.rs │ │ ├── checksum.rs │ │ └── main.rs │ └── Cargo.toml ├── s3s-policy │ ├── LICENSE │ ├── src │ │ ├── lib.rs │ │ ├── tests.rs │ │ └── pattern.rs │ └── Cargo.toml ├── s3s-proxy │ ├── LICENSE │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── s3s-test │ ├── LICENSE │ ├── src │ │ ├── lib.rs │ │ ├── build.rs │ │ ├── error.rs │ │ ├── traits.rs │ │ └── report.rs │ └── Cargo.toml ├── 
s3s-model │ ├── src │ │ └── lib.rs │ └── Cargo.toml └── s3s-wasm │ ├── Cargo.toml │ └── src │ └── lib.rs ├── codegen ├── src │ ├── v2 │ │ └── mod.rs │ ├── main.rs │ └── v1 │ │ ├── smithy.rs │ │ ├── utils.rs │ │ ├── s3_trait.rs │ │ ├── sts.rs │ │ ├── headers.rs │ │ ├── access.rs │ │ ├── mod.rs │ │ ├── aws_proxy.rs │ │ └── rust.rs └── Cargo.toml ├── .github ├── buildkitd.toml ├── actions │ └── setup │ │ └── action.yml ├── workflows │ ├── copilot-setup-steps.yml │ ├── audit.yml │ ├── publish.yml │ ├── docs.yml │ ├── ci.yml │ └── docker.yml ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml └── copilot-instructions.md ├── .dockerignore ├── .gitignore ├── .mergify.yml ├── rustfmt.toml ├── scripts ├── e2e-mint.sh ├── minio.sh ├── mint.sh ├── license.py ├── s3s-e2e.sh ├── s3s-fs.sh ├── e2e-minio.sh ├── s3s-proxy.sh ├── e2e-fs.sh ├── install.py └── report-mint.py ├── pyproject.toml ├── .cargo └── audit.toml ├── Cargo.toml ├── justfile ├── CONTRIBUTING.md ├── docker └── Dockerfile ├── CHANGELOG.md ├── README.md └── data └── crawl.py /.python-version: -------------------------------------------------------------------------------- 1 | 3.13 2 | -------------------------------------------------------------------------------- /crates/s3s/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-aws/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-e2e/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-fs/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE 
-------------------------------------------------------------------------------- /crates/s3s-policy/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-proxy/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /crates/s3s-test/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /codegen/src/v2/mod.rs: -------------------------------------------------------------------------------- 1 | pub fn run() {} 2 | -------------------------------------------------------------------------------- /.github/buildkitd.toml: -------------------------------------------------------------------------------- 1 | [worker.oci] 2 | max-parallelism = 1 -------------------------------------------------------------------------------- /crates/s3s/src/dto/content_type.rs: -------------------------------------------------------------------------------- 1 | pub type ContentType = String; 2 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | CHANGELOG.md 3 | CONTRIBUTING.md 4 | README.md 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .DS_Store 3 | .vscode 4 | .idea 5 | __pycache__/ 6 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - merge_method: 
fast-forward 3 | name: default 4 | -------------------------------------------------------------------------------- /crates/s3s-e2e/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | s3s_test::build::collect_info(); 3 | } 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 130 2 | fn_call_width = 90 3 | single_line_let_else_max_width = 100 4 | -------------------------------------------------------------------------------- /crates/s3s-policy/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod model; 2 | pub mod pattern; 3 | 4 | #[cfg(test)] 5 | mod tests; 6 | -------------------------------------------------------------------------------- /crates/s3s-model/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // 3 | )] 4 | 5 | pub mod smithy; 6 | -------------------------------------------------------------------------------- /scripts/e2e-mint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p target 3 | ./scripts/s3s-proxy.sh > target/s3s-proxy.log & 4 | sleep 3s 5 | ./scripts/mint.sh | tee target/mint.log 6 | -------------------------------------------------------------------------------- /crates/s3s/src/header/mod.rs: -------------------------------------------------------------------------------- 1 | cfg_if::cfg_if! 
{ 2 | if #[cfg(feature = "minio")] { 3 | mod generated_minio; 4 | use self::generated_minio as generated; 5 | } else { 6 | mod generated; 7 | } 8 | } 9 | 10 | pub use self::generated::*; 11 | -------------------------------------------------------------------------------- /codegen/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::single_match_else, // 3 | clippy::wildcard_imports, 4 | clippy::match_same_arms, 5 | clippy::let_underscore_untyped, 6 | )] 7 | 8 | mod v1; 9 | mod v2; 10 | 11 | fn main() { 12 | v1::run(); 13 | v2::run(); 14 | } 15 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/body.rs: -------------------------------------------------------------------------------- 1 | use aws_smithy_types::body::SdkBody; 2 | 3 | pub fn s3s_body_into_sdk_body(body: s3s::Body) -> SdkBody { 4 | SdkBody::from_body_1_x(body) 5 | } 6 | 7 | pub fn sdk_body_into_s3s_body(body: SdkBody) -> s3s::Body { 8 | s3s::Body::http_body(body) 9 | } 10 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "s3s" 3 | version = "0.0.0" 4 | requires-python = ">=3.13" 5 | dependencies = [ 6 | "beautifulsoup4>=4.12.3", 7 | "lxml>=5.3.0", 8 | "requests>=2.32.3", 9 | "typer>=0.12.5", 10 | ] 11 | 12 | [tool.ruff] 13 | cache-dir = ".cache/ruff" 14 | -------------------------------------------------------------------------------- /scripts/minio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker run \ 5 | -p 9000:9000 -p 9001:9001 \ 6 | -e "MINIO_DOMAIN=localhost:9000" \ 7 | -e "MINIO_HTTP_TRACE=1" \ 8 | -v /tmp/minio:/data \ 9 | minio/minio:latest server /data --console-address ":9001" & 10 | 
-------------------------------------------------------------------------------- /scripts/mint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p /tmp/mint 3 | docker run \ 4 | -e "SERVER_ENDPOINT=localhost:8014" \ 5 | -e "ACCESS_KEY=minioadmin" \ 6 | -e "SECRET_KEY=minioadmin" \ 7 | --network host \ 8 | -v /tmp/mint:/mint/log \ 9 | minio/mint:edge 10 | 11 | ./scripts/report-mint.py /tmp/mint/log.json 12 | -------------------------------------------------------------------------------- /crates/s3s-test/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // TODO 3 | clippy::missing_panics_doc, // TODO 4 | )] 5 | 6 | mod error; 7 | mod runner; 8 | mod traits; 9 | 10 | pub mod build; 11 | pub mod cli; 12 | pub mod report; 13 | pub mod tcx; 14 | 15 | pub use self::error::{Failed, Result}; 16 | pub use self::traits::*; 17 | -------------------------------------------------------------------------------- /.github/actions/setup/action.yml: -------------------------------------------------------------------------------- 1 | name: "setup" 2 | description: "setup environment for s3s" 3 | runs: 4 | using: "composite" 5 | steps: 6 | - uses: taiki-e/install-action@just 7 | - uses: astral-sh/setup-uv@v3 8 | with: 9 | enable-cache: true 10 | - uses: dtolnay/rust-toolchain@stable 11 | - uses: Swatinem/rust-cache@v2 12 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/proxy/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "minio")] 2 | mod generated_minio; 3 | 4 | #[cfg(not(feature = "minio"))] 5 | mod generated; 6 | 7 | mod meta; 8 | 9 | pub struct Proxy(aws_sdk_s3::Client); 10 | 11 | impl From for Proxy { 12 | fn from(value: aws_sdk_s3::Client) -> Self { 13 | Self(value) 14 | } 15 | } 16 | 
-------------------------------------------------------------------------------- /scripts/license.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from pathlib import Path 3 | 4 | 5 | def main(): 6 | crates = Path("crates") 7 | for crate in crates.iterdir(): 8 | license_file = crate / "LICENSE" 9 | if not license_file.exists(): 10 | license_file.symlink_to("../../LICENSE") 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /crates/s3s-e2e/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::missing_errors_doc, // TODO 3 | clippy::missing_panics_doc, // TODO 4 | clippy::wildcard_imports, 5 | )] 6 | 7 | mod utils; 8 | 9 | mod advanced; 10 | mod basic; 11 | 12 | use s3s_test::tcx::TestContext; 13 | 14 | fn register(tcx: &mut TestContext) { 15 | basic::register(tcx); 16 | advanced::register(tcx); 17 | } 18 | 19 | s3s_test::main!(register); 20 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/mod.rs: -------------------------------------------------------------------------------- 1 | //! AWS Signature Version 2 2 | //! 3 | //! 4 | //! 
5 | 6 | mod authorization_v2; 7 | pub use self::authorization_v2::*; 8 | 9 | mod presigned_url_v2; 10 | pub use self::presigned_url_v2::*; 11 | 12 | mod post_signature_v2; 13 | pub use self::post_signature_v2::*; 14 | 15 | mod methods; 16 | pub use self::methods::*; 17 | -------------------------------------------------------------------------------- /scripts/s3s-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | cargo build -p s3s-e2e --release 4 | 5 | export AWS_ACCESS_KEY_ID=minioadmin 6 | export AWS_SECRET_ACCESS_KEY=minioadmin 7 | export AWS_REGION=us-east-1 8 | export AWS_ENDPOINT_URL=http://localhost:9000 9 | 10 | if [ -z "$RUST_LOG" ]; then 11 | export RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 12 | fi 13 | export RUST_BACKTRACE=full 14 | 15 | ./target/release/s3s-e2e "$@" 16 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::wildcard_imports, 3 | clippy::missing_errors_doc, // TODO: docs 4 | clippy::let_underscore_untyped, 5 | clippy::module_name_repetitions, 6 | clippy::multiple_crate_versions, // TODO: check later 7 | )] 8 | 9 | #[macro_use] 10 | mod error; 11 | 12 | mod checksum; 13 | mod fs; 14 | mod s3; 15 | mod utils; 16 | 17 | pub use self::error::*; 18 | pub use self::fs::FileSystem; 19 | -------------------------------------------------------------------------------- /codegen/src/v1/smithy.rs: -------------------------------------------------------------------------------- 1 | pub use s3s_model::smithy::*; 2 | 3 | pub trait SmithyTraitsExt { 4 | #[doc(hidden)] 5 | fn base(&self) -> &Traits; 6 | 7 | fn minio(&self) -> bool { 8 | self.base().get("s3s#minio").is_some() 9 | } 10 | 11 | fn sealed(&self) -> bool { 12 | self.base().get("s3s#sealed").is_some() 13 | } 14 | } 15 | 16 | impl SmithyTraitsExt for Traits { 17 | 
fn base(&self) -> &Traits { 18 | self 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /scripts/s3s-fs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DATA_DIR="/tmp" 3 | 4 | if [ -n "$1" ]; then 5 | DATA_DIR="$1" 6 | fi 7 | 8 | if [ -z "$RUST_LOG" ]; then 9 | export RUST_LOG="s3s_fs=debug,s3s=debug" 10 | fi 11 | 12 | s3s-fs \ 13 | --access-key AKEXAMPLES3S \ 14 | --secret-key SKEXAMPLES3S \ 15 | --host localhost \ 16 | --port 8014 \ 17 | --domain localhost:8014 \ 18 | --domain localhost \ 19 | "$DATA_DIR" 20 | -------------------------------------------------------------------------------- /.github/workflows/copilot-setup-steps.yml: -------------------------------------------------------------------------------- 1 | name: "Copilot Setup Steps" 2 | 3 | on: 4 | push: 5 | paths: 6 | - .github/workflows/copilot-setup-steps.yml 7 | pull_request: 8 | paths: 9 | - .github/workflows/copilot-setup-steps.yml 10 | workflow_dispatch: 11 | 12 | jobs: 13 | copilot-setup-steps: 14 | runs-on: ubuntu-latest 15 | 16 | permissions: 17 | contents: read 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: ./.github/actions/setup 22 | -------------------------------------------------------------------------------- /.cargo/audit.toml: -------------------------------------------------------------------------------- 1 | # Project-level cargo-audit configuration 2 | # See: https://github.com/RustSec/rustsec/blob/main/cargo-audit/audit.toml.example 3 | 4 | [advisories] 5 | # Ignored advisories with reasons: 6 | # RUSTSEC-2025-0134: Transitive dependency 'rustls-pemfile' is marked 7 | # unmaintained; accepted temporarily due to upstream dependency chain 8 | # (hyper-rustls via aws-smithy). Re-evaluate and remove when aws-* deps 9 | # update or the advisory is resolved upstream. 
10 | ignore = ["RUSTSEC-2025-0134"] 11 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/event.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 4 | pub struct Event(String); 5 | 6 | impl From for Event { 7 | fn from(value: String) -> Self { 8 | Self(value) 9 | } 10 | } 11 | 12 | impl AsRef for Event { 13 | fn as_ref(&self) -> &str { 14 | self.0.as_ref() 15 | } 16 | } 17 | 18 | impl From for String { 19 | fn from(value: Event) -> Self { 20 | value.0 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s/examples/tokio_util.rs: -------------------------------------------------------------------------------- 1 | use futures::TryStreamExt; 2 | use tokio::io::AsyncBufRead; 3 | use tokio_util::io::StreamReader; 4 | 5 | pub fn convert_body(body: s3s::Body) -> impl AsyncBufRead + Send + Sync + 'static { 6 | StreamReader::new(body.into_stream().map_err(std::io::Error::other)) 7 | } 8 | 9 | pub fn convert_streaming_blob(blob: s3s::dto::StreamingBlob) -> impl AsyncBufRead + Send + Sync + 'static { 10 | StreamReader::new(blob.into_stream().map_err(std::io::Error::other)) 11 | } 12 | 13 | fn main() {} 14 | -------------------------------------------------------------------------------- /crates/s3s/src/s3_op.rs: -------------------------------------------------------------------------------- 1 | pub struct S3Operation { 2 | pub(crate) name: &'static str, 3 | } 4 | 5 | impl S3Operation { 6 | /// Returns the name of the operation. 
7 | /// 8 | /// # Example 9 | /// ``` 10 | /// use s3s::S3Operation; 11 | /// fn is_basic_list_op(op: &S3Operation) -> bool { 12 | /// matches!(op.name(), "ListBuckets" | "ListObjects" | "ListObjectsV2") 13 | /// } 14 | /// ``` 15 | #[must_use] 16 | pub fn name(&self) -> &str { 17 | self.name 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /crates/s3s-wasm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-wasm" 3 | version = "0.0.0" 4 | edition.workspace = true 5 | repository.workspace = true 6 | license.workspace = true 7 | rust-version.workspace = true 8 | publish = false 9 | 10 | [dependencies] 11 | futures = "0.3.31" 12 | getrandom = { version = "0.3.4", features = ["wasm_js"] } 13 | http = "1.4.0" 14 | s3s = { version = "0.12.0-rc.6", path = "../s3s", default-features = false } 15 | 16 | [lints] 17 | workspace = true 18 | 19 | [dev-dependencies] 20 | wasm-bindgen-test = "0.3.56" 21 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::module_name_repetitions,// 3 | clippy::match_same_arms, // 4 | clippy::missing_errors_doc, // TODO: docs 5 | clippy::wildcard_imports, // 6 | clippy::let_underscore_untyped, 7 | clippy::multiple_crate_versions, // TODO: check later 8 | )] 9 | 10 | #[macro_use] 11 | mod error; 12 | 13 | mod body; 14 | mod event_stream; 15 | 16 | pub mod conv; 17 | 18 | mod connector; 19 | pub use self::connector::{Client, Connector}; 20 | 21 | mod proxy; 22 | pub use self::proxy::Proxy; 23 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 14 | 
-------------------------------------------------------------------------------- /crates/s3s-model/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-model" 3 | version = "0.12.0-rc.6" 4 | description = "S3 Protocol Model" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | anyhow = "1.0.100" 18 | numeric_cast = "0.3.0" 19 | serde = { version = "1.0.228", features = ["derive"] } 20 | serde_json = "1.0.145" 21 | -------------------------------------------------------------------------------- /crates/s3s/src/auth/mod.rs: -------------------------------------------------------------------------------- 1 | //! S3 Authentication 2 | 3 | mod secret_key; 4 | pub use self::secret_key::{Credentials, SecretKey}; 5 | 6 | mod simple_auth; 7 | pub use self::simple_auth::SimpleAuth; 8 | 9 | use crate::error::S3Result; 10 | 11 | /// S3 Authentication Provider 12 | #[async_trait::async_trait] 13 | pub trait S3Auth: Send + Sync + 'static { 14 | /// Gets the corresponding secret key of the access key. 15 | /// 16 | /// This method is usually implemented as a database query. 
17 | async fn get_secret_key(&self, access_key: &str) -> S3Result; 18 | } 19 | -------------------------------------------------------------------------------- /crates/s3s-policy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-policy" 3 | version = "0.12.0-rc.6" 4 | description = "S3 Policy Language" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | indexmap = { version = "2.12.1", features = ["serde"] } 18 | serde = { version = "1.0.228", features = ["derive"] } 19 | serde_json = "1.0.145" 20 | thiserror = "2.0.17" 21 | -------------------------------------------------------------------------------- /crates/s3s/src/access/mod.rs: -------------------------------------------------------------------------------- 1 | cfg_if::cfg_if! 
{ 2 | if #[cfg(feature = "minio")] { 3 | mod generated_minio; 4 | use self::generated_minio as generated; 5 | } else { 6 | mod generated; 7 | } 8 | } 9 | 10 | pub use self::generated::S3Access; 11 | 12 | mod context; 13 | pub use self::context::S3AccessContext; 14 | 15 | use crate::error::S3Result; 16 | 17 | pub(crate) fn default_check(cx: &mut S3AccessContext<'_>) -> S3Result<()> { 18 | match cx.credentials() { 19 | Some(_) => Ok(()), 20 | None => Err(s3_error!(AccessDenied, "Signature is required")), 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/post_signature_v2.rs: -------------------------------------------------------------------------------- 1 | use crate::http::Multipart; 2 | 3 | pub struct PostSignatureV2<'a> { 4 | pub policy: &'a str, 5 | pub access_key_id: &'a str, 6 | pub signature: &'a str, 7 | } 8 | 9 | impl<'a> PostSignatureV2<'a> { 10 | pub fn extract(m: &'a Multipart) -> Option { 11 | let policy = m.find_field_value("policy")?; 12 | let access_key_id = m.find_field_value("awsaccesskeyid")?; 13 | let signature = m.find_field_value("signature")?; 14 | Some(Self { 15 | policy, 16 | access_key_id, 17 | signature, 18 | }) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "monthly" 12 | groups: 13 | dependencies: 14 | patterns: 15 | - "*" 16 | -------------------------------------------------------------------------------- /codegen/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-codegen" 3 | version = "0.0.0" 4 | edition.workspace = true 5 | repository.workspace = true 6 | license.workspace = true 7 | rust-version.workspace = true 8 | publish = false 9 | 10 | [lints] 11 | workspace = true 12 | 13 | [dependencies] 14 | scoped-writer = "0.3.0" 15 | heck = "0.5.0" 16 | std-next = "0.1.9" 17 | numeric_cast = "0.3.0" 18 | regex = "1.12.2" 19 | serde = { version = "1.0.228", features = ["derive"] } 20 | serde_json = { version = "1.0.145", features = ["preserve_order"] } 21 | serde_urlencoded = "0.7.1" 22 | s3s-model = { version = "0.12.0-rc.6", path = "../crates/s3s-model" } 23 | http = "1.4.0" 24 | -------------------------------------------------------------------------------- /codegen/src/v1/utils.rs: -------------------------------------------------------------------------------- 1 | pub fn o(x: &T) -> T::Owned { 2 | x.to_owned() 3 | } 4 | 5 | #[macro_export] 6 | macro_rules! function_name { 7 | () => {{ 8 | fn f() {} 9 | fn type_name_of(_: T) -> &'static str { 10 | std::any::type_name::() 11 | } 12 | let name = type_name_of(f); 13 | let name = name.strip_suffix("::f").unwrap(); 14 | name.strip_suffix("::{{closure}}").unwrap_or(name) 15 | }}; 16 | } 17 | 18 | #[macro_export] 19 | macro_rules! declare_codegen { 20 | () => { 21 | g!("//! 
Auto generated by `{}`", $crate::function_name!()); 22 | g!(); 23 | }; 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/audit.yml: -------------------------------------------------------------------------------- 1 | name: Audit 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - 'feat/**' 8 | paths: 9 | - '**/Cargo.toml' 10 | - '**/Cargo.lock' 11 | pull_request: 12 | branches: 13 | - main 14 | - 'feat/**' 15 | paths: 16 | - '**/Cargo.toml' 17 | - '**/Cargo.lock' 18 | schedule: 19 | - cron: '0 0 * * 0' # at midnight of each sunday 20 | workflow_dispatch: 21 | 22 | jobs: 23 | audit: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - uses: dtolnay/rust-toolchain@stable 28 | - uses: taiki-e/install-action@cargo-audit 29 | - run: cargo audit -D warnings 30 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/mod.rs: -------------------------------------------------------------------------------- 1 | //! AWS Signature Version 4 2 | //! 3 | //! See 4 | //! 5 | //! See 6 | //! 
7 | 8 | mod presigned_url_v4; 9 | pub use self::presigned_url_v4::*; 10 | 11 | mod authorization_v4; 12 | pub use self::authorization_v4::*; 13 | 14 | mod amz_content_sha256; 15 | pub use self::amz_content_sha256::*; 16 | 17 | mod amz_date; 18 | pub use self::amz_date::*; 19 | 20 | mod post_signature_v4; 21 | pub use self::post_signature_v4::*; 22 | 23 | mod upload_stream; 24 | pub use self::upload_stream::*; 25 | 26 | mod methods; 27 | pub use self::methods::*; 28 | -------------------------------------------------------------------------------- /crates/s3s/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | mod ser; 2 | pub use self::ser::*; 3 | 4 | mod de; 5 | pub use self::de::*; 6 | 7 | mod ordered_qs; 8 | pub use self::ordered_qs::*; 9 | 10 | mod ordered_headers; 11 | pub use self::ordered_headers::*; 12 | 13 | mod aws_chunked_stream; 14 | pub use self::aws_chunked_stream::*; 15 | 16 | mod multipart; 17 | pub use self::multipart::*; 18 | 19 | mod body; 20 | pub use self::body::*; 21 | 22 | mod keep_alive_body; 23 | pub use self::keep_alive_body::KeepAliveBody; 24 | 25 | mod etag; 26 | 27 | mod request; 28 | pub use self::request::Request; 29 | 30 | mod response; 31 | pub use self::response::Response; 32 | 33 | pub use hyper::header::{HeaderName, HeaderValue, InvalidHeaderValue}; 34 | pub use hyper::http::StatusCode; 35 | -------------------------------------------------------------------------------- /scripts/e2e-minio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker stop e2e-minio || echo 5 | docker container rm e2e-minio || echo 6 | docker run \ 7 | --name e2e-minio \ 8 | -p 9000:9000 -p 9001:9001 \ 9 | -e "MINIO_DOMAIN=localhost:9000" \ 10 | -e "MINIO_HTTP_TRACE=1" \ 11 | -v /tmp/minio:/data \ 12 | minio/minio:latest server /data --console-address ":9001" & 13 | 14 | sleep 3s 15 | 16 | export 
AWS_ACCESS_KEY_ID=minioadmin 17 | export AWS_SECRET_ACCESS_KEY=minioadmin 18 | export AWS_REGION=us-east-1 19 | export AWS_ENDPOINT_URL=http://localhost:9000 20 | 21 | if [ -z "$RUST_LOG" ]; then 22 | export RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 23 | fi 24 | export RUST_BACKTRACE=full 25 | 26 | s3s-e2e "$@" 27 | -------------------------------------------------------------------------------- /scripts/s3s-proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | mkdir -p /tmp/minio 4 | docker run -p 9000:9000 -p 9001:9001 \ 5 | -e "MINIO_DOMAIN=localhost:9000" \ 6 | -e "MINIO_HTTP_TRACE=1" \ 7 | -v /tmp/minio:/data \ 8 | minio/minio:latest server /data --console-address ":9001" & 9 | 10 | sleep 1s 11 | 12 | export AWS_ACCESS_KEY_ID=minioadmin 13 | export AWS_SECRET_ACCESS_KEY=minioadmin 14 | export AWS_REGION=us-east-1 15 | 16 | if [ -z "$RUST_LOG" ]; then 17 | export RUST_LOG="s3s_proxy=debug,s3s_aws=debug,s3s=debug" 18 | fi 19 | export RUST_BACKTRACE=full 20 | 21 | s3s-proxy \ 22 | --host localhost \ 23 | --port 8014 \ 24 | --domain localhost:8014 \ 25 | --endpoint-url http://localhost:9000 26 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/build_error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, thiserror::Error)] 2 | #[error(transparent)] 3 | pub struct BuildError { 4 | #[from] 5 | kind: BuildErrorKind, 6 | } 7 | 8 | #[derive(Debug, thiserror::Error)] 9 | enum BuildErrorKind { 10 | #[error("Missing field: {field:?}")] 11 | MissingField { field: &'static str }, 12 | // #[error("BuildError: {source}")] 13 | // Other { source: StdError }, 14 | } 15 | 16 | impl BuildError { 17 | pub(crate) fn missing_field(field: &'static str) -> Self { 18 | Self { 19 | kind: BuildErrorKind::MissingField { field }, 20 | } 21 | } 22 | 23 | // pub(crate) fn other(source: StdError) -> Self { 24 | // 
Self { 25 | // kind: BuildErrorKind::Other { source }, 26 | // } 27 | // } 28 | } 29 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["crates/*", "codegen"] 3 | resolver = "3" 4 | 5 | [workspace.package] 6 | edition = "2024" 7 | repository = "https://github.com/Nugine/s3s" 8 | license = "Apache-2.0" 9 | rust-version = "1.86.0" 10 | 11 | [workspace.lints.rust] 12 | unsafe_code = "forbid" 13 | 14 | [workspace.lints.clippy] 15 | # deny 16 | all = { level = "deny", priority = -1 } 17 | pedantic = { level = "deny", priority = -1 } 18 | cargo = { level = "deny", priority = -1 } 19 | self_named_module_files = "deny" 20 | # warn 21 | dbg_macro = "warn" 22 | # allow 23 | module_name_repetitions = "allow" 24 | multiple_crate_versions = "allow" 25 | 26 | [profile.release] 27 | debug = "line-tables-only" 28 | 29 | [workspace.dependencies] 30 | hmac = "=0.13.0-rc.3" 31 | md-5 = "=0.11.0-rc.3" 32 | sha1 = "=0.11.0-rc.3" 33 | sha2 = "=0.11.0-rc.3" 34 | -------------------------------------------------------------------------------- /crates/s3s/src/utils/parser.rs: -------------------------------------------------------------------------------- 1 | pub struct Error; 2 | 3 | #[inline(always)] 4 | fn digit(c: u8) -> Result { 5 | c.is_ascii_digit().then_some(c - b'0').ok_or(Error) 6 | } 7 | 8 | #[inline(always)] 9 | pub fn digit2(x: [u8; 2]) -> Result { 10 | let x0 = digit(x[0])?; 11 | let x1 = digit(x[1])?; 12 | Ok(x0 * 10 + x1) 13 | } 14 | 15 | #[inline(always)] 16 | pub fn digit4(x: [u8; 4]) -> Result { 17 | let x0 = u16::from(digit2([x[0], x[1]])?); 18 | let x1 = u16::from(digit2([x[2], x[3]])?); 19 | Ok(x0 * 100 + x1) 20 | } 21 | 22 | pub fn consume(input: &mut I, f: F) -> Result>> 23 | where 24 | F: FnOnce(I) -> nom::IResult, 25 | I: Copy, 26 | { 27 | let (remaining, output) = f(*input)?; 28 | *input = remaining; 29 | 
Ok(output) 30 | } 31 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/proxy/meta.rs: -------------------------------------------------------------------------------- 1 | use s3s::header::{X_AMZ_ID_2, X_AMZ_REQUEST_ID}; 2 | use s3s::{S3Result, s3_error}; 3 | 4 | use aws_sdk_s3::operation::{RequestId, RequestIdExt}; 5 | use hyper::HeaderMap; 6 | use hyper::header::HeaderValue; 7 | 8 | pub fn build_headers(output: &T) -> S3Result 9 | where 10 | T: RequestId + RequestIdExt, 11 | { 12 | let mut header = HeaderMap::new(); 13 | if let Some(id) = output.request_id() { 14 | let val = HeaderValue::from_str(id).map_err(|_| s3_error!(InternalError, "invalid request id"))?; 15 | header.insert(X_AMZ_REQUEST_ID, val); 16 | } 17 | if let Some(id) = output.extended_request_id() { 18 | let val = HeaderValue::from_str(id).map_err(|_| s3_error!(InternalError, "invalid extended request id"))?; 19 | header.insert(X_AMZ_ID_2, val); 20 | } 21 | Ok(header) 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s-wasm/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use s3s::S3; 4 | use s3s::service::S3ServiceBuilder; 5 | use wasm_bindgen_test::wasm_bindgen_test; 6 | 7 | struct DummyS3 {} 8 | 9 | impl S3 for DummyS3 {} 10 | 11 | #[wasm_bindgen_test] 12 | fn test_dummy_call() { 13 | let s3 = DummyS3 {}; 14 | 15 | let service = S3ServiceBuilder::new(s3).build(); 16 | 17 | let req = { 18 | let mut req = s3s::HttpRequest::default(); 19 | *req.method_mut() = http::Method::GET; 20 | *req.uri_mut() = "http://localhost/".parse().unwrap(); 21 | req 22 | }; 23 | 24 | let result = futures::executor::block_on(service.call(req)); 25 | assert!(result.is_ok()); 26 | 27 | let resp = result.unwrap(); 28 | assert_eq!(resp.status(), 501); // Not Implemented 29 | } 30 | } 31 | 
-------------------------------------------------------------------------------- /crates/s3s/src/utils/format.rs: -------------------------------------------------------------------------------- 1 | use crate::dto::{Timestamp, TimestampFormat}; 2 | 3 | use arrayvec::ArrayVec; 4 | 5 | pub const fn fmt_boolean(val: bool) -> &'static str { 6 | if val { "true" } else { "false" } 7 | } 8 | 9 | pub fn fmt_integer(val: i32, f: impl FnOnce(&str) -> T) -> T { 10 | let mut buf = itoa::Buffer::new(); 11 | f(buf.format(val)) 12 | } 13 | 14 | pub fn fmt_long(val: i64, f: impl FnOnce(&str) -> T) -> T { 15 | let mut buf = itoa::Buffer::new(); 16 | f(buf.format(val)) 17 | } 18 | 19 | pub fn fmt_usize(val: usize, f: impl FnOnce(&str) -> T) -> T { 20 | let mut buf = itoa::Buffer::new(); 21 | f(buf.format(val)) 22 | } 23 | 24 | pub fn fmt_timestamp(val: &Timestamp, fmt: TimestampFormat, f: impl FnOnce(&[u8]) -> T) -> T { 25 | let mut buf = ArrayVec::::new(); 26 | val.format(fmt, &mut buf).unwrap(); 27 | f(&buf) 28 | } 29 | -------------------------------------------------------------------------------- /scripts/e2e-fs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | DATA_DIR="/tmp/s3s-e2e" 4 | mkdir -p "$DATA_DIR" 5 | 6 | if [ -z "$RUST_LOG" ]; then 7 | export RUST_LOG="s3s_fs=debug,s3s=debug" 8 | fi 9 | 10 | killall s3s-fs || echo 11 | 12 | s3s-fs \ 13 | --access-key AKEXAMPLES3S \ 14 | --secret-key SKEXAMPLES3S \ 15 | --host localhost \ 16 | --port 8014 \ 17 | --domain localhost:8014 \ 18 | --domain localhost \ 19 | "$DATA_DIR" | tee target/s3s-fs.log & 20 | 21 | sleep 1s 22 | 23 | export AWS_ACCESS_KEY_ID=AKEXAMPLES3S 24 | export AWS_SECRET_ACCESS_KEY=SKEXAMPLES3S 25 | export AWS_REGION=us-east-1 26 | export AWS_ENDPOINT_URL=http://localhost:8014 27 | 28 | if [ -z "$RUST_LOG" ]; then 29 | export RUST_LOG="s3s_e2e=debug,s3s_test=info,s3s=debug" 30 | fi 31 | export RUST_BACKTRACE=full 32 | 33 | s3s-e2e "$@" 34 | 
-------------------------------------------------------------------------------- /crates/s3s-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-test" 3 | version = "0.12.0-rc.6" 4 | description = "s3s test suite" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | serde = { version = "1.0.228", features = ["derive"] } 18 | tokio = { version = "1.48.0", features = ["full"] } 19 | tracing = "0.1.43" 20 | tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] } 21 | clap = { version = "4.5.53", features = ["derive"] } 22 | dotenvy = "0.15.7" 23 | serde_json = "1.0.145" 24 | indexmap = "2.12.1" 25 | colored = "3.0.0" 26 | regex = "1.12.2" 27 | backtrace = "0.3.76" 28 | const-str = { version = "0.7.0", features = ["std", "proc"] } 29 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: taiki-e/install-action@just 14 | - uses: dtolnay/rust-toolchain@nightly 15 | - name: Publish all crates 16 | env: 17 | CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_IO_API_TOKEN }} 18 | run: | 19 | cargo publish -p s3s --dry-run 20 | 21 | # fundamental 22 | cargo publish -p s3s 23 | cargo publish -p s3s-aws 24 | 25 | # supporting 26 | cargo publish -p s3s-model 27 | cargo publish -p s3s-policy 28 | cargo publish -p s3s-test 29 | 30 | # binary 31 | cargo publish -p s3s-proxy 32 | cargo publish -p s3s-fs 33 | cargo publish -p 
s3s-e2e 34 | -------------------------------------------------------------------------------- /crates/s3s/src/http/response.rs: -------------------------------------------------------------------------------- 1 | use crate::HttpResponse; 2 | 3 | use super::Body; 4 | 5 | use hyper::HeaderMap; 6 | use hyper::StatusCode; 7 | use hyper::http::Extensions; 8 | 9 | #[derive(Default)] 10 | pub struct Response { 11 | pub status: StatusCode, 12 | pub headers: HeaderMap, 13 | pub body: Body, 14 | pub extensions: Extensions, 15 | } 16 | 17 | impl From for HttpResponse { 18 | fn from(res: Response) -> Self { 19 | let mut ans = HttpResponse::default(); 20 | *ans.status_mut() = res.status; 21 | *ans.headers_mut() = res.headers; 22 | *ans.body_mut() = res.body; 23 | *ans.extensions_mut() = res.extensions; 24 | ans 25 | } 26 | } 27 | 28 | impl Response { 29 | #[must_use] 30 | pub fn with_status(status: StatusCode) -> Self { 31 | Self { 32 | status, 33 | ..Default::default() 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/post_signature_v4.rs: -------------------------------------------------------------------------------- 1 | use crate::http::Multipart; 2 | 3 | pub struct PostSignatureV4<'a> { 4 | pub policy: &'a str, 5 | pub x_amz_algorithm: &'a str, 6 | pub x_amz_credential: &'a str, 7 | pub x_amz_date: &'a str, 8 | pub x_amz_signature: &'a str, 9 | } 10 | 11 | impl<'a> PostSignatureV4<'a> { 12 | pub fn extract(m: &'a Multipart) -> Option { 13 | let policy = m.find_field_value("policy")?; 14 | let x_amz_algorithm = m.find_field_value("x-amz-algorithm")?; 15 | let x_amz_credential = m.find_field_value("x-amz-credential")?; 16 | let x_amz_date = m.find_field_value("x-amz-date")?; 17 | let x_amz_signature = m.find_field_value("x-amz-signature")?; 18 | Some(Self { 19 | policy, 20 | x_amz_algorithm, 21 | x_amz_credential, 22 | x_amz_date, 23 | x_amz_signature, 24 | }) 25 | } 26 | } 27 | 
-------------------------------------------------------------------------------- /crates/s3s-aws/src/event_stream.rs: -------------------------------------------------------------------------------- 1 | use sync_wrapper::{SyncFuture, SyncWrapper}; 2 | use transform_stream::AsyncStream; 3 | 4 | type AwsSelectObjectContentEventStream = aws_sdk_s3::primitives::event_stream::EventReceiver< 5 | aws_sdk_s3::types::SelectObjectContentEventStream, 6 | aws_sdk_s3::types::error::SelectObjectContentEventStreamError, 7 | >; 8 | 9 | pub fn from_aws(src: AwsSelectObjectContentEventStream) -> s3s::dto::SelectObjectContentEventStream { 10 | let mut src = SyncWrapper::new(src); 11 | s3s::dto::SelectObjectContentEventStream::new(AsyncStream::new(|mut y| async move { 12 | loop { 13 | let recv = SyncFuture::new(src.get_mut().recv()); 14 | let ans = recv.await; 15 | match ans { 16 | Ok(Some(ev)) => y.yield_(crate::conv::try_from_aws(ev)).await, 17 | Ok(None) => break, 18 | Err(err) => y.yield_err(wrap_sdk_error!(err)).await, 19 | } 20 | } 21 | })) 22 | } 23 | -------------------------------------------------------------------------------- /crates/s3s/src/time.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | pub fn now_utc() -> impl fmt::Debug { 4 | #[cfg(not(target_arch = "wasm32"))] 5 | { 6 | time::OffsetDateTime::now_utc() 7 | } 8 | #[cfg(target_arch = "wasm32")] 9 | { 10 | () 11 | } 12 | } 13 | 14 | #[cfg(not(target_arch = "wasm32"))] 15 | pub struct Instant(std::time::Instant); 16 | 17 | #[cfg(target_arch = "wasm32")] 18 | pub struct Instant(()); 19 | 20 | impl Instant { 21 | pub fn now() -> Self { 22 | #[cfg(not(target_arch = "wasm32"))] 23 | { 24 | Self(std::time::Instant::now()) 25 | } 26 | #[cfg(target_arch = "wasm32")] 27 | { 28 | Self(()) 29 | } 30 | } 31 | 32 | pub fn elapsed(&self) -> impl fmt::Debug { 33 | #[cfg(not(target_arch = "wasm32"))] 34 | { 35 | self.0.elapsed() 36 | } 37 | #[cfg(target_arch = "wasm32")] 
38 | { 39 | () 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/s3s/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod crypto; 2 | pub mod parser; 3 | pub mod rfc2047; 4 | 5 | pub mod format; 6 | 7 | use std::future::Future; 8 | use std::pin::Pin; 9 | 10 | /// `Pin + Send + Sync + 'a>>` 11 | pub type SyncBoxFuture<'a, T> = Pin + Send + Sync + 'a>>; 12 | 13 | pub fn stable_sort_by_first(v: &mut [(T, T)]) 14 | where 15 | T: Ord, 16 | { 17 | v.sort_by(|lhs, rhs| lhs.0.cmp(&rhs.0)); 18 | } 19 | 20 | pub fn is_base64_encoded(bytes: &[u8]) -> bool { 21 | base64_simd::STANDARD.check(bytes).is_ok() 22 | } 23 | 24 | macro_rules! invalid_request { 25 | ($msg:literal) => { 26 | s3_error!(InvalidRequest, $msg) 27 | }; 28 | ($fmt:literal, $($arg:tt)+) => { 29 | s3_error!(InvalidRequest, $fmt, $($arg)+) 30 | }; 31 | ($source:expr, $($arg:tt)+) => {{ 32 | let mut err = invalid_request!($($arg)+); 33 | err.set_source(Box::new($source)); 34 | err 35 | }}; 36 | } 37 | -------------------------------------------------------------------------------- /crates/s3s-aws/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-aws" 3 | version = "0.12.0-rc.6" 4 | description = "S3 service adapter integrated with aws-sdk-s3" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [features] 17 | minio = ["s3s/minio"] 18 | 19 | [dependencies] 20 | async-trait = "0.1.89" 21 | aws-sdk-s3 = "1.107.0" 22 | aws-smithy-runtime-api = { version = "1.9.0", features = ["client", "http-1x"] } 23 | aws-smithy-types = { version = "1.3.4", features = ["http-body-1-x"] } 24 | 
aws-smithy-types-convert = { version = "0.60.10", features = ["convert-time"] } 25 | hyper = "1.8.1" 26 | s3s = { version = "0.12.0-rc.6", path = "../s3s", default-features = false } 27 | std-next = "0.1.9" 28 | sync_wrapper = "1.0.2" 29 | tracing = "0.1.43" 30 | transform-stream = "0.3.1" 31 | -------------------------------------------------------------------------------- /crates/s3s-proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-proxy" 3 | version = "0.12.0-rc.6" 4 | description = "S3 Proxy" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | aws-config = { version = "1.8.7", default-features = false, features = [ 18 | "behavior-version-latest", 19 | ] } 20 | aws-credential-types = "1.2.6" 21 | aws-sdk-s3 = "1.107.0" 22 | clap = { version = "4.5.53", features = ["derive"] } 23 | hyper-util = { version = "0.1.18", features = [ 24 | "server-auto", 25 | "server-graceful", 26 | "http1", 27 | "http2", 28 | "tokio", 29 | ] } 30 | s3s = { version = "0.12.0-rc.6", path = "../s3s" } 31 | s3s-aws = { version = "0.12.0-rc.6", path = "../s3s-aws" } 32 | tokio = { version = "1.48.0", features = ["full"] } 33 | tracing = "0.1.43" 34 | tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] } 35 | -------------------------------------------------------------------------------- /codegen/src/v1/s3_trait.rs: -------------------------------------------------------------------------------- 1 | use super::ops::Operations; 2 | use super::rust::codegen_doc; 3 | 4 | use crate::declare_codegen; 5 | 6 | use heck::ToSnakeCase; 7 | use scoped_writer::g; 8 | 9 | pub fn codegen(ops: &Operations) { 10 | declare_codegen!(); 11 | 12 
| g([ 13 | "use crate::dto::*;", 14 | "use crate::error::S3Result;", 15 | "use crate::protocol::S3Request;", 16 | "use crate::protocol::S3Response;", 17 | "", 18 | "/// An async trait which represents the S3 API", 19 | "#[async_trait::async_trait]", 20 | "pub trait S3: Send + Sync + 'static {", 21 | "", 22 | ]); 23 | 24 | for op in ops.values() { 25 | let method_name = op.name.to_snake_case(); 26 | let input = &op.input; 27 | let output = &op.output; 28 | 29 | codegen_doc(op.doc.as_deref()); 30 | g!("async fn {method_name}(&self, _req: S3Request<{input}>) -> S3Result> {{"); 31 | g!("Err(s3_error!(NotImplemented, \"{} is not implemented yet\"))", op.name); 32 | g!("}}"); 33 | g!(); 34 | } 35 | 36 | g!("}}"); 37 | g!(); 38 | } 39 | -------------------------------------------------------------------------------- /crates/s3s-e2e/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-e2e" 3 | version = "0.12.0-rc.6" 4 | description = "s3s test suite" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [dependencies] 17 | s3s-test = { version = "0.12.0-rc.6", path = "../s3s-test" } 18 | tracing = "0.1.43" 19 | aws-credential-types = "1.2.6" 20 | aws-sdk-s3 = "1.107.0" 21 | aws-sdk-sts = { version = "1.87.0", features = ["behavior-version-latest"] } 22 | http-body-util = "0.1.3" 23 | futures = { version = "0.3.31", default-features = false } 24 | bytes = "1.11.0" 25 | http-body = "1.0.1" 26 | md-5.workspace = true 27 | base64-simd = "0.8.0" 28 | reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls"] } 29 | 30 | [dependencies.aws-config] 31 | version = "1.8.7" 32 | default-features = false 33 | features = ["behavior-version-latest"] 34 
| 35 | [build-dependencies] 36 | s3s-test = { version = "0.12.0-rc.6", path = "../s3s-test" } 37 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | pages: write 12 | id-token: write 13 | 14 | concurrency: 15 | group: "pages" 16 | cancel-in-progress: false 17 | 18 | jobs: 19 | build: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | - uses: dtolnay/rust-toolchain@nightly 24 | - uses: Swatinem/rust-cache@v2 25 | - name: Build documentation 26 | run: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --no-deps --all-features 27 | - name: Add index redirect 28 | run: echo '' > target/doc/index.html 29 | - name: Add .nojekyll file 30 | run: touch target/doc/.nojekyll 31 | - name: Upload artifact 32 | uses: actions/upload-pages-artifact@v3 33 | with: 34 | path: ./target/doc 35 | 36 | deploy: 37 | environment: 38 | name: github-pages 39 | url: ${{ steps.deployment.outputs.page_url }} 40 | runs-on: ubuntu-latest 41 | needs: build 42 | steps: 43 | - name: Deploy to GitHub Pages 44 | id: deployment 45 | uses: actions/deploy-pages@v4 46 | -------------------------------------------------------------------------------- /crates/s3s/src/http/etag.rs: -------------------------------------------------------------------------------- 1 | use super::de::TryFromHeaderValue; 2 | use super::ser::TryIntoHeaderValue; 3 | 4 | use crate::dto::ETag; 5 | use crate::dto::ETagCondition; 6 | use crate::dto::ParseETagConditionError; 7 | use crate::dto::ParseETagError; 8 | 9 | use http::HeaderValue; 10 | use http::header::InvalidHeaderValue; 11 | 12 | impl TryFromHeaderValue for ETag { 13 | type Error = ParseETagError; 14 | 15 | fn try_from_header_value(value: &HeaderValue) -> Result { 16 | 
Self::parse_http_header(value.as_bytes()) 17 | } 18 | } 19 | 20 | impl TryIntoHeaderValue for ETag { 21 | type Error = InvalidHeaderValue; 22 | 23 | fn try_into_header_value(self) -> Result { 24 | self.to_http_header() 25 | } 26 | } 27 | 28 | impl TryFromHeaderValue for ETagCondition { 29 | type Error = ParseETagConditionError; 30 | 31 | fn try_from_header_value(value: &HeaderValue) -> Result { 32 | Self::parse_http_header(value.as_bytes()) 33 | } 34 | } 35 | 36 | impl TryIntoHeaderValue for ETagCondition { 37 | type Error = InvalidHeaderValue; 38 | 39 | fn try_into_header_value(self) -> Result { 40 | self.to_http_header() 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/conv/mod.rs: -------------------------------------------------------------------------------- 1 | mod builtin; 2 | 3 | #[cfg(feature = "minio")] 4 | mod generated_minio; 5 | 6 | #[cfg(not(feature = "minio"))] 7 | mod generated; 8 | 9 | use s3s::s3_error; 10 | use s3s::{S3Error, S3Result}; 11 | 12 | pub trait AwsConversion: Sized { 13 | type Target; 14 | type Error; 15 | 16 | fn try_from_aws(x: Self::Target) -> Result; 17 | 18 | fn try_into_aws(x: Self) -> Result; 19 | } 20 | 21 | pub fn try_from_aws(x: T::Target) -> Result { 22 | T::try_from_aws(x) 23 | } 24 | 25 | pub fn try_into_aws(x: T) -> S3Result { 26 | T::try_into_aws(x) 27 | } 28 | 29 | fn unwrap_from_aws(opt: Option, field_name: &str) -> S3Result 30 | where 31 | S3Error: From, 32 | { 33 | match opt { 34 | Some(x) => T::try_from_aws(x).map_err(Into::into), 35 | None => Err(s3_error!(InternalError, "missing field: {}", field_name)), 36 | } 37 | } 38 | 39 | #[must_use] 40 | pub fn string_from_integer(x: i32) -> String { 41 | x.to_string() 42 | } 43 | 44 | pub fn integer_from_string(x: &str) -> S3Result { 45 | x.parse::().map_err(S3Error::internal_error) 46 | } 47 | -------------------------------------------------------------------------------- /crates/s3s/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, feature(doc_cfg))] 2 | #![allow( 3 | clippy::bool_assert_comparison, // I don't like `assert!(!expression)`. It's very misleading. 4 | clippy::multiple_crate_versions, // Sometimes not fixable 5 | clippy::module_name_repetitions, 6 | clippy::single_match_else, 7 | clippy::wildcard_imports, 8 | clippy::let_underscore_untyped, 9 | clippy::inline_always, 10 | clippy::needless_continue, 11 | )] 12 | 13 | #[macro_use] 14 | mod utils; 15 | 16 | #[macro_use] 17 | mod error; 18 | 19 | mod http; 20 | mod ops; 21 | mod protocol; 22 | mod s3_op; 23 | mod s3_trait; 24 | mod sig_v2; 25 | mod sig_v4; 26 | mod time; 27 | 28 | pub mod access; 29 | pub mod auth; 30 | pub mod checksum; 31 | pub mod crypto; 32 | pub mod dto; 33 | pub mod header; 34 | pub mod host; 35 | pub mod path; 36 | pub mod route; 37 | pub mod service; 38 | pub mod stream; 39 | pub mod validation; 40 | pub mod xml; 41 | 42 | pub use self::error::*; 43 | pub use self::http::Body; 44 | pub use self::s3_op::S3Operation; 45 | pub use self::s3_trait::S3; 46 | 47 | pub use self::protocol::HttpError; 48 | pub use self::protocol::HttpRequest; 49 | pub use self::protocol::HttpResponse; 50 | pub use self::protocol::S3Request; 51 | pub use self::protocol::S3Response; 52 | pub use self::protocol::TrailingHeaders; 53 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/authorization_v2.rs: -------------------------------------------------------------------------------- 1 | //! Authorization V2 2 | //! 3 | //! 4 | //! 
5 | 6 | pub struct AuthorizationV2<'a> { 7 | pub access_key: &'a str, 8 | pub signature: &'a str, 9 | } 10 | 11 | /// [`AuthorizationV2`] 12 | #[derive(Debug, thiserror::Error)] 13 | #[error("ParseAuthorizationError")] 14 | pub struct ParseAuthorizationV2Error { 15 | /// priv place holder 16 | _priv: (), 17 | } 18 | 19 | impl<'a> AuthorizationV2<'a> { 20 | pub fn parse(mut input: &'a str) -> Result { 21 | let err = || ParseAuthorizationV2Error { _priv: () }; 22 | 23 | input = input.strip_prefix("AWS ").ok_or_else(err)?; 24 | 25 | let (access_key, signature) = input.split_once(':').ok_or_else(err)?; 26 | 27 | Ok(Self { access_key, signature }) 28 | } 29 | } 30 | 31 | #[cfg(test)] 32 | mod tests { 33 | use super::*; 34 | 35 | #[test] 36 | fn example() { 37 | let input = "AWS AKIAIOSFODNN7EXAMPLE:qgk2+6Sv9/oM7G3qLEjTH1a1l1g="; 38 | let ans = AuthorizationV2::parse(input).unwrap(); 39 | assert_eq!(ans.access_key, "AKIAIOSFODNN7EXAMPLE"); 40 | assert_eq!(ans.signature, "qgk2+6Sv9/oM7G3qLEjTH1a1l1g="); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/s3s-test/src/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::process::Command; 3 | 4 | pub fn collect_info() { 5 | if let Some(val) = git_commit() { 6 | println!("cargo:rustc-env=S3S_GIT_COMMIT={val}"); 7 | } 8 | if let Some(branch) = git_branch() { 9 | println!("cargo:rustc-env=S3S_GIT_BRANCH={branch}"); 10 | } 11 | if let Some(tag) = git_tag() { 12 | println!("cargo:rustc-env=S3S_GIT_TAG={tag}"); 13 | } 14 | if let Ok(val) = env::var("PROFILE") { 15 | println!("cargo:rustc-env=S3S_PROFILE={val}"); 16 | } 17 | } 18 | 19 | #[must_use] 20 | fn git(args: &[&str]) -> Option { 21 | let output = Command::new("git").args(args).output().ok()?; 22 | if output.status.success() { 23 | Some(String::from_utf8_lossy(&output.stdout).trim().to_string()) 24 | } else { 25 | None 26 | } 27 | } 28 | 29 | 
#[must_use] 30 | pub fn git_commit() -> Option { 31 | git(&["rev-parse", "HEAD"]) 32 | } 33 | 34 | #[must_use] 35 | pub fn git_branch() -> Option { 36 | git(&["rev-parse", "--abbrev-ref", "HEAD"]) 37 | } 38 | 39 | #[must_use] 40 | pub fn git_tag() -> Option { 41 | git(&["describe", "--tags", "--exact-match"]) 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::*; 47 | 48 | #[test] 49 | fn test_collect_info() { 50 | collect_info(); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v2/presigned_url_v2.rs: -------------------------------------------------------------------------------- 1 | use crate::http::OrderedQs; 2 | 3 | use std::borrow::Cow; 4 | 5 | use time::OffsetDateTime; 6 | 7 | pub struct PresignedUrlV2<'a> { 8 | pub access_key: &'a str, 9 | pub expires_time: OffsetDateTime, 10 | pub signature: Cow<'a, str>, 11 | } 12 | 13 | /// [`PresignedUrlV2`] 14 | #[derive(Debug, thiserror::Error)] 15 | #[error("ParsePresignedUrlError")] 16 | pub struct ParsePresignedUrlError { 17 | /// priv place holder 18 | _priv: (), 19 | } 20 | 21 | impl<'a> PresignedUrlV2<'a> { 22 | pub fn parse(qs: &'a OrderedQs) -> Result { 23 | let err = || ParsePresignedUrlError { _priv: () }; 24 | 25 | let access_key = qs.get_unique("AWSAccessKeyId").ok_or_else(err)?; 26 | let expires_str = qs.get_unique("Expires").ok_or_else(err)?; 27 | let signature = qs.get_unique("Signature").ok_or_else(err)?; 28 | 29 | let expires_time = parse_unix_timestamp(expires_str).ok_or_else(err)?; 30 | let signature = urlencoding::decode(signature).map_err(|_| err())?; 31 | 32 | Ok(Self { 33 | access_key, 34 | expires_time, 35 | signature, 36 | }) 37 | } 38 | } 39 | 40 | fn parse_unix_timestamp(s: &str) -> Option { 41 | let ts = s.parse::().ok().filter(|&x| x >= 0)?; 42 | OffsetDateTime::from_unix_timestamp(ts).ok() 43 | } 44 | -------------------------------------------------------------------------------- 
/crates/s3s/src/http/request.rs: -------------------------------------------------------------------------------- 1 | use super::Body; 2 | use super::Multipart; 3 | use super::OrderedQs; 4 | 5 | use crate::HttpRequest; 6 | use crate::auth::Credentials; 7 | use crate::path::S3Path; 8 | use crate::protocol::TrailingHeaders; 9 | use crate::stream::VecByteStream; 10 | 11 | use hyper::HeaderMap; 12 | use hyper::Method; 13 | use hyper::Uri; 14 | use hyper::http::Extensions; 15 | 16 | pub struct Request { 17 | pub version: http::Version, 18 | pub method: Method, 19 | pub uri: Uri, 20 | pub headers: HeaderMap, 21 | pub extensions: Extensions, 22 | pub body: Body, 23 | pub(crate) s3ext: S3Extensions, 24 | } 25 | 26 | #[derive(Default)] 27 | pub(crate) struct S3Extensions { 28 | pub s3_path: Option, 29 | pub qs: Option, 30 | 31 | pub multipart: Option, 32 | pub vec_stream: Option, 33 | 34 | pub credentials: Option, 35 | pub region: Option, 36 | pub service: Option, 37 | pub trailing_headers: Option, 38 | } 39 | 40 | impl From for Request { 41 | fn from(req: HttpRequest) -> Self { 42 | let (parts, body) = req.into_parts(); 43 | Self { 44 | version: parts.version, 45 | method: parts.method, 46 | uri: parts.uri, 47 | headers: parts.headers, 48 | extensions: parts.extensions, 49 | body, 50 | s3ext: S3Extensions::default(), 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /crates/s3s/src/auth/simple_auth.rs: -------------------------------------------------------------------------------- 1 | use super::S3Auth; 2 | 3 | use crate::auth::SecretKey; 4 | use crate::error::S3Result; 5 | 6 | use std::collections::HashMap; 7 | 8 | /// A simple authentication provider 9 | #[derive(Debug, Default)] 10 | pub struct SimpleAuth { 11 | /// key map 12 | map: HashMap, 13 | } 14 | 15 | impl SimpleAuth { 16 | /// Constructs a new `SimpleAuth` 17 | #[must_use] 18 | pub fn new() -> Self { 19 | Self { map: HashMap::new() } 20 | } 21 | 22 | 
#[must_use] 23 | pub fn from_single(access_key: impl Into, secret_key: impl Into) -> Self { 24 | let access_key = access_key.into(); 25 | let secret_key = secret_key.into(); 26 | let map = [(access_key, secret_key)].into_iter().collect(); 27 | Self { map } 28 | } 29 | 30 | /// register a pair of keys 31 | pub fn register(&mut self, access_key: String, secret_key: SecretKey) -> Option { 32 | self.map.insert(access_key, secret_key) 33 | } 34 | 35 | /// lookup a secret key 36 | #[must_use] 37 | pub fn lookup(&self, access_key: &str) -> Option<&SecretKey> { 38 | self.map.get(access_key) 39 | } 40 | } 41 | 42 | #[async_trait::async_trait] 43 | impl S3Auth for SimpleAuth { 44 | async fn get_secret_key(&self, access_key: &str) -> S3Result { 45 | match self.lookup(access_key) { 46 | None => Err(s3_error!(NotSignedUp, "Your account is not signed up")), 47 | Some(s) => Ok(s.clone()), 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /crates/s3s-test/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::fmt; 3 | 4 | pub type Result = std::result::Result; 5 | 6 | #[derive(Debug)] 7 | pub struct Failed { 8 | source: Option>, 9 | } 10 | 11 | impl From for Failed 12 | where 13 | E: std::error::Error + Send + Sync + 'static, 14 | { 15 | fn from(source: E) -> Self { 16 | if env::var("RUST_BACKTRACE").is_ok() { 17 | eprintln!("Failed: {source:#?}\n"); 18 | eprintln!("Backtrace:"); 19 | backtrace::trace(|frame| { 20 | backtrace::resolve_frame(frame, |symbol| { 21 | if let (Some(name), Some(filename), Some(colno)) = (symbol.name(), symbol.filename(), symbol.colno()) { 22 | if filename.components().any(|c| c.as_os_str().to_str() == Some("s3s")) { 23 | eprintln!("{name}\n at {}:{colno}\n", filename.display()); 24 | } 25 | } 26 | }); 27 | true 28 | }); 29 | } 30 | Self { 31 | source: Some(Box::new(source)), 32 | } 33 | } 34 | } 35 | 36 | impl fmt::Display for 
Failed { 37 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 38 | if let Some(source) = &self.source { 39 | write!(f, "Failed: {source}") 40 | } else { 41 | write!(f, "Failed") 42 | } 43 | } 44 | } 45 | 46 | impl Failed { 47 | pub fn from_string(s: impl Into) -> Self { 48 | Self { 49 | source: Some(s.into().into()), 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | 3 | use s3s::StdError; 4 | 5 | use tokio::io::AsyncWrite; 6 | use tokio::io::AsyncWriteExt; 7 | 8 | use bytes::Bytes; 9 | use futures::pin_mut; 10 | use futures::{Stream, StreamExt}; 11 | use transform_stream::AsyncTryStream; 12 | 13 | pub async fn copy_bytes(mut stream: S, writer: &mut W) -> Result 14 | where 15 | S: Stream> + Unpin, 16 | W: AsyncWrite + Unpin, 17 | { 18 | let mut nwritten: u64 = 0; 19 | while let Some(result) = stream.next().await { 20 | let bytes = match result { 21 | Ok(x) => x, 22 | Err(e) => return Err(Error::new(e)), 23 | }; 24 | writer.write_all(&bytes).await?; 25 | nwritten += bytes.len() as u64; 26 | } 27 | writer.flush().await?; 28 | Ok(nwritten) 29 | } 30 | 31 | pub fn bytes_stream(stream: S, content_length: usize) -> impl Stream> + Send + 'static 32 | where 33 | S: Stream> + Send + 'static, 34 | E: Send + 'static, 35 | { 36 | AsyncTryStream::::new(|mut y| async move { 37 | pin_mut!(stream); 38 | let mut remaining: usize = content_length; 39 | while let Some(result) = stream.next().await { 40 | let mut bytes = result?; 41 | if bytes.len() > remaining { 42 | bytes.truncate(remaining); 43 | } 44 | remaining -= bytes.len(); 45 | y.yield_ok(bytes).await; 46 | } 47 | Ok(()) 48 | }) 49 | } 50 | 51 | pub fn hex(input: impl AsRef<[u8]>) -> String { 52 | hex_simd::encode_to_string(input.as_ref(), hex_simd::AsciiCase::Lower) 53 | } 54 | 
-------------------------------------------------------------------------------- /crates/s3s/src/route.rs: -------------------------------------------------------------------------------- 1 | use crate::Body; 2 | use crate::S3Request; 3 | use crate::S3Response; 4 | use crate::S3Result; 5 | 6 | use hyper::HeaderMap; 7 | use hyper::Method; 8 | use hyper::Uri; 9 | use hyper::http::Extensions; 10 | 11 | #[async_trait::async_trait] 12 | pub trait S3Route: Send + Sync + 'static { 13 | fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, extensions: &mut Extensions) -> bool; 14 | 15 | async fn check_access(&self, req: &mut S3Request) -> S3Result<()> { 16 | match req.credentials { 17 | Some(_) => Ok(()), 18 | None => Err(s3_error!(AccessDenied, "Signature is required")), 19 | } 20 | } 21 | 22 | async fn call(&self, req: S3Request) -> S3Result>; 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | use super::*; 28 | 29 | use crate::header; 30 | 31 | #[allow(dead_code)] 32 | pub struct AssumeRole {} 33 | 34 | #[async_trait::async_trait] 35 | impl S3Route for AssumeRole { 36 | fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool { 37 | if method == Method::POST && uri.path() == "/" { 38 | if let Some(val) = headers.get(header::CONTENT_TYPE) { 39 | if val.as_bytes() == b"application/x-www-form-urlencoded" { 40 | return true; 41 | } 42 | } 43 | } 44 | false 45 | } 46 | 47 | async fn call(&self, _: S3Request) -> S3Result> { 48 | tracing::debug!("call AssumeRole"); 49 | return Err(s3_error!(NotImplemented)); 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /crates/s3s-test/src/traits.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Result; 2 | 3 | use std::future::Future; 4 | use std::sync::Arc; 5 | 6 | pub trait TestSuite: Sized + Send + Sync + 'static { 7 | fn setup() -> impl Future> + Send + 'static; 8 | 9 
| fn teardown(self) -> impl Future + Send + 'static { 10 | async { Ok(()) } 11 | } 12 | } 13 | 14 | pub trait TestFixture: Sized + Send + Sync + 'static { 15 | fn setup(suite: Arc) -> impl Future> + Send + 'static; 16 | 17 | fn teardown(self) -> impl Future + Send + 'static { 18 | async { Ok(()) } 19 | } 20 | } 21 | 22 | pub trait TestCase: Sized + Send + Sync + 'static 23 | where 24 | Self: Sized + Send + Sync + 'static, 25 | X: TestFixture, 26 | S: TestSuite, 27 | { 28 | fn run(&self, fixture: Arc) -> impl Future + Send + 'static; 29 | } 30 | 31 | trait AsyncFn<'a, A> { 32 | type Output; 33 | type Future: Future + Send + 'a; 34 | 35 | fn call(&self, args: A) -> Self::Future; 36 | } 37 | 38 | impl<'a, F, U, O, A> AsyncFn<'a, (A,)> for F 39 | where 40 | F: Fn(A) -> U, 41 | U: Future + Send + 'a, 42 | { 43 | type Output = O; 44 | 45 | type Future = U; 46 | 47 | fn call(&self, args: (A,)) -> Self::Future { 48 | (self)(args.0) 49 | } 50 | } 51 | 52 | impl TestCase for C 53 | where 54 | C: for<'a> AsyncFn<'a, (Arc,), Output = Result>, 55 | C: Send + Sync + 'static, 56 | X: TestFixture, 57 | S: TestSuite, 58 | { 59 | fn run(&self, fixture: Arc) -> impl Future + Send + 'static { 60 | AsyncFn::call(self, (fixture,)) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | dev: 2 | just fetch 3 | just fmt 4 | just codegen 5 | just lint 6 | just test 7 | 8 | fetch: 9 | uv sync 10 | cargo fetch 11 | 12 | fmt: 13 | uvx ruff format 14 | cargo fmt --all 15 | 16 | lint: 17 | uvx ruff check 18 | cargo clippy --workspace --all-features --all-targets 19 | 20 | test: 21 | cargo test --workspace --all-features --all-targets 22 | 23 | doc: 24 | RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --open --no-deps --all-features 25 | 26 | crawl: 27 | uv run data/crawl.py update 28 | 29 | codegen: 30 | cargo run -p s3s-codegen 31 | cargo fmt --all 32 | 
cargo check 33 | 34 | install name *ARGS: 35 | uv run ./scripts/install.py {{name}} {{ARGS}} 36 | 37 | # ------------------------------------------------ 38 | 39 | sync-version: 40 | cargo set-version -p s3s 0.12.0-rc.6 41 | cargo set-version -p s3s-aws 0.12.0-rc.6 42 | cargo set-version -p s3s-model 0.12.0-rc.6 43 | cargo set-version -p s3s-policy 0.12.0-rc.6 44 | cargo set-version -p s3s-test 0.12.0-rc.6 45 | cargo set-version -p s3s-proxy 0.12.0-rc.6 46 | cargo set-version -p s3s-fs 0.12.0-rc.6 47 | cargo set-version -p s3s-e2e 0.12.0-rc.6 48 | 49 | # ------------------------------------------------ 50 | 51 | assert_unchanged: 52 | #!/bin/bash -ex 53 | [[ -z "$(git status -s)" ]] # https://stackoverflow.com/a/9393642 54 | 55 | ci-rust: 56 | cargo fmt --all --check 57 | cargo clippy --workspace --all-features --all-targets -- -D warnings 58 | just test 59 | just codegen 60 | just assert_unchanged 61 | 62 | ci-python: 63 | uvx ruff format --check 64 | uvx ruff check 65 | just crawl 66 | just assert_unchanged 67 | -------------------------------------------------------------------------------- /crates/s3s/src/access/context.rs: -------------------------------------------------------------------------------- 1 | use crate::S3Operation; 2 | use crate::auth::Credentials; 3 | use crate::path::S3Path; 4 | 5 | use hyper::HeaderMap; 6 | use hyper::Method; 7 | use hyper::Uri; 8 | use hyper::http::Extensions; 9 | 10 | pub struct S3AccessContext<'a> { 11 | pub(crate) credentials: Option<&'a Credentials>, 12 | pub(crate) s3_path: &'a S3Path, 13 | pub(crate) s3_op: &'a S3Operation, 14 | 15 | pub(crate) method: &'a Method, 16 | pub(crate) uri: &'a Uri, 17 | pub(crate) headers: &'a HeaderMap, 18 | 19 | pub(crate) extensions: &'a mut Extensions, 20 | } 21 | 22 | impl S3AccessContext<'_> { 23 | /// Returns the credentials of current request. 24 | /// 25 | /// `None` means anonymous request. 
26 | #[must_use] 27 | pub fn credentials(&self) -> Option<&Credentials> { 28 | self.credentials 29 | } 30 | 31 | /// Returns the S3 path of current request. 32 | /// 33 | /// An S3 path can be root, bucket, or object. 34 | #[must_use] 35 | pub fn s3_path(&self) -> &S3Path { 36 | self.s3_path 37 | } 38 | 39 | /// Returns the S3 operation of current request. 40 | #[must_use] 41 | pub fn s3_op(&self) -> &S3Operation { 42 | self.s3_op 43 | } 44 | 45 | #[must_use] 46 | pub fn method(&self) -> &Method { 47 | self.method 48 | } 49 | 50 | #[must_use] 51 | pub fn uri(&self) -> &Uri { 52 | self.uri 53 | } 54 | 55 | #[must_use] 56 | pub fn headers(&self) -> &HeaderMap { 57 | self.headers 58 | } 59 | 60 | /// Returns the extensions of current request. 61 | /// 62 | /// It is used to pass custom data between middlewares. 63 | #[must_use] 64 | pub fn extensions_mut(&mut self) -> &mut Extensions { 65 | self.extensions 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/error.rs: -------------------------------------------------------------------------------- 1 | use s3s::S3Error; 2 | use s3s::S3ErrorCode; 3 | use s3s::StdError; 4 | 5 | use std::panic::Location; 6 | 7 | use tracing::error; 8 | 9 | #[derive(Debug)] 10 | pub struct Error { 11 | source: StdError, 12 | } 13 | 14 | pub type Result = std::result::Result; 15 | 16 | impl Error { 17 | #[must_use] 18 | #[track_caller] 19 | pub fn new(source: StdError) -> Self { 20 | log(&*source); 21 | Self { source } 22 | } 23 | 24 | #[must_use] 25 | #[track_caller] 26 | pub fn from_string(s: impl Into) -> Self { 27 | Self::new(s.into().into()) 28 | } 29 | } 30 | 31 | impl From for Error 32 | where 33 | E: std::error::Error + Send + Sync + 'static, 34 | { 35 | #[track_caller] 36 | fn from(source: E) -> Self { 37 | Self::new(Box::new(source)) 38 | } 39 | } 40 | 41 | impl From for S3Error { 42 | fn from(e: Error) -> Self { 43 | S3Error::with_source(S3ErrorCode::InternalError, 
e.source) 44 | } 45 | } 46 | 47 | #[inline] 48 | #[track_caller] 49 | pub(crate) fn log(source: &dyn std::error::Error) { 50 | if cfg!(feature = "binary") { 51 | let location = Location::caller(); 52 | let span_trace = tracing_error::SpanTrace::capture(); 53 | 54 | error!( 55 | target: "s3s_fs_internal_error", 56 | %location, 57 | error=%source, 58 | "span trace:\n{span_trace}" 59 | ); 60 | } 61 | } 62 | 63 | macro_rules! try_ { 64 | ($result:expr) => { 65 | match $result { 66 | Ok(val) => val, 67 | Err(err) => { 68 | $crate::error::log(&err); 69 | return Err(::s3s::S3Error::internal_error(err)); 70 | } 71 | } 72 | }; 73 | } 74 | -------------------------------------------------------------------------------- /crates/s3s/src/ops/multipart.rs: -------------------------------------------------------------------------------- 1 | use super::CompleteMultipartUpload; 2 | 3 | use crate::dto::CompleteMultipartUploadOutput; 4 | use crate::error::S3Result; 5 | use crate::header::*; 6 | use crate::http; 7 | 8 | use sync_wrapper::SyncFuture; 9 | 10 | impl CompleteMultipartUpload { 11 | pub fn serialize_http(x: CompleteMultipartUploadOutput) -> S3Result { 12 | let mut res = http::Response::with_status(http::StatusCode::OK); 13 | 14 | if let Some(future) = x.future { 15 | let future = SyncFuture::new(async move { 16 | let result = future.await; 17 | match result { 18 | Ok(val) => { 19 | let mut res = http::Response::default(); 20 | http::set_xml_body_no_decl(&mut res, &val)?; 21 | Ok(res) 22 | } 23 | Err(err) => super::serialize_error(err, false).map_err(Into::into), 24 | } 25 | }); 26 | let duration = std::time::Duration::from_millis(100); 27 | http::set_keep_alive_xml_body(&mut res, future, duration)?; 28 | } else { 29 | http::set_xml_body(&mut res, &x)?; 30 | } 31 | 32 | http::add_opt_header(&mut res, X_AMZ_SERVER_SIDE_ENCRYPTION_BUCKET_KEY_ENABLED, x.bucket_key_enabled)?; 33 | http::add_opt_header(&mut res, X_AMZ_EXPIRATION, x.expiration)?; 34 | http::add_opt_header(&mut 
res, X_AMZ_REQUEST_CHARGED, x.request_charged)?; 35 | http::add_opt_header(&mut res, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID, x.ssekms_key_id)?; 36 | http::add_opt_header(&mut res, X_AMZ_SERVER_SIDE_ENCRYPTION, x.server_side_encryption)?; 37 | http::add_opt_header(&mut res, X_AMZ_VERSION_ID, x.version_id)?; 38 | Ok(res) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/mod.rs: -------------------------------------------------------------------------------- 1 | mod build_error; 2 | 3 | cfg_if::cfg_if! { 4 | if #[cfg(feature = "minio")] { 5 | mod generated_minio; 6 | use self::generated_minio as generated; 7 | } else { 8 | mod generated; 9 | } 10 | } 11 | 12 | pub use self::generated::*; 13 | 14 | mod streaming_blob; 15 | pub use self::streaming_blob::*; 16 | 17 | mod timestamp; 18 | pub use self::timestamp::*; 19 | 20 | mod copy_source; 21 | pub use self::copy_source::*; 22 | 23 | mod range; 24 | pub use self::range::Range; 25 | 26 | mod content_type; 27 | pub use self::content_type::*; 28 | 29 | mod event; 30 | pub use self::event::Event; 31 | 32 | mod event_stream; 33 | pub use self::event_stream::*; 34 | 35 | mod etag; 36 | pub use self::etag::*; 37 | 38 | mod etag_condition; 39 | pub use self::etag_condition::*; 40 | 41 | pub type List = Vec; 42 | pub type Map = std::collections::HashMap; 43 | 44 | pub type Body = hyper::body::Bytes; 45 | 46 | pub type Unit = (); 47 | 48 | impl From for ListObjectsV2Input { 49 | fn from(v1: ListObjectsInput) -> Self { 50 | let ListObjectsInput { 51 | bucket, 52 | delimiter, 53 | encoding_type, 54 | expected_bucket_owner, 55 | marker, 56 | max_keys, 57 | prefix, 58 | request_payer, 59 | optional_object_attributes, 60 | } = v1; 61 | 62 | Self { 63 | bucket, 64 | continuation_token: None, 65 | delimiter, 66 | encoding_type, 67 | expected_bucket_owner, 68 | fetch_owner: None, 69 | max_keys, 70 | prefix, 71 | request_payer, 72 | start_after: marker, 73 | 
optional_object_attributes, 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /crates/s3s-aws/src/error.rs: -------------------------------------------------------------------------------- 1 | macro_rules! wrap_sdk_error { 2 | ($e:expr) => {{ 3 | use aws_sdk_s3::error::SdkError; 4 | use aws_sdk_s3::operation::RequestId; 5 | use s3s::{S3Error, S3ErrorCode}; 6 | 7 | let mut err = S3Error::new(S3ErrorCode::InternalError); 8 | let source = $e; 9 | tracing::debug!("sdk error: {:?}", source); 10 | 11 | if let SdkError::ServiceError(ref e) = source { 12 | let meta = e.err().meta(); 13 | if let Some(val) = meta.code().and_then(|s| S3ErrorCode::from_bytes(s.as_bytes())) { 14 | err.set_code(val); 15 | } 16 | if let Some(val) = meta.message() { 17 | err.set_message(val.to_owned()); 18 | } 19 | if let Some(val) = meta.request_id() { 20 | err.set_request_id(val); 21 | } 22 | crate::error::SetStatusCode(&mut err, e).call(); 23 | } 24 | err.set_source(Box::new(source)); 25 | 26 | err 27 | }}; 28 | } 29 | 30 | // FIXME: this is actually an overloaded function 31 | 32 | pub struct SetStatusCode<'a, 'b, E, R>( 33 | pub &'a mut s3s::S3Error, 34 | pub &'b aws_smithy_runtime_api::client::result::ServiceError, 35 | ); 36 | 37 | impl SetStatusCode<'_, '_, E, aws_smithy_runtime_api::client::orchestrator::HttpResponse> { 38 | pub fn call(self) { 39 | let Self(err, e) = self; 40 | err.set_status_code(hyper_status_code_from_aws(e.raw().status())); 41 | // TODO: headers? 
42 | } 43 | } 44 | 45 | impl SetStatusCode<'_, '_, E, aws_smithy_types::event_stream::RawMessage> { 46 | #[allow(clippy::unused_self)] 47 | pub fn call(self) {} 48 | } 49 | 50 | fn hyper_status_code_from_aws(status_code: aws_smithy_runtime_api::http::StatusCode) -> hyper::StatusCode { 51 | hyper::StatusCode::from_u16(status_code.as_u16()).unwrap() 52 | } 53 | -------------------------------------------------------------------------------- /codegen/src/v1/sts.rs: -------------------------------------------------------------------------------- 1 | use super::smithy; 2 | use super::utils::o; 3 | 4 | use std::mem; 5 | use std::ops::Not; 6 | 7 | use heck::ToUpperCamelCase; 8 | 9 | pub const NAMES: &[&str] = &[ 10 | "AssumeRoleResponse", 11 | "AssumedRoleUser", 12 | "Credentials", 13 | "nonNegativeIntegerType", 14 | "sourceIdentityType", 15 | "arnType", 16 | "accessKeyIdType", 17 | "accessKeySecretType", 18 | "dateType", 19 | "tokenType", 20 | "assumedRoleIdType", 21 | ]; 22 | 23 | pub fn reduce(model: &mut smithy::Model) { 24 | for (shape_name, mut shape) in mem::take(&mut model.shapes) { 25 | let Some((_, name)) = shape_name.split_once('#') else { panic!() }; 26 | if NAMES.contains(&name).not() { 27 | continue; 28 | } 29 | 30 | let Some((_, name)) = shape_name.split_once('#') else { panic!() }; 31 | let new_name = match name { 32 | "AssumeRoleResponse" => o("AssumeRoleOutput"), 33 | _ if name.as_bytes()[0].is_ascii_lowercase() => name.to_upper_camel_case(), 34 | _ => o(name), 35 | }; 36 | 37 | if let smithy::Shape::Structure(ref mut shape) = shape { 38 | for member in shape.members.values_mut() { 39 | let Some((_, name)) = member.target.split_once('#') else { panic!() }; 40 | let new_name = match name { 41 | _ if name.as_bytes()[0].is_ascii_lowercase() => name.to_upper_camel_case(), 42 | _ => continue, 43 | }; 44 | member.target = member.target.replace(name, &new_name); 45 | } 46 | if name == "AssumeRoleResponse" { 47 | shape.traits.set("smithy.api#xmlName", 
name.into()); 48 | } 49 | } 50 | 51 | let new_shape_name = format!("com.amazonaws.s3#{new_name}"); 52 | assert!(model.shapes.insert(new_shape_name, shape).is_none()); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /crates/s3s-test/src/report.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Serialize, Deserialize)] 4 | pub struct Report { 5 | pub suite_count: CountSummary, 6 | pub duration_ns: u64, 7 | pub duration_ms: f64, 8 | 9 | pub suites: Vec, 10 | } 11 | 12 | #[derive(Serialize, Deserialize)] 13 | pub struct SuiteReport { 14 | pub name: String, 15 | 16 | pub fixture_count: CountSummary, 17 | pub duration_ns: u64, 18 | pub duration_ms: f64, 19 | 20 | pub setup: Option, 21 | pub teardown: Option, 22 | pub fixtures: Vec, 23 | } 24 | 25 | #[derive(Serialize, Deserialize)] 26 | pub struct FixtureReport { 27 | pub name: String, 28 | 29 | pub case_count: CountSummary, 30 | pub duration_ns: u64, 31 | pub duration_ms: f64, 32 | 33 | pub setup: Option, 34 | pub teardown: Option, 35 | pub cases: Vec, 36 | } 37 | 38 | #[derive(Serialize, Deserialize)] 39 | pub struct CaseReport { 40 | pub name: String, 41 | 42 | pub passed: bool, 43 | pub duration_ns: u64, 44 | pub duration_ms: f64, 45 | 46 | pub run: Option, 47 | } 48 | 49 | #[derive(Debug, Serialize, Deserialize)] 50 | pub struct FnSummary { 51 | pub result: FnResult, 52 | pub duration_ns: u64, 53 | pub duration_ms: f64, 54 | } 55 | 56 | #[derive(Debug, Serialize, Deserialize)] 57 | pub struct CountSummary { 58 | pub total: u64, 59 | pub passed: u64, 60 | pub failed: u64, 61 | } 62 | 63 | impl CountSummary { 64 | #[must_use] 65 | pub fn all_passed(&self) -> bool { 66 | self.passed == self.total 67 | } 68 | } 69 | 70 | #[derive(Debug, Serialize, Deserialize)] 71 | pub enum FnResult { 72 | Ok, 73 | Err(String), 74 | Panicked, 75 | } 76 | 77 | impl FnResult { 78 | #[must_use] 79 
| pub fn is_ok(&self) -> bool { 80 | matches!(self, FnResult::Ok) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /codegen/src/v1/headers.rs: -------------------------------------------------------------------------------- 1 | use super::smithy; 2 | 3 | use crate::declare_codegen; 4 | 5 | use std::collections::BTreeSet; 6 | 7 | use heck::ToShoutySnakeCase; 8 | use scoped_writer::g; 9 | use stdx::default::default; 10 | 11 | pub fn codegen(model: &smithy::Model) { 12 | let mut headers: BTreeSet<&str> = default(); 13 | 14 | for (name, shape) in &model.shapes { 15 | if name.ends_with("Request") || name.ends_with("Output") { 16 | let smithy::Shape::Structure(sh) = shape else { panic!() }; 17 | 18 | for member in sh.members.values() { 19 | if let Some(header) = member.traits.http_header() { 20 | headers.insert(header); 21 | } 22 | } 23 | } 24 | } 25 | 26 | { 27 | headers.insert("x-amz-content-sha256"); 28 | headers.insert("x-amz-date"); 29 | headers.insert("authorization"); 30 | headers.insert("host"); 31 | headers.insert("x-amz-decoded-content-length"); 32 | headers.insert("x-amz-request-id"); 33 | headers.insert("x-amz-id-2"); 34 | } 35 | 36 | declare_codegen!(); 37 | 38 | g([ 39 | "#![allow(clippy::declare_interior_mutable_const)]", 40 | "", 41 | "use hyper::header::HeaderName;", 42 | "", 43 | ]); 44 | 45 | for header in headers { 46 | let name = to_constant_name(header); 47 | if header.starts_with("x-amz-") || header == "Content-MD5" || header.starts_with("x-minio") { 48 | let value = header.to_ascii_lowercase(); 49 | g!("pub const {name}: HeaderName = HeaderName::from_static({value:?});",); 50 | } else { 51 | g!("pub use hyper::header::{name};"); 52 | } 53 | g!(); 54 | } 55 | } 56 | 57 | pub fn to_constant_name(header_name: &str) -> String { 58 | if header_name == "ETag" { 59 | "ETAG".into() 60 | } else { 61 | header_name.to_shouty_snake_case() 62 | } 63 | } 64 | 
-------------------------------------------------------------------------------- /crates/s3s-policy/src/tests.rs: -------------------------------------------------------------------------------- 1 | /// 2 | pub(crate) fn example1_json() -> &'static str { 3 | r#" 4 | { 5 | "Version": "2012-10-17", 6 | "Statement": [ 7 | { 8 | "Sid": "FirstStatement", 9 | "Effect": "Allow", 10 | "Action": ["iam:ChangePassword"], 11 | "Resource": "*" 12 | }, 13 | { 14 | "Sid": "SecondStatement", 15 | "Effect": "Allow", 16 | "Action": "s3:ListAllMyBuckets", 17 | "Resource": "*" 18 | }, 19 | { 20 | "Sid": "ThirdStatement", 21 | "Effect": "Allow", 22 | "Action": [ 23 | "s3:List*", 24 | "s3:Get*" 25 | ], 26 | "Resource": [ 27 | "arn:aws:s3:::confidential-data", 28 | "arn:aws:s3:::confidential-data/*" 29 | ], 30 | "Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}} 31 | } 32 | ] 33 | } 34 | "# 35 | } 36 | 37 | /// 38 | pub(crate) fn example2_json() -> &'static str { 39 | r#" 40 | { 41 | "Version": "2012-10-17", 42 | "Statement": { 43 | "Effect": "Allow", 44 | "Action": "s3:ListBucket", 45 | "Resource": "arn:aws:s3:::example_bucket" 46 | } 47 | } 48 | "# 49 | } 50 | 51 | /// 52 | pub(crate) fn example3_json() -> &'static str { 53 | r#" 54 | { 55 | "Version": "2012-10-17", 56 | "Statement": [{ 57 | "Sid": "1", 58 | "Effect": "Allow", 59 | "Principal": {"AWS": ["arn:aws:iam::account-id:root"]}, 60 | "Action": "s3:*", 61 | "Resource": [ 62 | "arn:aws:s3:::mybucket", 63 | "arn:aws:s3:::mybucket/*" 64 | ] 65 | }] 66 | } 67 | "# 68 | } 69 | -------------------------------------------------------------------------------- /crates/s3s/tests/dto.rs: -------------------------------------------------------------------------------- 1 | use s3s::dto::{ 2 | AnalyticsConfiguration, BucketLifecycleConfiguration, GetObjectInput, IntelligentTieringConfiguration, 3 | InventoryConfiguration, LambdaFunctionConfiguration, MetadataTableConfiguration, MetricsConfiguration, QueueConfiguration, 4 | 
ReplicationConfiguration, RequestPaymentConfiguration, TopicConfiguration, 5 | }; 6 | 7 | #[test] 8 | fn builder() { 9 | let input = { 10 | let mut b = GetObjectInput::builder(); 11 | b.set_bucket("hello".to_owned()); 12 | b.set_key("world".to_owned()); 13 | b.build().unwrap() 14 | }; 15 | 16 | assert_eq!(input.bucket, "hello"); 17 | assert_eq!(input.key, "world"); 18 | } 19 | 20 | #[test] 21 | fn configuration_types_have_default() { 22 | // Test the two types mentioned in the issue 23 | let _ = BucketLifecycleConfiguration::default(); 24 | let _ = ReplicationConfiguration::default(); 25 | 26 | // Test a few more Configuration types 27 | let _ = AnalyticsConfiguration::default(); 28 | let _ = IntelligentTieringConfiguration::default(); 29 | let _ = InventoryConfiguration::default(); 30 | let _ = LambdaFunctionConfiguration::default(); 31 | let _ = MetadataTableConfiguration::default(); 32 | let _ = MetricsConfiguration::default(); 33 | let _ = QueueConfiguration::default(); 34 | let _ = RequestPaymentConfiguration::default(); 35 | let _ = TopicConfiguration::default(); 36 | } 37 | 38 | #[test] 39 | fn configuration_serialization() { 40 | // Test that Configuration types can be serialized and deserialized 41 | let config = BucketLifecycleConfiguration::default(); 42 | let json = serde_json::to_string(&config).expect("should serialize"); 43 | let _: BucketLifecycleConfiguration = serde_json::from_str(&json).expect("should deserialize"); 44 | 45 | let config = ReplicationConfiguration::default(); 46 | let json = serde_json::to_string(&config).expect("should serialize"); 47 | let _: ReplicationConfiguration = serde_json::from_str(&json).expect("should deserialize"); 48 | } 49 | -------------------------------------------------------------------------------- /crates/s3s-fs/src/checksum.rs: -------------------------------------------------------------------------------- 1 | use crate::fs::InternalInfo; 2 | 3 | use stdx::default::default; 4 | 5 | pub fn 
modify_internal_info(info: &mut serde_json::Map, checksum: &s3s::dto::Checksum) { 6 | if let Some(checksum_crc32) = &checksum.checksum_crc32 { 7 | info.insert("checksum_crc32".to_owned(), serde_json::Value::String(checksum_crc32.clone())); 8 | } 9 | if let Some(checksum_crc32c) = &checksum.checksum_crc32c { 10 | info.insert("checksum_crc32c".to_owned(), serde_json::Value::String(checksum_crc32c.clone())); 11 | } 12 | if let Some(checksum_sha1) = &checksum.checksum_sha1 { 13 | info.insert("checksum_sha1".to_owned(), serde_json::Value::String(checksum_sha1.clone())); 14 | } 15 | if let Some(checksum_sha256) = &checksum.checksum_sha256 { 16 | info.insert("checksum_sha256".to_owned(), serde_json::Value::String(checksum_sha256.clone())); 17 | } 18 | 19 | if let Some(checksum_crc64nvme) = &checksum.checksum_crc64nvme { 20 | info.insert("checksum_crc64nvme".to_owned(), serde_json::Value::String(checksum_crc64nvme.clone())); 21 | } 22 | } 23 | 24 | pub fn from_internal_info(info: &InternalInfo) -> s3s::dto::Checksum { 25 | let mut ans: s3s::dto::Checksum = default(); 26 | if let Some(checksum_crc32) = info.get("checksum_crc32") { 27 | ans.checksum_crc32 = Some(checksum_crc32.as_str().unwrap().to_owned()); 28 | } 29 | if let Some(checksum_crc32c) = info.get("checksum_crc32c") { 30 | ans.checksum_crc32c = Some(checksum_crc32c.as_str().unwrap().to_owned()); 31 | } 32 | if let Some(checksum_sha1) = info.get("checksum_sha1") { 33 | ans.checksum_sha1 = Some(checksum_sha1.as_str().unwrap().to_owned()); 34 | } 35 | if let Some(checksum_sha256) = info.get("checksum_sha256") { 36 | ans.checksum_sha256 = Some(checksum_sha256.as_str().unwrap().to_owned()); 37 | } 38 | 39 | if let Some(checksum_crc64nvme) = info.get("checksum_crc64nvme") { 40 | ans.checksum_crc64nvme = Some(checksum_crc64nvme.as_str().unwrap().to_owned()); 41 | } 42 | ans 43 | } 44 | -------------------------------------------------------------------------------- /codegen/src/v1/access.rs: 
-------------------------------------------------------------------------------- 1 | use super::ops::Operations; 2 | 3 | use crate::declare_codegen; 4 | 5 | use heck::ToSnakeCase; 6 | use scoped_writer::g; 7 | 8 | pub fn codegen(ops: &Operations) { 9 | declare_codegen!(); 10 | 11 | g([ 12 | "#![allow(clippy::doc_markdown)]", 13 | "", 14 | "use super::S3AccessContext;", 15 | "", 16 | "use crate::dto::*;", 17 | "use crate::error::S3Result;", 18 | "use crate::protocol::S3Request;", 19 | "", 20 | "#[async_trait::async_trait]", 21 | "pub trait S3Access: Send + Sync + 'static {", 22 | "", 23 | ]); 24 | 25 | g([ 26 | "/// Checks whether the current request has accesses to the resources.", 27 | "///", 28 | "/// This method is called before deserializing the operation input.", 29 | "///", 30 | "/// By default, this method rejects all anonymous requests", 31 | "/// and returns [`AccessDenied`](crate::S3ErrorCode::AccessDenied) error.", 32 | "///", 33 | "/// An access control provider can override this method to implement custom logic.", 34 | "///", 35 | "/// Common fields in the context:", 36 | "/// + [`cx.credentials()`](S3AccessContext::credentials)", 37 | "/// + [`cx.s3_path()`](S3AccessContext::s3_path)", 38 | "/// + [`cx.s3_op().name()`](crate::S3Operation::name)", 39 | "/// + [`cx.extensions_mut()`](S3AccessContext::extensions_mut)", 40 | "async fn check(&self, cx: &mut S3AccessContext<'_>) -> S3Result<()> {", 41 | " super::default_check(cx)", 42 | "}", 43 | ]); 44 | 45 | for op in ops.values() { 46 | let method_name = op.name.to_snake_case(); 47 | let input = &op.input; 48 | 49 | g!("/// Checks whether the {} request has accesses to the resources.", op.name); 50 | g!("/// "); 51 | g!("/// This method returns `Ok(())` by default."); 52 | g!("async fn {method_name}(&self, _req: &mut S3Request<{input}>) -> S3Result<()> {{"); 53 | g!("Ok(())"); 54 | g!("}}"); 55 | g!(); 56 | } 57 | 58 | g!("}}"); 59 | g!(); 60 | } 61 | 
-------------------------------------------------------------------------------- /crates/s3s/src/auth/secret_key.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use serde::Deserialize; 4 | use serde::Serialize; 5 | use subtle::ConstantTimeEq; 6 | use zeroize::Zeroize; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct Credentials { 10 | pub access_key: String, 11 | pub secret_key: SecretKey, 12 | } 13 | 14 | #[derive(Clone)] 15 | pub struct SecretKey(Box); 16 | 17 | impl SecretKey { 18 | fn new(s: impl Into>) -> Self { 19 | Self(s.into()) 20 | } 21 | 22 | #[must_use] 23 | pub fn expose(&self) -> &str { 24 | &self.0 25 | } 26 | } 27 | 28 | impl Zeroize for SecretKey { 29 | fn zeroize(&mut self) { 30 | self.0.zeroize(); 31 | } 32 | } 33 | 34 | impl ConstantTimeEq for SecretKey { 35 | fn ct_eq(&self, other: &Self) -> subtle::Choice { 36 | self.0.as_bytes().ct_eq(other.0.as_bytes()) 37 | } 38 | } 39 | 40 | impl Drop for SecretKey { 41 | fn drop(&mut self) { 42 | self.zeroize(); 43 | } 44 | } 45 | 46 | impl From for SecretKey { 47 | fn from(value: String) -> Self { 48 | Self::new(value) 49 | } 50 | } 51 | 52 | impl From> for SecretKey { 53 | fn from(value: Box) -> Self { 54 | Self::new(value) 55 | } 56 | } 57 | 58 | impl From<&str> for SecretKey { 59 | fn from(value: &str) -> Self { 60 | Self::new(value) 61 | } 62 | } 63 | 64 | const PLACEHOLDER: &str = "[SENSITIVE-SECRET-KEY]"; 65 | 66 | impl fmt::Debug for SecretKey { 67 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 68 | f.debug_tuple("SecretKey").field(&PLACEHOLDER).finish() 69 | } 70 | } 71 | 72 | impl<'de> Deserialize<'de> for SecretKey { 73 | fn deserialize(deserializer: D) -> Result 74 | where 75 | D: serde::Deserializer<'de>, 76 | { 77 | ::deserialize(deserializer).map(SecretKey::from) 78 | } 79 | } 80 | 81 | impl Serialize for SecretKey { 82 | fn serialize(&self, serializer: S) -> Result 83 | where 84 | S: serde::Serializer, 85 | { 86 | 
::serialize(PLACEHOLDER, serializer) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Development Guide 2 | 3 | ## Requirements 4 | 5 | | Toolchain | Version | 6 | | :-----------------------------------: | :-----: | 7 | | [Rust](https://rustup.rs/) | ^1.86.0 | 8 | | [just](https://github.com/casey/just) | ^1.36.0 | 9 | | [uv](https://github.com/astral-sh/uv) | ^0.5.0 | 10 | | Docker | - | 11 | 12 | ## Workflow 13 | 14 | Get the source code 15 | 16 | ```bash 17 | git clone https://github.com/Nugine/s3s.git 18 | cd s3s 19 | ``` 20 | 21 | ### Run basic checks and tests 22 | 23 | ```bash 24 | just dev 25 | ``` 26 | 27 | ### Run the codegen 28 | 29 | ```bash 30 | just crawl 31 | just codegen 32 | ``` 33 | 34 | It should change nothing if you are running the latest code. 35 | 36 | ### Open documentation 37 | 38 | ```bash 39 | just doc 40 | ``` 41 | 42 | ### Play the test server 43 | 44 | Install `s3s-fs` from source 45 | 46 | ```bash 47 | cargo install --path crates/s3s-fs --features binary 48 | ``` 49 | 50 | You can also use the shortcut 51 | 52 | ```bash 53 | just install s3s-fs 54 | ``` 55 | 56 | Or install from crates.io 57 | 58 | ```bash 59 | cargo install s3s-fs --features binary 60 | ``` 61 | 62 | Run `s3s-fs` with [example configuration](./scripts/s3s-fs.sh) 63 | 64 | ```bash 65 | ./scripts/s3s-fs.sh 66 | ``` 67 | 68 | Credentials used in the example configuration: 69 | 70 | ``` 71 | Access Key: AKEXAMPLES3S 72 | Secret Key: SKEXAMPLES3S 73 | ``` 74 | 75 | Then you can explore it with your favorite S3 client! 
76 | 77 | ### Run E2E tests 78 | 79 | Install `s3s-proxy` 80 | 81 | ```bash 82 | just install s3s-proxy 83 | ``` 84 | 85 | Run the combined server and save logs 86 | 87 | ```bash 88 | ./scripts/s3s-proxy.sh | tee target/s3s-proxy.log 89 | ``` 90 | 91 | Open a new terminal, then run the test suite 92 | 93 | ```bash 94 | ./scripts/mint.sh | tee target/mint.log 95 | ``` 96 | 97 | ## Git 98 | 99 | ### Commit Message 100 | 101 | We follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification. 102 | -------------------------------------------------------------------------------- /crates/s3s/src/checksum.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto::Checksum as _; 2 | use crate::crypto::Crc32; 3 | use crate::crypto::Crc32c; 4 | use crate::crypto::Crc64Nvme; 5 | use crate::crypto::Sha1; 6 | use crate::crypto::Sha256; 7 | use crate::dto::Checksum; 8 | 9 | use stdx::default::default; 10 | 11 | #[derive(Default)] 12 | pub struct ChecksumHasher { 13 | pub crc32: Option, 14 | pub crc32c: Option, 15 | pub sha1: Option, 16 | pub sha256: Option, 17 | pub crc64nvme: Option, 18 | } 19 | 20 | impl ChecksumHasher { 21 | pub fn update(&mut self, data: &[u8]) { 22 | if let Some(crc32) = &mut self.crc32 { 23 | crc32.update(data); 24 | } 25 | if let Some(crc32c) = &mut self.crc32c { 26 | crc32c.update(data); 27 | } 28 | if let Some(sha1) = &mut self.sha1 { 29 | sha1.update(data); 30 | } 31 | if let Some(sha256) = &mut self.sha256 { 32 | sha256.update(data); 33 | } 34 | if let Some(crc64nvme) = &mut self.crc64nvme { 35 | crc64nvme.update(data); 36 | } 37 | } 38 | 39 | #[must_use] 40 | pub fn finalize(self) -> Checksum { 41 | let mut ans: Checksum = default(); 42 | if let Some(crc32) = self.crc32 { 43 | let sum = crc32.finalize(); 44 | ans.checksum_crc32 = Some(Self::base64(&sum)); 45 | } 46 | if let Some(crc32c) = self.crc32c { 47 | let sum = crc32c.finalize(); 48 | ans.checksum_crc32c = 
Some(Self::base64(&sum)); 49 | } 50 | if let Some(sha1) = self.sha1 { 51 | let sum = sha1.finalize(); 52 | ans.checksum_sha1 = Some(Self::base64(sum.as_ref())); 53 | } 54 | if let Some(sha256) = self.sha256 { 55 | let sum = sha256.finalize(); 56 | ans.checksum_sha256 = Some(Self::base64(sum.as_ref())); 57 | } 58 | if let Some(crc64nvme) = self.crc64nvme { 59 | let sum = crc64nvme.finalize(); 60 | ans.checksum_crc64nvme = Some(Self::base64(&sum)); 61 | } 62 | ans 63 | } 64 | 65 | fn base64(input: &[u8]) -> String { 66 | base64_simd::STANDARD.encode_to_string(input) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /scripts/install.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from dataclasses import dataclass 3 | import argparse 4 | import subprocess 5 | 6 | INSTALLERS = {} 7 | 8 | 9 | def installer(name): 10 | def decorator(f): 11 | INSTALLERS[name] = f 12 | return f 13 | 14 | return decorator 15 | 16 | 17 | @dataclass(kw_only=True) 18 | class CliArgs: 19 | name: str 20 | 21 | offline: bool = False 22 | 23 | @staticmethod 24 | def parse(): 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument("name", type=str, choices=list(INSTALLERS.keys())) 27 | parser.add_argument("--offline", action="store_true") 28 | args = parser.parse_args() 29 | return CliArgs(**args.__dict__) 30 | 31 | 32 | def sh(cmd): 33 | print(cmd) 34 | subprocess.run(cmd, shell=True, check=True, stdin=subprocess.DEVNULL) 35 | 36 | 37 | def cargo_install( 38 | args: CliArgs, 39 | package: str, 40 | *, 41 | features: list[str] | None = None, 42 | bin: str | None = None, 43 | force: bool = True, 44 | ): 45 | opt_offline = "--offline" if args.offline else "" 46 | opt_features = f"--features {','.join(features)}" if features else "" 47 | opt_bin = f"--bin {bin}" if bin else "" 48 | opt_force = "--force" if force else "" 49 | sh( 50 | f"cargo install --path crates/{package} 
--locked " # 51 | f"{opt_offline} {opt_features} {opt_bin} {opt_force}" 52 | ) 53 | 54 | 55 | @installer("s3s-fs") 56 | def install_s3s_fs(args: CliArgs): 57 | cargo_install(args, "s3s-fs", features=["binary"]) 58 | 59 | 60 | @installer("s3s-proxy") 61 | def install_s3s_proxy(args: CliArgs): 62 | cargo_install(args, "s3s-proxy") 63 | 64 | 65 | @installer("s3s-e2e") 66 | def install_s3s_e2e(args: CliArgs): 67 | sh("touch crates/s3s-e2e/build.rs") 68 | cargo_install(args, "s3s-e2e") 69 | 70 | 71 | @installer("all") 72 | def install_all(args: CliArgs): 73 | if not args.offline: 74 | sh("cargo fetch") 75 | args.offline = True 76 | 77 | for name, f in INSTALLERS.items(): 78 | if name != "all": 79 | f(args) 80 | 81 | 82 | def main(args: CliArgs): 83 | INSTALLERS[args.name](args) 84 | 85 | 86 | if __name__ == "__main__": 87 | main(CliArgs.parse()) 88 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.89 AS builder 2 | 3 | # TARGETARCH is a built-in ARG provided by the Docker builder (e.g., "amd64", "arm64") 4 | # It's automatically available in RUN commands. 5 | ARG TARGETARCH 6 | 7 | WORKDIR /app 8 | 9 | # Set musl-gcc flags for aws-lc compatibility 10 | ENV CC=musl-gcc \ 11 | CFLAGS="-D__isoc23_sscanf=sscanf -D__isoc23_strtol=strtol" 12 | 13 | # Copy source files first to better leverage Docker's layer caching 14 | COPY ./Cargo.toml ./Cargo.toml 15 | COPY ./crates ./crates 16 | COPY ./codegen ./codegen 17 | 18 | # This single RUN command handles all platform-specific logic. 19 | # It sets variables, installs dependencies, adds the Rust target, and builds the binary. 
# One layer does all platform-specific work:
#   1. map TARGETARCH ("amd64"/"arm64") to the musl Rust target triple and
#      the matching cross GCC package; any other value aborts the build,
#   2. install the musl toolchain (apt lists are removed in the same layer
#      to keep it slim),
#   3. statically build the three release binaries,
#   4. copy them to /app/target/ so the final stage can use a fixed path
#      that does not depend on the target triple.
RUN \
    case ${TARGETARCH} in \
        "amd64") \
            RUST_TARGET="x86_64-unknown-linux-musl" \
            && GCC_PACKAGE="gcc-x86-64-linux-gnu" \
            ;; \
        "arm64") \
            RUST_TARGET="aarch64-unknown-linux-musl" \
            && GCC_PACKAGE="gcc-aarch64-linux-gnu" \
            ;; \
        *) \
            echo "Unsupported architecture: ${TARGETARCH}" >&2 \
            && exit 1 \
            ;; \
    esac \
    && apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        musl \
        musl-dev \
        musl-tools \
        ${GCC_PACKAGE} \
    && rustup target add ${RUST_TARGET} \
    && cargo build --release --target ${RUST_TARGET} --features="binary" --bin s3s-fs --bin s3s-e2e --bin s3s-proxy \
    && rm -rf /var/lib/apt/lists/* \
    && cp target/${RUST_TARGET}/release/s3s-fs target/${RUST_TARGET}/release/s3s-e2e target/${RUST_TARGET}/release/s3s-proxy target/


# Create and set permissions for the data directory
RUN mkdir data && chmod -R 755 data

#----------- FINAL STAGE -----------
# `scratch` is empty: only the statically linked binaries and the data
# directory copied below exist in the final image.
FROM scratch

# Copy the statically compiled binary from a known location in the builder stage.
COPY --from=builder /app/target/s3s-fs .
COPY --from=builder /app/target/s3s-e2e .
COPY --from=builder /app/target/s3s-proxy .
57 | 58 | # Copy the data directory 59 | COPY --from=builder /app/data /data 60 | 61 | # Set the command to run the application 62 | CMD ["./s3s-fs", "--help"] 63 | -------------------------------------------------------------------------------- /crates/s3s/src/ops/get_object.rs: -------------------------------------------------------------------------------- 1 | use crate::S3Request; 2 | use crate::S3Result; 3 | use crate::dto::GetObjectInput; 4 | use crate::dto::Timestamp; 5 | use crate::dto::TimestampFormat; 6 | use crate::header; 7 | use crate::http::Response; 8 | use crate::utils::format::fmt_timestamp; 9 | 10 | use hyper::HeaderMap; 11 | use hyper::header::CONTENT_LENGTH; 12 | use hyper::header::TRANSFER_ENCODING; 13 | use hyper::http::HeaderName; 14 | use hyper::http::HeaderValue; 15 | 16 | use stdx::default::default; 17 | 18 | pub fn extract_overridden_response_headers(req: &S3Request) -> S3Result { 19 | let mut map: HeaderMap = default(); 20 | 21 | add(&mut map, header::CONTENT_TYPE, req.input.response_content_type.as_deref())?; 22 | add(&mut map, header::CONTENT_LANGUAGE, req.input.response_content_language.as_deref())?; 23 | add_ts(&mut map, header::EXPIRES, req.input.response_expires.as_ref())?; 24 | add(&mut map, header::CACHE_CONTROL, req.input.response_cache_control.as_deref())?; 25 | add(&mut map, header::CONTENT_DISPOSITION, req.input.response_content_disposition.as_deref())?; 26 | add(&mut map, header::CONTENT_ENCODING, req.input.response_content_encoding.as_deref())?; 27 | 28 | Ok(map) 29 | } 30 | 31 | fn add(map: &mut HeaderMap, name: HeaderName, value: Option<&str>) -> S3Result<()> { 32 | let error = |e| invalid_request!(e, "invalid overridden header: {name}: {value:?}"); 33 | if let Some(value) = value { 34 | let value = value.parse().map_err(error)?; 35 | map.insert(name, value); 36 | } 37 | Ok(()) 38 | } 39 | 40 | fn add_ts(map: &mut HeaderMap, name: HeaderName, value: Option<&Timestamp>) -> S3Result<()> { 41 | let error = |e| 
invalid_request!(e, "invalid overridden header: {name}: {value:?}"); 42 | if let Some(value) = value { 43 | let value = fmt_timestamp(value, TimestampFormat::HttpDate, HeaderValue::from_bytes).map_err(error)?; 44 | map.insert(name, value); 45 | } 46 | Ok(()) 47 | } 48 | 49 | pub fn merge_custom_headers(resp: &mut Response, headers: HeaderMap) { 50 | resp.headers.extend(headers); 51 | 52 | // special case for https://github.com/Nugine/s3s/issues/80 53 | if let Some(val) = resp.headers.get(TRANSFER_ENCODING) { 54 | if val.as_bytes() == b"chunked" { 55 | resp.headers.remove(CONTENT_LENGTH); 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /crates/s3s/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s" 3 | version = "0.12.0-rc.6" 4 | description = "S3 Service Adapter" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [package.metadata.docs.rs] 17 | all-features = true 18 | rustdoc-args = ["--cfg", "docsrs"] 19 | 20 | [features] 21 | openssl = ["dep:openssl"] 22 | minio = [] 23 | 24 | [target.'cfg(not(windows))'.dependencies] 25 | openssl = { version = "0.10.75", optional = true } 26 | 27 | [dependencies] 28 | arrayvec = "0.7.6" 29 | async-trait = "0.1.89" 30 | atoi = { version = "2.0.0", default-features = false } 31 | base64-simd = "0.8.0" 32 | bytes = "1.11.0" 33 | bytestring = "1.5.0" 34 | chrono = { version = "0.4.42", default-features = false } 35 | crc-fast = "1" 36 | futures = { version = "0.3.31", default-features = false, features = ["std"] } 37 | hex-simd = "0.8.0" 38 | hmac.workspace = true 39 | http-body = "1.0.1" 40 | http-body-util = "0.1.3" 41 | httparse = "1.10.1" 42 | hyper = { 
version = "1.8.1", features = ["http1", "server"] } 43 | itoa = "1.0.15" 44 | md-5.workspace = true 45 | memchr = "2.7.6" 46 | mime = "0.3.17" 47 | nom = "8.0.0" 48 | numeric_cast = "0.3.0" 49 | pin-project-lite = "0.2.16" 50 | quick-xml = { version = "0.37.5", features = ["serialize"] } 51 | serde = { version = "1.0.228", features = ["derive"] } 52 | serde_urlencoded = "0.7.1" 53 | sha1.workspace = true 54 | sha2.workspace = true 55 | smallvec = "1.15.1" 56 | thiserror = "2.0.17" 57 | time = { version = "0.3.44", features = ["formatting", "parsing", "macros"] } 58 | tower = { version = "0.5.2", default-features = false } 59 | tracing = "0.1.43" 60 | transform-stream = "0.3.1" 61 | urlencoding = "2.1.3" 62 | zeroize = "1.8.2" 63 | std-next = "0.1.9" 64 | sync_wrapper = { version = "1.0.2", default-features = false } 65 | tokio = { version = "1.48.0", features = ["time"] } 66 | const-str = "0.7.0" 67 | http = "1.4.0" 68 | subtle = "2.6.1" 69 | cfg-if = "1.0.4" 70 | 71 | [dev-dependencies] 72 | axum = "0.8.7" 73 | clap = { version = "4.5.53", features = ["derive"] } 74 | hyper-util = { version = "0.1.18", features = ["server-auto", "server-graceful", "http1", "http2", "tokio"] } 75 | rustls-pemfile = "2.2.0" 76 | serde_json = "1.0.145" 77 | tokio = { version = "1.48.0", features = ["full"] } 78 | tokio-rustls = "0.26.4" 79 | tokio-util = { version = "0.7.17", features = ["io"] } 80 | tracing-subscriber = { version = "0.3.22", features = ["env-filter"] } 81 | -------------------------------------------------------------------------------- /crates/s3s-fs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s3s-fs" 3 | version = "0.12.0-rc.6" 4 | description = "An experimental S3 server based on file system" 5 | readme = "../../README.md" 6 | keywords = ["s3"] 7 | categories = ["web-programming", "web-programming::http-server"] 8 | edition.workspace = true 9 | repository.workspace = true 10 | 
license.workspace = true 11 | rust-version.workspace = true 12 | 13 | [lints] 14 | workspace = true 15 | 16 | [[bin]] 17 | name = "s3s-fs" 18 | required-features = ["binary"] 19 | 20 | [features] 21 | binary = ["tokio/full", "dep:clap", "dep:tracing-subscriber", "dep:hyper-util"] 22 | 23 | [dependencies] 24 | async-trait = "0.1.89" 25 | base64-simd = "0.8.0" 26 | bytes = "1.11.0" 27 | chrono = { version = "0.4.42", default-features = false, features = [ 28 | "std", 29 | "clock", 30 | ] } 31 | clap = { version = "4.5.53", optional = true, features = ["derive"] } 32 | crc32c = "0.6.8" 33 | futures = "0.3.31" 34 | hex-simd = "0.8.0" 35 | hyper-util = { version = "0.1.18", optional = true, features = [ 36 | "server-auto", 37 | "server-graceful", 38 | "http1", 39 | "http2", 40 | "tokio", 41 | ] } 42 | mime = "0.3.17" 43 | std-next = "0.1.9" 44 | numeric_cast = "0.3.0" 45 | path-absolutize = "3.1.1" 46 | s3s = { version = "0.12.0-rc.6", path = "../s3s" } 47 | serde = { version = "1.0.228", features = ["derive"] } 48 | serde_json = "1.0.145" 49 | thiserror = "2.0.17" 50 | time = "0.3.44" 51 | tokio = { version = "1.48.0", features = ["fs", "io-util"] } 52 | tokio-util = { version = "0.7.17", features = ["io"] } 53 | tracing = "0.1.43" 54 | tracing-error = "0.2.1" 55 | tracing-subscriber = { version = "0.3.22", optional = true, features = [ 56 | "env-filter", 57 | "time", 58 | ] } 59 | transform-stream = "0.3.1" 60 | uuid = { version = "1.18.1", features = ["v4"] } 61 | 62 | [dev-dependencies] 63 | anyhow = { version = "1.0.100", features = ["backtrace"] } 64 | aws-config = { version = "1.8.7", default-features = false } 65 | aws-credential-types = { version = "1.2.6", features = ["test-util"] } 66 | aws-sdk-s3 = { version = "1.107.0", features = ["behavior-version-latest"] } 67 | aws-sdk-sts = { version = "1.87.0", features = ["behavior-version-latest"] } 68 | futures-util = "0.3.31" 69 | hyper = { version = "1.8.1", features = ["http1", "http2"] } 70 | hyper-util = { 
version = "0.1.18", features = ["server-auto", "server-graceful", "http1", "http2", "tokio"] } 71 | once_cell = "1.21.3" 72 | opendal = { version = "0.55.0", features = ["services-s3"] } 73 | s3s-aws = { version = "0.12.0-rc.6", path = "../s3s-aws" } 74 | tokio = { version = "1.48.0", features = ["full"] } 75 | tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] } 76 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/amz_date.rs: -------------------------------------------------------------------------------- 1 | //! x-amz-date 2 | 3 | use std::fmt::Write as _; 4 | 5 | use arrayvec::ArrayString; 6 | 7 | /// x-amz-date 8 | #[derive(Debug, Clone)] 9 | pub struct AmzDate { 10 | /// year 11 | year: u16, 12 | /// month 13 | month: u8, 14 | /// day 15 | day: u8, 16 | /// hour 17 | hour: u8, 18 | /// minute 19 | minute: u8, 20 | /// second 21 | second: u8, 22 | } 23 | 24 | /// [`AmzDate`] 25 | #[derive(Debug, thiserror::Error)] 26 | #[error("ParseAmzDateError")] 27 | pub struct ParseAmzDateError(()); 28 | 29 | impl AmzDate { 30 | /// Parses `AmzDate` from header 31 | /// # Errors 32 | /// Returns an error if the header is invalid 33 | pub fn parse(header: &str) -> Result { 34 | self::parser::parse(header).map_err(|_| ParseAmzDateError(())) 35 | } 36 | 37 | /// `{YYYY}{MM}{DD}T{HH}{MM}{SS}Z` 38 | #[must_use] 39 | pub fn fmt_iso8601(&self) -> ArrayString<16> { 40 | let mut buf = >::new(); 41 | let (y, m, d, hh, mm, ss) = (self.year, self.month, self.day, self.hour, self.minute, self.second); 42 | write!(&mut buf, "{y:04}{m:02}{d:02}T{hh:02}{mm:02}{ss:02}Z").unwrap(); 43 | buf 44 | } 45 | 46 | /// `{YYYY}{MM}{DD}` 47 | #[must_use] 48 | pub fn fmt_date(&self) -> ArrayString<8> { 49 | let mut buf = >::new(); 50 | write!(&mut buf, "{:04}{:02}{:02}", self.year, self.month, self.day).unwrap(); 51 | buf 52 | } 53 | 54 | pub fn to_time(&self) -> Option { 55 | let y = i32::from(self.year); 56 | let m: 
time::Month = self.month.try_into().ok()?; 57 | let d = self.day; 58 | 59 | let t = time::Date::from_calendar_date(y, m, d).ok()?; 60 | let t = t.with_hms(self.hour, self.minute, self.second).ok()?; 61 | Some(t.assume_utc()) 62 | } 63 | } 64 | 65 | mod parser { 66 | use super::*; 67 | 68 | use crate::utils::parser::{Error, digit2, digit4}; 69 | 70 | macro_rules! ensure { 71 | ($cond:expr) => { 72 | if !$cond { 73 | return Err(Error); 74 | } 75 | }; 76 | } 77 | 78 | pub fn parse(input: &str) -> Result { 79 | let x = input.as_bytes(); 80 | ensure!(x.len() == 16); 81 | 82 | let year = digit4([x[0], x[1], x[2], x[3]])?; 83 | let month = digit2([x[4], x[5]])?; 84 | let day = digit2([x[6], x[7]])?; 85 | ensure!(x[8] == b'T'); 86 | 87 | let hour = digit2([x[9], x[10]])?; 88 | let minute = digit2([x[11], x[12]])?; 89 | let second = digit2([x[13], x[14]])?; 90 | ensure!(x[15] == b'Z'); 91 | 92 | Ok(AmzDate { 93 | year, 94 | month, 95 | day, 96 | hour, 97 | minute, 98 | second, 99 | }) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /codegen/src/v1/mod.rs: -------------------------------------------------------------------------------- 1 | mod rust; 2 | mod smithy; 3 | mod utils; 4 | 5 | mod access; 6 | mod dto; 7 | mod error; 8 | mod headers; 9 | mod minio; 10 | mod ops; 11 | mod s3_trait; 12 | mod sts; 13 | mod xml; 14 | 15 | mod aws_conv; 16 | mod aws_proxy; 17 | 18 | use std::fs::File; 19 | use std::io::BufWriter; 20 | 21 | pub use self::utils::o; 22 | 23 | fn write_file(path: &str, f: impl FnOnce()) { 24 | let mut writer = BufWriter::new(File::create(path).unwrap()); 25 | scoped_writer::scoped(&mut writer, f); 26 | } 27 | 28 | #[derive(Debug, Clone, Copy)] 29 | enum Patch { 30 | Minio, 31 | } 32 | 33 | pub fn run() { 34 | inner_run(None); 35 | inner_run(Some(Patch::Minio)); 36 | } 37 | 38 | fn inner_run(code_patch: Option) { 39 | let model = { 40 | let mut s3_model = 
smithy::Model::load_json("data/s3.json").unwrap(); 41 | 42 | let mut sts_model = smithy::Model::load_json("data/sts.json").unwrap(); 43 | sts::reduce(&mut sts_model); 44 | s3_model.shapes.append(&mut sts_model.shapes); 45 | 46 | if matches!(code_patch, Some(Patch::Minio)) { 47 | minio::patch(&mut s3_model); 48 | } 49 | 50 | s3_model 51 | }; 52 | 53 | let ops = ops::collect_operations(&model); 54 | let rust_types = dto::collect_rust_types(&model, &ops); 55 | 56 | let suffix = match code_patch { 57 | Some(Patch::Minio) => "_minio", 58 | None => "", 59 | }; 60 | 61 | { 62 | let path = format!("crates/s3s/src/dto/generated{suffix}.rs"); 63 | write_file(&path, || dto::codegen(&rust_types, &ops, code_patch)); 64 | } 65 | 66 | { 67 | let path = format!("crates/s3s/src/header/generated{suffix}.rs"); 68 | write_file(&path, || headers::codegen(&model)); 69 | } 70 | 71 | { 72 | let path = format!("crates/s3s/src/error/generated{suffix}.rs"); 73 | write_file(&path, || error::codegen(&model)); 74 | } 75 | 76 | { 77 | let path = format!("crates/s3s/src/xml/generated{suffix}.rs"); 78 | write_file(&path, || xml::codegen(&ops, &rust_types)); 79 | } 80 | 81 | { 82 | let path = "crates/s3s/src/s3_trait.rs"; 83 | write_file(path, || s3_trait::codegen(&ops)); 84 | } 85 | 86 | { 87 | let path = format!("crates/s3s/src/ops/generated{suffix}.rs"); 88 | write_file(&path, || ops::codegen(&ops, &rust_types)); 89 | } 90 | 91 | { 92 | let path = format!("crates/s3s/src/access/generated{suffix}.rs"); 93 | write_file(&path, || access::codegen(&ops)); 94 | } 95 | 96 | { 97 | let path = format!("crates/s3s-aws/src/conv/generated{suffix}.rs"); 98 | write_file(&path, || aws_conv::codegen(&ops, &rust_types)); 99 | } 100 | 101 | { 102 | let path = format!("crates/s3s-aws/src/proxy/generated{suffix}.rs"); 103 | write_file(&path, || aws_proxy::codegen(&ops, &rust_types)); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /crates/s3s/src/validation.rs: 
-------------------------------------------------------------------------------- 1 | //! Validation API for S3 bucket names 2 | 3 | /// Trait for validating S3 names 4 | /// 5 | /// Implementations should return `true` for valid names and `false` for invalid ones. 6 | pub trait NameValidation: Send + Sync + 'static { 7 | /// Validate a bucket name 8 | fn validate_bucket_name(&self, name: &str) -> bool; 9 | } 10 | 11 | /// AWS-compliant name validation 12 | #[derive(Debug, Clone, Default)] 13 | pub struct AwsNameValidation { 14 | _priv: (), 15 | } 16 | 17 | impl AwsNameValidation { 18 | #[must_use] 19 | pub const fn new() -> Self { 20 | Self { _priv: () } 21 | } 22 | } 23 | 24 | impl NameValidation for AwsNameValidation { 25 | fn validate_bucket_name(&self, name: &str) -> bool { 26 | crate::path::check_bucket_name(name) 27 | } 28 | } 29 | 30 | #[cfg(test)] 31 | pub(crate) mod tests { 32 | use super::*; 33 | 34 | /// A name validation that allows any non-empty bucket name. 35 | /// This is for test only. 
36 | #[derive(Debug, Clone, Default)] 37 | pub struct RelaxedNameValidation { 38 | _priv: (), 39 | } 40 | 41 | impl RelaxedNameValidation { 42 | #[must_use] 43 | pub const fn new() -> Self { 44 | Self { _priv: () } 45 | } 46 | } 47 | 48 | impl NameValidation for RelaxedNameValidation { 49 | fn validate_bucket_name(&self, name: &str) -> bool { 50 | !name.is_empty() 51 | } 52 | } 53 | 54 | #[test] 55 | fn test_default_validation() { 56 | let validator = AwsNameValidation::new(); 57 | 58 | // Valid bucket names should pass 59 | assert!(validator.validate_bucket_name("valid-bucket")); 60 | assert!(validator.validate_bucket_name("my.example.bucket")); 61 | 62 | // Invalid bucket names should fail 63 | assert!(!validator.validate_bucket_name("InvalidBucket")); // Uppercase 64 | assert!(!validator.validate_bucket_name("invalid_bucket")); // Underscore 65 | assert!(!validator.validate_bucket_name("192.168.1.1")); // IP address 66 | 67 | assert!(!validator.validate_bucket_name("")); // Empty name should fail 68 | } 69 | 70 | #[test] 71 | fn test_relaxed_validation() { 72 | let validator = RelaxedNameValidation::new(); 73 | 74 | // All bucket names should pass, even invalid ones 75 | assert!(validator.validate_bucket_name("valid-bucket")); 76 | assert!(validator.validate_bucket_name("InvalidBucket")); // Uppercase - allowed 77 | assert!(validator.validate_bucket_name("invalid_bucket")); // Underscore - allowed 78 | assert!(validator.validate_bucket_name("192.168.1.1")); // IP address - allowed 79 | assert!(validator.validate_bucket_name("xn--example")); // xn-- prefix - allowed 80 | assert!(validator.validate_bucket_name("ab")); // Too short - allowed 81 | assert!(validator.validate_bucket_name(&"a".repeat(100))); // Too long - allowed 82 | 83 | assert!(!validator.validate_bucket_name("")); // Empty name should fail 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /crates/s3s/src/utils/crypto.rs: 
-------------------------------------------------------------------------------- 1 | use std::mem::MaybeUninit; 2 | 3 | use hex_simd::{AsOut, AsciiCase}; 4 | use hyper::body::Bytes; 5 | 6 | /// verify sha256 checksum string 7 | pub fn is_sha256_checksum(s: &str) -> bool { 8 | // TODO: optimize 9 | let is_lowercase_hex = |c: u8| matches!(c, b'0'..=b'9' | b'a'..=b'f'); 10 | s.len() == 64 && s.as_bytes().iter().copied().all(is_lowercase_hex) 11 | } 12 | 13 | /// `hmac_sha1(key, data)` 14 | pub fn hmac_sha1(key: impl AsRef<[u8]>, data: impl AsRef<[u8]>) -> [u8; 20] { 15 | use hmac::{Hmac, KeyInit, Mac}; 16 | use sha1::Sha1; 17 | 18 | let mut m = >::new_from_slice(key.as_ref()).unwrap(); 19 | m.update(data.as_ref()); 20 | m.finalize().into_bytes().into() 21 | } 22 | 23 | /// `hmac_sha256(key, data)` 24 | pub fn hmac_sha256(key: impl AsRef<[u8]>, data: impl AsRef<[u8]>) -> [u8; 32] { 25 | use hmac::{Hmac, KeyInit, Mac}; 26 | use sha2::Sha256; 27 | 28 | let mut m = >::new_from_slice(key.as_ref()).unwrap(); 29 | m.update(data.as_ref()); 30 | m.finalize().into_bytes().into() 31 | } 32 | 33 | pub fn hex(data: impl AsRef<[u8]>) -> String { 34 | hex_simd::encode_to_string(data, hex_simd::AsciiCase::Lower) 35 | } 36 | 37 | /// `f(hex(src))` 38 | fn hex_bytes32(src: impl AsRef<[u8]>, f: impl FnOnce(&str) -> R) -> R { 39 | let buf: &mut [_] = &mut [MaybeUninit::uninit(); 64]; 40 | let ans = hex_simd::encode_as_str(src.as_ref(), buf.as_out(), AsciiCase::Lower); 41 | f(ans) 42 | } 43 | 44 | #[cfg(not(all(feature = "openssl", not(windows))))] 45 | fn sha256(data: &[u8]) -> impl AsRef<[u8; 32]> + use<> { 46 | use sha2::{Digest, Sha256}; 47 | ::digest(data) 48 | } 49 | 50 | #[cfg(all(feature = "openssl", not(windows)))] 51 | fn sha256(data: &[u8]) -> impl AsRef<[u8]> { 52 | use openssl::hash::{Hasher, MessageDigest}; 53 | let mut h = Hasher::new(MessageDigest::sha256()).unwrap(); 54 | h.update(data).unwrap(); 55 | h.finish().unwrap() 56 | } 57 | 58 | #[cfg(not(all(feature = "openssl", 
not(windows))))] 59 | fn sha256_chunk(chunk: &[Bytes]) -> impl AsRef<[u8; 32]> + use<> { 60 | use sha2::{Digest, Sha256}; 61 | let mut h = ::new(); 62 | for data in chunk { 63 | h.update(data); 64 | } 65 | h.finalize() 66 | } 67 | 68 | #[cfg(all(feature = "openssl", not(windows)))] 69 | fn sha256_chunk(chunk: &[Bytes]) -> impl AsRef<[u8]> { 70 | use openssl::hash::{Hasher, MessageDigest}; 71 | let mut h = Hasher::new(MessageDigest::sha256()).unwrap(); 72 | for data in chunk { 73 | h.update(data).unwrap(); 74 | } 75 | h.finish().unwrap() 76 | } 77 | 78 | /// `f(hex(sha256(data)))` 79 | pub fn hex_sha256(data: &[u8], f: impl FnOnce(&str) -> R) -> R { 80 | hex_bytes32(sha256(data).as_ref(), f) 81 | } 82 | 83 | /// `f(hex(sha256(chunk)))` 84 | pub fn hex_sha256_chunk(chunk: &[Bytes], f: impl FnOnce(&str) -> R) -> R { 85 | hex_bytes32(sha256_chunk(chunk).as_ref(), f) 86 | } 87 | 88 | #[cfg(test)] 89 | pub fn hex_sha256_string(data: &[u8]) -> String { 90 | hex_sha256(data, str::to_owned) 91 | } 92 | -------------------------------------------------------------------------------- /crates/s3s-e2e/src/utils.rs: -------------------------------------------------------------------------------- 1 | use s3s_test::Result; 2 | 3 | use std::fmt; 4 | 5 | use aws_sdk_s3::error::ProvideErrorMetadata; 6 | use aws_sdk_s3::error::SdkError; 7 | use tracing::error; 8 | 9 | #[macro_export] 10 | macro_rules! 
case { 11 | ($tcx: expr, $s:ident, $x:ident, $c:ident) => {{ 12 | let mut suite = $tcx.suite::<$s>(stringify!($s)); 13 | let mut fixture = suite.fixture::<$x>(stringify!($x)); 14 | fixture.case(stringify!($c), $x::$c); 15 | }}; 16 | } 17 | 18 | #[allow(clippy::result_large_err)] 19 | pub fn check(result: Result>, allowed_codes: &[&str]) -> Result, SdkError> 20 | where 21 | E: fmt::Debug + ProvideErrorMetadata, 22 | { 23 | if let Err(SdkError::ServiceError(ref err)) = result { 24 | if let Some(code) = err.err().code() { 25 | if allowed_codes.contains(&code) { 26 | return Ok(None); 27 | } 28 | } 29 | } 30 | if let Err(ref err) = result { 31 | error!(?err); 32 | } 33 | match result { 34 | Ok(val) => Ok(Some(val)), 35 | Err(err) => Err(err), 36 | } 37 | } 38 | 39 | #[tracing::instrument(skip(s3))] 40 | pub async fn create_bucket(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 41 | s3.create_bucket().bucket(bucket).send().await?; 42 | Ok(()) 43 | } 44 | 45 | #[tracing::instrument(skip(s3))] 46 | pub async fn delete_bucket_loose(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 47 | let result = s3.delete_bucket().bucket(bucket).send().await; 48 | check(result, &["NoSuchBucket"])?; 49 | Ok(()) 50 | } 51 | 52 | #[tracing::instrument(skip(s3))] 53 | pub async fn delete_bucket_strict(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 54 | s3.delete_bucket().bucket(bucket).send().await?; 55 | Ok(()) 56 | } 57 | 58 | #[tracing::instrument(skip(s3))] 59 | pub async fn delete_bucket_all(s3: &aws_sdk_s3::Client, bucket: &str) -> Result { 60 | let mut continuation_token = None; 61 | loop { 62 | let result = s3 63 | .list_objects_v2() 64 | .bucket(bucket) 65 | .set_continuation_token(continuation_token) 66 | .send() 67 | .await; 68 | let Some(list_resp) = check(result, &["NoSuchBucket"])? 
else { 69 | return Ok(()); 70 | }; 71 | 72 | for obj in list_resp.contents() { 73 | if let Some(key) = obj.key() { 74 | s3.delete_object().bucket(bucket).key(key).send().await?; 75 | } 76 | } 77 | 78 | if list_resp.is_truncated() == Some(true) { 79 | continuation_token = list_resp.next_continuation_token().map(String::from); 80 | } else { 81 | break; 82 | } 83 | } 84 | 85 | delete_bucket_loose(s3, bucket).await?; 86 | Ok(()) 87 | } 88 | 89 | #[tracing::instrument(skip(s3))] 90 | pub async fn delete_object_loose(s3: &aws_sdk_s3::Client, bucket: &str, key: &str) -> Result { 91 | let result = s3.delete_object().bucket(bucket).key(key).send().await; 92 | check(result, &["NoSuchKey", "NoSuchBucket"])?; 93 | Ok(()) 94 | } 95 | 96 | #[tracing::instrument(skip(s3))] 97 | pub async fn delete_object_strict(s3: &aws_sdk_s3::Client, bucket: &str, key: &str) -> Result { 98 | s3.delete_object().bucket(bucket).key(key).send().await?; 99 | Ok(()) 100 | } 101 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/presigned_url_v4.rs: -------------------------------------------------------------------------------- 1 | //! 
presigned url information 2 | 3 | use super::AmzDate; 4 | use super::CredentialV4; 5 | 6 | use crate::http::OrderedQs; 7 | use crate::utils::crypto::is_sha256_checksum; 8 | 9 | use smallvec::SmallVec; 10 | 11 | /// Presigned url information 12 | #[derive(Debug)] 13 | pub struct PresignedUrlV4<'a> { 14 | /// algorithm 15 | pub algorithm: &'a str, 16 | /// credential 17 | pub credential: CredentialV4<'a>, 18 | /// amz date 19 | pub amz_date: AmzDate, 20 | /// expires 21 | pub expires: time::Duration, 22 | /// signed headers 23 | pub signed_headers: SmallVec<[&'a str; 16]>, 24 | /// signature 25 | pub signature: &'a str, 26 | } 27 | 28 | /// [`PresignedUrlV4`] 29 | #[derive(Debug, thiserror::Error)] 30 | #[error("ParsePresignedUrlError")] 31 | pub struct ParsePresignedUrlError { 32 | /// priv place holder 33 | _priv: (), 34 | } 35 | 36 | /// query strings of a presigned url 37 | struct PresignedQs<'a> { 38 | /// X-Amz-Algorithm 39 | algorithm: &'a str, 40 | /// X-Amz-Credential 41 | credential: &'a str, 42 | /// X-Amz-Date 43 | date: &'a str, 44 | /// X-Amz-Expires 45 | expires: &'a str, 46 | /// X-Amz-SignedHeaders 47 | signed_headers: &'a str, 48 | /// X-Amz-Signature 49 | signature: &'a str, 50 | } 51 | 52 | impl<'a> PresignedQs<'a> { 53 | /// Creates `PresignedQs` from `OrderedQs` 54 | fn from_ordered_qs(qs: &'a OrderedQs) -> Option { 55 | Some(PresignedQs { 56 | algorithm: qs.get_unique("X-Amz-Algorithm")?, 57 | credential: qs.get_unique("X-Amz-Credential")?, 58 | date: qs.get_unique("X-Amz-Date")?, 59 | expires: qs.get_unique("X-Amz-Expires")?, 60 | signed_headers: qs.get_unique("X-Amz-SignedHeaders")?, 61 | signature: qs.get_unique("X-Amz-Signature")?, 62 | }) 63 | } 64 | } 65 | 66 | impl<'a> PresignedUrlV4<'a> { 67 | /// Parses `PresignedUrl` from query 68 | /// 69 | /// # Errors 70 | /// Returns `ParsePresignedUrlError` if it failed to parse 71 | pub fn parse(qs: &'a OrderedQs) -> Result { 72 | let err = || ParsePresignedUrlError { _priv: () }; 73 | 74 | let 
info = PresignedQs::from_ordered_qs(qs).ok_or_else(err)?; 75 | 76 | let algorithm = info.algorithm; 77 | 78 | let credential = CredentialV4::parse(info.credential).map_err(|_e| err())?; 79 | 80 | let amz_date = AmzDate::parse(info.date).map_err(|_e| err())?; 81 | 82 | let expires = parse_expires(info.expires).ok_or_else(err)?; 83 | 84 | if !info.signed_headers.is_ascii() { 85 | return Err(err()); 86 | } 87 | let signed_headers = info.signed_headers.split(';').collect(); 88 | 89 | if !is_sha256_checksum(info.signature) { 90 | return Err(err()); 91 | } 92 | let signature = info.signature; 93 | 94 | Ok(Self { 95 | algorithm, 96 | credential, 97 | amz_date, 98 | expires, 99 | signed_headers, 100 | signature, 101 | }) 102 | } 103 | } 104 | 105 | fn parse_expires(s: &str) -> Option { 106 | let x = s.parse::().ok().filter(|&x| x > 0)?; 107 | Some(time::Duration::new(i64::from(x), 0)) 108 | } 109 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | [Unreleased]: https://github.com/Nugine/s3s/compare/v0.11.0...HEAD 11 | 12 | ## [v0.11.0] - 2025-03-28 13 | 14 | [v0.11.0]: https://github.com/Nugine/s3s/compare/v0.10.1...v0.11.0 15 | 16 | Tracking in [#267](https://github.com/Nugine/s3s/issues/267). 17 | 18 | MSRV of this minor version: 1.85.0 19 | 20 | ### s3s 21 | 22 | **BREAKING**: Following the latest model definitions in [aws-sdk-rust](https://github.com/awslabs/aws-sdk-rust), `s3s::dto` is updated. 23 | + You may come across some type changes reported by rustc. 24 | + The migration is not hard but requires some time. 
25 | 26 | **BREAKING**: More request parameters are accepted via upgrading model definitions. 27 | + S3 preconditions ([#241](https://github.com/Nugine/s3s/issues/241)) 28 | + PutObject write_offset_bytes ([#249](https://github.com/Nugine/s3s/issues/249)) 29 | 30 | **BREAKING**: Policy-based access control is supported in `s3s::access` ([#161](https://github.com/Nugine/s3s/issues/161)) 31 | + Add `S3Access` trait for access control. 32 | + Add `S3ServiceBuilder::set_access`. 33 | + Move `S3Auth::check_access` to `S3Access::check`. 34 | 35 | **BREAKING**: Multi-domain is supported in `s3s::host`. ([#175](https://github.com/Nugine/s3s/issues/175)) 36 | + Add `S3Host` trait for parsing host header. 37 | + Change `S3ServiceBuilder::set_base_domain` to `S3ServiceBuilder::set_host`. 38 | + Add `SingleDomain` parser. 39 | + Add `MultiDomain` parser. 40 | 41 | Custom route is supported in `s3s::route` ([#195](https://github.com/Nugine/s3s/issues/195)) 42 | + Add `S3Route` trait for custom route protected by signature verification. 43 | + Add `S3ServiceBuilder::set_route`. 44 | + Signature v4 supports AWS STS requests ([#208](https://github.com/Nugine/s3s/pull/208)) 45 | + Add example using [axum](https://github.com/tokio-rs/axum) web framework ([#263](https://github.com/Nugine/s3s/pull/263)) 46 | 47 | Unstable `minio` branch: 48 | + Add `minio` branch for MinIO compatibility. 49 | + This branch is automatically force-rebased to the latest `main` branch. 
50 | 51 | Other notable changes 52 | + feat(s3s): export xml module ([#189](https://github.com/Nugine/s3s/pull/189)) 53 | + fix(s3s/ops): allow presigned url requests with up to 15 minutes clock skew ([#216](https://github.com/Nugine/s3s/pull/216)) 54 | + handle fmt message with implicit arguments in s3_error macro ([#228](https://github.com/Nugine/s3s/pull/228)) 55 | + feat(s3s/dto): ignore empty strings ([#244](https://github.com/Nugine/s3s/pull/244)) 56 | + feat(model): extra error codes ([#255](https://github.com/Nugine/s3s/pull/255)) 57 | + feat(s3s/checksum): add crc64nvme ([#256](https://github.com/Nugine/s3s/pull/256)) 58 | + feat(s3s/xml): support xmlns ([#265](https://github.com/Nugine/s3s/pull/265)) 59 | 60 | ### s3s-model 61 | 62 | + Add crate `s3s-model` for S3 model definitions. 63 | 64 | ### s3s-policy 65 | 66 | + Add crate `s3s-policy` for S3 policy language. 67 | + Add grammar model types for serialization and deserialization in `s3s_policy::model`. 68 | + Add `PatternSet` for matching multiple patterns in `s3s_policy::pattern`. 69 | 70 | ### s3s-test 71 | 72 | + Add crate `s3s-test` for custom test framework. 73 | 74 | ### s3s-e2e 75 | 76 | + Add crate `s3s-e2e` for S3 compatibility tests. 77 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/streaming_blob.rs: -------------------------------------------------------------------------------- 1 | //! 
Streaming blob 2 | 3 | use crate::error::StdError; 4 | use crate::http::Body; 5 | use crate::stream::*; 6 | 7 | use std::fmt; 8 | use std::pin::Pin; 9 | use std::task::{Context, Poll}; 10 | 11 | use futures::Stream; 12 | use hyper::body::Bytes; 13 | 14 | pub struct StreamingBlob { 15 | inner: DynByteStream, 16 | } 17 | 18 | impl StreamingBlob { 19 | pub fn new(stream: S) -> Self 20 | where 21 | S: ByteStream> + Send + Sync + 'static, 22 | { 23 | Self { inner: Box::pin(stream) } 24 | } 25 | 26 | pub fn wrap(stream: S) -> Self 27 | where 28 | S: Stream> + Send + Sync + 'static, 29 | E: std::error::Error + Send + Sync + 'static, 30 | { 31 | Self { inner: wrap(stream) } 32 | } 33 | 34 | fn into_inner(self) -> DynByteStream { 35 | self.inner 36 | } 37 | } 38 | 39 | impl fmt::Debug for StreamingBlob { 40 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 41 | f.debug_struct("StreamingBlob") 42 | .field("remaining_length", &self.remaining_length()) 43 | .finish_non_exhaustive() 44 | } 45 | } 46 | 47 | impl Stream for StreamingBlob { 48 | type Item = Result; 49 | 50 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 51 | Pin::new(&mut self.inner).poll_next(cx) 52 | } 53 | 54 | fn size_hint(&self) -> (usize, Option) { 55 | self.inner.size_hint() 56 | } 57 | } 58 | 59 | impl ByteStream for StreamingBlob { 60 | fn remaining_length(&self) -> RemainingLength { 61 | self.inner.remaining_length() 62 | } 63 | } 64 | 65 | impl From for DynByteStream { 66 | fn from(value: StreamingBlob) -> Self { 67 | value.into_inner() 68 | } 69 | } 70 | 71 | impl From for StreamingBlob { 72 | fn from(value: DynByteStream) -> Self { 73 | Self { inner: value } 74 | } 75 | } 76 | 77 | impl From for Body { 78 | fn from(value: StreamingBlob) -> Self { 79 | Body::from(value.into_inner()) 80 | } 81 | } 82 | 83 | impl From for StreamingBlob { 84 | fn from(value: Body) -> Self { 85 | Self::new(value) 86 | } 87 | } 88 | 89 | pin_project_lite::pin_project! 
{ 90 | pub(crate) struct StreamWrapper { 91 | #[pin] 92 | inner: S 93 | } 94 | } 95 | 96 | impl Stream for StreamWrapper 97 | where 98 | S: Stream> + Send + Sync + 'static, 99 | E: std::error::Error + Send + Sync + 'static, 100 | { 101 | type Item = Result; 102 | 103 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 104 | let this = self.project(); 105 | this.inner.poll_next(cx).map_err(|e| Box::new(e) as StdError) 106 | } 107 | 108 | fn size_hint(&self) -> (usize, Option) { 109 | self.inner.size_hint() 110 | } 111 | } 112 | 113 | impl ByteStream for StreamWrapper 114 | where 115 | StreamWrapper: Stream>, 116 | { 117 | fn remaining_length(&self) -> RemainingLength { 118 | RemainingLength::unknown() 119 | } 120 | } 121 | 122 | fn wrap(inner: S) -> DynByteStream 123 | where 124 | StreamWrapper: ByteStream> + Send + Sync + 'static, 125 | { 126 | Box::pin(StreamWrapper { inner }) 127 | } 128 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # s3s 2 | 3 | [![Apache 2.0 licensed][license-badge]][license-url] 4 | [![Unsafe Forbidden][unsafe-forbidden-badge]][unsafe-forbidden-url] 5 | 6 | [license-badge]: https://img.shields.io/badge/license-Apache--2.0-blue.svg 7 | [license-url]: ./LICENSE 8 | [unsafe-forbidden-badge]: https://img.shields.io/badge/unsafe-forbidden-success.svg 9 | [unsafe-forbidden-url]: https://github.com/rust-secure-code/safety-dance/ 10 | 11 | S3 Service Adapter 12 | 13 | 14 | | crate | version | docs | 15 | | :------------------------- | :-----------------------------------------------------------------------------------------: | :------------------------------------------------------------------: | 16 | | [s3s](./crates/s3s/) | [![Crates.io](https://img.shields.io/crates/v/s3s.svg)](https://crates.io/crates/s3s) | [![Docs](https://docs.rs/s3s/badge.svg)](https://docs.rs/s3s/) | 17 | | 
[s3s-aws](./crates/s3s-aws/) | [![Crates.io](https://img.shields.io/crates/v/s3s-aws.svg)](https://crates.io/crates/s3s-aws) | [![Docs](https://docs.rs/s3s-aws/badge.svg)](https://docs.rs/s3s-aws/) | 18 | | [s3s-fs](./crates/s3s-fs/) | [![Crates.io](https://img.shields.io/crates/v/s3s-fs.svg)](https://crates.io/crates/s3s-fs) | [![Docs](https://docs.rs/s3s-fs/badge.svg)](https://docs.rs/s3s-fs/) | 19 | 20 | 📚 **[Development documentation](https://s3s-project.github.io/s3s/)** for the `main` branch is available on GitHub Pages. 21 | 22 | This experimental project intends to offer an ergonomic adapter for building S3-compatible services. 23 | 24 | `s3s` implements Amazon S3 REST API in the form of a generic [hyper](https://github.com/hyperium/hyper) service. S3-compatible services can focus on the S3 API itself and don't have to care about the HTTP layer. 25 | 26 | `s3s-aws` provides useful types and integration with [`aws-sdk-s3`](https://crates.io/crates/aws-sdk-s3). 27 | 28 | `s3s-fs` implements the S3 API based on file system, as a sample implementation. It is designed for integration testing, which can be used to [mock an S3 client](https://github.com/Nugine/s3s/blob/main/crates/s3s-fs/tests/it_aws.rs). It also provides a binary for debugging. [Play it!](./CONTRIBUTING.md#play-the-test-server) 29 | 30 | ## How it works 31 | 32 | ![architecture diagram](docs/arch/arch.svg) 33 | 34 | The diagram above shows how `s3s` works. 35 | 36 | `s3s` converts HTTP requests to operation inputs before calling the user-defined service. 37 | 38 | `s3s` converts operation outputs or errors to HTTP responses after calling the user-defined service. 39 | 40 | The data types, serialization and deserialization are generated from the smithy model in [aws-sdk-rust](https://github.com/awslabs/aws-sdk-rust) repository. We apply manual hacks to fix some problems in [smithy server codegen](https://smithy-lang.github.io/smithy-rs/design/server/overview.html) and make `s3s` ready to use now. 
41 | 42 | ## Security 43 | 44 | `S3Service` and other adapters in this project have no security protection. If they are exposed to the Internet directly, they may be **attacked**. 45 | 46 | It is up to the user to implement security enhancements such as **HTTP body length limit**, rate limit and back pressure. 47 | 48 | ## Contributing 49 | 50 | + [Development Guide](./CONTRIBUTING.md) 51 | 52 | ## Sponsor 53 | 54 | We have a reward funds pool for contributors: 55 | 56 | If my open-source work has been helpful to you, please [sponsor me](https://github.com/Nugine#sponsor). 57 | 58 | Every little bit helps. Thank you! 59 | -------------------------------------------------------------------------------- /crates/s3s/src/http/ordered_qs.rs: -------------------------------------------------------------------------------- 1 | //! Ordered query strings 2 | 3 | use crate::utils::stable_sort_by_first; 4 | 5 | /// Immutable query string container 6 | #[derive(Debug, Default, Clone)] 7 | pub struct OrderedQs { 8 | /// Ascending query strings 9 | qs: Vec<(String, String)>, 10 | } 11 | 12 | /// [`OrderedQs`] 13 | #[derive(Debug, thiserror::Error)] 14 | #[error("ParseOrderedQsError: {inner}")] 15 | pub struct ParseOrderedQsError { 16 | /// url decode error 17 | inner: serde_urlencoded::de::Error, 18 | } 19 | 20 | impl OrderedQs { 21 | /// Constructs [`OrderedQs`] from vec 22 | /// 23 | /// + strings must be url-decoded 24 | #[cfg(test)] 25 | #[must_use] 26 | pub fn from_vec_unchecked(mut v: Vec<(String, String)>) -> Self { 27 | stable_sort_by_first(&mut v); 28 | Self { qs: v } 29 | } 30 | 31 | /// Parses [`OrderedQs`] from query 32 | /// 33 | /// # Errors 34 | /// Returns [`ParseOrderedQsError`] if query cannot be decoded 35 | pub fn parse(query: &str) -> Result { 36 | let result = serde_urlencoded::from_str::>(query); 37 | let mut v = result.map_err(|e| ParseOrderedQsError { inner: e })?; 38 | stable_sort_by_first(&mut v); 39 | Ok(Self { qs: v }) 40 | } 41 | 42 | #[must_use] 43 
| pub fn has(&self, name: &str) -> bool { 44 | self.qs.binary_search_by_key(&name, |x| x.0.as_str()).is_ok() 45 | } 46 | 47 | /// Gets query values by name. Time `O(logn)` 48 | pub fn get_all(&self, name: &str) -> impl Iterator + use<'_> { 49 | let qs = self.qs.as_slice(); 50 | 51 | let lower_bound = qs.partition_point(|x| x.0.as_str() < name); 52 | let upper_bound = qs.partition_point(|x| x.0.as_str() <= name); 53 | 54 | qs[lower_bound..upper_bound].iter().map(|x| x.1.as_str()) 55 | } 56 | 57 | pub fn get_unique(&self, name: &str) -> Option<&str> { 58 | let qs = self.qs.as_slice(); 59 | let lower_bound = qs.partition_point(|x| x.0.as_str() < name); 60 | 61 | let mut iter = qs[lower_bound..].iter(); 62 | let pair = iter.next()?; 63 | 64 | if let Some(following) = iter.next() { 65 | if following.0 == name { 66 | return None; 67 | } 68 | } 69 | 70 | (pair.0.as_str() == name).then_some(pair.1.as_str()) 71 | } 72 | } 73 | 74 | impl AsRef<[(String, String)]> for OrderedQs { 75 | fn as_ref(&self) -> &[(String, String)] { 76 | self.qs.as_ref() 77 | } 78 | } 79 | 80 | #[cfg(test)] 81 | mod tests { 82 | use super::*; 83 | 84 | #[test] 85 | fn tag() { 86 | { 87 | let query = "tagging"; 88 | let qs = OrderedQs::parse(query).unwrap(); 89 | assert_eq!(qs.as_ref(), &[("tagging".to_owned(), String::new())]); 90 | 91 | assert_eq!(qs.get_unique("taggin"), None); 92 | assert_eq!(qs.get_unique("tagging"), Some("")); 93 | assert_eq!(qs.get_unique("taggingg"), None); 94 | } 95 | 96 | { 97 | let query = "tagging&tagging"; 98 | let qs = OrderedQs::parse(query).unwrap(); 99 | assert_eq!( 100 | qs.as_ref(), 101 | &[("tagging".to_owned(), String::new()), ("tagging".to_owned(), String::new())] 102 | ); 103 | 104 | assert_eq!(qs.get_unique("taggin"), None); 105 | assert_eq!(qs.get_unique("tagging"), None); 106 | assert_eq!(qs.get_unique("taggingg"), None); 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- 
/crates/s3s-aws/src/connector.rs: -------------------------------------------------------------------------------- 1 | use crate::body::{s3s_body_into_sdk_body, sdk_body_into_s3s_body}; 2 | 3 | use s3s::service::S3Service; 4 | 5 | use std::ops::Not; 6 | 7 | use aws_sdk_s3::config::RuntimeComponents; 8 | use aws_smithy_runtime_api::client::http::{HttpClient, HttpConnectorSettings, SharedHttpConnector}; 9 | use aws_smithy_runtime_api::client::http::{HttpConnector, HttpConnectorFuture}; 10 | use aws_smithy_runtime_api::client::orchestrator::HttpRequest as AwsHttpRequest; 11 | use aws_smithy_runtime_api::client::orchestrator::HttpResponse as AwsHttpResponse; 12 | use aws_smithy_runtime_api::client::result::ConnectorError; 13 | 14 | use hyper::header::HOST; 15 | use hyper::http; 16 | 17 | #[derive(Debug)] 18 | pub struct Client(S3Service); 19 | 20 | impl HttpClient for Client { 21 | fn http_connector(&self, _: &HttpConnectorSettings, _: &RuntimeComponents) -> SharedHttpConnector { 22 | SharedHttpConnector::new(Connector(self.0.clone())) 23 | } 24 | } 25 | 26 | impl From for Client { 27 | fn from(val: S3Service) -> Self { 28 | Self(val) 29 | } 30 | } 31 | 32 | #[derive(Debug, Clone)] 33 | pub struct Connector(S3Service); 34 | 35 | impl From for Connector { 36 | fn from(val: S3Service) -> Self { 37 | Self(val) 38 | } 39 | } 40 | 41 | fn on_err(e: E) -> ConnectorError 42 | where 43 | E: std::error::Error + Send + Sync + 'static, 44 | { 45 | let kind = aws_smithy_runtime_api::client::retries::ErrorKind::ServerError; 46 | ConnectorError::other(Box::new(e), Some(kind)) 47 | } 48 | 49 | impl HttpConnector for Connector { 50 | fn call(&self, req: AwsHttpRequest) -> HttpConnectorFuture { 51 | let service = self.0.clone(); 52 | HttpConnectorFuture::new_boxed(Box::pin(async move { convert_output(service.call(convert_input(req)?).await) })) 53 | } 54 | } 55 | 56 | fn convert_input(req: AwsHttpRequest) -> Result { 57 | let mut req = req.try_into_http1x().map_err(on_err)?; 58 | 59 | 
if req.headers().contains_key(HOST).not() { 60 | let host = auto_host_header(req.uri()); 61 | req.headers_mut().insert(HOST, host); 62 | } 63 | 64 | Ok(req.map(sdk_body_into_s3s_body)) 65 | } 66 | 67 | fn convert_output(result: Result) -> Result { 68 | match result { 69 | Ok(res) => res.map(s3s_body_into_sdk_body).try_into().map_err(on_err), 70 | Err(e) => { 71 | let kind = aws_smithy_runtime_api::client::retries::ErrorKind::ServerError; 72 | Err(ConnectorError::other(e.into(), Some(kind))) 73 | } 74 | } 75 | } 76 | 77 | // From 78 | fn auto_host_header(uri: &http::Uri) -> http::HeaderValue { 79 | let hostname = uri.host().expect("authority implies host"); 80 | match get_non_default_port(uri) { 81 | Some(port) => http::HeaderValue::try_from(format!("{hostname}:{port}")), 82 | None => http::HeaderValue::from_str(hostname), 83 | } 84 | .expect("uri host is valid header value") 85 | } 86 | 87 | /// From 88 | fn get_non_default_port(uri: &http::Uri) -> Option> { 89 | match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { 90 | (Some(443), true) => None, 91 | (Some(80), false) => None, 92 | _ => uri.port(), 93 | } 94 | } 95 | 96 | fn is_schema_secure(uri: &http::Uri) -> bool { 97 | uri.scheme_str() 98 | .is_some_and(|scheme_str| matches!(scheme_str, "wss" | "https")) 99 | } 100 | -------------------------------------------------------------------------------- /scripts/report-mint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from dataclasses import dataclass 3 | from typing import Any, Dict, Optional 4 | import json 5 | import sys 6 | from pprint import pprint # noqa: F401 7 | from itertools import groupby 8 | 9 | 10 | # https://github.com/minio/mint#mint-log-format 11 | @dataclass 12 | class MintLog: 13 | name: str 14 | function: Optional[str] 15 | args: Optional[Dict[str, Any]] 16 | duration: int 17 | status: str 18 | alert: Optional[str] 19 | message: Optional[str] 20 | error: Optional[str] 
21 | 22 | 23 | def from_json(x: Any) -> MintLog: 24 | return MintLog( 25 | name=x["name"], 26 | function=x.get("function"), 27 | args=x.get("args"), 28 | duration=x["duration"], 29 | status=x["status"], 30 | alert=x.get("alert"), 31 | message=x.get("message"), 32 | error=x.get("error"), 33 | ) 34 | 35 | 36 | if __name__ == "__main__": 37 | log_path = sys.argv[1] 38 | logs = [] 39 | with open(log_path) as f: 40 | for line in f.readlines(): 41 | line = line.strip() 42 | if len(line) == 0: 43 | continue 44 | 45 | json_str = line 46 | if json_str.find("{") != 0: 47 | json_str = json_str[json_str.find("{") :] 48 | 49 | try: 50 | json_value = json.loads(json_str) 51 | except Exception: 52 | print(f"error parsing log line: {line}") 53 | continue 54 | 55 | logs.append(from_json(json_value)) 56 | 57 | for x in logs: 58 | if ":" in x.name: 59 | name, function = x.name.split(":") 60 | x.name = name.strip() 61 | x.function = function.strip() 62 | 63 | groups = {k: list(v) for k, v in groupby(logs, lambda x: x.name)} 64 | counts = {} 65 | 66 | for name, group in groups.items(): 67 | pass_count = len(list(x for x in group if x.status == "PASS")) 68 | fail_count = len(list(x for x in group if x.status == "FAIL")) 69 | na_count = len(list(x for x in group if x.status == "NA")) 70 | counts[name] = {"pass": pass_count, "fail": fail_count, "na": na_count} 71 | 72 | print( 73 | f"{name:<20} " 74 | f"passed {pass_count:>3}, " 75 | f"failed {fail_count:>3}, " 76 | f"na {na_count:>3}" 77 | ) 78 | print() 79 | 80 | total_pass_count = sum(c["pass"] for c in counts.values()) 81 | total_fail_count = sum(c["fail"] for c in counts.values()) 82 | total_na_count = sum(c["na"] for c in counts.values()) 83 | name = "summary" 84 | print( 85 | f"{name:<20} " 86 | f"passed {total_pass_count:>3}, " 87 | f"failed {total_fail_count:>3}, " 88 | f"na {total_na_count:>3}" 89 | ) 90 | 91 | passed_groups = [ 92 | # FIXME: https://github.com/minio/mint/blob/master/run/core/aws-sdk-go-v2/main.go#L294 93 | # 
"aws-sdk-go", version outdated 94 | "aws-sdk-ruby", 95 | "awscli", 96 | "minio-go", 97 | "s3cmd", 98 | ] 99 | 100 | for group in passed_groups: 101 | assert counts[group]["fail"] == 0, f'group "{group}" failed' 102 | 103 | # FIXME: E2E tests 104 | # https://github.com/Nugine/s3s/issues/4 105 | # https://github.com/Nugine/s3s/pull/141#issuecomment-2142662531 106 | 107 | assert "minio-dotnet" not in counts 108 | assert counts["minio-js"]["pass"] >= 190 109 | assert counts["versioning"]["pass"] >= 4 110 | assert counts["minio-java"]["pass"] >= 17 111 | 112 | assert counts["aws-sdk-php"]["pass"] >= 10 113 | assert counts["minio-py"]["pass"] >= 2 114 | assert counts["mc"]["pass"] >= 2 115 | -------------------------------------------------------------------------------- /crates/s3s-proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | use s3s::auth::SimpleAuth; 2 | use s3s::host::SingleDomain; 3 | use s3s::service::S3ServiceBuilder; 4 | use tokio::net::TcpListener; 5 | 6 | use std::error::Error; 7 | use std::io::IsTerminal; 8 | 9 | use aws_credential_types::provider::ProvideCredentials; 10 | 11 | use clap::Parser; 12 | use tracing::info; 13 | 14 | use hyper_util::rt::{TokioExecutor, TokioIo}; 15 | use hyper_util::server::conn::auto::Builder as ConnBuilder; 16 | 17 | #[derive(Debug, Parser)] 18 | struct Opt { 19 | #[clap(long, default_value = "localhost")] 20 | host: String, 21 | 22 | #[clap(long, default_value = "8014")] 23 | port: u16, 24 | 25 | #[clap(long)] 26 | domain: Option, 27 | 28 | #[clap(long)] 29 | endpoint_url: String, 30 | } 31 | 32 | fn setup_tracing() { 33 | use tracing_subscriber::EnvFilter; 34 | 35 | let env_filter = EnvFilter::from_default_env(); 36 | let enable_color = std::io::stdout().is_terminal(); 37 | 38 | tracing_subscriber::fmt() 39 | .pretty() 40 | .with_env_filter(env_filter) 41 | .with_ansi(enable_color) 42 | .init(); 43 | } 44 | 45 | #[tokio::main] 46 | async fn main() -> Result<(), Box> { 
47 | setup_tracing(); 48 | let opt = Opt::parse(); 49 | 50 | // Setup S3 provider 51 | let sdk_conf = aws_config::from_env().endpoint_url(&opt.endpoint_url).load().await; 52 | let client = aws_sdk_s3::Client::from_conf(aws_sdk_s3::config::Builder::from(&sdk_conf).force_path_style(true).build()); 53 | let proxy = s3s_aws::Proxy::from(client); 54 | 55 | // Setup S3 service 56 | let service = { 57 | let mut b = S3ServiceBuilder::new(proxy); 58 | 59 | // Enable authentication 60 | if let Some(cred_provider) = sdk_conf.credentials_provider() { 61 | let cred = cred_provider.provide_credentials().await?; 62 | b.set_auth(SimpleAuth::from_single(cred.access_key_id(), cred.secret_access_key())); 63 | } 64 | 65 | // Enable parsing virtual-hosted-style requests 66 | if let Some(domain) = opt.domain { 67 | b.set_host(SingleDomain::new(&domain)?); 68 | } 69 | 70 | b.build() 71 | }; 72 | 73 | // Run server 74 | let listener = TcpListener::bind((opt.host.as_str(), opt.port)).await?; 75 | 76 | let http_server = ConnBuilder::new(TokioExecutor::new()); 77 | let graceful = hyper_util::server::graceful::GracefulShutdown::new(); 78 | 79 | let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c()); 80 | 81 | info!("server is running at http://{}:{}/", opt.host, opt.port); 82 | info!("server is forwarding requests to {}", opt.endpoint_url); 83 | 84 | loop { 85 | let (socket, _) = tokio::select! { 86 | res = listener.accept() => { 87 | match res { 88 | Ok(conn) => conn, 89 | Err(err) => { 90 | tracing::error!("error accepting connection: {err}"); 91 | continue; 92 | } 93 | } 94 | } 95 | _ = ctrl_c.as_mut() => { 96 | break; 97 | } 98 | }; 99 | 100 | let conn = http_server.serve_connection(TokioIo::new(socket), service.clone()); 101 | let conn = graceful.watch(conn.into_owned()); 102 | tokio::spawn(async move { 103 | let _ = conn.await; 104 | }); 105 | } 106 | 107 | tokio::select! 
{ 108 | () = graceful.shutdown() => { 109 | tracing::debug!("Gracefully shutdown!"); 110 | }, 111 | () = tokio::time::sleep(std::time::Duration::from_secs(10)) => { 112 | tracing::debug!("Waited 10 seconds for graceful shutdown, aborting..."); 113 | } 114 | } 115 | 116 | info!("server is stopped"); 117 | 118 | Ok(()) 119 | } 120 | -------------------------------------------------------------------------------- /data/crawl.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from pprint import pprint # noqa: F401 3 | import re 4 | import json 5 | 6 | from bs4 import BeautifulSoup 7 | import requests 8 | import typer 9 | 10 | cli = typer.Typer(pretty_exceptions_show_locals=False) 11 | 12 | model_dir = Path(__file__).parent 13 | 14 | 15 | def save_json(path, data): 16 | with open(path, "w") as f: 17 | json.dump(data, f, indent=4) 18 | 19 | 20 | def download_aws_sdk(service: str, *, commit: str): 21 | url = f"https://github.com/awslabs/aws-sdk-rust/raw/{commit}/aws-models/{service}.json" 22 | resp = requests.get(url) 23 | assert resp.status_code == 200 24 | assert resp.json() 25 | with open(model_dir / f"{service}.json", "w") as f: 26 | f.write(resp.text) 27 | 28 | 29 | @cli.command() 30 | def download_s3_model(): 31 | # https://github.com/awslabs/aws-sdk-rust/commits/main/aws-models/s3.json 32 | download_aws_sdk("s3", commit="2c2a06e583392266669e075d4a47489d6da1e055") 33 | 34 | 35 | @cli.command() 36 | def download_sts_model(): 37 | # https://github.com/awslabs/aws-sdk-rust/commits/main/aws-models/sts.json 38 | download_aws_sdk("sts", commit="13eb310a6cbb4912f0a44db2fb2fca0b2bfee5d1") 39 | 40 | 41 | @cli.command() 42 | def crawl_error_codes(): 43 | url = "https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html" 44 | 45 | html = requests.get(url).text 46 | 47 | soup = BeautifulSoup(html, "lxml") 48 | 49 | kinds = [ 50 | ("S3", "ErrorCodeList"), 51 | ("Replication", "ReplicationErrorCodeList"), 
52 | ("Tagging", "S3TaggingErrorCodeList"), 53 | ("SelectObjectContent", "SelectObjectContentErrorCodeList"), 54 | ] 55 | 56 | data = {} 57 | 58 | for kind, h2_id in kinds: 59 | h2 = soup.css.select(f"#{h2_id}")[0] # type:ignore 60 | 61 | # find the next table 62 | table = None 63 | for e in h2.next_elements: 64 | if e.name == "table": # type:ignore 65 | table = e 66 | break 67 | assert table is not None 68 | 69 | th_list = table.css.select("th") # type:ignore 70 | assert th_list[0].text in ("Error code", "Error Code") 71 | assert th_list[1].text == "Description" 72 | assert th_list[2].text in ("HTTP status code", "HTTP Status Code") 73 | 74 | tr_list = table.css.select("tr")[1:] # type:ignore 75 | tr_list = [[e for e in tr.children if e.name == "td"] for tr in tr_list] 76 | 77 | ans = [] 78 | for td_list in tr_list: 79 | td0_code = td_list[0].css.select("code") 80 | if td0_code: 81 | t0 = td0_code[0].text.strip() 82 | else: 83 | t0 = td_list[0].text.strip() 84 | 85 | t1 = td_list[1].text.strip() 86 | t2 = td_list[2].text.strip() 87 | 88 | error_code = t0 89 | 90 | description = re.sub(r"\n\t+", " ", t1).strip() 91 | 92 | if t2 == "N/A": 93 | http_status_code = None 94 | else: 95 | m = re.match(r"(\d{3})[\s\S]*", t2) 96 | if m is None: 97 | continue # FIXME: EntityTooLarge 405 98 | # assert m is not None, f"t2: {repr(t2)}" 99 | http_status_code = int(m.group(1)) 100 | 101 | ans.append( 102 | { 103 | "code": error_code, 104 | "description": description, 105 | "http_status_code": http_status_code, 106 | } 107 | ) 108 | 109 | ans.sort(key=lambda x: x["code"]) 110 | data[kind] = ans 111 | 112 | save_json(model_dir / "s3_error_codes.json", data) 113 | 114 | 115 | @cli.command() 116 | def update(): 117 | download_s3_model() 118 | download_sts_model() 119 | crawl_error_codes() 120 | 121 | 122 | if __name__ == "__main__": 123 | cli() 124 | -------------------------------------------------------------------------------- /crates/s3s/src/crypto.rs: 
-------------------------------------------------------------------------------- 1 | use numeric_cast::TruncatingCast; 2 | 3 | pub trait Checksum { 4 | type Output: AsRef<[u8]>; 5 | 6 | #[must_use] 7 | fn new() -> Self; 8 | 9 | fn update(&mut self, data: &[u8]); 10 | 11 | #[must_use] 12 | fn finalize(self) -> Self::Output; 13 | 14 | #[must_use] 15 | fn checksum(data: &[u8]) -> Self::Output 16 | where 17 | Self: Sized, 18 | { 19 | let mut hasher = Self::new(); 20 | hasher.update(data); 21 | hasher.finalize() 22 | } 23 | } 24 | 25 | pub struct Crc32(crc_fast::Digest); 26 | 27 | impl Default for Crc32 { 28 | fn default() -> Self { 29 | Self(crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc)) 30 | } 31 | } 32 | 33 | impl Crc32 { 34 | #[must_use] 35 | pub fn checksum_u32(data: &[u8]) -> u32 { 36 | let mut hasher = Self::new(); 37 | hasher.update(data); 38 | hasher.0.finalize().truncating_cast::() 39 | } 40 | } 41 | 42 | impl Checksum for Crc32 { 43 | type Output = [u8; 4]; 44 | 45 | fn new() -> Self { 46 | Self::default() 47 | } 48 | 49 | fn update(&mut self, data: &[u8]) { 50 | self.0.update(data); 51 | } 52 | 53 | fn finalize(self) -> Self::Output { 54 | self.0.finalize().truncating_cast::().to_be_bytes() 55 | } 56 | } 57 | 58 | pub struct Crc32c(crc_fast::Digest); 59 | 60 | impl Default for Crc32c { 61 | fn default() -> Self { 62 | Self(crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi)) 63 | } 64 | } 65 | 66 | impl Checksum for Crc32c { 67 | type Output = [u8; 4]; 68 | 69 | fn new() -> Self { 70 | Self::default() 71 | } 72 | 73 | fn update(&mut self, data: &[u8]) { 74 | self.0.update(data); 75 | } 76 | 77 | fn finalize(self) -> Self::Output { 78 | self.0.finalize().truncating_cast::().to_be_bytes() 79 | } 80 | } 81 | 82 | pub struct Crc64Nvme(crc_fast::Digest); 83 | 84 | impl Default for Crc64Nvme { 85 | fn default() -> Self { 86 | Self(crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme)) 87 | } 88 | } 89 | 90 | impl Checksum for Crc64Nvme { 91 
| type Output = [u8; 8]; 92 | 93 | fn new() -> Self { 94 | Self::default() 95 | } 96 | 97 | fn update(&mut self, data: &[u8]) { 98 | self.0.update(data); 99 | } 100 | 101 | fn finalize(self) -> Self::Output { 102 | self.0.finalize().to_be_bytes() 103 | } 104 | } 105 | 106 | #[derive(Default)] 107 | pub struct Sha1(sha1::Sha1); 108 | 109 | impl Checksum for Sha1 { 110 | type Output = [u8; 20]; 111 | 112 | fn new() -> Self { 113 | Self::default() 114 | } 115 | 116 | fn update(&mut self, data: &[u8]) { 117 | use sha1::Digest as _; 118 | self.0.update(data); 119 | } 120 | 121 | fn finalize(self) -> Self::Output { 122 | use sha1::Digest as _; 123 | self.0.finalize().into() 124 | } 125 | } 126 | 127 | #[derive(Default)] 128 | pub struct Sha256(sha2::Sha256); 129 | 130 | impl Checksum for Sha256 { 131 | type Output = [u8; 32]; 132 | 133 | fn new() -> Self { 134 | Self::default() 135 | } 136 | 137 | fn update(&mut self, data: &[u8]) { 138 | use sha2::Digest as _; 139 | self.0.update(data); 140 | } 141 | 142 | fn finalize(self) -> Self::Output { 143 | use sha2::Digest as _; 144 | self.0.finalize().into() 145 | } 146 | } 147 | 148 | #[derive(Default)] 149 | pub struct Md5(md5::Md5); 150 | 151 | impl Checksum for Md5 { 152 | type Output = [u8; 16]; 153 | 154 | fn new() -> Self { 155 | Self::default() 156 | } 157 | 158 | fn update(&mut self, data: &[u8]) { 159 | use md5::Digest as _; 160 | self.0.update(data); 161 | } 162 | 163 | fn finalize(self) -> Self::Output { 164 | use md5::Digest as _; 165 | self.0.finalize().into() 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /crates/s3s-policy/src/pattern.rs: -------------------------------------------------------------------------------- 1 | pub struct PatternSet { 2 | // TODO: rewrite the naive implementation with something like Aho-Corasick 3 | patterns: Vec, 4 | } 5 | 6 | #[derive(Debug, thiserror::Error)] 7 | pub enum PatternError { 8 | #[error("Invalid pattern")] 9 | 
InvalidPattern, 10 | } 11 | 12 | #[derive(Debug)] 13 | struct Pattern { 14 | bytes: Vec, 15 | } 16 | 17 | impl PatternSet { 18 | /// Create a new matcher from a list of patterns. 19 | /// 20 | /// Patterns can contain 21 | /// + `*` to match any sequence of characters (including empty sequence) 22 | /// + `?` to match any single character 23 | /// + any other character to match itself 24 | /// 25 | /// # Errors 26 | /// Returns an error if any pattern is invalid. 27 | pub fn new<'a>(patterns: impl IntoIterator) -> Result { 28 | let patterns = patterns.into_iter().map(Self::parse_pattern).collect::>()?; 29 | Ok(PatternSet { patterns }) 30 | } 31 | 32 | fn parse_pattern(pattern: &str) -> Result { 33 | if pattern.is_empty() { 34 | return Err(PatternError::InvalidPattern); 35 | } 36 | Ok(Pattern { 37 | bytes: pattern.as_bytes().to_owned(), 38 | }) 39 | } 40 | 41 | /// Check if the input matches any of the patterns. 42 | #[must_use] 43 | pub fn is_match(&self, input: &str) -> bool { 44 | for pattern in &self.patterns { 45 | if Self::match_pattern(&pattern.bytes, input.as_bytes()) { 46 | return true; 47 | } 48 | } 49 | false 50 | } 51 | 52 | /// 53 | fn match_pattern(pattern: &[u8], input: &[u8]) -> bool { 54 | let mut p_idx = 0; 55 | let mut s_idx = 0; 56 | 57 | let mut p_back = usize::MAX - 1; 58 | let mut s_back = usize::MAX - 1; 59 | 60 | loop { 61 | if p_idx < pattern.len() { 62 | let p = pattern[p_idx]; 63 | if p == b'*' { 64 | p_idx += 1; 65 | p_back = p_idx; 66 | s_back = s_idx; 67 | continue; 68 | } 69 | 70 | if s_idx < input.len() { 71 | let c = input[s_idx]; 72 | if p == c || p == b'?' 
{ 73 | p_idx += 1; 74 | s_idx += 1; 75 | continue; 76 | } 77 | } 78 | } else if s_idx == input.len() { 79 | return true; 80 | } 81 | 82 | if p_back == pattern.len() { 83 | return true; 84 | } 85 | 86 | if s_back + 1 < input.len() { 87 | s_back += 1; 88 | p_idx = p_back; 89 | s_idx = s_back; 90 | continue; 91 | } 92 | 93 | return false; 94 | } 95 | } 96 | } 97 | 98 | #[cfg(test)] 99 | mod tests { 100 | use super::*; 101 | 102 | #[test] 103 | fn test_match() { 104 | let cases = &[ 105 | ("*", "", true), 106 | ("**", "", true), 107 | ("***", "abc", true), 108 | ("a", "aa", false), 109 | ("***a", "aaaa", true), 110 | ("*abc???def", "abcdefabc123def", true), 111 | ("a*c?b", "acdcb", false), 112 | ("*a*b*c*", "abc", true), 113 | ("a*b*c*", "abc", true), 114 | ("*a*b*c", "abc", true), 115 | ("a*b*c", "abc", true), 116 | ]; 117 | 118 | for &(pattern, input, expected) in cases { 119 | let pattern = PatternSet::parse_pattern(pattern).unwrap(); 120 | let ans = PatternSet::match_pattern(&pattern.bytes, input.as_bytes()); 121 | assert_eq!(ans, expected, "pattern: {pattern:?}, input: {input:?}"); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /crates/s3s/examples/axum.rs: -------------------------------------------------------------------------------- 1 | use s3s::route::S3Route; 2 | use s3s::{Body, S3Request, S3Response, S3Result}; 3 | 4 | use axum::http; 5 | use http::{Extensions, HeaderMap, Method, Uri}; 6 | use tower::Service; 7 | 8 | pub struct CustomRoute { 9 | router: axum::Router, 10 | } 11 | 12 | impl CustomRoute { 13 | #[must_use] 14 | pub fn build() -> Self { 15 | Self { 16 | router: self::handlers::register(), 17 | } 18 | } 19 | } 20 | 21 | #[derive(Debug, Clone)] 22 | pub struct Extra { 23 | pub credentials: Option, 24 | pub region: Option, 25 | pub service: Option, 26 | } 27 | 28 | fn convert_request(req: S3Request) -> http::Request { 29 | let (mut parts, _) = 
http::Request::new(Body::empty()).into_parts(); 30 | parts.method = req.method; 31 | parts.uri = req.uri; 32 | parts.headers = req.headers; 33 | parts.extensions = req.extensions; 34 | parts.extensions.insert(Extra { 35 | credentials: req.credentials, 36 | region: req.region, 37 | service: req.service, 38 | }); 39 | http::Request::from_parts(parts, req.input) 40 | } 41 | 42 | fn convert_response(resp: http::Response) -> S3Response { 43 | let (parts, body) = resp.into_parts(); 44 | let mut s3_resp = S3Response::new(Body::http_body_unsync(body)); 45 | s3_resp.status = Some(parts.status); 46 | s3_resp.headers = parts.headers; 47 | s3_resp.extensions = parts.extensions; 48 | s3_resp 49 | } 50 | 51 | #[async_trait::async_trait] 52 | impl S3Route for CustomRoute { 53 | fn is_match(&self, _method: &Method, uri: &Uri, _headers: &HeaderMap, _extensions: &mut Extensions) -> bool { 54 | let path = uri.path(); 55 | let prefix = const_str::concat!(self::handlers::PREFIX, "/"); 56 | path.starts_with(prefix) 57 | } 58 | 59 | async fn check_access(&self, req: &mut S3Request) -> S3Result<()> { 60 | if req.credentials.is_none() { 61 | tracing::debug!("anonymous access"); 62 | } 63 | Ok(()) // allow all requests 64 | } 65 | 66 | async fn call(&self, req: S3Request) -> S3Result> { 67 | let mut service = self.router.clone().into_service::(); 68 | let req = convert_request(req); 69 | let result = service.call(req).await; 70 | match result { 71 | Ok(resp) => Ok(convert_response(resp)), 72 | Err(e) => match e {}, 73 | } 74 | } 75 | } 76 | 77 | mod handlers { 78 | use std::collections::HashMap; 79 | 80 | use axum::Json; 81 | use axum::Router; 82 | use axum::body::Body; 83 | use axum::extract::Path; 84 | use axum::extract::Query; 85 | use axum::extract::Request; 86 | use axum::http::Response; 87 | use axum::response; 88 | use axum::routing::get; 89 | use axum::routing::post; 90 | 91 | pub async fn echo(req: Request) -> Response { 92 | Response::new(req.into_body()) 93 | } 94 | 95 | pub 
async fn hello() -> &'static str { 96 | "Hello, World!" 97 | } 98 | 99 | pub async fn show_path(Path(path): Path) -> String { 100 | path 101 | } 102 | 103 | pub async fn show_query(Query(query): Query>) -> String { 104 | format!("{query:?}") 105 | } 106 | 107 | pub async fn show_json(Json(json): Json) -> response::Json { 108 | tracing::debug!(?json); 109 | response::Json(json) 110 | } 111 | 112 | pub const PREFIX: &str = "/custom"; 113 | 114 | pub fn register() -> Router { 115 | let router = Router::new() 116 | .route("/echo", post(echo)) 117 | .route("/hello", get(hello)) 118 | .route("/show_path/{*path}", get(show_path)) 119 | .route("/show_query", get(show_query)) 120 | .route("/show_json", post(show_json)); 121 | 122 | Router::new().nest(PREFIX, router) 123 | } 124 | } 125 | 126 | fn main() {} 127 | -------------------------------------------------------------------------------- /crates/s3s/src/xml/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::missing_errors_doc)] // TODO 2 | 3 | mod de; 4 | pub use self::de::*; 5 | 6 | mod ser; 7 | pub use self::ser::*; 8 | 9 | #[cfg(feature = "minio")] 10 | mod generated_minio; 11 | 12 | #[cfg(not(feature = "minio"))] 13 | mod generated; 14 | 15 | mod manually { 16 | use super::*; 17 | 18 | use crate::dto::BucketLocationConstraint; 19 | use crate::dto::GetBucketLocationOutput; 20 | 21 | impl Serialize for GetBucketLocationOutput { 22 | fn serialize(&self, s: &mut Serializer) -> SerResult { 23 | let xmlns = "http://s3.amazonaws.com/doc/2006-03-01/"; 24 | if let Some(location_constraint) = &self.location_constraint { 25 | s.content_with_ns("LocationConstraint", xmlns, location_constraint)?; 26 | } else { 27 | s.content_with_ns("LocationConstraint", xmlns, "")?; 28 | } 29 | Ok(()) 30 | } 31 | } 32 | 33 | impl<'xml> Deserialize<'xml> for GetBucketLocationOutput { 34 | fn deserialize(d: &mut Deserializer<'xml>) -> DeResult { 35 | let mut location_constraint: Option = 
None; 36 | d.for_each_element(|d, x| match x { 37 | b"LocationConstraint" => { 38 | if location_constraint.is_some() { 39 | return Err(DeError::DuplicateField); 40 | } 41 | let val: BucketLocationConstraint = d.content()?; 42 | if !val.as_str().is_empty() { 43 | location_constraint = Some(val); 44 | } 45 | Ok(()) 46 | } 47 | _ => Err(DeError::UnexpectedTagName), 48 | })?; 49 | Ok(Self { location_constraint }) 50 | } 51 | } 52 | 53 | use crate::dto::AssumeRoleOutput; 54 | 55 | impl Serialize for AssumeRoleOutput { 56 | fn serialize(&self, s: &mut Serializer) -> SerResult { 57 | let xmlns = "https://sts.amazonaws.com/doc/2011-06-15/"; 58 | s.element_with_ns("AssumeRoleResponse", xmlns, |s| { 59 | s.content("AssumeRoleResult", self) // 60 | })?; 61 | Ok(()) 62 | } 63 | } 64 | 65 | impl<'xml> Deserialize<'xml> for AssumeRoleOutput { 66 | fn deserialize(d: &mut Deserializer<'xml>) -> DeResult { 67 | d.named_element("AssumeRoleResponse", |d| { 68 | d.named_element("AssumeRoleResult", Self::deserialize_content) // 69 | }) 70 | } 71 | } 72 | 73 | use crate::dto::ETag; 74 | use crate::dto::ParseETagError; 75 | 76 | use stdx::default::default; 77 | 78 | impl SerializeContent for ETag { 79 | fn serialize_content(&self, s: &mut Serializer) -> SerResult { 80 | let val = self.value(); 81 | if val.len() <= 64 { 82 | let mut buf: arrayvec::ArrayString<72> = default(); 83 | buf.push('"'); 84 | buf.push_str(val); 85 | buf.push('"'); 86 | ::serialize_content(&buf, s) 87 | } else { 88 | let buf = format!("\"{val}\""); 89 | ::serialize_content(&buf, s) 90 | } 91 | } 92 | } 93 | 94 | impl<'xml> DeserializeContent<'xml> for ETag { 95 | fn deserialize_content(d: &mut Deserializer<'xml>) -> DeResult { 96 | let val: String = d.content()?; 97 | 98 | // try to parse as quoted ETag first 99 | // fallback if the ETag is not quoted 100 | match ETag::parse_http_header(val.as_bytes()) { 101 | Ok(v) => Ok(v), 102 | Err(ParseETagError::InvalidFormat) => Ok(ETag::Strong(val)), 103 | 
Err(ParseETagError::InvalidChar) => Err(DeError::InvalidContent), 104 | } 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /crates/s3s/src/sig_v4/amz_content_sha256.rs: -------------------------------------------------------------------------------- 1 | use crate::utils::crypto::is_sha256_checksum; 2 | 3 | /// [x-amz-content-sha256](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) 4 | /// 5 | /// See also [Common Request Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) 6 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 7 | pub enum AmzContentSha256<'a> { 8 | /// Actual payload checksum value 9 | SingleChunk(&'a str), 10 | 11 | /// `UNSIGNED-PAYLOAD` 12 | UnsignedPayload, 13 | 14 | /// `STREAMING-UNSIGNED-PAYLOAD-TRAILER` 15 | StreamingUnsignedPayloadTrailer, 16 | 17 | /// `STREAMING-AWS4-HMAC-SHA256-PAYLOAD` 18 | StreamingAws4HmacSha256Payload, 19 | 20 | /// `STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER` 21 | StreamingAws4HmacSha256PayloadTrailer, 22 | 23 | /// `STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD` 24 | StreamingAws4EcdsaP256Sha256Payload, 25 | 26 | /// `STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER` 27 | StreamingAws4EcdsaP256Sha256PayloadTrailer, 28 | } 29 | 30 | /// [`AmzContentSha256`] 31 | #[derive(Debug, Clone, Copy, thiserror::Error)] 32 | pub enum ParseAmzContentSha256Error { 33 | /// unknown variant 34 | #[error("ParseAmzContentSha256Error: UnknownVariant")] 35 | UnknownVariant, 36 | } 37 | 38 | impl<'a> AmzContentSha256<'a> { 39 | /// Parses `AmzContentSha256` from `x-amz-content-sha256` header 40 | /// 41 | /// # Errors 42 | /// Returns an `Err` if the header is invalid 43 | pub fn parse(header: &'a str) -> Result { 44 | if is_sha256_checksum(header) { 45 | return Ok(Self::SingleChunk(header)); 46 | } 47 | 48 | match header { 49 | "UNSIGNED-PAYLOAD" => Ok(Self::UnsignedPayload), 50 | 
"STREAMING-UNSIGNED-PAYLOAD-TRAILER" => Ok(Self::StreamingUnsignedPayloadTrailer), 51 | "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => Ok(Self::StreamingAws4HmacSha256Payload), 52 | "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" => Ok(Self::StreamingAws4HmacSha256PayloadTrailer), 53 | "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD" => Ok(Self::StreamingAws4EcdsaP256Sha256Payload), 54 | "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER" => Ok(Self::StreamingAws4EcdsaP256Sha256PayloadTrailer), 55 | _ => Err(ParseAmzContentSha256Error::UnknownVariant), 56 | } 57 | } 58 | 59 | // pub fn to_str(self) -> &'a str { 60 | // match self { 61 | // AmzContentSha256::SingleChunk(checksum) => checksum, 62 | // AmzContentSha256::UnsignedPayload => "UNSIGNED-PAYLOAD", 63 | // AmzContentSha256::StreamingUnsignedPayloadTrailer => "STREAMING-UNSIGNED-PAYLOAD-TRAILER", 64 | // AmzContentSha256::StreamingAws4HmacSha256Payload => "STREAMING-AWS4-HMAC-SHA256-PAYLOAD", 65 | // AmzContentSha256::StreamingAws4HmacSha256PayloadTrailer => "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER", 66 | // AmzContentSha256::StreamingAws4EcdsaP256Sha256Payload => "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD", 67 | // AmzContentSha256::StreamingAws4EcdsaP256Sha256PayloadTrailer => "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER", 68 | // } 69 | // } 70 | 71 | pub fn is_streaming(&self) -> bool { 72 | match self { 73 | AmzContentSha256::SingleChunk(_) | AmzContentSha256::UnsignedPayload => false, 74 | AmzContentSha256::StreamingUnsignedPayloadTrailer 75 | | AmzContentSha256::StreamingAws4HmacSha256Payload 76 | | AmzContentSha256::StreamingAws4HmacSha256PayloadTrailer 77 | | AmzContentSha256::StreamingAws4EcdsaP256Sha256Payload 78 | | AmzContentSha256::StreamingAws4EcdsaP256Sha256PayloadTrailer => true, 79 | } 80 | } 81 | 82 | pub fn has_trailer(&self) -> bool { 83 | match self { 84 | AmzContentSha256::SingleChunk(_) 85 | | AmzContentSha256::UnsignedPayload 86 | | AmzContentSha256::StreamingAws4HmacSha256Payload 87 | | 
AmzContentSha256::StreamingAws4EcdsaP256Sha256Payload => false, 88 | AmzContentSha256::StreamingUnsignedPayloadTrailer 89 | | AmzContentSha256::StreamingAws4HmacSha256PayloadTrailer 90 | | AmzContentSha256::StreamingAws4EcdsaP256Sha256PayloadTrailer => true, 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | - 'feat/**' 8 | merge_group: 9 | types: [checks_requested] 10 | schedule: 11 | - cron: '0 0 * * 0' # at midnight of each sunday 12 | workflow_dispatch: 13 | 14 | jobs: 15 | skip-check: 16 | permissions: 17 | actions: write 18 | contents: read 19 | runs-on: ubuntu-latest 20 | outputs: 21 | should_skip: ${{ steps.skip_check.outputs.should_skip }} 22 | steps: 23 | - id: skip_check 24 | uses: fkirc/skip-duplicate-actions@v5 25 | with: 26 | cancel_others: true 27 | paths_ignore: '["*.md"]' 28 | 29 | python: 30 | needs: skip-check 31 | if: needs.skip-check.outputs.should_skip != 'true' 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | - uses: taiki-e/install-action@just 36 | - uses: astral-sh/setup-uv@v3 37 | with: 38 | enable-cache: true 39 | - run: just ci-python 40 | 41 | rust: 42 | needs: skip-check 43 | if: needs.skip-check.outputs.should_skip != 'true' 44 | runs-on: ubuntu-latest 45 | strategy: 46 | fail-fast: false 47 | matrix: 48 | toolchain: 49 | - 1.86.0 # MSRV 50 | - stable 51 | - nightly 52 | steps: 53 | - uses: actions/checkout@v4 54 | - uses: taiki-e/install-action@just 55 | - uses: dtolnay/rust-toolchain@master 56 | with: 57 | toolchain: ${{ matrix.toolchain }} 58 | components: rustfmt, clippy 59 | - uses: Swatinem/rust-cache@v2 60 | - run: just ci-rust 61 | 62 | cross-test: 63 | needs: skip-check 64 | if: needs.skip-check.outputs.should_skip != 'true' 65 | runs-on: ${{ matrix.os }} 66 | 
strategy: 67 | fail-fast: false 68 | matrix: 69 | os: 70 | - windows-latest 71 | - macos-latest 72 | steps: 73 | - uses: actions/checkout@v4 74 | - uses: taiki-e/install-action@just 75 | - uses: dtolnay/rust-toolchain@stable 76 | - uses: Swatinem/rust-cache@v2 77 | - run: just test 78 | 79 | wasm-test: 80 | needs: skip-check 81 | if: needs.skip-check.outputs.should_skip != 'true' 82 | runs-on: ubuntu-latest 83 | steps: 84 | - uses: actions/checkout@v4 85 | - uses: taiki-e/install-action@just 86 | - uses: dtolnay/rust-toolchain@stable 87 | - uses: Swatinem/rust-cache@v2 88 | - uses: taiki-e/install-action@wasm-pack 89 | - uses: taiki-e/install-action@wasm-bindgen-cli 90 | - name: wasm-pack test 91 | run: | 92 | cd crates/s3s-wasm 93 | wasm-pack test --node 94 | 95 | mint-proxy-minio: 96 | name: e2e (mint, s3s-proxy, minio) 97 | needs: skip-check 98 | if: needs.skip-check.outputs.should_skip != 'true' 99 | runs-on: ubuntu-latest 100 | steps: 101 | - uses: actions/checkout@v4 102 | - uses: ./.github/actions/setup 103 | - run: docker pull minio/mint:edge 104 | - run: docker pull minio/minio:latest 105 | - run: just install s3s-proxy 106 | - run: ./scripts/e2e-mint.sh 107 | - run: ./scripts/report-mint.py /tmp/mint/log.json 108 | - uses: actions/upload-artifact@v4 109 | with: 110 | name: mint-proxy-minio.logs 111 | path: ./target/s3s-proxy.log 112 | 113 | e2e-fs: 114 | name: e2e (s3s-e2e, s3s-fs) 115 | needs: skip-check 116 | if: needs.skip-check.outputs.should_skip != 'true' 117 | runs-on: ubuntu-latest 118 | steps: 119 | - uses: actions/checkout@v4 120 | - uses: ./.github/actions/setup 121 | - run: just install s3s-e2e 122 | - run: just install s3s-fs 123 | - run: ./scripts/e2e-fs.sh --filter '^Basic' 124 | - uses: actions/upload-artifact@v4 125 | with: 126 | name: e2e-fs.logs 127 | path: ./target/s3s-fs.log 128 | 129 | e2e-minio: 130 | name: e2e (s3s-e2e, minio) 131 | needs: skip-check 132 | if: needs.skip-check.outputs.should_skip != 'true' 133 | runs-on: 
ubuntu-latest 134 | steps: 135 | - uses: actions/checkout@v4 136 | - uses: ./.github/actions/setup 137 | - run: docker pull minio/minio:latest 138 | - run: just install s3s-e2e 139 | - run: ./scripts/e2e-minio.sh 140 | -------------------------------------------------------------------------------- /crates/s3s/src/http/ordered_headers.rs: -------------------------------------------------------------------------------- 1 | //! Ordered headers 2 | 3 | use std::str::Utf8Error; 4 | 5 | use hyper::HeaderMap; 6 | 7 | use crate::utils::stable_sort_by_first; 8 | 9 | /// Immutable http header container 10 | #[derive(Debug, Default)] 11 | pub struct OrderedHeaders<'a> { 12 | /// Ascending headers (header names are lowercase) 13 | headers: Vec<(&'a str, &'a str)>, 14 | } 15 | 16 | impl<'a> OrderedHeaders<'a> { 17 | /// Constructs [`OrderedHeaders`] from slice 18 | /// 19 | /// + header names must be lowercase 20 | /// + header values must be valid 21 | #[cfg(test)] 22 | #[must_use] 23 | pub fn from_slice_unchecked(slice: &[(&'a str, &'a str)]) -> Self { 24 | for (name, _) in slice { 25 | let is_valid = |c: u8| c == b'-' || c.is_ascii_lowercase() || c.is_ascii_digit(); 26 | assert!(name.as_bytes().iter().copied().all(is_valid)); 27 | } 28 | let mut headers = Vec::new(); 29 | headers.extend_from_slice(slice); 30 | stable_sort_by_first(&mut headers); 31 | Self { headers } 32 | } 33 | 34 | /// Constructs [`OrderedHeaders`] from a header map 35 | /// 36 | /// # Errors 37 | /// Returns [`ToStrError`] if header value cannot be converted to string slice 38 | pub fn from_headers(map: &'a HeaderMap) -> Result { 39 | let mut headers: Vec<(&'a str, &'a str)> = Vec::with_capacity(map.len()); 40 | 41 | for (name, value) in map { 42 | let value = std::str::from_utf8(value.as_bytes())?; 43 | headers.push((name.as_str(), value)); 44 | } 45 | stable_sort_by_first(&mut headers); 46 | 47 | Ok(Self { headers }) 48 | } 49 | 50 | fn get_all_pairs(&self, name: &str) -> impl Iterator + '_ + use<'a, 
'_> { 51 | let slice = self.headers.as_slice(); 52 | 53 | let lower_bound = slice.partition_point(|x| x.0 < name); 54 | let upper_bound = slice.partition_point(|x| x.0 <= name); 55 | 56 | slice[lower_bound..upper_bound].iter().copied() 57 | } 58 | 59 | pub fn get_all(&self, name: impl AsRef) -> impl Iterator + '_ { 60 | self.get_all_pairs(name.as_ref()).map(|x| x.1) 61 | } 62 | 63 | fn get_unique_pair(&self, name: &'_ str) -> Option<(&'a str, &'a str)> { 64 | let slice = self.headers.as_slice(); 65 | let lower_bound = slice.partition_point(|x| x.0 < name); 66 | 67 | let mut iter = slice[lower_bound..].iter().copied(); 68 | let pair = iter.next()?; 69 | 70 | if let Some(following) = iter.next() { 71 | if following.0 == name { 72 | return None; 73 | } 74 | } 75 | 76 | (pair.0 == name).then_some(pair) 77 | } 78 | 79 | /// Gets header value by name. Time `O(logn)` 80 | pub fn get_unique(&self, name: impl AsRef) -> Option<&'a str> { 81 | self.get_unique_pair(name.as_ref()).map(|(_, v)| v) 82 | } 83 | 84 | // /// Finds headers by names. Time `O(mlogn)` 85 | // #[must_use] 86 | // pub fn find_multiple(&self, names: &[impl AsRef]) -> Self { 87 | // let mut headers: Vec<(&'a str, &'a str)> = Vec::new(); 88 | // for name in names { 89 | // for pair in self.get_all_pairs(name.as_ref()) { 90 | // headers.push(pair); 91 | // } 92 | // } 93 | // Self { headers } 94 | // } 95 | 96 | /// Finds headers by names. 
Time `O(mlogn)` 97 | #[must_use] 98 | pub fn find_multiple_with_on_missing( 99 | &self, 100 | names: &'a [impl AsRef], 101 | on_missing: impl Fn(&'a str) -> Option<&'a str>, 102 | ) -> Self { 103 | let mut headers: Vec<(&'a str, &'a str)> = Vec::new(); 104 | for name in names { 105 | let mut has_value = false; 106 | for pair in self.get_all_pairs(name.as_ref()) { 107 | headers.push(pair); 108 | has_value = true; 109 | } 110 | if !has_value { 111 | if let Some(value) = on_missing(name.as_ref()) { 112 | headers.push((name.as_ref(), value)); 113 | } 114 | } 115 | } 116 | Self { headers } 117 | } 118 | } 119 | 120 | impl<'a> AsRef<[(&'a str, &'a str)]> for OrderedHeaders<'a> { 121 | fn as_ref(&self) -> &[(&'a str, &'a str)] { 122 | self.headers.as_ref() 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /codegen/src/v1/aws_proxy.rs: -------------------------------------------------------------------------------- 1 | use super::dto::RustTypes; 2 | use super::ops::Operations; 3 | use super::rust; 4 | 5 | use crate::declare_codegen; 6 | 7 | use std::format as f; 8 | 9 | use heck::ToSnakeCase; 10 | use scoped_writer::g; 11 | 12 | pub fn codegen(ops: &Operations, rust_types: &RustTypes) { 13 | declare_codegen!(); 14 | 15 | g([ 16 | "use super::*;", 17 | "", 18 | "use crate::conv::{try_from_aws, try_into_aws};", 19 | "use crate::conv::string_from_integer;", 20 | "", 21 | "use s3s::S3;", 22 | "use s3s::{S3Request, S3Response};", 23 | "use s3s::S3Result;", 24 | "", 25 | "use tracing::debug;", 26 | "", 27 | ]); 28 | 29 | g!("#[async_trait::async_trait]"); 30 | g!("impl S3 for Proxy {{"); 31 | 32 | for op in ops.values() { 33 | let method_name = op.name.to_snake_case(); 34 | let s3s_input = f!("s3s::dto::{}", op.input); 35 | let s3s_output = f!("s3s::dto::{}", op.output); 36 | 37 | g!("#[tracing::instrument(skip(self, req))]"); 38 | g!("async fn {method_name}(&self, req: S3Request<{s3s_input}>) -> S3Result> {{"); 39 | 40 | 
g!("let input = req.input;"); 41 | g!("debug!(?input);"); 42 | 43 | if op.smithy_input == "Unit" { 44 | g!("let result = self.0.{method_name}().send().await;"); 45 | } else { 46 | g!("let mut b = self.0.{method_name}();"); 47 | let rust::Type::Struct(ty) = &rust_types[op.input.as_str()] else { panic!() }; 48 | 49 | let flattened_fields = if ty.name == "SelectObjectContentInput" { 50 | let rust::Type::Struct(flattened_ty) = &rust_types["SelectObjectContentRequest"] else { panic!() }; 51 | flattened_ty.fields.as_slice() 52 | } else { 53 | &[] 54 | }; 55 | 56 | for field in ty.fields.iter().chain(flattened_fields) { 57 | if field.is_custom_extension { 58 | continue; 59 | } 60 | 61 | let s3s_field_name = match ty.name.as_str() { 62 | "SelectObjectContentInput" if field.name == "request" => continue, 63 | "SelectObjectContentInput" if field.position == "xml" => f!("request.{}", field.name), 64 | _ => field.name.clone(), 65 | }; 66 | let aws_field_name = match ty.name.as_str() { 67 | "SelectObjectContentInput" => field.name.as_str(), 68 | _ => match s3s_field_name.as_str() { 69 | "checksum_crc32c" => "checksum_crc32_c", 70 | "checksum_crc64nvme" => "checksum_crc64_nvme", 71 | "type_" => "type", 72 | s => s, 73 | }, 74 | }; 75 | 76 | // // hack 77 | // if op.name == "PutObject" && field.type_ == "ChecksumAlgorithm" { 78 | // assert!(field.option_type); 79 | // let default_val = "aws_sdk_s3::model::ChecksumAlgorithm::Sha256"; 80 | // let val = f!("try_into_aws(input.{s3s_field_name})?.or(Some({default_val}))"); 81 | // g!("b = b.set_{aws_field_name}({val});"); 82 | // continue; 83 | // } 84 | 85 | if field.type_ == "PartNumberMarker" || field.type_ == "NextPartNumberMarker" { 86 | g!("b = b.set_{aws_field_name}(input.{s3s_field_name}.map(string_from_integer));"); 87 | continue; 88 | } 89 | 90 | if field.option_type { 91 | g!("b = b.set_{aws_field_name}(try_into_aws(input.{s3s_field_name})?);"); 92 | } else { 93 | g!("b = 
b.set_{aws_field_name}(Some(try_into_aws(input.{s3s_field_name})?));");
                }
            }
            g!("let result = b.send().await;");
        }

        g([
            "match result {",
            "    Ok(output) => {",
            "        let headers = super::meta::build_headers(&output)?;",
            "        let output = try_from_aws(output)?;",
            "        debug!(?output);",
            "        Ok(S3Response::with_headers(output, headers))",
            "    },",
            "    Err(e) => Err(wrap_sdk_error!(e)),",
            "}",
        ]);

        g!("}}");
        g!();
    }

    g!("}}");
}
--------------------------------------------------------------------------------
/crates/s3s-fs/src/main.rs:
--------------------------------------------------------------------------------
use s3s_fs::FileSystem;
use s3s_fs::Result;

use s3s::auth::SimpleAuth;
use s3s::host::MultiDomain;
use s3s::service::S3ServiceBuilder;

use std::io::IsTerminal;
use std::ops::Not;
use std::path::PathBuf;

use tokio::net::TcpListener;

use clap::{CommandFactory, Parser};
use tracing::info;

use hyper_util::rt::{TokioExecutor, TokioIo};
use hyper_util::server::conn::auto::Builder as ConnBuilder;

/// Command line options for the s3s-fs server.
// NOTE(review): the `Option<String>` / `Vec<String>` type arguments below were
// lost in this dump ("Option,", "Vec,"); restored from the field usages in
// `check_cli_args` and `run`.
#[derive(Debug, Parser)]
#[command(version)]
struct Opt {
    /// Host name to listen on.
    #[arg(long, default_value = "localhost")]
    host: String,

    /// Port number to listen on.
    #[arg(long, default_value = "8014")] // The original design was finished on 2020-08-14.
    port: u16,

    /// Access key used for authentication.
    #[arg(long)]
    access_key: Option<String>,

    /// Secret key used for authentication.
    #[arg(long)]
    secret_key: Option<String>,

    /// Domain names used for virtual-hosted-style requests.
    #[arg(long)]
    domain: Vec<String>,

    /// Root directory of stored data.
    root: PathBuf,
}

/// Initializes the tracing subscriber: env-filtered, pretty output,
/// colored only when stdout is a terminal.
fn setup_tracing() {
    use tracing_subscriber::EnvFilter;

    let env_filter = EnvFilter::from_default_env();
    let enable_color = std::io::stdout().is_terminal();

    tracing_subscriber::fmt()
        .pretty()
        .with_env_filter(env_filter)
        .with_ansi(enable_color)
        .init();
}

/// Validates option combinations that the clap derive API cannot express;
/// exits with a clap-style error on violation.
fn check_cli_args(opt: &Opt) {
    use clap::error::ErrorKind;

    let mut cmd = Opt::command();

    // TODO: how to specify the requirements with clap derive API?
    if let (Some(_), None) | (None, Some(_)) = (&opt.access_key, &opt.secret_key) {
        let msg = "access key and secret key must be specified together";
        cmd.error(ErrorKind::MissingRequiredArgument, msg).exit();
    }

    for s in &opt.domain {
        if s.contains('/') {
            let msg = format!("expected domain name, found URL-like string: {s:?}");
            cmd.error(ErrorKind::InvalidValue, msg).exit();
        }
    }
}

fn main() -> Result<()> {
    // NOTE(review): `Result<()>` — the type argument was lost in this dump.
    let opt = Opt::parse();
    check_cli_args(&opt);

    setup_tracing();

    run(opt)
}

#[tokio::main]
async fn run(opt: Opt) -> Result<()> {
    // Setup S3 provider
    let fs = FileSystem::new(opt.root)?;

    // Setup S3 service
    let service = {
        let mut b = S3ServiceBuilder::new(fs);

        // Enable authentication
        if let (Some(ak), Some(sk)) = (opt.access_key, opt.secret_key) {
            b.set_auth(SimpleAuth::from_single(ak, sk));
            info!("authentication is enabled");
        }

        // Enable parsing virtual-hosted-style requests
        if opt.domain.is_empty().not() {
            b.set_host(MultiDomain::new(&opt.domain)?);
            info!("virtual-hosted-style requests are enabled");
        }

        b.build()
    };

    // Run server
    let listener = TcpListener::bind((opt.host.as_str(), opt.port)).await?;
    let local_addr = listener.local_addr()?;

    let http_server = ConnBuilder::new(TokioExecutor::new());
    let graceful = hyper_util::server::graceful::GracefulShutdown::new();

    let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c());

    info!("server is running at http://{local_addr}");

    // Accept loop: stops on Ctrl-C; per-connection errors are logged, not fatal.
    loop {
        let (socket, _) = tokio::select! {
            res = listener.accept() => {
                match res {
                    Ok(conn) => conn,
                    Err(err) => {
                        tracing::error!("error accepting connection: {err}");
                        continue;
                    }
                }
            }
            _ = ctrl_c.as_mut() => {
                break;
            }
        };

        let conn = http_server.serve_connection(TokioIo::new(socket), service.clone());
        let conn = graceful.watch(conn.into_owned());
        tokio::spawn(async move {
            let _ = conn.await;
        });
    }

    // Drain in-flight connections, but give up after 10 seconds.
    tokio::select! {
        () = graceful.shutdown() => {
            tracing::debug!("Gracefully shutdown!");
        },
        () = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
            tracing::debug!("Waited 10 seconds for graceful shutdown, aborting...");
        }
    }

    info!("server is stopped");
    Ok(())
}
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
name: Docker

on:
  push:
    tags:
      - "v*"
  schedule:
    # Run weekly on Sundays at 00:00 UTC
    - cron: '0 0 * * 0'
  workflow_dispatch:

env:
  REGISTRY_IMAGE: ${{ vars.DOCKER_USERNAME }}/s3s

jobs:
  skip-check:
    permissions:
      actions: write
      contents: read
    runs-on: ubuntu-latest
    outputs:
      should_skip: ${{ steps.skip_check.outputs.should_skip }}
    steps:
      - id: skip_check
        uses: fkirc/skip-duplicate-actions@v5
        with:
          cancel_others: true
          paths_ignore: '["*.md"]'
          # Don't skip scheduled or tag-based builds
          skip_after_successful_duplicate: ${{
github.event_name == 'workflow_dispatch' }} 31 | 32 | build: 33 | needs: skip-check 34 | if: needs.skip-check.outputs.should_skip != 'true' 35 | runs-on: ${{ matrix.runs-on }} 36 | strategy: 37 | fail-fast: false 38 | matrix: 39 | include: 40 | - platform: linux/amd64 41 | runs-on: ubuntu-24.04 42 | arch: amd64 43 | - platform: linux/arm64 44 | runs-on: ubuntu-24.04-arm 45 | arch: arm64 46 | steps: 47 | - uses: actions/checkout@v4 48 | with: 49 | # For scheduled builds, checkout main branch; otherwise use the triggering ref 50 | ref: ${{ github.event_name == 'schedule' && 'main' || github.ref }} 51 | - name: Prepare 52 | run: | 53 | platform=${{ matrix.platform }} 54 | echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV 55 | 56 | - name: Docker meta 57 | id: meta 58 | uses: docker/metadata-action@v5 59 | with: 60 | images: ${{ env.REGISTRY_IMAGE }} 61 | 62 | - name: Login to Docker Hub 63 | uses: docker/login-action@v3 64 | with: 65 | username: ${{ vars.DOCKER_USERNAME }} 66 | password: ${{ secrets.DOCKER_PASSWORD }} 67 | 68 | - name: Set up QEMU 69 | uses: docker/setup-qemu-action@v3 70 | 71 | - name: Set up Docker Buildx 72 | uses: docker/setup-buildx-action@v3 73 | 74 | - name: Build and push by digest 75 | id: build 76 | uses: docker/build-push-action@v6 77 | with: 78 | context: . 
79 | file: docker/Dockerfile 80 | platforms: ${{ matrix.platform }} 81 | labels: ${{ steps.meta.outputs.labels }} 82 | tags: ${{ env.REGISTRY_IMAGE }} 83 | outputs: type=image,push-by-digest=true,name-canonical=true,push=true 84 | 85 | - name: Export digest 86 | run: | 87 | mkdir -p ${{ runner.temp }}/digests 88 | digest="${{ steps.build.outputs.digest }}" 89 | touch "${{ runner.temp }}/digests/${digest#sha256:}" 90 | 91 | - name: Upload digest 92 | uses: actions/upload-artifact@v4 93 | with: 94 | name: digests-${{ env.PLATFORM_PAIR }} 95 | path: ${{ runner.temp }}/digests/* 96 | if-no-files-found: error 97 | retention-days: 1 98 | 99 | publish: 100 | runs-on: ubuntu-latest 101 | needs: [build, skip-check] 102 | steps: 103 | - name: Download digests 104 | uses: actions/download-artifact@v4 105 | with: 106 | path: ${{ runner.temp }}/digests 107 | pattern: digests-* 108 | merge-multiple: true 109 | 110 | - name: Login to Docker Hub 111 | uses: docker/login-action@v3 112 | with: 113 | username: ${{ vars.DOCKER_USERNAME }} 114 | password: ${{ secrets.DOCKER_PASSWORD }} 115 | 116 | - name: Set up Docker Buildx 117 | uses: docker/setup-buildx-action@v3 118 | 119 | - name: Docker meta 120 | id: meta 121 | uses: docker/metadata-action@v5 122 | with: 123 | images: ${{ env.REGISTRY_IMAGE }} 124 | tags: | 125 | type=semver,pattern={{version}},enable=${{ startsWith(github.ref, 'refs/tags/v') }} 126 | type=semver,pattern={{major}}.{{minor}},enable=${{ startsWith(github.ref, 'refs/tags/v') }} 127 | type=semver,pattern={{major}},enable=${{ startsWith(github.ref, 'refs/tags/v') }} 128 | type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }} 129 | type=raw,value=edge,enable=${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} 130 | 131 | - name: Create manifest list and push 132 | working-directory: ${{ runner.temp }}/digests 133 | run: | 134 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ 135 | $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) 136 | 137 | - name: Inspect image 138 | run: | 139 | docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} 140 | -------------------------------------------------------------------------------- /crates/s3s/src/ops/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | // use crate::service::S3Service; 4 | 5 | // use stdx::mem::output_size; 6 | 7 | // #[test] 8 | // #[ignore] 9 | // fn track_future_size() { 10 | // macro_rules! future_size { 11 | // ($f:path, $v:expr) => { 12 | // (stringify!($f), output_size(&$f), $v) 13 | // }; 14 | // } 15 | 16 | // #[rustfmt::skip] 17 | // let sizes = [ 18 | // future_size!(S3Service::call, 2704), 19 | // future_size!(call, 1512), 20 | // future_size!(prepare, 1440), 21 | // future_size!(SignatureContext::check, 776), 22 | // future_size!(SignatureContext::v2_check, 296), 23 | // future_size!(SignatureContext::v2_check_presigned_url, 168), 24 | // future_size!(SignatureContext::v2_check_header_auth, 184), 25 | // future_size!(SignatureContext::v4_check, 752), 26 | // future_size!(SignatureContext::v4_check_post_signature, 368), 27 | // future_size!(SignatureContext::v4_check_presigned_url, 456), 28 | // future_size!(SignatureContext::v4_check_header_auth, 640), 29 | // ]; 30 | 31 | // println!("{sizes:#?}"); 32 | // for (name, size, expected) in sizes { 33 | // assert_eq!(size, expected, "{name:?} size changed: prev {expected}, now {size}"); 34 | // } 35 | // } 36 | 37 | #[test] 38 | fn error_custom_headers() { 39 | fn redirect307(location: &str) -> S3Error { 40 | let mut err = S3Error::new(S3ErrorCode::TemporaryRedirect); 41 | 42 | err.set_headers({ 43 | let mut headers = HeaderMap::new(); 44 | headers.insert(crate::header::LOCATION, location.parse().unwrap()); 45 | headers 46 | }); 47 | 48 | err 49 | } 50 | 51 | let res = 
serialize_error(redirect307("http://example.com"), false).unwrap(); 52 | assert_eq!(res.status, StatusCode::TEMPORARY_REDIRECT); 53 | assert_eq!(res.headers.get("location").unwrap(), "http://example.com"); 54 | 55 | let body = res.body.bytes().unwrap(); 56 | let body = std::str::from_utf8(&body).unwrap(); 57 | assert_eq!( 58 | body, 59 | concat!( 60 | "", 61 | "TemporaryRedirect" 62 | ) 63 | ); 64 | } 65 | 66 | #[test] 67 | fn extract_host_from_uri() { 68 | use crate::http::Request; 69 | use crate::ops::extract_host; 70 | 71 | let mut req = Request::from( 72 | hyper::Request::builder() 73 | .method(Method::GET) 74 | .version(::http::Version::HTTP_2) 75 | .uri("https://test.example.com:9001/rust.pdf?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20251213T084305Z&X-Amz-SignedHeaders=host&X-Amz-Credential=rustfsadmin%2F20251213%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Expires=3600&X-Amz-Signature=57133ee54dab71c00a10106c33cde2615b301bd2cf00e2439f3ddb4bc999ec66") 76 | .body(Body::empty()) 77 | .unwrap(), 78 | ); 79 | 80 | let host = extract_host(&req).unwrap(); 81 | assert_eq!(host, Some("test.example.com:9001".to_string())); 82 | 83 | req.version = ::http::Version::HTTP_11; 84 | let host = extract_host(&req).unwrap(); 85 | assert_eq!(host, None); 86 | 87 | req.version = ::http::Version::HTTP_3; 88 | let host = extract_host(&req).unwrap(); 89 | assert_eq!(host, Some("test.example.com:9001".to_string())); 90 | 91 | let mut req = Request::from( 92 | hyper::Request::builder() 93 | .version(::http::Version::HTTP_10) 94 | .method(Method::GET) 95 | .uri("http://another.example.org/resource") 96 | .body(Body::empty()) 97 | .unwrap(), 98 | ); 99 | let host = extract_host(&req).unwrap(); 100 | assert_eq!(host, None); 101 | 102 | req.version = ::http::Version::HTTP_2; 103 | let host = extract_host(&req).unwrap(); 104 | assert_eq!(host, Some("another.example.org".to_string())); 105 | 106 | req.version = ::http::Version::HTTP_3; 107 | let host = extract_host(&req).unwrap(); 108 | 
assert_eq!(host, Some("another.example.org".to_string())); 109 | 110 | let req = Request::from( 111 | hyper::Request::builder() 112 | .method(Method::GET) 113 | .uri("/no/host/header") 114 | .header("Host", "header.example.com:8080") 115 | .body(Body::empty()) 116 | .unwrap(), 117 | ); 118 | let host = extract_host(&req).unwrap(); 119 | assert_eq!(host, Some("header.example.com:8080".to_string())); 120 | 121 | let req = Request::from( 122 | hyper::Request::builder() 123 | .method(Method::GET) 124 | .uri("/no/host/header") 125 | .body(Body::empty()) 126 | .unwrap(), 127 | ); 128 | let host = extract_host(&req).unwrap(); 129 | assert_eq!(host, None); 130 | } 131 | -------------------------------------------------------------------------------- /crates/s3s/src/dto/copy_source.rs: -------------------------------------------------------------------------------- 1 | //! x-amz-copy-source 2 | 3 | use crate::http; 4 | use crate::path; 5 | 6 | use std::fmt::Write; 7 | 8 | /// x-amz-copy-source 9 | #[derive(Debug, Clone, PartialEq)] 10 | pub enum CopySource { 11 | /// bucket repr 12 | Bucket { 13 | /// bucket 14 | bucket: Box, 15 | /// key 16 | key: Box, 17 | /// version id 18 | version_id: Option>, 19 | }, 20 | /// access point repr 21 | AccessPoint { 22 | /// region 23 | region: Box, 24 | /// account id 25 | account_id: Box, 26 | /// access point name 27 | access_point_name: Box, 28 | /// key 29 | key: Box, 30 | }, 31 | } 32 | 33 | /// [`CopySource`] 34 | #[derive(Debug, thiserror::Error)] 35 | pub enum ParseCopySourceError { 36 | /// pattern mismatch 37 | #[error("ParseAmzCopySourceError: PatternMismatch")] 38 | PatternMismatch, 39 | 40 | /// invalid bucket name 41 | #[error("ParseAmzCopySourceError: InvalidBucketName")] 42 | InvalidBucketName, 43 | 44 | /// invalid key 45 | #[error("ParseAmzCopySourceError: InvalidKey")] 46 | InvalidKey, 47 | 48 | #[error("ParseAmzCopySourceError: InvalidEncoding")] 49 | InvalidEncoding, 50 | } 51 | 52 | impl CopySource { 53 | /// Parses 
[`CopySource`] from header 54 | /// # Errors 55 | /// Returns an error if the header is invalid 56 | pub fn parse(header: &str) -> Result { 57 | let header = urlencoding::decode(header).map_err(|_| ParseCopySourceError::InvalidEncoding)?; 58 | let header = header.strip_prefix('/').unwrap_or(&header); 59 | 60 | // FIXME: support access point 61 | match header.split_once('/') { 62 | None => Err(ParseCopySourceError::PatternMismatch), 63 | Some((bucket, remaining)) => { 64 | let (key, version_id) = match remaining.split_once('?') { 65 | Some((key, remaining)) => { 66 | let version_id = remaining 67 | .split_once('=') 68 | .and_then(|(name, val)| (name == "versionId").then_some(val)); 69 | (key, version_id) 70 | } 71 | None => (remaining, None), 72 | }; 73 | 74 | if !path::check_bucket_name(bucket) { 75 | return Err(ParseCopySourceError::InvalidBucketName); 76 | } 77 | 78 | if !path::check_key(key) { 79 | return Err(ParseCopySourceError::InvalidKey); 80 | } 81 | 82 | Ok(Self::Bucket { 83 | bucket: bucket.into(), 84 | key: key.into(), 85 | version_id: version_id.map(Into::into), 86 | }) 87 | } 88 | } 89 | } 90 | 91 | #[must_use] 92 | pub fn format_to_string(&self) -> String { 93 | let mut buf = String::new(); 94 | match self { 95 | CopySource::Bucket { bucket, key, version_id } => { 96 | write!(&mut buf, "{bucket}/{key}").unwrap(); 97 | if let Some(version_id) = version_id { 98 | write!(&mut buf, "?versionId={version_id}").unwrap(); 99 | } 100 | } 101 | CopySource::AccessPoint { .. 
} => { 102 | unimplemented!() 103 | } 104 | } 105 | buf 106 | } 107 | } 108 | 109 | impl http::TryFromHeaderValue for CopySource { 110 | type Error = ParseCopySourceError; 111 | 112 | fn try_from_header_value(val: &http::HeaderValue) -> Result { 113 | let header = val.to_str().map_err(|_| ParseCopySourceError::InvalidEncoding)?; 114 | Self::parse(header) 115 | } 116 | } 117 | 118 | #[cfg(test)] 119 | mod tests { 120 | use super::*; 121 | 122 | #[test] 123 | fn path_style() { 124 | { 125 | let header = "awsexamplebucket/reports/january.pdf"; 126 | let val = CopySource::parse(header).unwrap(); 127 | match val { 128 | CopySource::Bucket { bucket, key, version_id } => { 129 | assert_eq!(&*bucket, "awsexamplebucket"); 130 | assert_eq!(&*key, "reports/january.pdf"); 131 | assert!(version_id.is_none()); 132 | } 133 | CopySource::AccessPoint { .. } => panic!(), 134 | } 135 | } 136 | 137 | { 138 | let header = "awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"; 139 | let val = CopySource::parse(header).unwrap(); 140 | match val { 141 | CopySource::Bucket { bucket, key, version_id } => { 142 | assert_eq!(&*bucket, "awsexamplebucket"); 143 | assert_eq!(&*key, "reports/january.pdf"); 144 | assert_eq!(version_id.as_deref().unwrap(), "QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"); 145 | } 146 | CopySource::AccessPoint { .. } => panic!(), 147 | } 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | # Copilot Instructions for s3s 2 | 3 | 4 | 5 | ## Project Overview 6 | s3s is an experimental Rust project that provides an ergonomic adapter for building S3-compatible services. It implements the Amazon S3 REST API as a generic hyper service, allowing S3-compatible services to focus on the S3 API itself without worrying about the HTTP layer. 
7 | 8 | ## Key Architecture 9 | - **s3s**: Core crate implementing S3 REST API as a hyper service 10 | - **s3s-aws**: Provides integration with aws-sdk-s3 and useful types 11 | - **s3s-fs**: Sample implementation using file system (for testing and debugging) 12 | - **s3s-model**: Generated data types from AWS Smithy models 13 | - **s3s-policy**: S3 policy handling 14 | - **s3s-test**: Testing utilities 15 | - **s3s-proxy**: Proxy implementation for E2E testing 16 | - **s3s-e2e**: End-to-end testing framework 17 | 18 | The project converts HTTP requests to operation inputs, calls user-defined services, and converts outputs back to HTTP responses. 19 | 20 | ## Development Workflow 21 | 22 | ### Required Tools 23 | - **Rust**: ^1.86.0 (MSRV - minimum supported version) 24 | - **just**: ^1.36.0 (task runner, like make) 25 | - **uv**: ^0.5.0 (Python package manager) 26 | - **Docker**: For E2E testing 27 | 28 | ### Main Commands 29 | Use `just` for all development tasks: 30 | 31 | - `just dev` - Run complete development cycle (fetch, fmt, codegen, lint, test) 32 | - `just fetch` - Fetch dependencies (uv sync + cargo fetch) 33 | - `just fmt` - Format code (ruff format + cargo fmt) 34 | - `just lint` - Lint code (ruff check + cargo clippy) 35 | - `just test` - Run tests (cargo test) 36 | - `just codegen` - Run code generation from Smithy models 37 | - `just doc` - Generate and open documentation 38 | 39 | ### Code Generation 40 | The project uses code generation from AWS Smithy models: 41 | - `just crawl` - Update data from AWS 42 | - `just codegen` - Generate Rust code from models 43 | 44 | **Important**: Always run `just codegen` after making changes to generation code. 45 | 46 | ### Project Structure 47 | ``` 48 | ├── crates/ # Main Rust crates 49 | │ ├── s3s/ # Core S3 service implementation 50 | │ ├── s3s-aws/ # AWS integration 51 | │ ├── s3s-fs/ # File system implementation 52 | │ └── ... 
53 | ├── codegen/ # Code generation utilities 54 | ├── data/ # Smithy model data and crawling scripts 55 | ├── scripts/ # Development and testing scripts 56 | └── docs/ # Documentation 57 | ``` 58 | 59 | ### Testing Strategy 60 | 1. **Unit tests**: `cargo test` - Test individual components 61 | 2. **Integration tests**: Test crate interactions 62 | 3. **E2E tests**: Full system testing with Docker 63 | - `s3s-proxy` + MinIO integration 64 | - `s3s-fs` standalone testing 65 | - MinT test suite compatibility 66 | 67 | ### Code Style & Linting 68 | - **Rust**: Uses clippy with strict lints (all, pedantic, cargo = deny) 69 | - **Python**: Uses ruff for formatting and linting 70 | - **Safety**: `unsafe` code is forbidden 71 | - **Commit Messages**: Follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification 72 | 73 | ### Important Files 74 | - `justfile` - Task definitions and development commands 75 | - `Cargo.toml` - Workspace configuration and lint settings 76 | - `pyproject.toml` - Python project configuration 77 | - `CONTRIBUTING.md` - Detailed development guide 78 | - `.github/actions/setup/action.yml` - CI setup steps 79 | 80 | ### Working with the Codebase 81 | 82 | #### When modifying S3 operations: 83 | 1. Check if code generation is needed (`just codegen`) 84 | 2. Update tests in relevant crates 85 | 3. Run full test suite (`just test`) 86 | 4. Consider E2E impact 87 | 88 | #### When adding new features: 89 | 1. Follow the existing crate structure 90 | 2. Add appropriate unit and integration tests 91 | 3. Update documentation if public APIs change 92 | 4. Run `just dev` to ensure quality 93 | 94 | #### When fixing bugs: 95 | 1. Add a test that reproduces the issue 96 | 2. Fix the issue with minimal changes 97 | 3. Ensure the test passes 98 | 4. 
Run full test suite to prevent regressions 99 | 100 | ### CI/CD 101 | - GitHub Actions run on push/PR to main 102 | - Tests run on multiple Rust versions (MSRV, stable, nightly) 103 | - Cross-platform testing (Linux, Windows, macOS) 104 | - E2E tests with MinIO and custom implementations 105 | 106 | ### Common Pitfalls 107 | - Don't forget to run `just codegen` after modifying generation code 108 | - Always format code before committing (`just fmt`) 109 | - Test changes against both unit tests and E2E tests 110 | - Be aware that this is a security-sensitive project (HTTP body limits, rate limiting needed) 111 | 112 | ### Security Considerations 113 | The S3Service and adapters have no built-in security protection. When working on this project: 114 | - Consider HTTP body length limits 115 | - Think about rate limiting and back pressure 116 | - Remember that services may be exposed to the Internet 117 | - Test security implications of any changes 118 | 119 | ### Getting Help 120 | - Check `CONTRIBUTING.md` for detailed development setup 121 | - Review existing tests for implementation patterns 122 | - Use `just doc` to explore API documentation 123 | - Look at `s3s-fs` as a reference implementation 124 | -------------------------------------------------------------------------------- /codegen/src/v1/rust.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::format as f; 3 | 4 | use scoped_writer::g; 5 | use serde_json::Value; 6 | 7 | #[derive(Debug, Clone)] 8 | pub enum Type { 9 | Alias(Alias), 10 | Provided(Provided), 11 | List(List), 12 | Map(Map), 13 | StrEnum(StrEnum), 14 | Struct(Struct), 15 | StructEnum(StructEnum), 16 | Timestamp(Timestamp), 17 | } 18 | 19 | #[derive(Debug, Clone)] 20 | pub struct Alias { 21 | pub name: String, 22 | pub type_: String, 23 | pub doc: Option, 24 | } 25 | 26 | #[derive(Debug, Clone)] 27 | pub struct Provided { 28 | pub name: String, 29 | } 30 | 31 | #[derive(Debug, 
Clone)] 32 | pub struct List { 33 | pub name: String, 34 | pub member: ListMember, 35 | pub doc: Option, 36 | } 37 | 38 | #[derive(Debug, Clone)] 39 | pub struct ListMember { 40 | pub type_: String, 41 | pub xml_name: Option, 42 | } 43 | 44 | #[derive(Debug, Clone)] 45 | pub struct Map { 46 | pub name: String, 47 | pub key_type: String, 48 | pub value_type: String, 49 | pub doc: Option, 50 | } 51 | 52 | #[derive(Debug, Clone)] 53 | pub struct StrEnum { 54 | pub name: String, 55 | pub variants: Vec, 56 | pub doc: Option, 57 | 58 | pub is_custom_extension: bool, 59 | } 60 | 61 | #[derive(Debug, Clone)] 62 | pub struct StrEnumVariant { 63 | pub name: String, 64 | pub value: String, 65 | pub doc: Option, 66 | } 67 | 68 | #[derive(Debug, Clone)] 69 | pub struct Struct { 70 | pub name: String, 71 | pub fields: Vec, 72 | pub doc: Option, 73 | 74 | pub xml_name: Option, 75 | pub is_error_type: bool, 76 | 77 | pub is_custom_extension: bool, 78 | } 79 | 80 | #[allow(clippy::struct_excessive_bools)] 81 | #[derive(Debug, Clone, Default)] 82 | pub struct StructField { 83 | pub name: String, 84 | pub type_: String, 85 | pub doc: Option, 86 | 87 | pub camel_name: String, 88 | 89 | pub option_type: bool, 90 | pub default_value: Option, 91 | pub is_required: bool, 92 | 93 | pub position: String, 94 | 95 | pub http_header: Option, 96 | pub http_query: Option, 97 | 98 | pub xml_name: Option, 99 | pub xml_flattened: bool, 100 | 101 | pub is_xml_attr: bool, 102 | pub xml_namespace_uri: Option, 103 | pub xml_namespace_prefix: Option, 104 | 105 | pub is_custom_extension: bool, 106 | 107 | pub custom_in_derive_debug: Option, 108 | } 109 | 110 | #[derive(Debug, Clone)] 111 | pub struct StructEnum { 112 | pub name: String, 113 | pub variants: Vec, 114 | pub doc: Option, 115 | } 116 | 117 | #[derive(Debug, Clone)] 118 | pub struct StructEnumVariant { 119 | pub name: String, 120 | pub type_: String, 121 | pub doc: Option, 122 | } 123 | 124 | #[derive(Debug, Clone)] 125 | pub struct Timestamp 
{ 126 | pub name: String, 127 | pub format: Option, 128 | pub doc: Option, 129 | } 130 | 131 | impl Type { 132 | pub fn provided(name: &str) -> Self { 133 | Self::Provided(Provided { name: name.to_owned() }) 134 | } 135 | 136 | pub fn alias(name: &str, type_: &str, doc: Option<&str>) -> Self { 137 | Self::Alias(Alias { 138 | name: name.to_owned(), 139 | type_: type_.to_owned(), 140 | doc: doc.map(ToOwned::to_owned), 141 | }) 142 | } 143 | } 144 | 145 | pub fn codegen_doc(doc: Option<&str>) { 146 | let Some(doc) = doc else { return }; 147 | 148 | for line in doc.lines() { 149 | let mut line = line.trim_start().to_owned(); 150 | 151 | let word_fixes_type1 = [ 152 | "Region", 153 | "account-id", 154 | "access-point-name", 155 | "outpost-id", 156 | "key", 157 | "version-id", 158 | "Code", 159 | "Message", 160 | ]; 161 | 162 | for word in word_fixes_type1 { 163 | if line.contains(word) { 164 | line = line.replace(&f!("<{word}>"), &f!("<{word}>")); 165 | } 166 | } 167 | 168 | let word_fixes_type2 = ["Grantee", "BucketLoggingStatus"]; 169 | 170 | for word in word_fixes_type2 { 171 | if line.contains(word) { 172 | line = line.replace(&f!("<{word} xmlns"), &f!("<{word} xmlns")); 173 | } 174 | } 175 | 176 | let word_fixes_type3 = ["NotificationConfiguration", "Grantee"]; 177 | 178 | for word in word_fixes_type3 { 179 | if line.contains(word) { 180 | line = line.replace(&f!("<{word}>"), &f!("<{word}>")); 181 | line = line.replace(&f!(""), &f!("</{word}>")); 182 | } 183 | } 184 | 185 | if line.ends_with("200") { 186 | line = line.replace("200", "200"); 187 | } 188 | if line.starts_with("OK") { 189 | line = line.replace("OK", "OK"); 190 | } 191 | 192 | g!("/// {line}"); 193 | } 194 | } 195 | 196 | pub fn default_value_literal(v: &Value) -> &dyn fmt::Display { 197 | match v { 198 | Value::Bool(x) => x, 199 | Value::Number(x) => x, 200 | _ => unimplemented!(), 201 | } 202 | } 203 | --------------------------------------------------------------------------------