├── .github ├── dependabot.yml └── workflows │ ├── base.yml │ ├── coverage.yml │ ├── deny.yml │ ├── docs.yml │ ├── exhaustive.yml │ ├── lint.yml │ ├── nightly.yml │ ├── release.yml │ └── wasi.yml ├── .gitignore ├── .release-plz.toml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── ci └── remove-dev-dependencies │ ├── .gitignore │ ├── Cargo.toml │ └── src │ └── main.rs ├── deny.toml ├── examples ├── zlib_tokio_write.rs └── zstd_gzip.rs ├── justfile ├── src ├── brotli.rs ├── codec │ ├── brotli │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── bzip2 │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── deflate │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── deflate64 │ │ ├── decoder.rs │ │ └── mod.rs │ ├── flate │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── gzip │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ ├── header.rs │ │ └── mod.rs │ ├── lz4 │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── lzma │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── mod.rs │ ├── xz │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── xz2 │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ ├── zlib │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ └── zstd │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs ├── futures │ ├── bufread │ │ ├── generic │ │ │ ├── decoder.rs │ │ │ ├── encoder.rs │ │ │ └── mod.rs │ │ ├── macros │ │ │ ├── decoder.rs │ │ │ ├── encoder.rs │ │ │ └── mod.rs │ │ └── mod.rs │ ├── mod.rs │ └── write │ │ ├── buf_write.rs │ │ ├── buf_writer.rs │ │ ├── generic │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ │ ├── macros │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ │ └── mod.rs ├── lib.rs ├── lz4.rs ├── macros.rs ├── tokio │ ├── bufread │ │ ├── generic │ │ │ ├── decoder.rs │ │ │ ├── encoder.rs │ │ │ └── mod.rs │ │ ├── macros │ │ │ ├── decoder.rs │ │ │ ├── encoder.rs │ │ │ └── mod.rs │ │ └── mod.rs │ ├── mod.rs │ └── write │ │ ├── buf_write.rs │ │ ├── 
buf_writer.rs │ │ ├── generic │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ │ ├── macros │ │ ├── decoder.rs │ │ ├── encoder.rs │ │ └── mod.rs │ │ └── mod.rs ├── unshared.rs ├── util.rs └── zstd.rs └── tests ├── artifacts ├── dictionary-rust ├── dictionary-rust-other ├── lib.rs ├── lib.rs.zst └── long-window-size-lib.rs.zst ├── brotli.rs ├── bzip2.rs ├── deflate.rs ├── gzip.rs ├── lz4.rs ├── lzma.rs ├── proptest.proptest-regressions ├── proptest.rs ├── utils ├── algos.rs ├── impls.rs ├── input_stream.rs ├── mod.rs ├── test_cases.rs ├── tokio_ext │ ├── copy_buf.rs │ ├── interleave_pending.rs │ ├── limited.rs │ └── mod.rs ├── track_closed.rs └── track_eof.rs ├── xz.rs ├── zlib.rs ├── zstd-dict.rs ├── zstd-window-size.rs └── zstd.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Dependabot dependency version checks / updates 2 | 3 | version: 2 4 | updates: 5 | - package-ecosystem: "github-actions" 6 | # Workflow files stored in the 7 | # default location of `.github/workflows` 8 | directory: "/" 9 | schedule: 10 | interval: "daily" 11 | rebase-strategy: "disabled" 12 | - package-ecosystem: "cargo" 13 | directory: "/" 14 | versioning-strategy: "widen" 15 | schedule: 16 | interval: "daily" 17 | rebase-strategy: "disabled" 18 | -------------------------------------------------------------------------------- /.github/workflows/base.yml: -------------------------------------------------------------------------------- 1 | name: base 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | test: 8 | name: cargo test 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions-rust-lang/setup-rust-toolchain@v1 13 | - uses: taiki-e/install-action@v2 14 | with: { tool: cargo-nextest } 15 | - run: cargo --locked nextest run --workspace --all-features 16 | - run: cargo --locked test --workspace --doc --all-features 17 | 18 | on: 19 | merge_group: 20 | types: [checks_requested] 21 
| pull_request: 22 | branches: [main] 23 | types: [opened, synchronize, reopened, ready_for_review] 24 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: coverage 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | codecov: 8 | name: codecov 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions-rust-lang/setup-rust-toolchain@v1 13 | - uses: taiki-e/install-action@v2 14 | with: { tool: cargo-tarpaulin } 15 | - run: cargo --locked tarpaulin --all-features -- --skip 'proptest::' 16 | - uses: codecov/codecov-action@v5 17 | env: 18 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 19 | 20 | on: 21 | push: 22 | branches: [main] 23 | pull_request: 24 | branches: [main] 25 | types: [opened, synchronize, reopened, ready_for_review] 26 | schedule: 27 | - cron: '0 0 * * 5' 28 | -------------------------------------------------------------------------------- /.github/workflows/deny.yml: -------------------------------------------------------------------------------- 1 | name: deny 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | cargo-deny-advisories: 8 | name: cargo deny advisories 9 | runs-on: ubuntu-latest 10 | continue-on-error: true 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: EmbarkStudios/cargo-deny-action@v2 14 | with: 15 | command: check advisories 16 | 17 | cargo-deny-licenses: 18 | name: cargo deny bans licenses sources 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Ignore dev-dependencies 23 | run: sed -i 's/\[dev-dependencies\]/[workaround-avoid-dev-deps]/g' Cargo.toml 24 | - uses: EmbarkStudios/cargo-deny-action@v2 25 | with: 26 | command: check bans licenses sources 27 | 28 | on: 29 | merge_group: 30 | types: [checks_requested] 31 | pull_request: 32 | branches: [main] 33 | 
-------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: docs 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | docsrs: 8 | name: cargo doc --cfg docsrs 9 | runs-on: ubuntu-latest 10 | env: 11 | RUSTDOCFLAGS: '--cfg=docsrs -Dwarnings' 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: actions-rust-lang/setup-rust-toolchain@v1 15 | with: 16 | toolchain: nightly 17 | - run: cargo doc --all-features --no-deps 18 | 19 | on: 20 | merge_group: 21 | types: [checks_requested] 22 | pull_request: 23 | branches: [main] 24 | types: [opened, synchronize, reopened, ready_for_review] 25 | -------------------------------------------------------------------------------- /.github/workflows/exhaustive.yml: -------------------------------------------------------------------------------- 1 | name: exhaustive 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | test: 8 | name: cargo test 9 | strategy: 10 | matrix: 11 | platform: 12 | - { toolchain: stable, target: i686-pc-windows-msvc, os: windows-latest } 13 | - { toolchain: stable, target: i686-unknown-linux-gnu, os: ubuntu-latest } 14 | - { toolchain: stable, target: x86_64-apple-darwin, os: macos-latest } 15 | - { toolchain: stable, target: x86_64-pc-windows-msvc, os: windows-latest } 16 | - { toolchain: stable, target: x86_64-unknown-linux-gnu, os: ubuntu-latest } 17 | runs-on: ${{ matrix.platform.os }} 18 | steps: 19 | - uses: actions/checkout@v4 20 | - uses: actions-rust-lang/setup-rust-toolchain@v1 21 | with: 22 | toolchain: ${{ matrix.platform.toolchain }} 23 | target: ${{ matrix.platform.target }} 24 | - uses: taiki-e/install-action@v2 25 | with: { tool: cargo-nextest } 26 | - run: cargo --locked nextest run --workspace --all-features 27 | - run: cargo --locked test --workspace --doc --all-features 28 | 29 | min-versions-shallow: 30 | name: cargo test 
--shallow-minimal-versions 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: actions/checkout@v4 34 | - uses: actions-rust-lang/setup-rust-toolchain@v1 35 | with: { toolchain: nightly } 36 | - name: Update to shallow minimal versions 37 | run: 38 | cargo update $( 39 | cargo metadata --all-features --format-version 1 40 | | jq -r ' 41 | . as $root 42 | | .resolve.nodes[] 43 | | select(.id == $root.resolve.root) 44 | | .deps[].pkg 45 | | . as $dep 46 | | $root.packages[] 47 | | select(.id == $dep) 48 | | "-p", "\(.name):\(.version)" 49 | ' 50 | ) -Z minimal-versions 51 | - uses: actions-rust-lang/setup-rust-toolchain@v1 52 | with: { toolchain: stable } 53 | - uses: taiki-e/install-action@v2 54 | with: { tool: cargo-nextest } 55 | - run: cargo --locked nextest run --workspace --all-features 56 | - run: cargo --locked test --workspace --doc --all-features 57 | 58 | min-versions: 59 | name: cargo test minimal-versions 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v4 63 | - uses: actions-rust-lang/setup-rust-toolchain@v1 64 | with: { toolchain: nightly } 65 | - name: Update to minimal versions 66 | run: 67 | cargo update -Z minimal-versions 68 | - uses: actions-rust-lang/setup-rust-toolchain@v1 69 | with: { toolchain: stable } 70 | - uses: taiki-e/install-action@v2 71 | with: { tool: cargo-nextest } 72 | - run: cargo --locked nextest run --workspace --all-features 73 | - run: cargo --locked test --workspace --doc --all-features 74 | 75 | check-features: 76 | name: cargo hack check --feature-powerset 77 | runs-on: ubuntu-latest 78 | env: 79 | RUSTFLAGS: -Dwarnings 80 | steps: 81 | - uses: actions/checkout@v4 82 | - uses: actions-rust-lang/setup-rust-toolchain@v1 83 | - uses: taiki-e/install-action@v2 84 | with: { tool: cargo-hack } 85 | - run: 86 | cargo hack check 87 | --workspace 88 | --feature-powerset 89 | --no-dev-deps 90 | --skip 'all,all-algorithms,all-implementations' 91 | 92 | check-test-features: 93 | name: cargo hack check 
--all-targets --feature-powerset 94 | runs-on: ubuntu-latest 95 | env: 96 | RUSTFLAGS: -Dwarnings 97 | steps: 98 | - uses: actions/checkout@v4 99 | - uses: actions-rust-lang/setup-rust-toolchain@v1 100 | - uses: taiki-e/install-action@v2 101 | with: { tool: cargo-hack } 102 | - run: 103 | cargo hack check 104 | --workspace 105 | --feature-powerset 106 | --all-targets 107 | --skip 'all,all-algorithms,all-implementations' 108 | 109 | on: 110 | merge_group: 111 | types: [checks_requested] 112 | pull_request: 113 | branches: [main] 114 | types: [opened, synchronize, reopened, ready_for_review] 115 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | fmt: 8 | name: cargo fmt --check 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions-rust-lang/setup-rust-toolchain@v1 13 | with: { components: rustfmt } 14 | - run: cargo fmt --all -- --check 15 | 16 | clippy: 17 | name: cargo clippy 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions-rust-lang/setup-rust-toolchain@v1 22 | with: { components: clippy } 23 | - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings 24 | 25 | on: 26 | merge_group: 27 | types: [checks_requested] 28 | pull_request: 29 | branches: [main] 30 | types: [opened, synchronize, reopened, ready_for_review] 31 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | name: nightly 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | test: 8 | name: cargo +nightly test 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions-rust-lang/setup-rust-toolchain@v1 13 | with: 14 | 
toolchain: nightly 15 | - uses: taiki-e/install-action@v2 16 | with: { tool: cargo-nextest } 17 | - run: cargo --locked nextest run --workspace --all-features 18 | - run: cargo --locked test --workspace --doc --all-features 19 | 20 | fmt: 21 | name: cargo +nightly fmt --check 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v4 25 | - uses: actions-rust-lang/setup-rust-toolchain@v1 26 | with: 27 | toolchain: nightly 28 | components: rustfmt 29 | - run: cargo fmt --all -- --check 30 | 31 | clippy: 32 | name: cargo +nightly clippy 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v4 36 | - uses: actions-rust-lang/setup-rust-toolchain@v1 37 | with: 38 | toolchain: nightly 39 | components: clippy 40 | - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings 41 | 42 | on: 43 | schedule: 44 | - cron: '0 2 * * *' 45 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release-plz 2 | 3 | permissions: 4 | pull-requests: write 5 | contents: write 6 | 7 | on: 8 | push: 9 | branches: 10 | - main 11 | 12 | jobs: 13 | release-plz: 14 | name: Release-plz 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Install Rust toolchain 23 | uses: dtolnay/rust-toolchain@stable 24 | 25 | - name: Run release-plz 26 | uses: MarcoIeni/release-plz-action@v0.5 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 30 | -------------------------------------------------------------------------------- /.github/workflows/wasi.yml: -------------------------------------------------------------------------------- 1 | name: wasi 2 | 3 | env: 4 | RUST_BACKTRACE: 1 5 | 6 | jobs: 7 | build: 8 | name: Build for 
wasm32-wasip1-threads 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions-rust-lang/setup-rust-toolchain@v1 13 | with: 14 | toolchain: nightly 15 | target: wasm32-wasip1-threads 16 | - run: | 17 | curl -L https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sysroot-25.0.tar.gz -o wasi-sysroot.tar.gz 18 | mkdir -p wasi-sysroot 19 | tar xf wasi-sysroot.tar.gz --strip-components=1 -C wasi-sysroot 20 | - run: | 21 | export "CFLAGS_wasm32_wasip1_threads=--sysroot=${{ github.workspace }}/wasi-sysroot -I${{ github.workspace }}/wasi-sysroot/include/wasm32-wasip1-threads -L${{ github.workspace }}/wasi-sysroot/lib/wasm32-wasip1-threads" 22 | cargo +nightly build --lib --features all --target wasm32-wasip1-threads 23 | 24 | on: 25 | merge_group: 26 | types: [checks_requested] 27 | pull_request: 28 | branches: [main] 29 | types: [opened, synchronize, reopened, ready_for_review] 30 | push: 31 | branches: 32 | - main 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | 4 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 5 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 6 | Cargo.lock 7 | 8 | .DS_Store 9 | -------------------------------------------------------------------------------- /.release-plz.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | pr_draft = true 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## Unreleased 8 | 9 | ## [0.4.23](https://github.com/Nullus157/async-compression/compare/v0.4.22...v0.4.23) - 2025-04-21 10 | 11 | ### Changed 12 | 13 | - Update `brotli` dependency to `8.0`. 14 | - Update `liblzma` dependency to `0.4`. 15 | 16 | ## [0.4.22](https://github.com/Nullus157/async-compression/compare/v0.4.21...v0.4.22) - 2025-03-25 17 | 18 | ### Added 19 | 20 | - Add LZ4 encoders and decoders. 21 | - Expose `DeflateEncoder::{total_in, total_out}()` methods. 22 | 23 | 24 | ## [0.4.21](https://github.com/Nullus157/async-compression/compare/v0.4.20...v0.4.21) - 2025-03-15 25 | 26 | ### Fixed 27 | 28 | - When flate encoding, do not mark internal state as flushed if it ran out of buffer space. 29 | - Add debug assertion in `produce` method to check buffer capacity in implementations for `BufWriter`. 30 | 31 | ## [0.4.20](https://github.com/Nullus157/async-compression/compare/v0.4.19...v0.4.20) - 2025-02-28 32 | 33 | ### Added 34 | 35 | - Add support for `wasm32-wasip1-*` targets. 36 | 37 | ## [0.4.19](https://github.com/Nullus157/async-compression/compare/v0.4.18...v0.4.19) - 2025-02-27 38 | 39 | ### Changed 40 | 41 | - Update `bzip2` dependency to `0.5`. 42 | 43 | ### Fixed 44 | 45 | - Ensure that flush finishes before continuing. 46 | 47 | ## [0.4.18](https://github.com/Nullus157/async-compression/compare/v0.4.17...v0.4.18) - 2024-11-23 48 | 49 | ### Fixed 50 | 51 | - Adjust `Level::Precise` clamp range for flate2. 52 | 53 | ## [0.4.17](https://github.com/Nullus157/async-compression/compare/v0.4.16...v0.4.17) - 2024-10-20 54 | 55 | ### Fixed 56 | 57 | - Fix occasional panics when consuming from pending buffers. 
58 | 59 | ## [0.4.16](https://github.com/Nullus157/async-compression/compare/v0.4.15...v0.4.16) - 2024-10-16 60 | 61 | ### Other 62 | 63 | - Implement pass-through `AsyncBufRead` on write-based encoders & decoders. 64 | 65 | ## [0.4.15](https://github.com/Nullus157/async-compression/compare/v0.4.14...v0.4.15) - 2024-10-13 66 | 67 | ### Feature 68 | - Implement pass-through `AsyncRead` or `AsyncWrite` where appropriate. 69 | - Relax `AsyncRead`/`AsyncWrite` bounds on `*::{get_ref, get_mut, get_pin_mut, into_inner}()` methods. 70 | 71 | ## [0.4.14](https://github.com/Nullus157/async-compression/compare/v0.4.13...v0.4.14) - 2024-10-10 72 | 73 | ### Fixed 74 | - In Tokio-based decoders, attempt to decode from internal state even if nothing was read. 75 | 76 | ## [0.4.13](https://github.com/Nullus157/async-compression/compare/v0.4.12...v0.4.13) - 2024-10-02 77 | 78 | ### Feature 79 | - Update `brotli` dependency to `7`. 80 | 81 | ## [0.4.12](https://github.com/Nullus157/async-compression/compare/v0.4.11...v0.4.12) - 2024-07-21 82 | 83 | ### Feature 84 | - Enable customizing Zstd decoding parameters. 85 | 86 | ## [0.4.11](https://github.com/Nullus157/async-compression/compare/v0.4.10...v0.4.11) - 2024-05-30 87 | 88 | ### Other 89 | - Expose total_in/total_out from underlying flate2 encoder types. 
90 | 91 | ## [0.4.10](https://github.com/Nullus157/async-compression/compare/v0.4.9...v0.4.10) - 2024-05-09 92 | 93 | ### Other 94 | - *(deps)* update brotli requirement from 5.0 to 6.0 ([#274](https://github.com/Nullus157/async-compression/pull/274)) 95 | - Fix pipeline doc: Warn on unexpected cfgs instead of error ([#276](https://github.com/Nullus157/async-compression/pull/276)) 96 | - Update name of release-pr.yml 97 | - Create release.yml 98 | - Create release-pr.yml 99 | 100 | ## 0.4.9 101 | 102 | - bump dep brotli from 4.0 to 5.0 103 | 104 | ## 0.4.8 105 | 106 | - bump dep brotli from 3.3 to 4.0 107 | 108 | ## 0.4.7 109 | 110 | - Flush available data in decoder even when there's no incoming input. 111 | 112 | ## 0.4.6 113 | 114 | - Return errors instead of panicking in all encode and decode operations. 115 | 116 | ## 0.4.5 117 | 118 | - Add `{Lzma, Xz}Decoder::with_mem_limit()` methods. 119 | 120 | ## 0.4.4 121 | 122 | - Update `zstd` dependency to `0.13`. 123 | 124 | ## 0.4.3 125 | 126 | - Implement `Default` for `brotli::EncoderParams`. 127 | 128 | ## 0.4.2 129 | 130 | - Add top-level `brotli` module containing stable `brotli` crate wrapper types. 131 | - Add `BrotliEncoder::with_quality_and_params()` constructors. 132 | - Add `Deflate64Decoder` behind new crate feature `deflate64`. 133 | 134 | ## 0.4.1 - 2023-07-10 135 | 136 | - Add `Zstd{Encoder,Decoder}::with_dict()` constructors. 137 | - Add `zstdmt` crate feature that enables `zstd-safe/zstdmt`, allowing multi-threaded functionality to work as expected. 138 | 139 | ## 0.4.0 - 2023-05-10 140 | 141 | - `Level::Precise` variant now takes a `i32` instead of `u32`. 142 | - Add top-level `zstd` module containing stable `zstd` crate wrapper types. 143 | - Add `ZstdEncoder::with_quality_and_params()` constructors. 144 | - Update `zstd` dependency to `0.12`. 145 | - Remove deprecated `stream`, `futures-bufread` and `futures-write` crate features. 
146 | - Remove Tokio 0.2.x and 0.3.x support (`tokio-02` and `tokio-03` crate features). 147 | 148 | ## 0.3.15 - 2022-10-08 149 | 150 | - `Level::Default::into_zstd()` now returns zstd's default value `3`. 151 | - Fix endianness when reading the `extra` field of a gzip header. 152 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-compression" 3 | version = "0.4.23" 4 | authors = ["Wim Looman ", "Allen Bui "] 5 | edition = "2018" 6 | resolver = "2" 7 | license = "MIT OR Apache-2.0" 8 | keywords = ["compression", "gzip", "zstd", "brotli", "async"] 9 | categories = ["compression", "asynchronous"] 10 | repository = "https://github.com/Nullus157/async-compression" 11 | description = """ 12 | Adaptors between compression crates and Rust's modern asynchronous IO types. 13 | """ 14 | 15 | [package.metadata.docs.rs] 16 | all-features = true 17 | rustdoc-args = ["--cfg", "docsrs"] 18 | 19 | [features] 20 | # groups 21 | all = ["all-implementations", "all-algorithms"] 22 | all-implementations = ["futures-io", "tokio"] 23 | all-algorithms = ["brotli", "bzip2", "deflate", "gzip", "lz4", "lzma", "xz", "zlib", "zstd", "deflate64"] 24 | 25 | # algorithms 26 | deflate = ["flate2"] 27 | gzip = ["flate2"] 28 | lz4 = ["dep:lz4"] 29 | lzma = ["dep:liblzma"] 30 | xz = ["lzma"] 31 | xz2 = ["xz"] 32 | zlib = ["flate2"] 33 | zstd = ["libzstd", "zstd-safe"] 34 | zstdmt = ["zstd", "zstd-safe/zstdmt"] 35 | deflate64 = ["dep:deflate64"] 36 | 37 | [dependencies] 38 | brotli = { version = "8", optional = true } 39 | bzip2 = { version = "0.5", optional = true } 40 | flate2 = { version = "1.0.13", optional = true } 41 | futures-core = { version = "0.3", default-features = false } 42 | futures-io = { version = "0.3", default-features = false, features = ["std"], optional = true } 43 | libzstd = { package = "zstd", version = "0.13.1", optional 
= true, default-features = false } 44 | lz4 = { version = "1.28.1", optional = true } 45 | memchr = "2" 46 | pin-project-lite = "0.2" 47 | tokio = { version = "1.24.2", optional = true, default-features = false } 48 | liblzma = { version = "0.4.0", optional = true } 49 | zstd-safe = { version = "7", optional = true, default-features = false } 50 | deflate64 = { version = "0.1.5", optional = true } 51 | 52 | [dev-dependencies] 53 | bytes = "1" 54 | futures = "0.3.5" 55 | futures-test = "0.3.5" 56 | ntest = "0.9" 57 | proptest = "1" 58 | proptest-derive = "0.5" 59 | rand = "0.9" 60 | tokio = { version = "1.38.2", default-features = false, features = ["io-util", "macros", "rt-multi-thread", "io-std"] } 61 | tokio-util = { version = "0.7", default-features = false, features = ["io"] } 62 | 63 | [[test]] 64 | name = "brotli" 65 | required-features = ["brotli"] 66 | 67 | [[test]] 68 | name = "bzip2" 69 | required-features = ["bzip2"] 70 | 71 | [[test]] 72 | name = "deflate" 73 | required-features = ["deflate"] 74 | 75 | [[test]] 76 | name = "gzip" 77 | required-features = ["gzip"] 78 | 79 | [[test]] 80 | name = "lz4" 81 | required-features = ["lz4"] 82 | 83 | [[test]] 84 | name = "lzma" 85 | required-features = ["lzma"] 86 | 87 | [[test]] 88 | name = "xz" 89 | required-features = ["xz"] 90 | 91 | [[test]] 92 | name = "zlib" 93 | required-features = ["zlib"] 94 | 95 | [[test]] 96 | name = "zstd" 97 | required-features = ["zstd"] 98 | 99 | [[test]] 100 | name = "zstd-dict" 101 | required-features = ["zstd", "tokio"] 102 | 103 | [[test]] 104 | name = "zstd-window-size" 105 | required-features = ["zstd", "tokio"] 106 | 107 | [[example]] 108 | name = "zlib_tokio_write" 109 | required-features = ["zlib", "tokio"] 110 | 111 | [[example]] 112 | name = "zstd_gzip" 113 | required-features = ["zstd", "gzip", "tokio"] 114 | -------------------------------------------------------------------------------- /LICENSE-MIT: 
-------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 the rustasync developers 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # async-compression 2 | 3 | [![crates.io version][1]][2] ![build status][3] 4 | [![downloads][5]][6] [![docs.rs docs][7]][8] 5 | ![MIT or Apache 2.0 licensed][9] 6 | [![dependency status][10]][11] 7 | 8 | This crate provides adaptors between compression crates and Rust's modern 9 | asynchronous IO types. 
10 | 11 | - [Documentation][8] 12 | - [Crates.io][2] 13 | - [Releases][releases] 14 | 15 | ## Development 16 | 17 | When developing you will need to enable appropriate features for the different 18 | test cases to run, the simplest is `cargo test --all-features`, but you can 19 | enable different subsets of features as appropriate for the code you are 20 | testing to avoid compiling all dependencies, e.g. `cargo test --features 21 | tokio,gzip`. 22 | 23 | ## License 24 | 25 | Licensed under either of 26 | 27 | - [Apache License, Version 2.0](LICENSE-APACHE) 28 | - [MIT license](LICENSE-MIT) 29 | 30 | at your option. 31 | 32 | ### Contribution 33 | 34 | Unless you explicitly state otherwise, any contribution intentionally submitted 35 | for inclusion in the work by you shall be dual licensed as above, without any 36 | additional terms or conditions. 37 | 38 | [1]: https://img.shields.io/crates/v/async-compression.svg?style=flat-square 39 | [2]: https://crates.io/crates/async-compression 40 | [3]: https://img.shields.io/github/actions/workflow/status/Nullus157/async-compression/base.yml?style=flat-square 41 | [5]: https://img.shields.io/crates/d/async-compression.svg?style=flat-square 42 | [6]: https://crates.io/crates/async-compression 43 | [7]: https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square 44 | [8]: https://docs.rs/async-compression 45 | [9]: https://img.shields.io/crates/l/async-compression.svg?style=flat-square 46 | [10]: https://deps.rs/crate/async-compression/0.4.8/status.svg?style=flat-square 47 | [11]: https://deps.rs/crate/async-compression/0.4.8 48 | [releases]: https://github.com/Nullus157/async-compression/releases 49 | -------------------------------------------------------------------------------- /ci/remove-dev-dependencies/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- 
/ci/remove-dev-dependencies/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "remove-dev-dependencies" 3 | version = "0.1.0" 4 | authors = ["Wim Looman "] 5 | edition = "2018" 6 | publish = false 7 | 8 | [workspace] 9 | 10 | [dependencies] 11 | toml_edit = "0.1.5" 12 | -------------------------------------------------------------------------------- /ci/remove-dev-dependencies/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{error::Error, fs}; 2 | 3 | fn main() -> Result<(), Box> { 4 | let content = fs::read_to_string("Cargo.toml")?; 5 | let mut doc: toml_edit::Document = content.parse()?; 6 | doc.as_table_mut().remove("dev-dependencies"); 7 | fs::write("Cargo.toml", doc.to_string())?; 8 | Ok(()) 9 | } 10 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | ignore = [ 3 | ] 4 | 5 | [licenses] 6 | allow = [ 7 | "MIT", 8 | "Apache-2.0", 9 | "BSD-3-Clause", 10 | "Unicode-DFS-2016", 11 | ] 12 | 13 | [bans] 14 | multiple-versions = "warn" 15 | skip = [ 16 | ] 17 | skip-tree = [ 18 | { name = "proptest", version = "1.0" }, 19 | ] 20 | -------------------------------------------------------------------------------- /examples/zlib_tokio_write.rs: -------------------------------------------------------------------------------- 1 | //! Run this example with the following command in a terminal: 2 | //! 3 | //! ```console 4 | //! $ cargo run --example zlib_tokio_write --features="tokio,zlib" 5 | //! "example" 6 | //! 
``` 7 | 8 | use std::io::Result; 9 | 10 | use async_compression::tokio::write::{ZlibDecoder, ZlibEncoder}; 11 | use tokio::io::AsyncWriteExt as _; // for `write_all` and `shutdown` 12 | 13 | #[tokio::main(flavor = "current_thread")] 14 | async fn main() -> Result<()> { 15 | let data = b"example"; 16 | let compressed_data = compress(data).await?; 17 | let de_compressed_data = decompress(&compressed_data).await?; 18 | assert_eq!(de_compressed_data, data); 19 | println!("{:?}", String::from_utf8(de_compressed_data).unwrap()); 20 | Ok(()) 21 | } 22 | 23 | async fn compress(in_data: &[u8]) -> Result> { 24 | let mut encoder = ZlibEncoder::new(Vec::new()); 25 | encoder.write_all(in_data).await?; 26 | encoder.shutdown().await?; 27 | Ok(encoder.into_inner()) 28 | } 29 | 30 | async fn decompress(in_data: &[u8]) -> Result> { 31 | let mut decoder = ZlibDecoder::new(Vec::new()); 32 | decoder.write_all(in_data).await?; 33 | decoder.shutdown().await?; 34 | Ok(decoder.into_inner()) 35 | } 36 | -------------------------------------------------------------------------------- /examples/zstd_gzip.rs: -------------------------------------------------------------------------------- 1 | //! Run this example with the following command in a terminal: 2 | //! 3 | //! ```console 4 | //! $ echo -n 'example' | zstd | cargo run --example zstd_gzip --features="tokio,zstd,gzip" | gunzip -c 5 | //! 7example 6 | //! ``` 7 | //! 8 | //! Note that the "7" prefix (input length) is printed to stderr but will likely show up interleaved with stdout as shown 9 | //! above. This is not an encoding error; see the code in `main`. 
10 | 11 | use std::io::Result; 12 | 13 | use async_compression::tokio::{bufread::ZstdDecoder, write::GzipEncoder}; 14 | use tokio::io::{stderr, stdin, stdout, BufReader}; 15 | use tokio::io::{ 16 | AsyncReadExt as _, // for `read_to_end` 17 | AsyncWriteExt as _, // for `write_all` and `shutdown` 18 | }; 19 | 20 | #[tokio::main(flavor = "current_thread")] 21 | async fn main() -> Result<()> { 22 | // Read zstd encoded data from stdin and decode 23 | let mut reader = ZstdDecoder::new(BufReader::new(stdin())); 24 | let mut x: Vec = vec![]; 25 | reader.read_to_end(&mut x).await?; 26 | 27 | // print to stderr the length of the decoded data 28 | let mut error = stderr(); 29 | error.write_all(x.len().to_string().as_bytes()).await?; 30 | error.shutdown().await?; 31 | 32 | // print to stdout the gzip-encoded data 33 | let mut writer = GzipEncoder::new(stdout()); 34 | writer.write_all(&x).await?; 35 | writer.shutdown().await?; 36 | 37 | // flush stdout 38 | let mut res = writer.into_inner(); 39 | res.flush().await?; 40 | 41 | Ok(()) 42 | } 43 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | _list: 2 | @just --list 3 | 4 | # Format project. 5 | fmt: 6 | just --unstable --fmt 7 | cargo +nightly fmt 8 | 9 | # Check project. 10 | [group("lint")] 11 | check: && clippy 12 | just --unstable --fmt --check 13 | cargo +nightly fmt -- --check 14 | 15 | # Lint workspace with Clippy. 16 | clippy: 17 | cargo clippy --workspace --all-targets --no-default-features 18 | cargo clippy --workspace --all-targets --all-features 19 | 20 | # Document crates in workspace. 21 | doc *args: 22 | RUSTDOCFLAGS="--cfg=docsrs -Dwarnings" cargo +nightly doc --workspace --all-features {{ args }} 23 | -------------------------------------------------------------------------------- /src/brotli.rs: -------------------------------------------------------------------------------- 1 | //! 
This module contains Brotli-specific types for async-compression. 2 | 3 | use brotli::enc::backward_references::{BrotliEncoderMode, BrotliEncoderParams}; 4 | 5 | /// Brotli compression parameters builder. This is a stable wrapper around Brotli's own encoder 6 | /// params type, to abstract over different versions of the Brotli library. 7 | /// 8 | /// See the [Brotli documentation](https://www.brotli.org/encode.html#a9a8) for more information on 9 | /// these parameters. 10 | /// 11 | /// # Examples 12 | /// 13 | /// ``` 14 | /// use async_compression::brotli; 15 | /// 16 | /// let params = brotli::EncoderParams::default() 17 | /// .window_size(12) 18 | /// .text_mode(); 19 | /// ``` 20 | #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] 21 | pub struct EncoderParams { 22 | window_size: Option, 23 | block_size: Option, 24 | size_hint: Option, 25 | mode: Option, 26 | } 27 | 28 | impl EncoderParams { 29 | /// Sets window size in bytes (as a power of two). 30 | /// 31 | /// Used as Brotli's `lgwin` parameter. 32 | /// 33 | /// `window_size` is clamped to `0 <= window_size <= 24`. 34 | pub fn window_size(mut self, window_size: i32) -> Self { 35 | self.window_size = Some(window_size.clamp(0, 24)); 36 | self 37 | } 38 | 39 | /// Sets input block size in bytes (as a power of two). 40 | /// 41 | /// Used as Brotli's `lgblock` parameter. 42 | /// 43 | /// `block_size` is clamped to `16 <= block_size <= 24`. 44 | pub fn block_size(mut self, block_size: i32) -> Self { 45 | self.block_size = Some(block_size.clamp(16, 24)); 46 | self 47 | } 48 | 49 | /// Sets hint for size of data to be compressed. 50 | pub fn size_hint(mut self, size_hint: usize) -> Self { 51 | self.size_hint = Some(size_hint); 52 | self 53 | } 54 | 55 | /// Sets encoder to text mode. 56 | /// 57 | /// If input data is known to be UTF-8 text, this allows the compressor to make assumptions and 58 | /// optimizations. 59 | /// 60 | /// Used as Brotli's `mode` parameter. 
61 | pub fn text_mode(mut self) -> Self { 62 | self.mode = Some(BrotliEncoderMode::BROTLI_MODE_TEXT); 63 | self 64 | } 65 | 66 | pub(crate) fn as_brotli(&self) -> BrotliEncoderParams { 67 | let mut params = BrotliEncoderParams::default(); 68 | 69 | let Self { 70 | window_size, 71 | block_size, 72 | size_hint, 73 | mode, 74 | } = self; 75 | 76 | if let Some(window_size) = window_size { 77 | params.lgwin = *window_size; 78 | } 79 | 80 | if let Some(block_size) = block_size { 81 | params.lgblock = *block_size; 82 | } 83 | 84 | if let Some(size_hint) = size_hint { 85 | params.size_hint = *size_hint; 86 | } 87 | 88 | if let Some(mode) = mode { 89 | params.mode = *mode; 90 | } 91 | 92 | params 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/codec/brotli/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Decode, util::PartialBuffer}; 2 | use std::{fmt, io}; 3 | 4 | use brotli::{enc::StandardAlloc, BrotliDecompressStream, BrotliResult, BrotliState}; 5 | 6 | pub struct BrotliDecoder { 7 | // `BrotliState` is very large (over 2kb) which is why we're boxing it. 
8 | state: Box>, 9 | } 10 | 11 | impl BrotliDecoder { 12 | pub(crate) fn new() -> Self { 13 | Self { 14 | state: Box::new(BrotliState::new( 15 | StandardAlloc::default(), 16 | StandardAlloc::default(), 17 | StandardAlloc::default(), 18 | )), 19 | } 20 | } 21 | 22 | fn decode( 23 | &mut self, 24 | input: &mut PartialBuffer>, 25 | output: &mut PartialBuffer + AsMut<[u8]>>, 26 | ) -> io::Result { 27 | let in_buf = input.unwritten(); 28 | let mut out_buf = output.unwritten_mut(); 29 | 30 | let mut input_len = 0; 31 | let mut output_len = 0; 32 | 33 | let status = match BrotliDecompressStream( 34 | &mut in_buf.len(), 35 | &mut input_len, 36 | in_buf, 37 | &mut out_buf.len(), 38 | &mut output_len, 39 | out_buf, 40 | &mut 0, 41 | &mut self.state, 42 | ) { 43 | BrotliResult::ResultFailure => { 44 | return Err(io::Error::new(io::ErrorKind::Other, "brotli error")) 45 | } 46 | status => status, 47 | }; 48 | 49 | input.advance(input_len); 50 | output.advance(output_len); 51 | 52 | Ok(status) 53 | } 54 | } 55 | 56 | impl Decode for BrotliDecoder { 57 | fn reinit(&mut self) -> io::Result<()> { 58 | self.state = Box::new(BrotliState::new( 59 | StandardAlloc::default(), 60 | StandardAlloc::default(), 61 | StandardAlloc::default(), 62 | )); 63 | Ok(()) 64 | } 65 | 66 | fn decode( 67 | &mut self, 68 | input: &mut PartialBuffer>, 69 | output: &mut PartialBuffer + AsMut<[u8]>>, 70 | ) -> io::Result { 71 | match self.decode(input, output)? { 72 | BrotliResult::ResultSuccess => Ok(true), 73 | BrotliResult::NeedsMoreOutput | BrotliResult::NeedsMoreInput => Ok(false), 74 | BrotliResult::ResultFailure => unreachable!(), 75 | } 76 | } 77 | 78 | fn flush( 79 | &mut self, 80 | output: &mut PartialBuffer + AsMut<[u8]>>, 81 | ) -> io::Result { 82 | match self.decode(&mut PartialBuffer::new(&[][..]), output)? 
{ 83 | BrotliResult::ResultSuccess | BrotliResult::NeedsMoreInput => Ok(true), 84 | BrotliResult::NeedsMoreOutput => Ok(false), 85 | BrotliResult::ResultFailure => unreachable!(), 86 | } 87 | } 88 | 89 | fn finish( 90 | &mut self, 91 | output: &mut PartialBuffer + AsMut<[u8]>>, 92 | ) -> io::Result { 93 | match self.decode(&mut PartialBuffer::new(&[][..]), output)? { 94 | BrotliResult::ResultSuccess => Ok(true), 95 | BrotliResult::NeedsMoreOutput => Ok(false), 96 | BrotliResult::NeedsMoreInput => Err(io::Error::new( 97 | io::ErrorKind::UnexpectedEof, 98 | "reached unexpected EOF", 99 | )), 100 | BrotliResult::ResultFailure => unreachable!(), 101 | } 102 | } 103 | } 104 | 105 | impl fmt::Debug for BrotliDecoder { 106 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 107 | f.debug_struct("BrotliDecoder") 108 | .field("decompress", &"") 109 | .finish() 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/codec/brotli/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, util::PartialBuffer}; 2 | use std::{fmt, io}; 3 | 4 | use brotli::enc::{ 5 | backward_references::BrotliEncoderParams, 6 | encode::{BrotliEncoderOperation, BrotliEncoderStateStruct}, 7 | StandardAlloc, 8 | }; 9 | 10 | pub struct BrotliEncoder { 11 | state: BrotliEncoderStateStruct, 12 | } 13 | 14 | impl BrotliEncoder { 15 | pub(crate) fn new(params: BrotliEncoderParams) -> Self { 16 | let mut state = BrotliEncoderStateStruct::new(StandardAlloc::default()); 17 | state.params = params; 18 | Self { state } 19 | } 20 | 21 | fn encode( 22 | &mut self, 23 | input: &mut PartialBuffer>, 24 | output: &mut PartialBuffer + AsMut<[u8]>>, 25 | op: BrotliEncoderOperation, 26 | ) -> io::Result<()> { 27 | let in_buf = input.unwritten(); 28 | let mut out_buf = output.unwritten_mut(); 29 | 30 | let mut input_len = 0; 31 | let mut output_len = 0; 32 | 33 | if !self.state.compress_stream( 
34 | op, 35 | &mut in_buf.len(), 36 | in_buf, 37 | &mut input_len, 38 | &mut out_buf.len(), 39 | out_buf, 40 | &mut output_len, 41 | &mut None, 42 | &mut |_, _, _, _| (), 43 | ) { 44 | return Err(io::Error::new(io::ErrorKind::Other, "brotli error")); 45 | } 46 | 47 | input.advance(input_len); 48 | output.advance(output_len); 49 | 50 | Ok(()) 51 | } 52 | } 53 | 54 | impl Encode for BrotliEncoder { 55 | fn encode( 56 | &mut self, 57 | input: &mut PartialBuffer>, 58 | output: &mut PartialBuffer + AsMut<[u8]>>, 59 | ) -> io::Result<()> { 60 | self.encode( 61 | input, 62 | output, 63 | BrotliEncoderOperation::BROTLI_OPERATION_PROCESS, 64 | ) 65 | } 66 | 67 | fn flush( 68 | &mut self, 69 | output: &mut PartialBuffer + AsMut<[u8]>>, 70 | ) -> io::Result { 71 | self.encode( 72 | &mut PartialBuffer::new(&[][..]), 73 | output, 74 | BrotliEncoderOperation::BROTLI_OPERATION_FLUSH, 75 | )?; 76 | 77 | Ok(!self.state.has_more_output()) 78 | } 79 | 80 | fn finish( 81 | &mut self, 82 | output: &mut PartialBuffer + AsMut<[u8]>>, 83 | ) -> io::Result { 84 | self.encode( 85 | &mut PartialBuffer::new(&[][..]), 86 | output, 87 | BrotliEncoderOperation::BROTLI_OPERATION_FINISH, 88 | )?; 89 | 90 | Ok(self.state.is_finished()) 91 | } 92 | } 93 | 94 | impl fmt::Debug for BrotliEncoder { 95 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 96 | f.debug_struct("BrotliEncoder") 97 | .field("compress", &"") 98 | .finish() 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/codec/brotli/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::BrotliDecoder, encoder::BrotliEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/bzip2/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Decode, util::PartialBuffer}; 2 
use std::{fmt, io};

use bzip2::{Decompress, Status};

pub struct BzDecoder {
    decompress: Decompress,
}

impl fmt::Debug for BzDecoder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "BzDecoder {{total_in: {}, total_out: {}}}",
            self.decompress.total_in(),
            self.decompress.total_out()
        )
    }
}

impl BzDecoder {
    pub(crate) fn new() -> Self {
        Self {
            // `false`: don't use bzip2's low-memory ("small") decompression mode.
            decompress: Decompress::new(false),
        }
    }

    /// Runs one decompression step, advancing both buffers by the number of
    /// bytes bzip2 actually consumed/produced (derived from its running totals).
    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<Status> {
        let prior_in = self.decompress.total_in();
        let prior_out = self.decompress.total_out();

        let status = self
            .decompress
            .decompress(input.unwritten(), output.unwritten_mut())
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;

        input.advance((self.decompress.total_in() - prior_in) as usize);
        output.advance((self.decompress.total_out() - prior_out) as usize);

        Ok(status)
    }
}

impl Decode for BzDecoder {
    fn reinit(&mut self) -> io::Result<()> {
        self.decompress = Decompress::new(false);
        Ok(())
    }

    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        match self.decode(input, output)? {
            // Decompression went fine, nothing much to report.
            Status::Ok => Ok(false),

            // The Flush action on a compression went ok.
            Status::FlushOk => unreachable!(),

            // The Run action on compression went ok.
            Status::RunOk => unreachable!(),

            // The Finish action on compression went ok.
            Status::FinishOk => unreachable!(),

            // The stream's end has been met, meaning that no more data can be input.
            Status::StreamEnd => Ok(true),

            // There was insufficient memory in the input or output buffer to complete
            // the request, but otherwise everything went normally.
            Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")),
        }
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        self.decode(&mut PartialBuffer::new(&[][..]), output)?;

        // Keep pumping with empty input until no further progress is made.
        loop {
            let old_len = output.written().len();
            self.decode(&mut PartialBuffer::new(&[][..]), output)?;
            if output.written().len() == old_len {
                break;
            }
        }

        Ok(!output.unwritten().is_empty())
    }

    fn finish(
        &mut self,
        _output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        Ok(true)
    }
}
-------------------------------------------------------------------------------- /src/codec/bzip2/encoder.rs:
use crate::{codec::Encode, util::PartialBuffer};
use std::{fmt, io};

use bzip2::{Action, Compress, Compression, Status};

pub struct BzEncoder {
    compress: Compress,
}

impl fmt::Debug for BzEncoder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "BzEncoder {{total_in: {}, total_out: {}}}",
            self.compress.total_in(),
            self.compress.total_out()
        )
    }
}

impl BzEncoder {
    /// Creates a new stream prepared for compression.
    ///
    /// The `work_factor` parameter controls how the compression phase behaves
    /// when presented with worst case, highly repetitive, input data. If
    /// compression runs into difficulties caused by repetitive data, the
    /// library switches from the standard sorting algorithm to a fallback
    /// algorithm.
The fallback is slower than the standard algorithm by perhaps 29 | /// a factor of three, but always behaves reasonably, no matter how bad the 30 | /// input. 31 | /// 32 | /// Lower values of `work_factor` reduce the amount of effort the standard 33 | /// algorithm will expend before resorting to the fallback. You should set 34 | /// this parameter carefully; too low, and many inputs will be handled by 35 | /// the fallback algorithm and so compress rather slowly, too high, and your 36 | /// average-to-worst case compression times can become very large. The 37 | /// default value of 30 gives reasonable behaviour over a wide range of 38 | /// circumstances. 39 | /// 40 | /// Allowable values range from 0 to 250 inclusive. 0 is a special case, 41 | /// equivalent to using the default value of 30. 42 | pub(crate) fn new(level: Compression, work_factor: u32) -> Self { 43 | Self { 44 | compress: Compress::new(level, work_factor), 45 | } 46 | } 47 | 48 | fn encode( 49 | &mut self, 50 | input: &mut PartialBuffer>, 51 | output: &mut PartialBuffer + AsMut<[u8]>>, 52 | action: Action, 53 | ) -> io::Result { 54 | let prior_in = self.compress.total_in(); 55 | let prior_out = self.compress.total_out(); 56 | 57 | let status = self 58 | .compress 59 | .compress(input.unwritten(), output.unwritten_mut(), action) 60 | .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; 61 | 62 | input.advance((self.compress.total_in() - prior_in) as usize); 63 | output.advance((self.compress.total_out() - prior_out) as usize); 64 | 65 | Ok(status) 66 | } 67 | } 68 | 69 | impl Encode for BzEncoder { 70 | fn encode( 71 | &mut self, 72 | input: &mut PartialBuffer>, 73 | output: &mut PartialBuffer + AsMut<[u8]>>, 74 | ) -> io::Result<()> { 75 | match self.encode(input, output, Action::Run)? { 76 | // Decompression went fine, nothing much to report. 77 | Status::Ok => Ok(()), 78 | 79 | // The Flush action on a compression went ok. 
80 | Status::FlushOk => unreachable!(), 81 | 82 | // The Run action on compression went ok. 83 | Status::RunOk => Ok(()), 84 | 85 | // The Finish action on compression went ok. 86 | Status::FinishOk => unreachable!(), 87 | 88 | // The stream's end has been met, meaning that no more data can be input. 89 | Status::StreamEnd => unreachable!(), 90 | 91 | // There was insufficient memory in the input or output buffer to complete 92 | // the request, but otherwise everything went normally. 93 | Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), 94 | } 95 | } 96 | 97 | fn flush( 98 | &mut self, 99 | output: &mut PartialBuffer + AsMut<[u8]>>, 100 | ) -> io::Result { 101 | match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Flush)? { 102 | // Decompression went fine, nothing much to report. 103 | Status::Ok => unreachable!(), 104 | 105 | // The Flush action on a compression went ok. 106 | Status::FlushOk => Ok(false), 107 | 108 | // The Run action on compression went ok. 109 | Status::RunOk => Ok(true), 110 | 111 | // The Finish action on compression went ok. 112 | Status::FinishOk => unreachable!(), 113 | 114 | // The stream's end has been met, meaning that no more data can be input. 115 | Status::StreamEnd => unreachable!(), 116 | 117 | // There was insufficient memory in the input or output buffer to complete 118 | // the request, but otherwise everything went normally. 119 | Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), 120 | } 121 | } 122 | 123 | fn finish( 124 | &mut self, 125 | output: &mut PartialBuffer + AsMut<[u8]>>, 126 | ) -> io::Result { 127 | match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Finish)? { 128 | // Decompression went fine, nothing much to report. 129 | Status::Ok => Ok(false), 130 | 131 | // The Flush action on a compression went ok. 132 | Status::FlushOk => unreachable!(), 133 | 134 | // The Run action on compression went ok. 
135 | Status::RunOk => unreachable!(), 136 | 137 | // The Finish action on compression went ok. 138 | Status::FinishOk => Ok(false), 139 | 140 | // The stream's end has been met, meaning that no more data can be input. 141 | Status::StreamEnd => Ok(true), 142 | 143 | // There was insufficient memory in the input or output buffer to complete 144 | // the request, but otherwise everything went normally. 145 | Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/codec/bzip2/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::BzDecoder, encoder::BzEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/deflate/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::util::PartialBuffer; 2 | use std::io::Result; 3 | 4 | #[derive(Debug)] 5 | pub struct DeflateDecoder { 6 | inner: crate::codec::FlateDecoder, 7 | } 8 | 9 | impl DeflateDecoder { 10 | pub(crate) fn new() -> Self { 11 | Self { 12 | inner: crate::codec::FlateDecoder::new(false), 13 | } 14 | } 15 | } 16 | 17 | impl crate::codec::Decode for DeflateDecoder { 18 | fn reinit(&mut self) -> Result<()> { 19 | self.inner.reinit()?; 20 | Ok(()) 21 | } 22 | 23 | fn decode( 24 | &mut self, 25 | input: &mut PartialBuffer>, 26 | output: &mut PartialBuffer + AsMut<[u8]>>, 27 | ) -> Result { 28 | self.inner.decode(input, output) 29 | } 30 | 31 | fn flush( 32 | &mut self, 33 | output: &mut PartialBuffer + AsMut<[u8]>>, 34 | ) -> Result { 35 | self.inner.flush(output) 36 | } 37 | 38 | fn finish( 39 | &mut self, 40 | output: &mut PartialBuffer + AsMut<[u8]>>, 41 | ) -> Result { 42 | self.inner.finish(output) 43 | } 44 | } 45 | 
-------------------------------------------------------------------------------- /src/codec/deflate/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, util::PartialBuffer}; 2 | use std::io::Result; 3 | 4 | use flate2::Compression; 5 | 6 | #[derive(Debug)] 7 | pub struct DeflateEncoder { 8 | inner: crate::codec::FlateEncoder, 9 | } 10 | 11 | impl DeflateEncoder { 12 | pub(crate) fn new(level: Compression) -> Self { 13 | Self { 14 | inner: crate::codec::FlateEncoder::new(level, false), 15 | } 16 | } 17 | 18 | pub(crate) fn get_ref(&self) -> &crate::codec::FlateEncoder { 19 | &self.inner 20 | } 21 | } 22 | 23 | impl Encode for DeflateEncoder { 24 | fn encode( 25 | &mut self, 26 | input: &mut PartialBuffer>, 27 | output: &mut PartialBuffer + AsMut<[u8]>>, 28 | ) -> Result<()> { 29 | self.inner.encode(input, output) 30 | } 31 | 32 | fn flush( 33 | &mut self, 34 | output: &mut PartialBuffer + AsMut<[u8]>>, 35 | ) -> Result { 36 | self.inner.flush(output) 37 | } 38 | 39 | fn finish( 40 | &mut self, 41 | output: &mut PartialBuffer + AsMut<[u8]>>, 42 | ) -> Result { 43 | self.inner.finish(output) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/codec/deflate/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::DeflateDecoder, encoder::DeflateEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/deflate64/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Decode, util::PartialBuffer}; 2 | use std::io::{Error, ErrorKind, Result}; 3 | 4 | use deflate64::InflaterManaged; 5 | 6 | #[derive(Debug)] 7 | pub struct Deflate64Decoder { 8 | inflater: Box, 9 | } 10 | 11 | impl Deflate64Decoder { 12 | pub(crate) fn new() -> Self { 13 | 
Self { 14 | inflater: Box::new(InflaterManaged::new()), 15 | } 16 | } 17 | 18 | fn decode( 19 | &mut self, 20 | input: &mut PartialBuffer>, 21 | output: &mut PartialBuffer + AsMut<[u8]>>, 22 | ) -> Result { 23 | let result = self 24 | .inflater 25 | .inflate(input.unwritten(), output.unwritten_mut()); 26 | 27 | input.advance(result.bytes_consumed); 28 | output.advance(result.bytes_written); 29 | 30 | if result.data_error { 31 | Err(Error::new(ErrorKind::InvalidData, "invalid data")) 32 | } else { 33 | Ok(self.inflater.finished() && self.inflater.available_output() == 0) 34 | } 35 | } 36 | } 37 | 38 | impl Decode for Deflate64Decoder { 39 | fn reinit(&mut self) -> Result<()> { 40 | self.inflater = Box::new(InflaterManaged::new()); 41 | Ok(()) 42 | } 43 | 44 | fn decode( 45 | &mut self, 46 | input: &mut PartialBuffer>, 47 | output: &mut PartialBuffer + AsMut<[u8]>>, 48 | ) -> Result { 49 | self.decode(input, output) 50 | } 51 | 52 | fn flush( 53 | &mut self, 54 | output: &mut PartialBuffer + AsMut<[u8]>>, 55 | ) -> Result { 56 | self.decode(&mut PartialBuffer::new([]), output)?; 57 | 58 | loop { 59 | let old_len = output.written().len(); 60 | self.decode(&mut PartialBuffer::new([]), output)?; 61 | if output.written().len() == old_len { 62 | break; 63 | } 64 | } 65 | 66 | Ok(!output.unwritten().is_empty()) 67 | } 68 | 69 | fn finish( 70 | &mut self, 71 | output: &mut PartialBuffer + AsMut<[u8]>>, 72 | ) -> Result { 73 | self.decode(&mut PartialBuffer::new([]), output) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/codec/deflate64/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | 3 | pub(crate) use self::decoder::Deflate64Decoder; 4 | -------------------------------------------------------------------------------- /src/codec/flate/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Decode, 
util::PartialBuffer}; 2 | use std::io; 3 | 4 | use flate2::{Decompress, FlushDecompress, Status}; 5 | 6 | #[derive(Debug)] 7 | pub struct FlateDecoder { 8 | zlib_header: bool, 9 | decompress: Decompress, 10 | } 11 | 12 | impl FlateDecoder { 13 | pub(crate) fn new(zlib_header: bool) -> Self { 14 | Self { 15 | zlib_header, 16 | decompress: Decompress::new(zlib_header), 17 | } 18 | } 19 | 20 | fn decode( 21 | &mut self, 22 | input: &mut PartialBuffer>, 23 | output: &mut PartialBuffer + AsMut<[u8]>>, 24 | flush: FlushDecompress, 25 | ) -> io::Result { 26 | let prior_in = self.decompress.total_in(); 27 | let prior_out = self.decompress.total_out(); 28 | 29 | let status = 30 | self.decompress 31 | .decompress(input.unwritten(), output.unwritten_mut(), flush)?; 32 | 33 | input.advance((self.decompress.total_in() - prior_in) as usize); 34 | output.advance((self.decompress.total_out() - prior_out) as usize); 35 | 36 | Ok(status) 37 | } 38 | } 39 | 40 | impl Decode for FlateDecoder { 41 | fn reinit(&mut self) -> io::Result<()> { 42 | self.decompress.reset(self.zlib_header); 43 | Ok(()) 44 | } 45 | 46 | fn decode( 47 | &mut self, 48 | input: &mut PartialBuffer>, 49 | output: &mut PartialBuffer + AsMut<[u8]>>, 50 | ) -> io::Result { 51 | match self.decode(input, output, FlushDecompress::None)? 
{ 52 | Status::Ok => Ok(false), 53 | Status::StreamEnd => Ok(true), 54 | Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), 55 | } 56 | } 57 | 58 | fn flush( 59 | &mut self, 60 | output: &mut PartialBuffer + AsMut<[u8]>>, 61 | ) -> io::Result { 62 | self.decode( 63 | &mut PartialBuffer::new(&[][..]), 64 | output, 65 | FlushDecompress::Sync, 66 | )?; 67 | 68 | loop { 69 | let old_len = output.written().len(); 70 | self.decode( 71 | &mut PartialBuffer::new(&[][..]), 72 | output, 73 | FlushDecompress::None, 74 | )?; 75 | if output.written().len() == old_len { 76 | break; 77 | } 78 | } 79 | 80 | Ok(!output.unwritten().is_empty()) 81 | } 82 | 83 | fn finish( 84 | &mut self, 85 | output: &mut PartialBuffer + AsMut<[u8]>>, 86 | ) -> io::Result { 87 | match self.decode( 88 | &mut PartialBuffer::new(&[][..]), 89 | output, 90 | FlushDecompress::Finish, 91 | )? { 92 | Status::Ok => Ok(false), 93 | Status::StreamEnd => Ok(true), 94 | Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), 95 | } 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/codec/flate/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, util::PartialBuffer}; 2 | use std::io; 3 | 4 | use flate2::{Compress, Compression, FlushCompress, Status}; 5 | 6 | #[derive(Debug)] 7 | pub struct FlateEncoder { 8 | compress: Compress, 9 | flushed: bool, 10 | } 11 | 12 | impl FlateEncoder { 13 | pub(crate) fn new(level: Compression, zlib_header: bool) -> Self { 14 | Self { 15 | compress: Compress::new(level, zlib_header), 16 | flushed: true, 17 | } 18 | } 19 | 20 | pub(crate) fn get_ref(&self) -> &Compress { 21 | &self.compress 22 | } 23 | 24 | fn encode( 25 | &mut self, 26 | input: &mut PartialBuffer>, 27 | output: &mut PartialBuffer + AsMut<[u8]>>, 28 | flush: FlushCompress, 29 | ) -> io::Result { 30 | let prior_in = 
self.compress.total_in(); 31 | let prior_out = self.compress.total_out(); 32 | 33 | let status = self 34 | .compress 35 | .compress(input.unwritten(), output.unwritten_mut(), flush)?; 36 | 37 | input.advance((self.compress.total_in() - prior_in) as usize); 38 | output.advance((self.compress.total_out() - prior_out) as usize); 39 | 40 | Ok(status) 41 | } 42 | } 43 | 44 | impl Encode for FlateEncoder { 45 | fn encode( 46 | &mut self, 47 | input: &mut PartialBuffer>, 48 | output: &mut PartialBuffer + AsMut<[u8]>>, 49 | ) -> io::Result<()> { 50 | self.flushed = false; 51 | match self.encode(input, output, FlushCompress::None)? { 52 | Status::Ok => Ok(()), 53 | Status::StreamEnd => unreachable!(), 54 | Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), 55 | } 56 | } 57 | 58 | fn flush( 59 | &mut self, 60 | output: &mut PartialBuffer + AsMut<[u8]>>, 61 | ) -> io::Result { 62 | // We need to keep track of whether we've already flushed otherwise we'll just keep writing 63 | // out sync blocks continuously and probably never complete flushing. 64 | if self.flushed { 65 | return Ok(true); 66 | } 67 | 68 | self.encode( 69 | &mut PartialBuffer::new(&[][..]), 70 | output, 71 | FlushCompress::Sync, 72 | )?; 73 | 74 | loop { 75 | let old_len = output.written().len(); 76 | self.encode( 77 | &mut PartialBuffer::new(&[][..]), 78 | output, 79 | FlushCompress::None, 80 | )?; 81 | if output.written().len() == old_len { 82 | break; 83 | } 84 | } 85 | 86 | let internal_flushed = !output.unwritten().is_empty(); 87 | self.flushed = internal_flushed; 88 | Ok(internal_flushed) 89 | } 90 | 91 | fn finish( 92 | &mut self, 93 | output: &mut PartialBuffer + AsMut<[u8]>>, 94 | ) -> io::Result { 95 | self.flushed = false; 96 | match self.encode( 97 | &mut PartialBuffer::new(&[][..]), 98 | output, 99 | FlushCompress::Finish, 100 | )? 
{ 101 | Status::Ok => Ok(false), 102 | Status::StreamEnd => Ok(true), 103 | Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/codec/flate/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::FlateDecoder, encoder::FlateEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/gzip/decoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | codec::{ 3 | gzip::header::{self, Header}, 4 | Decode, 5 | }, 6 | util::PartialBuffer, 7 | }; 8 | use std::io::{Error, ErrorKind, Result}; 9 | 10 | use flate2::Crc; 11 | 12 | #[derive(Debug)] 13 | enum State { 14 | Header(header::Parser), 15 | Decoding, 16 | Footer(PartialBuffer>), 17 | Done, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct GzipDecoder { 22 | inner: crate::codec::FlateDecoder, 23 | crc: Crc, 24 | state: State, 25 | header: Header, 26 | } 27 | 28 | fn check_footer(crc: &Crc, input: &[u8]) -> Result<()> { 29 | if input.len() < 8 { 30 | return Err(Error::new( 31 | ErrorKind::InvalidData, 32 | "Invalid gzip footer length", 33 | )); 34 | } 35 | 36 | let crc_sum = crc.sum().to_le_bytes(); 37 | let bytes_read = crc.amount().to_le_bytes(); 38 | 39 | if crc_sum != input[0..4] { 40 | return Err(Error::new( 41 | ErrorKind::InvalidData, 42 | "CRC computed does not match", 43 | )); 44 | } 45 | 46 | if bytes_read != input[4..8] { 47 | return Err(Error::new( 48 | ErrorKind::InvalidData, 49 | "amount of bytes read does not match", 50 | )); 51 | } 52 | 53 | Ok(()) 54 | } 55 | 56 | impl GzipDecoder { 57 | pub(crate) fn new() -> Self { 58 | Self { 59 | inner: crate::codec::FlateDecoder::new(false), 60 | crc: Crc::new(), 61 | state: State::Header(header::Parser::default()), 
62 | header: Header::default(), 63 | } 64 | } 65 | 66 | fn process, O: AsRef<[u8]> + AsMut<[u8]>>( 67 | &mut self, 68 | input: &mut PartialBuffer, 69 | output: &mut PartialBuffer, 70 | inner: impl Fn(&mut Self, &mut PartialBuffer, &mut PartialBuffer) -> Result, 71 | ) -> Result { 72 | loop { 73 | match &mut self.state { 74 | State::Header(parser) => { 75 | if let Some(header) = parser.input(input)? { 76 | self.header = header; 77 | self.state = State::Decoding; 78 | } 79 | } 80 | 81 | State::Decoding => { 82 | let prior = output.written().len(); 83 | 84 | let res = inner(self, input, output); 85 | 86 | if (output.written().len() > prior) { 87 | // update CRC even if there was an error 88 | self.crc.update(&output.written()[prior..]); 89 | } 90 | 91 | let done = res?; 92 | 93 | if done { 94 | self.state = State::Footer(vec![0; 8].into()) 95 | } 96 | } 97 | 98 | State::Footer(footer) => { 99 | footer.copy_unwritten_from(input); 100 | 101 | if footer.unwritten().is_empty() { 102 | check_footer(&self.crc, footer.written())?; 103 | self.state = State::Done 104 | } 105 | } 106 | 107 | State::Done => {} 108 | }; 109 | 110 | if let State::Done = self.state { 111 | return Ok(true); 112 | } 113 | 114 | if input.unwritten().is_empty() || output.unwritten().is_empty() { 115 | return Ok(false); 116 | } 117 | } 118 | } 119 | } 120 | 121 | impl Decode for GzipDecoder { 122 | fn reinit(&mut self) -> Result<()> { 123 | self.inner.reinit()?; 124 | self.crc = Crc::new(); 125 | self.state = State::Header(header::Parser::default()); 126 | self.header = Header::default(); 127 | Ok(()) 128 | } 129 | 130 | fn decode( 131 | &mut self, 132 | input: &mut PartialBuffer>, 133 | output: &mut PartialBuffer + AsMut<[u8]>>, 134 | ) -> Result { 135 | self.process(input, output, |this, input, output| { 136 | this.inner.decode(input, output) 137 | }) 138 | } 139 | 140 | fn flush( 141 | &mut self, 142 | output: &mut PartialBuffer + AsMut<[u8]>>, 143 | ) -> Result { 144 | loop { 145 | match self.state 
{ 146 | State::Header(_) | State::Footer(_) | State::Done => return Ok(true), 147 | 148 | State::Decoding => { 149 | let prior = output.written().len(); 150 | let done = self.inner.flush(output)?; 151 | self.crc.update(&output.written()[prior..]); 152 | if done { 153 | return Ok(true); 154 | } 155 | } 156 | }; 157 | 158 | if output.unwritten().is_empty() { 159 | return Ok(false); 160 | } 161 | } 162 | } 163 | 164 | fn finish( 165 | &mut self, 166 | _output: &mut PartialBuffer + AsMut<[u8]>>, 167 | ) -> Result { 168 | // Because of the footer we have to have already flushed all the data out before we get here 169 | if let State::Done = self.state { 170 | Ok(true) 171 | } else { 172 | Err(Error::new( 173 | ErrorKind::UnexpectedEof, 174 | "unexpected end of file", 175 | )) 176 | } 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/codec/gzip/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, util::PartialBuffer}; 2 | use std::io; 3 | 4 | use flate2::{Compression, Crc}; 5 | 6 | #[derive(Debug)] 7 | enum State { 8 | Header(PartialBuffer>), 9 | Encoding, 10 | Footer(PartialBuffer>), 11 | Done, 12 | } 13 | 14 | #[derive(Debug)] 15 | pub struct GzipEncoder { 16 | inner: crate::codec::FlateEncoder, 17 | crc: Crc, 18 | state: State, 19 | } 20 | 21 | fn header(level: Compression) -> Vec { 22 | let level_byte = if level.level() >= Compression::best().level() { 23 | 0x02 24 | } else if level.level() <= Compression::fast().level() { 25 | 0x04 26 | } else { 27 | 0x00 28 | }; 29 | 30 | vec![0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, level_byte, 0xff] 31 | } 32 | 33 | impl GzipEncoder { 34 | pub(crate) fn new(level: Compression) -> Self { 35 | Self { 36 | inner: crate::codec::FlateEncoder::new(level, false), 37 | crc: Crc::new(), 38 | state: State::Header(header(level).into()), 39 | } 40 | } 41 | 42 | fn footer(&mut self) -> Vec { 43 | let mut output = 
Vec::with_capacity(8);

        output.extend(&self.crc.sum().to_le_bytes());
        output.extend(&self.crc.amount().to_le_bytes());

        output
    }
}

impl Encode for GzipEncoder {
    fn encode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<()> {
        loop {
            match &mut self.state {
                State::Header(header) => {
                    output.copy_unwritten_from(&mut *header);

                    if header.unwritten().is_empty() {
                        self.state = State::Encoding;
                    }
                }

                State::Encoding => {
                    // CRC is computed over the *input* consumed by the inner
                    // encoder, not over the compressed output.
                    let prior_written = input.written().len();
                    self.inner.encode(input, output)?;
                    self.crc.update(&input.written()[prior_written..]);
                }

                State::Footer(_) | State::Done => {
                    return Err(io::Error::new(
                        io::ErrorKind::Other,
                        "encode after complete",
                    ));
                }
            };

            if input.unwritten().is_empty() || output.unwritten().is_empty() {
                return Ok(());
            }
        }
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        loop {
            let done = match &mut self.state {
                State::Header(header) => {
                    output.copy_unwritten_from(&mut *header);

                    if header.unwritten().is_empty() {
                        self.state = State::Encoding;
                    }
                    false
                }

                State::Encoding => self.inner.flush(output)?,

                State::Footer(footer) => {
                    output.copy_unwritten_from(&mut *footer);

                    if footer.unwritten().is_empty() {
                        self.state = State::Done;
                        true
                    } else {
                        false
                    }
                }

                State::Done => true,
            };

            if done {
                return Ok(true);
            }

            if output.unwritten().is_empty() {
                return Ok(false);
            }
        }
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        loop {
            match &mut self.state {
                State::Header(header) => {
                    output.copy_unwritten_from(&mut *header);

                    if header.unwritten().is_empty() {
                        self.state = State::Encoding;
                    }
                }

                State::Encoding => {
                    if self.inner.finish(output)? {
                        self.state = State::Footer(self.footer().into());
                    }
                }

                State::Footer(footer) => {
                    output.copy_unwritten_from(&mut *footer);

                    if footer.unwritten().is_empty() {
                        self.state = State::Done;
                    }
                }

                State::Done => {}
            };

            if let State::Done = self.state {
                return Ok(true);
            }

            if output.unwritten().is_empty() {
                return Ok(false);
            }
        }
    }
}

// -------- /src/codec/gzip/header.rs --------

use crate::util::PartialBuffer;
use std::io;

#[derive(Debug, Default)]
struct Flags {
    ascii: bool,
    crc: bool,
    extra: bool,
    filename: bool,
    comment: bool,
}

#[derive(Debug, Default)]
pub(super) struct Header {
    flags: Flags,
}

#[derive(Debug)]
enum State {
    Fixed(PartialBuffer<[u8; 10]>),
    ExtraLen(PartialBuffer<[u8; 2]>),
    Extra(PartialBuffer<Vec<u8>>),
    Filename(Vec<u8>),
    Comment(Vec<u8>),
    Crc(PartialBuffer<[u8; 2]>),
    Done,
}

impl Default for State {
    fn default() -> Self {
        State::Fixed(<_>::default())
    }
}

#[derive(Debug, Default)]
pub(super) struct Parser {
    state: State,
    header: Header,
}

impl Header {
    /// Parses the fixed 10-byte prefix: magic + method, then the FLG bits.
    fn parse(input: &[u8; 10]) -> io::Result<Self> {
        if input[0..3] != [0x1f, 0x8b, 0x08] {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Invalid gzip header",
            ));
        }

        let flag = input[3];

        let flags = Flags {
            ascii: (flag & 0b0000_0001) != 0,
54 | crc: (flag & 0b0000_0010) != 0, 55 | extra: (flag & 0b0000_0100) != 0, 56 | filename: (flag & 0b0000_1000) != 0, 57 | comment: (flag & 0b0001_0000) != 0, 58 | }; 59 | 60 | Ok(Header { flags }) 61 | } 62 | } 63 | 64 | impl Parser { 65 | pub(super) fn input( 66 | &mut self, 67 | input: &mut PartialBuffer>, 68 | ) -> io::Result> { 69 | loop { 70 | match &mut self.state { 71 | State::Fixed(data) => { 72 | data.copy_unwritten_from(input); 73 | 74 | if data.unwritten().is_empty() { 75 | self.header = Header::parse(&data.take().into_inner())?; 76 | self.state = State::ExtraLen(<_>::default()); 77 | } else { 78 | return Ok(None); 79 | } 80 | } 81 | 82 | State::ExtraLen(data) => { 83 | if !self.header.flags.extra { 84 | self.state = State::Filename(<_>::default()); 85 | continue; 86 | } 87 | 88 | data.copy_unwritten_from(input); 89 | 90 | if data.unwritten().is_empty() { 91 | let len = u16::from_le_bytes(data.take().into_inner()); 92 | self.state = State::Extra(vec![0; usize::from(len)].into()); 93 | } else { 94 | return Ok(None); 95 | } 96 | } 97 | 98 | State::Extra(data) => { 99 | data.copy_unwritten_from(input); 100 | 101 | if data.unwritten().is_empty() { 102 | self.state = State::Filename(<_>::default()); 103 | } else { 104 | return Ok(None); 105 | } 106 | } 107 | 108 | State::Filename(data) => { 109 | if !self.header.flags.filename { 110 | self.state = State::Comment(<_>::default()); 111 | continue; 112 | } 113 | 114 | if let Some(len) = memchr::memchr(0, input.unwritten()) { 115 | data.extend_from_slice(&input.unwritten()[..len]); 116 | input.advance(len + 1); 117 | self.state = State::Comment(<_>::default()); 118 | } else { 119 | data.extend_from_slice(input.unwritten()); 120 | input.advance(input.unwritten().len()); 121 | return Ok(None); 122 | } 123 | } 124 | 125 | State::Comment(data) => { 126 | if !self.header.flags.comment { 127 | self.state = State::Crc(<_>::default()); 128 | continue; 129 | } 130 | 131 | if let Some(len) = memchr::memchr(0, 
input.unwritten()) { 132 | data.extend_from_slice(&input.unwritten()[..len]); 133 | input.advance(len + 1); 134 | self.state = State::Crc(<_>::default()); 135 | } else { 136 | data.extend_from_slice(input.unwritten()); 137 | input.advance(input.unwritten().len()); 138 | return Ok(None); 139 | } 140 | } 141 | 142 | State::Crc(data) => { 143 | if !self.header.flags.crc { 144 | self.state = State::Done; 145 | return Ok(Some(std::mem::take(&mut self.header))); 146 | } 147 | 148 | data.copy_unwritten_from(input); 149 | 150 | if data.unwritten().is_empty() { 151 | self.state = State::Done; 152 | return Ok(Some(std::mem::take(&mut self.header))); 153 | } else { 154 | return Ok(None); 155 | } 156 | } 157 | 158 | State::Done => { 159 | return Err(io::Error::new( 160 | io::ErrorKind::Other, 161 | "parser used after done", 162 | )); 163 | } 164 | }; 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/codec/gzip/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | mod header; 4 | 5 | pub(crate) use self::{decoder::GzipDecoder, encoder::GzipEncoder}; 6 | -------------------------------------------------------------------------------- /src/codec/lz4/decoder.rs: -------------------------------------------------------------------------------- 1 | use std::io::Result; 2 | 3 | use lz4::liblz4::{ 4 | check_error, LZ4FDecompressionContext, LZ4F_createDecompressionContext, LZ4F_decompress, 5 | LZ4F_freeDecompressionContext, LZ4F_resetDecompressionContext, LZ4F_VERSION, 6 | }; 7 | 8 | use crate::{codec::Decode, unshared::Unshared, util::PartialBuffer}; 9 | 10 | #[derive(Debug)] 11 | struct DecoderContext { 12 | ctx: LZ4FDecompressionContext, 13 | } 14 | 15 | #[derive(Debug)] 16 | pub struct Lz4Decoder { 17 | ctx: Unshared, 18 | } 19 | 20 | impl DecoderContext { 21 | fn new() -> Result { 22 | let mut context = 
LZ4FDecompressionContext(core::ptr::null_mut());
        check_error(unsafe { LZ4F_createDecompressionContext(&mut context, LZ4F_VERSION) })?;
        Ok(Self { ctx: context })
    }
}

impl Drop for DecoderContext {
    fn drop(&mut self) {
        // SAFETY: `self.ctx` was created by LZ4F_createDecompressionContext
        // and is freed exactly once here.
        unsafe { LZ4F_freeDecompressionContext(self.ctx) };
    }
}

impl Lz4Decoder {
    pub(crate) fn new() -> Self {
        Self {
            ctx: Unshared::new(DecoderContext::new().unwrap()),
        }
    }
}

impl Decode for Lz4Decoder {
    fn reinit(&mut self) -> Result<()> {
        unsafe { LZ4F_resetDecompressionContext(self.ctx.get_mut().ctx) };
        Ok(())
    }

    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        // LZ4F_decompress takes the buffer sizes by pointer and rewrites them
        // with the number of bytes actually consumed/produced.
        let mut output_size = output.unwritten().len();
        let mut input_size = input.unwritten().len();
        let remaining = unsafe {
            check_error(LZ4F_decompress(
                self.ctx.get_mut().ctx,
                output.unwritten_mut().as_mut_ptr(),
                &mut output_size,
                input.unwritten().as_ptr(),
                &mut input_size,
                core::ptr::null(),
            ))?
        };
        input.advance(input_size);
        output.advance(output_size);
        Ok(remaining == 0)
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        // fix: the original made one unconditional `decode` call before this
        // loop; the loop's first iteration performs the identical call, so the
        // extra invocation was redundant and has been removed.
        loop {
            let old_len = output.written().len();
            self.decode(&mut PartialBuffer::new(&[][..]), output)?;
            if output.written().len() == old_len {
                break;
            }
        }

        Ok(!output.unwritten().is_empty())
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.flush(output)
    }
}

// -------- /src/codec/lz4/mod.rs --------

mod decoder;
mod encoder;

pub(crate) use self::{decoder::Lz4Decoder, encoder::Lz4Encoder};

// -------- /src/codec/lzma/decoder.rs --------

use crate::{codec::Decode, util::PartialBuffer};

use std::io::Result;

#[derive(Debug)]
pub struct LzmaDecoder {
    inner: crate::codec::Xz2Decoder,
}

impl LzmaDecoder {
    pub fn new() -> Self {
        Self {
            inner: crate::codec::Xz2Decoder::new(u64::MAX),
        }
    }

    pub fn with_memlimit(memlimit: u64) -> Self {
        Self {
            inner: crate::codec::Xz2Decoder::new(memlimit),
        }
    }
}

impl Decode for LzmaDecoder {
    fn reinit(&mut self) -> Result<()> {
        self.inner.reinit()
    }

    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.decode(input, output)
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
41 | self.inner.flush(output) 42 | } 43 | 44 | fn finish( 45 | &mut self, 46 | output: &mut PartialBuffer + AsMut<[u8]>>, 47 | ) -> Result { 48 | self.inner.finish(output) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/codec/lzma/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, util::PartialBuffer}; 2 | 3 | use std::io::Result; 4 | 5 | #[derive(Debug)] 6 | pub struct LzmaEncoder { 7 | inner: crate::codec::Xz2Encoder, 8 | } 9 | 10 | impl LzmaEncoder { 11 | pub fn new(level: u32) -> Self { 12 | Self { 13 | inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Lzma, level), 14 | } 15 | } 16 | } 17 | 18 | impl Encode for LzmaEncoder { 19 | fn encode( 20 | &mut self, 21 | input: &mut PartialBuffer>, 22 | output: &mut PartialBuffer + AsMut<[u8]>>, 23 | ) -> Result<()> { 24 | self.inner.encode(input, output) 25 | } 26 | 27 | fn flush( 28 | &mut self, 29 | _output: &mut PartialBuffer + AsMut<[u8]>>, 30 | ) -> Result { 31 | // Flush on LZMA 1 is not supported 32 | Ok(true) 33 | } 34 | 35 | fn finish( 36 | &mut self, 37 | output: &mut PartialBuffer + AsMut<[u8]>>, 38 | ) -> Result { 39 | self.inner.finish(output) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/codec/lzma/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::LzmaDecoder, encoder::LzmaEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::util::PartialBuffer; 2 | use std::io::Result; 3 | 4 | #[cfg(feature = "brotli")] 5 | mod brotli; 6 | #[cfg(feature = "bzip2")] 7 | mod bzip2; 8 | #[cfg(feature = "deflate")] 9 | mod deflate; 10 | #[cfg(feature = 
"deflate64")] 11 | mod deflate64; 12 | #[cfg(feature = "flate2")] 13 | mod flate; 14 | #[cfg(feature = "gzip")] 15 | mod gzip; 16 | #[cfg(feature = "lz4")] 17 | mod lz4; 18 | #[cfg(feature = "lzma")] 19 | mod lzma; 20 | #[cfg(feature = "xz")] 21 | mod xz; 22 | #[cfg(feature = "lzma")] 23 | mod xz2; 24 | #[cfg(feature = "zlib")] 25 | mod zlib; 26 | #[cfg(feature = "zstd")] 27 | mod zstd; 28 | 29 | #[cfg(feature = "brotli")] 30 | pub(crate) use self::brotli::{BrotliDecoder, BrotliEncoder}; 31 | #[cfg(feature = "bzip2")] 32 | pub(crate) use self::bzip2::{BzDecoder, BzEncoder}; 33 | #[cfg(feature = "deflate")] 34 | pub(crate) use self::deflate::{DeflateDecoder, DeflateEncoder}; 35 | #[cfg(feature = "deflate64")] 36 | pub(crate) use self::deflate64::Deflate64Decoder; 37 | #[cfg(feature = "flate2")] 38 | pub(crate) use self::flate::{FlateDecoder, FlateEncoder}; 39 | #[cfg(feature = "gzip")] 40 | pub(crate) use self::gzip::{GzipDecoder, GzipEncoder}; 41 | #[cfg(feature = "lz4")] 42 | pub(crate) use self::lz4::{Lz4Decoder, Lz4Encoder}; 43 | #[cfg(feature = "lzma")] 44 | pub(crate) use self::lzma::{LzmaDecoder, LzmaEncoder}; 45 | #[cfg(feature = "xz")] 46 | pub(crate) use self::xz::{XzDecoder, XzEncoder}; 47 | #[cfg(feature = "lzma")] 48 | pub(crate) use self::xz2::{Xz2Decoder, Xz2Encoder, Xz2FileFormat}; 49 | #[cfg(feature = "zlib")] 50 | pub(crate) use self::zlib::{ZlibDecoder, ZlibEncoder}; 51 | #[cfg(feature = "zstd")] 52 | pub(crate) use self::zstd::{ZstdDecoder, ZstdEncoder}; 53 | 54 | pub trait Encode { 55 | fn encode( 56 | &mut self, 57 | input: &mut PartialBuffer>, 58 | output: &mut PartialBuffer + AsMut<[u8]>>, 59 | ) -> Result<()>; 60 | 61 | /// Returns whether the internal buffers are flushed 62 | fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) 63 | -> Result; 64 | 65 | /// Returns whether the internal buffers are flushed and the end of the stream is written 66 | fn finish( 67 | &mut self, 68 | output: &mut PartialBuffer + AsMut<[u8]>>, 69 | ) -> 
Result<bool>;
}

pub trait Decode {
    /// Reinitializes this decoder ready to decode a new member/frame of data.
    fn reinit(&mut self) -> Result<()>;

    /// Returns whether the end of the stream has been read
    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool>;

    /// Returns whether the internal buffers are flushed
    fn flush(&mut self, output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>)
        -> Result<bool>;

    /// Returns whether the internal buffers are flushed
    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool>;
}

// -------- /src/codec/xz/decoder.rs --------

use crate::{codec::Decode, util::PartialBuffer};

use std::io::{Error, ErrorKind, Result};

#[derive(Debug)]
pub struct XzDecoder {
    inner: crate::codec::Xz2Decoder,
    // Count of padding bytes still expected in the current 4-byte unit;
    // `Some` only while skipping inter-stream padding after `reinit`.
    skip_padding: Option<u8>,
}

impl XzDecoder {
    pub fn new() -> Self {
        Self {
            inner: crate::codec::Xz2Decoder::new(u64::MAX),
            skip_padding: None,
        }
    }

    pub fn with_memlimit(memlimit: u64) -> Self {
        Self {
            inner: crate::codec::Xz2Decoder::new(memlimit),
            skip_padding: None,
        }
    }
}

impl Decode for XzDecoder {
    fn reinit(&mut self) -> Result<()> {
        self.skip_padding = Some(4);
        self.inner.reinit()
    }

    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        if let Some(ref mut count) = self.skip_padding {
            while input.unwritten().first() == Some(&0) {
                input.advance(1);
                *count -= 1;
                if *count == 0 {
                    *count = 4;
                }
            }
            if input.unwritten().is_empty() {
                return Ok(true);
            }
            // If this is non-padding then it cannot start with null bytes, so it must be invalid
            // padding
            if *count != 4 {
                return Err(Error::new(
                    ErrorKind::InvalidData,
                    "stream padding was not a multiple of 4 bytes",
                ));
            }
            self.skip_padding = None;
        }
        self.inner.decode(input, output)
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        if self.skip_padding.is_some() {
            return Ok(true);
        }
        self.inner.flush(output)
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        if self.skip_padding.is_some() {
            return Ok(true);
        }
        self.inner.finish(output)
    }
}

// -------- /src/codec/xz/encoder.rs --------

use crate::{codec::Encode, util::PartialBuffer};

use std::io::Result;

#[derive(Debug)]
pub struct XzEncoder {
    inner: crate::codec::Xz2Encoder,
}

impl XzEncoder {
    pub fn new(level: u32) -> Self {
        Self {
            inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Xz, level),
        }
    }
}

impl Encode for XzEncoder {
    fn encode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<()> {
        self.inner.encode(input, output)
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.flush(output)
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.finish(output)
    }
}

// -------- /src/codec/xz/mod.rs --------

mod decoder;
mod encoder; 3 | 4 | pub(crate) use self::{decoder::XzDecoder, encoder::XzEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/xz2/decoder.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, io}; 2 | 3 | use liblzma::stream::{Action, Status, Stream}; 4 | 5 | use crate::{codec::Decode, util::PartialBuffer}; 6 | 7 | pub struct Xz2Decoder { 8 | stream: Stream, 9 | } 10 | 11 | impl fmt::Debug for Xz2Decoder { 12 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 13 | f.debug_struct("Xz2Decoder").finish_non_exhaustive() 14 | } 15 | } 16 | 17 | impl Xz2Decoder { 18 | pub fn new(mem_limit: u64) -> Self { 19 | Self { 20 | stream: Stream::new_auto_decoder(mem_limit, 0).unwrap(), 21 | } 22 | } 23 | } 24 | 25 | impl Decode for Xz2Decoder { 26 | fn reinit(&mut self) -> io::Result<()> { 27 | *self = Self::new(self.stream.memlimit()); 28 | Ok(()) 29 | } 30 | 31 | fn decode( 32 | &mut self, 33 | input: &mut PartialBuffer>, 34 | output: &mut PartialBuffer + AsMut<[u8]>>, 35 | ) -> io::Result { 36 | let previous_in = self.stream.total_in() as usize; 37 | let previous_out = self.stream.total_out() as usize; 38 | 39 | let status = self 40 | .stream 41 | .process(input.unwritten(), output.unwritten_mut(), Action::Run)?; 42 | 43 | input.advance(self.stream.total_in() as usize - previous_in); 44 | output.advance(self.stream.total_out() as usize - previous_out); 45 | 46 | match status { 47 | Status::Ok => Ok(false), 48 | Status::StreamEnd => Ok(true), 49 | Status::GetCheck => Err(io::Error::new( 50 | io::ErrorKind::Other, 51 | "Unexpected lzma integrity check", 52 | )), 53 | Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "More memory needed")), 54 | } 55 | } 56 | 57 | fn flush( 58 | &mut self, 59 | _output: &mut PartialBuffer + AsMut<[u8]>>, 60 | ) -> io::Result { 61 | // While decoding flush is a noop 62 | Ok(true) 63 | } 64 | 65 | fn finish( 66 | &mut self, 
67 | output: &mut PartialBuffer + AsMut<[u8]>>, 68 | ) -> io::Result { 69 | let previous_out = self.stream.total_out() as usize; 70 | 71 | let status = self 72 | .stream 73 | .process(&[], output.unwritten_mut(), Action::Finish)?; 74 | 75 | output.advance(self.stream.total_out() as usize - previous_out); 76 | 77 | match status { 78 | Status::Ok => Ok(false), 79 | Status::StreamEnd => Ok(true), 80 | Status::GetCheck => Err(io::Error::new( 81 | io::ErrorKind::Other, 82 | "Unexpected lzma integrity check", 83 | )), 84 | Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "More memory needed")), 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/codec/xz2/encoder.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, io}; 2 | 3 | use liblzma::stream::{Action, Check, LzmaOptions, Status, Stream}; 4 | 5 | use crate::{ 6 | codec::{Encode, Xz2FileFormat}, 7 | util::PartialBuffer, 8 | }; 9 | 10 | pub struct Xz2Encoder { 11 | stream: Stream, 12 | } 13 | 14 | impl fmt::Debug for Xz2Encoder { 15 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 16 | f.debug_struct("Xz2Encoder").finish_non_exhaustive() 17 | } 18 | } 19 | 20 | impl Xz2Encoder { 21 | pub fn new(format: Xz2FileFormat, level: u32) -> Self { 22 | let stream = match format { 23 | Xz2FileFormat::Xz => Stream::new_easy_encoder(level, Check::Crc64).unwrap(), 24 | Xz2FileFormat::Lzma => { 25 | Stream::new_lzma_encoder(&LzmaOptions::new_preset(level).unwrap()).unwrap() 26 | } 27 | }; 28 | 29 | Self { stream } 30 | } 31 | } 32 | 33 | impl Encode for Xz2Encoder { 34 | fn encode( 35 | &mut self, 36 | input: &mut PartialBuffer>, 37 | output: &mut PartialBuffer + AsMut<[u8]>>, 38 | ) -> io::Result<()> { 39 | let previous_in = self.stream.total_in() as usize; 40 | let previous_out = self.stream.total_out() as usize; 41 | 42 | let status = self 43 | .stream 44 | .process(input.unwritten(), 
output.unwritten_mut(), Action::Run)?;

        input.advance(self.stream.total_in() as usize - previous_in);
        output.advance(self.stream.total_out() as usize - previous_out);

        match status {
            Status::Ok | Status::StreamEnd => Ok(()),
            Status::GetCheck => Err(io::Error::new(
                io::ErrorKind::Other,
                "Unexpected lzma integrity check",
            )),
            Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")),
        }
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        let previous_out = self.stream.total_out() as usize;

        let status = self
            .stream
            .process(&[], output.unwritten_mut(), Action::SyncFlush)?;

        output.advance(self.stream.total_out() as usize - previous_out);

        match status {
            Status::Ok => Ok(false),
            Status::StreamEnd => Ok(true),
            Status::GetCheck => Err(io::Error::new(
                io::ErrorKind::Other,
                "Unexpected lzma integrity check",
            )),
            Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")),
        }
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> io::Result<bool> {
        let previous_out = self.stream.total_out() as usize;

        let status = self
            .stream
            .process(&[], output.unwritten_mut(), Action::Finish)?;

        output.advance(self.stream.total_out() as usize - previous_out);

        match status {
            Status::Ok => Ok(false),
            Status::StreamEnd => Ok(true),
            Status::GetCheck => Err(io::Error::new(
                io::ErrorKind::Other,
                "Unexpected lzma integrity check",
            )),
            Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")),
        }
    }
}

// -------- /src/codec/xz2/mod.rs --------

mod decoder;
mod encoder;

pub enum Xz2FileFormat {
    Xz,
    Lzma,
}

pub(crate) use self::{decoder::Xz2Decoder, encoder::Xz2Encoder};

// -------- /src/codec/zlib/decoder.rs --------

use crate::util::PartialBuffer;
use std::io::Result;

#[derive(Debug)]
pub struct ZlibDecoder {
    inner: crate::codec::FlateDecoder,
}

impl ZlibDecoder {
    pub(crate) fn new() -> Self {
        Self {
            inner: crate::codec::FlateDecoder::new(true),
        }
    }
}

impl crate::codec::Decode for ZlibDecoder {
    fn reinit(&mut self) -> Result<()> {
        self.inner.reinit()?;
        Ok(())
    }

    fn decode(
        &mut self,
        input: &mut PartialBuffer<impl AsRef<[u8]>>,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.decode(input, output)
    }

    fn flush(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.flush(output)
    }

    fn finish(
        &mut self,
        output: &mut PartialBuffer<impl AsRef<[u8]> + AsMut<[u8]>>,
    ) -> Result<bool> {
        self.inner.finish(output)
    }
}

// -------- /src/codec/zlib/encoder.rs --------

use crate::{codec::Encode, util::PartialBuffer};
use std::io::Result;

use flate2::Compression;

#[derive(Debug)]
pub struct ZlibEncoder {
    inner: crate::codec::FlateEncoder,
}

impl ZlibEncoder {
    pub(crate) fn new(level: Compression) -> Self {
        Self {
            inner: crate::codec::FlateEncoder::new(level, true),
        }
    }

    pub(crate) fn get_ref(&self) -> &crate::codec::FlateEncoder {
        &self.inner
    }
}

impl Encode for ZlibEncoder {
    fn encode(
25 | &mut self, 26 | input: &mut PartialBuffer>, 27 | output: &mut PartialBuffer + AsMut<[u8]>>, 28 | ) -> Result<()> { 29 | self.inner.encode(input, output) 30 | } 31 | 32 | fn flush( 33 | &mut self, 34 | output: &mut PartialBuffer + AsMut<[u8]>>, 35 | ) -> Result { 36 | self.inner.flush(output) 37 | } 38 | 39 | fn finish( 40 | &mut self, 41 | output: &mut PartialBuffer + AsMut<[u8]>>, 42 | ) -> Result { 43 | self.inner.finish(output) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/codec/zlib/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::ZlibDecoder, encoder::ZlibEncoder}; 5 | -------------------------------------------------------------------------------- /src/codec/zstd/decoder.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::Result; 3 | 4 | use crate::{codec::Decode, unshared::Unshared, util::PartialBuffer}; 5 | use libzstd::stream::raw::{Decoder, Operation}; 6 | 7 | #[derive(Debug)] 8 | pub struct ZstdDecoder { 9 | decoder: Unshared>, 10 | } 11 | 12 | impl ZstdDecoder { 13 | pub(crate) fn new() -> Self { 14 | Self { 15 | decoder: Unshared::new(Decoder::new().unwrap()), 16 | } 17 | } 18 | 19 | pub(crate) fn new_with_params(params: &[crate::zstd::DParameter]) -> Self { 20 | let mut decoder = Decoder::new().unwrap(); 21 | for param in params { 22 | decoder.set_parameter(param.as_zstd()).unwrap(); 23 | } 24 | Self { 25 | decoder: Unshared::new(decoder), 26 | } 27 | } 28 | 29 | pub(crate) fn new_with_dict(dictionary: &[u8]) -> io::Result { 30 | let mut decoder = Decoder::with_dictionary(dictionary)?; 31 | Ok(Self { 32 | decoder: Unshared::new(decoder), 33 | }) 34 | } 35 | } 36 | 37 | impl Decode for ZstdDecoder { 38 | fn reinit(&mut self) -> Result<()> { 39 | self.decoder.get_mut().reinit()?; 40 | Ok(()) 41 | } 42 | 43 | fn 
decode( 44 | &mut self, 45 | input: &mut PartialBuffer>, 46 | output: &mut PartialBuffer + AsMut<[u8]>>, 47 | ) -> Result { 48 | let status = self 49 | .decoder 50 | .get_mut() 51 | .run_on_buffers(input.unwritten(), output.unwritten_mut())?; 52 | input.advance(status.bytes_read); 53 | output.advance(status.bytes_written); 54 | Ok(status.remaining == 0) 55 | } 56 | 57 | fn flush( 58 | &mut self, 59 | output: &mut PartialBuffer + AsMut<[u8]>>, 60 | ) -> Result { 61 | let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); 62 | let bytes_left = self.decoder.get_mut().flush(&mut out_buf)?; 63 | let len = out_buf.as_slice().len(); 64 | output.advance(len); 65 | Ok(bytes_left == 0) 66 | } 67 | 68 | fn finish( 69 | &mut self, 70 | output: &mut PartialBuffer + AsMut<[u8]>>, 71 | ) -> Result { 72 | let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); 73 | let bytes_left = self.decoder.get_mut().finish(&mut out_buf, true)?; 74 | let len = out_buf.as_slice().len(); 75 | output.advance(len); 76 | Ok(bytes_left == 0) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/codec/zstd/encoder.rs: -------------------------------------------------------------------------------- 1 | use crate::{codec::Encode, unshared::Unshared, util::PartialBuffer}; 2 | use libzstd::stream::raw::{CParameter, Encoder, Operation}; 3 | use std::io; 4 | use std::io::Result; 5 | 6 | #[derive(Debug)] 7 | pub struct ZstdEncoder { 8 | encoder: Unshared>, 9 | } 10 | 11 | impl ZstdEncoder { 12 | pub(crate) fn new(level: i32) -> Self { 13 | Self { 14 | encoder: Unshared::new(Encoder::new(level).unwrap()), 15 | } 16 | } 17 | 18 | pub(crate) fn new_with_params(level: i32, params: &[crate::zstd::CParameter]) -> Self { 19 | let mut encoder = Encoder::new(level).unwrap(); 20 | for param in params { 21 | encoder.set_parameter(param.as_zstd()).unwrap(); 22 | } 23 | Self { 24 | encoder: Unshared::new(encoder), 25 | } 26 | } 27 | 28 | 
pub(crate) fn new_with_dict(level: i32, dictionary: &[u8]) -> io::Result { 29 | let mut encoder = Encoder::with_dictionary(level, dictionary)?; 30 | Ok(Self { 31 | encoder: Unshared::new(encoder), 32 | }) 33 | } 34 | } 35 | 36 | impl Encode for ZstdEncoder { 37 | fn encode( 38 | &mut self, 39 | input: &mut PartialBuffer>, 40 | output: &mut PartialBuffer + AsMut<[u8]>>, 41 | ) -> Result<()> { 42 | let status = self 43 | .encoder 44 | .get_mut() 45 | .run_on_buffers(input.unwritten(), output.unwritten_mut())?; 46 | input.advance(status.bytes_read); 47 | output.advance(status.bytes_written); 48 | Ok(()) 49 | } 50 | 51 | fn flush( 52 | &mut self, 53 | output: &mut PartialBuffer + AsMut<[u8]>>, 54 | ) -> Result { 55 | let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); 56 | let bytes_left = self.encoder.get_mut().flush(&mut out_buf)?; 57 | let len = out_buf.as_slice().len(); 58 | output.advance(len); 59 | Ok(bytes_left == 0) 60 | } 61 | 62 | fn finish( 63 | &mut self, 64 | output: &mut PartialBuffer + AsMut<[u8]>>, 65 | ) -> Result { 66 | let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); 67 | let bytes_left = self.encoder.get_mut().finish(&mut out_buf, true)?; 68 | let len = out_buf.as_slice().len(); 69 | output.advance(len); 70 | Ok(bytes_left == 0) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/codec/zstd/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub(crate) use self::{decoder::ZstdDecoder, encoder::ZstdEncoder}; 5 | -------------------------------------------------------------------------------- /src/futures/bufread/generic/decoder.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | pin::Pin, 3 | task::{Context, Poll}, 4 | }; 5 | use std::io::{IoSlice, Result}; 6 | 7 | use crate::{codec::Decode, util::PartialBuffer}; 8 | use 
futures_core::ready; 9 | use futures_io::{AsyncBufRead, AsyncRead, AsyncWrite}; 10 | use pin_project_lite::pin_project; 11 | 12 | #[derive(Debug)] 13 | enum State { 14 | Decoding, 15 | Flushing, 16 | Done, 17 | Next, 18 | } 19 | 20 | pin_project! { 21 | #[derive(Debug)] 22 | pub struct Decoder { 23 | #[pin] 24 | reader: R, 25 | decoder: D, 26 | state: State, 27 | multiple_members: bool, 28 | } 29 | } 30 | 31 | impl Decoder { 32 | pub fn new(reader: R, decoder: D) -> Self { 33 | Self { 34 | reader, 35 | decoder, 36 | state: State::Decoding, 37 | multiple_members: false, 38 | } 39 | } 40 | } 41 | 42 | impl Decoder { 43 | pub fn get_ref(&self) -> &R { 44 | &self.reader 45 | } 46 | 47 | pub fn get_mut(&mut self) -> &mut R { 48 | &mut self.reader 49 | } 50 | 51 | pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { 52 | self.project().reader 53 | } 54 | 55 | pub fn into_inner(self) -> R { 56 | self.reader 57 | } 58 | 59 | pub fn multiple_members(&mut self, enabled: bool) { 60 | self.multiple_members = enabled; 61 | } 62 | } 63 | 64 | impl Decoder { 65 | fn do_poll_read( 66 | self: Pin<&mut Self>, 67 | cx: &mut Context<'_>, 68 | output: &mut PartialBuffer<&mut [u8]>, 69 | ) -> Poll> { 70 | let mut this = self.project(); 71 | 72 | let mut first = true; 73 | 74 | loop { 75 | *this.state = match this.state { 76 | State::Decoding => { 77 | let input = if first { 78 | &[][..] 79 | } else { 80 | ready!(this.reader.as_mut().poll_fill_buf(cx))? 81 | }; 82 | 83 | if input.is_empty() && !first { 84 | // Avoid attempting to reinitialise the decoder if the 85 | // reader has returned EOF. 
86 | *this.multiple_members = false; 87 | 88 | State::Flushing 89 | } else { 90 | let mut input = PartialBuffer::new(input); 91 | let res = this.decoder.decode(&mut input, output).or_else(|err| { 92 | // ignore the first error, occurs when input is empty 93 | // but we need to run decode to flush 94 | if first { 95 | Ok(false) 96 | } else { 97 | Err(err) 98 | } 99 | }); 100 | 101 | if !first { 102 | let len = input.written().len(); 103 | this.reader.as_mut().consume(len); 104 | } 105 | 106 | first = false; 107 | 108 | if res? { 109 | State::Flushing 110 | } else { 111 | State::Decoding 112 | } 113 | } 114 | } 115 | 116 | State::Flushing => { 117 | if this.decoder.finish(output)? { 118 | if *this.multiple_members { 119 | this.decoder.reinit()?; 120 | State::Next 121 | } else { 122 | State::Done 123 | } 124 | } else { 125 | State::Flushing 126 | } 127 | } 128 | 129 | State::Done => State::Done, 130 | 131 | State::Next => { 132 | let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; 133 | if input.is_empty() { 134 | State::Done 135 | } else { 136 | State::Decoding 137 | } 138 | } 139 | }; 140 | 141 | if let State::Done = *this.state { 142 | return Poll::Ready(Ok(())); 143 | } 144 | if output.unwritten().is_empty() { 145 | return Poll::Ready(Ok(())); 146 | } 147 | } 148 | } 149 | } 150 | 151 | impl AsyncRead for Decoder { 152 | fn poll_read( 153 | self: Pin<&mut Self>, 154 | cx: &mut Context<'_>, 155 | buf: &mut [u8], 156 | ) -> Poll> { 157 | if buf.is_empty() { 158 | return Poll::Ready(Ok(0)); 159 | } 160 | 161 | let mut output = PartialBuffer::new(buf); 162 | match self.do_poll_read(cx, &mut output)? 
{ 163 | Poll::Pending if output.written().is_empty() => Poll::Pending, 164 | _ => Poll::Ready(Ok(output.written().len())), 165 | } 166 | } 167 | } 168 | 169 | impl AsyncWrite for Decoder { 170 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 171 | self.get_pin_mut().poll_write(cx, buf) 172 | } 173 | 174 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 175 | self.get_pin_mut().poll_flush(cx) 176 | } 177 | 178 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 179 | self.get_pin_mut().poll_close(cx) 180 | } 181 | 182 | fn poll_write_vectored( 183 | self: Pin<&mut Self>, 184 | cx: &mut Context<'_>, 185 | bufs: &[IoSlice<'_>], 186 | ) -> Poll> { 187 | self.get_pin_mut().poll_write_vectored(cx, bufs) 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /src/futures/bufread/generic/encoder.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | pin::Pin, 3 | task::{Context, Poll}, 4 | }; 5 | use std::io::Result; 6 | 7 | use crate::{codec::Encode, util::PartialBuffer}; 8 | use futures_core::ready; 9 | use futures_io::{AsyncBufRead, AsyncRead, AsyncWrite, IoSlice}; 10 | use pin_project_lite::pin_project; 11 | 12 | #[derive(Debug)] 13 | enum State { 14 | Encoding, 15 | Flushing, 16 | Done, 17 | } 18 | 19 | pin_project! 
{
    #[derive(Debug)]
    pub struct Encoder<R, E> {
        #[pin]
        reader: R,
        encoder: E,
        state: State,
    }
}

impl<R: AsyncBufRead, E: Encode> Encoder<R, E> {
    pub fn new(reader: R, encoder: E) -> Self {
        Self {
            reader,
            encoder,
            state: State::Encoding,
        }
    }

    // `_cap` is accepted for API symmetry; this generic encoder performs no
    // internal buffering of its own, so the capacity hint is ignored.
    pub fn with_capacity(reader: R, encoder: E, _cap: usize) -> Self {
        Self::new(reader, encoder)
    }
}

impl<R, E> Encoder<R, E> {
    pub fn get_ref(&self) -> &R {
        &self.reader
    }

    pub fn get_mut(&mut self) -> &mut R {
        &mut self.reader
    }

    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().reader
    }

    pub(crate) fn get_encoder_ref(&self) -> &E {
        &self.encoder
    }

    pub fn into_inner(self) -> R {
        self.reader
    }
}

impl<R: AsyncBufRead, E: Encode> Encoder<R, E> {
    fn do_poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        output: &mut PartialBuffer<&mut [u8]>,
    ) -> Poll<Result<()>> {
        let mut this = self.project();

        loop {
            *this.state = match this.state {
                State::Encoding => {
                    let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?;
                    if input.is_empty() {
                        // EOF on the underlying reader: finalise the stream.
                        State::Flushing
                    } else {
                        let mut input = PartialBuffer::new(input);
                        this.encoder.encode(&mut input, output)?;
                        // Only the bytes the codec actually consumed may be
                        // removed from the reader's buffer.
                        let len = input.written().len();
                        this.reader.as_mut().consume(len);
                        State::Encoding
                    }
                }

                State::Flushing => {
                    // `finish` writes the stream epilogue; it may need several
                    // calls if `output` fills up part-way through.
                    if this.encoder.finish(output)? {
                        State::Done
                    } else {
                        State::Flushing
                    }
                }

                State::Done => State::Done,
            };

            if let State::Done = *this.state {
                return Poll::Ready(Ok(()));
            }
            if output.unwritten().is_empty() {
                return Poll::Ready(Ok(()));
            }
        }
    }
}

impl<R: AsyncBufRead, E: Encode> AsyncRead for Encoder<R, E> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize>> {
        if buf.is_empty() {
            return Poll::Ready(Ok(0));
        }

        let mut output = PartialBuffer::new(buf);
        match self.do_poll_read(cx, &mut output)? {
            // Only report Pending if no bytes were produced at all.
            Poll::Pending if output.written().is_empty() => Poll::Pending,
            _ => Poll::Ready(Ok(output.written().len())),
        }
    }
}

// Pass-through so an encoder wrapping a duplex stream can still be written to.
impl<R: AsyncWrite, E> AsyncWrite for Encoder<R, E> {
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
        self.get_pin_mut().poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.get_pin_mut().poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.get_pin_mut().poll_close(cx)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<Result<usize>> {
        self.get_pin_mut().poll_write_vectored(cx, bufs)
    }
}
--------------------------------------------------------------------------------
/src/futures/bufread/generic/mod.rs:
--------------------------------------------------------------------------------
mod decoder;
mod encoder;

pub use self::{decoder::Decoder, encoder::Encoder};
--------------------------------------------------------------------------------
/src/futures/bufread/macros/decoder.rs:
--------------------------------------------------------------------------------
macro_rules!
decoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will 7 | /// read compressed data from an underlying stream and emit a stream of uncompressed data. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::futures::bufread::Decoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: futures_io::AsyncBufRead> $name<$inner> { 16 | /// Creates a new decoder which will read compressed data from the given stream and 17 | /// emit a uncompressed stream. 18 | pub fn new(read: $inner) -> $name<$inner> { 19 | $name { 20 | inner: crate::futures::bufread::Decoder::new(read, crate::codec::$name::new()), 21 | } 22 | } 23 | 24 | $($($inherent_methods)*)* 25 | } 26 | 27 | impl<$inner> $name<$inner> { 28 | /// Configure multi-member/frame decoding, if enabled this will reset the decoder state 29 | /// when reaching the end of a compressed member/frame and expect either EOF or another 30 | /// compressed member/frame to follow it in the stream. 31 | pub fn multiple_members(&mut self, enabled: bool) { 32 | self.inner.multiple_members(enabled); 33 | } 34 | 35 | /// Acquires a reference to the underlying reader that this decoder is wrapping. 36 | pub fn get_ref(&self) -> &$inner { 37 | self.inner.get_ref() 38 | } 39 | 40 | /// Acquires a mutable reference to the underlying reader that this decoder is 41 | /// wrapping. 42 | /// 43 | /// Note that care must be taken to avoid tampering with the state of the reader which 44 | /// may otherwise confuse this decoder. 45 | pub fn get_mut(&mut self) -> &mut $inner { 46 | self.inner.get_mut() 47 | } 48 | 49 | /// Acquires a pinned mutable reference to the underlying reader that this decoder is 50 | /// wrapping. 
51 | /// 52 | /// Note that care must be taken to avoid tampering with the state of the reader which 53 | /// may otherwise confuse this decoder. 54 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 55 | self.project().inner.get_pin_mut() 56 | } 57 | 58 | /// Consumes this decoder returning the underlying reader. 59 | /// 60 | /// Note that this may discard internal state of this decoder, so care should be taken 61 | /// to avoid losing resources when this is called. 62 | pub fn into_inner(self) -> $inner { 63 | self.inner.into_inner() 64 | } 65 | } 66 | 67 | impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncRead for $name<$inner> { 68 | fn poll_read( 69 | self: std::pin::Pin<&mut Self>, 70 | cx: &mut std::task::Context<'_>, 71 | buf: &mut [u8], 72 | ) -> std::task::Poll> { 73 | self.project().inner.poll_read(cx, buf) 74 | } 75 | } 76 | 77 | impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { 78 | fn poll_write( 79 | self: std::pin::Pin<&mut Self>, 80 | cx: &mut std::task::Context<'_>, 81 | buf: &[u8], 82 | ) -> std::task::Poll> { 83 | self.get_pin_mut().poll_write(cx, buf) 84 | } 85 | 86 | fn poll_flush( 87 | self: std::pin::Pin<&mut Self>, 88 | cx: &mut std::task::Context<'_>, 89 | ) -> std::task::Poll> { 90 | self.get_pin_mut().poll_flush(cx) 91 | } 92 | 93 | fn poll_close( 94 | self: std::pin::Pin<&mut Self>, 95 | cx: &mut std::task::Context<'_>, 96 | ) -> std::task::Poll> { 97 | self.get_pin_mut().poll_close(cx) 98 | } 99 | 100 | fn poll_write_vectored( 101 | self: std::pin::Pin<&mut Self>, 102 | cx: &mut std::task::Context<'_>, 103 | bufs: &[std::io::IoSlice<'_>] 104 | ) -> std::task::Poll> { 105 | self.get_pin_mut().poll_write_vectored(cx, bufs) 106 | } 107 | } 108 | 109 | const _: () = { 110 | fn _assert() { 111 | use crate::util::{_assert_send, _assert_sync}; 112 | use core::pin::Pin; 113 | use futures_io::AsyncBufRead; 114 | 115 | _assert_send::<$name>>>(); 116 | 
_assert_sync::<$name>>>(); 117 | } 118 | }; 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/futures/bufread/macros/encoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! encoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will 7 | /// read uncompressed data from an underlying stream and emit a stream of compressed data. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::futures::bufread::Encoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: futures_io::AsyncBufRead> $name<$inner> { 16 | $( 17 | /// Creates a new encoder which will read uncompressed data from the given stream 18 | /// and emit a compressed stream. 19 | /// 20 | $($inherent_methods)* 21 | )* 22 | 23 | /// Acquires a reference to the underlying reader that this encoder is wrapping. 24 | pub fn get_ref(&self) -> &$inner { 25 | self.inner.get_ref() 26 | } 27 | 28 | /// Acquires a mutable reference to the underlying reader that this encoder is 29 | /// wrapping. 30 | /// 31 | /// Note that care must be taken to avoid tampering with the state of the reader which 32 | /// may otherwise confuse this encoder. 33 | pub fn get_mut(&mut self) -> &mut $inner { 34 | self.inner.get_mut() 35 | } 36 | 37 | /// Acquires a pinned mutable reference to the underlying reader that this encoder is 38 | /// wrapping. 39 | /// 40 | /// Note that care must be taken to avoid tampering with the state of the reader which 41 | /// may otherwise confuse this encoder. 42 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 43 | self.project().inner.get_pin_mut() 44 | } 45 | 46 | /// Consumes this encoder returning the underlying reader. 
47 | /// 48 | /// Note that this may discard internal state of this encoder, so care should be taken 49 | /// to avoid losing resources when this is called. 50 | pub fn into_inner(self) -> $inner { 51 | self.inner.into_inner() 52 | } 53 | } 54 | 55 | impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncRead for $name<$inner> { 56 | fn poll_read( 57 | self: std::pin::Pin<&mut Self>, 58 | cx: &mut std::task::Context<'_>, 59 | buf: &mut [u8], 60 | ) -> std::task::Poll> { 61 | self.project().inner.poll_read(cx, buf) 62 | } 63 | } 64 | 65 | impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { 66 | fn poll_write( 67 | self: std::pin::Pin<&mut Self>, 68 | cx: &mut std::task::Context<'_>, 69 | buf: &[u8], 70 | ) -> std::task::Poll> { 71 | self.project().inner.poll_write(cx, buf) 72 | } 73 | 74 | fn poll_flush( 75 | self: std::pin::Pin<&mut Self>, 76 | cx: &mut std::task::Context<'_>, 77 | ) -> std::task::Poll> { 78 | self.project().inner.poll_flush(cx) 79 | } 80 | 81 | fn poll_close( 82 | self: std::pin::Pin<&mut Self>, 83 | cx: &mut std::task::Context<'_>, 84 | ) -> std::task::Poll> { 85 | self.project().inner.poll_close(cx) 86 | } 87 | 88 | fn poll_write_vectored( 89 | self: std::pin::Pin<&mut Self>, 90 | cx: &mut std::task::Context<'_>, 91 | bufs: &[std::io::IoSlice<'_>] 92 | ) -> std::task::Poll> { 93 | self.project().inner.poll_write_vectored(cx, bufs) 94 | } 95 | } 96 | 97 | const _: () = { 98 | fn _assert() { 99 | use crate::util::{_assert_send, _assert_sync}; 100 | use core::pin::Pin; 101 | use futures_io::AsyncBufRead; 102 | 103 | _assert_send::<$name>>>(); 104 | _assert_sync::<$name>>>(); 105 | } 106 | }; 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/futures/bufread/macros/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod decoder; 3 | #[macro_use] 4 | mod encoder; 5 | 
-------------------------------------------------------------------------------- /src/futures/bufread/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types which operate over [`AsyncBufRead`](futures_io::AsyncBufRead) streams, both encoders and 2 | //! decoders for various formats. 3 | 4 | #[macro_use] 5 | mod macros; 6 | mod generic; 7 | 8 | pub(crate) use generic::{Decoder, Encoder}; 9 | 10 | algos!(futures::bufread); 11 | -------------------------------------------------------------------------------- /src/futures/mod.rs: -------------------------------------------------------------------------------- 1 | //! Implementations for IO traits exported by [`futures-io`](::futures_io). 2 | 3 | pub mod bufread; 4 | pub mod write; 5 | -------------------------------------------------------------------------------- /src/futures/write/buf_write.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | pub(crate) trait AsyncBufWrite { 8 | /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if 9 | /// it is full. 10 | /// 11 | /// On success, returns `Poll::Ready(Ok(buf))`. 12 | /// 13 | /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and 14 | /// arranges for the current task context (`cx`) to receive a notification when the object 15 | /// becomes readable or is closed. 16 | fn poll_partial_flush_buf( 17 | self: Pin<&mut Self>, 18 | cx: &mut Context<'_>, 19 | ) -> Poll>; 20 | 21 | /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be 22 | /// written out to the underlying IO when possible. 23 | /// 24 | /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to 25 | /// function properly. 
This function does not perform any I/O, it simply informs this object 26 | /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should 27 | /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't 28 | /// called before calling it. 29 | /// 30 | /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. 31 | fn produce(self: Pin<&mut Self>, amt: usize); 32 | } 33 | -------------------------------------------------------------------------------- /src/futures/write/generic/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub use self::{decoder::Decoder, encoder::Encoder}; 5 | -------------------------------------------------------------------------------- /src/futures/write/macros/decoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! decoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will 7 | /// take in compressed data and write it uncompressed to an underlying stream. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::futures::write::Decoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: futures_io::AsyncWrite> $name<$inner> { 16 | /// Creates a new decoder which will take in compressed data and write it uncompressed 17 | /// to the given stream. 18 | pub fn new(read: $inner) -> $name<$inner> { 19 | $name { 20 | inner: crate::futures::write::Decoder::new(read, crate::codec::$name::new()), 21 | } 22 | } 23 | 24 | $($($inherent_methods)*)* 25 | } 26 | 27 | impl<$inner> $name<$inner> { 28 | /// Acquires a reference to the underlying reader that this decoder is wrapping. 
29 | pub fn get_ref(&self) -> &$inner { 30 | self.inner.get_ref() 31 | } 32 | 33 | /// Acquires a mutable reference to the underlying reader that this decoder is 34 | /// wrapping. 35 | /// 36 | /// Note that care must be taken to avoid tampering with the state of the reader which 37 | /// may otherwise confuse this decoder. 38 | pub fn get_mut(&mut self) -> &mut $inner { 39 | self.inner.get_mut() 40 | } 41 | 42 | /// Acquires a pinned mutable reference to the underlying reader that this decoder is 43 | /// wrapping. 44 | /// 45 | /// Note that care must be taken to avoid tampering with the state of the reader which 46 | /// may otherwise confuse this decoder. 47 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 48 | self.project().inner.get_pin_mut() 49 | } 50 | 51 | /// Consumes this decoder returning the underlying reader. 52 | /// 53 | /// Note that this may discard internal state of this decoder, so care should be taken 54 | /// to avoid losing resources when this is called. 
55 | pub fn into_inner(self) -> $inner { 56 | self.inner.into_inner() 57 | } 58 | } 59 | 60 | impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { 61 | fn poll_write( 62 | self: std::pin::Pin<&mut Self>, 63 | cx: &mut std::task::Context<'_>, 64 | buf: &[u8], 65 | ) -> std::task::Poll> { 66 | self.project().inner.poll_write(cx, buf) 67 | } 68 | 69 | fn poll_flush( 70 | self: std::pin::Pin<&mut Self>, 71 | cx: &mut std::task::Context<'_>, 72 | ) -> std::task::Poll> { 73 | self.project().inner.poll_flush(cx) 74 | } 75 | 76 | fn poll_close( 77 | self: std::pin::Pin<&mut Self>, 78 | cx: &mut std::task::Context<'_>, 79 | ) -> std::task::Poll> { 80 | self.project().inner.poll_close(cx) 81 | } 82 | } 83 | 84 | impl<$inner: futures_io::AsyncRead> futures_io::AsyncRead for $name<$inner> { 85 | fn poll_read( 86 | self: std::pin::Pin<&mut Self>, 87 | cx: &mut std::task::Context<'_>, 88 | buf: &mut [u8] 89 | ) -> std::task::Poll> { 90 | self.get_pin_mut().poll_read(cx, buf) 91 | } 92 | 93 | fn poll_read_vectored( 94 | self: std::pin::Pin<&mut Self>, 95 | cx: &mut std::task::Context<'_>, 96 | bufs: &mut [futures_io::IoSliceMut<'_>] 97 | ) -> std::task::Poll> { 98 | self.get_pin_mut().poll_read_vectored(cx, bufs) 99 | } 100 | } 101 | 102 | impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncBufRead for $name<$inner> { 103 | fn poll_fill_buf( 104 | self: std::pin::Pin<&mut Self>, 105 | cx: &mut std::task::Context<'_> 106 | ) -> std::task::Poll> { 107 | self.get_pin_mut().poll_fill_buf(cx) 108 | } 109 | 110 | fn consume(self: std::pin::Pin<&mut Self>, amt: usize) { 111 | self.get_pin_mut().consume(amt) 112 | } 113 | } 114 | 115 | const _: () = { 116 | fn _assert() { 117 | use crate::util::{_assert_send, _assert_sync}; 118 | use core::pin::Pin; 119 | use futures_io::AsyncWrite; 120 | 121 | _assert_send::<$name>>>(); 122 | _assert_sync::<$name>>>(); 123 | } 124 | }; 125 | } 126 | } 127 | 
-------------------------------------------------------------------------------- /src/futures/write/macros/encoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! encoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will 7 | /// take in uncompressed data and write it compressed to an underlying stream. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::futures::write::Encoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: futures_io::AsyncWrite> $name<$inner> { 16 | $( 17 | /// Creates a new encoder which will take in uncompressed data and write it 18 | /// compressed to the given stream. 19 | /// 20 | $($inherent_methods)* 21 | )* 22 | } 23 | 24 | impl<$inner> $name<$inner> { 25 | /// Acquires a reference to the underlying writer that this encoder is wrapping. 26 | pub fn get_ref(&self) -> &$inner { 27 | self.inner.get_ref() 28 | } 29 | 30 | /// Acquires a mutable reference to the underlying writer that this encoder is 31 | /// wrapping. 32 | /// 33 | /// Note that care must be taken to avoid tampering with the state of the writer which 34 | /// may otherwise confuse this encoder. 35 | pub fn get_mut(&mut self) -> &mut $inner { 36 | self.inner.get_mut() 37 | } 38 | 39 | /// Acquires a pinned mutable reference to the underlying writer that this encoder is 40 | /// wrapping. 41 | /// 42 | /// Note that care must be taken to avoid tampering with the state of the writer which 43 | /// may otherwise confuse this encoder. 44 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 45 | self.project().inner.get_pin_mut() 46 | } 47 | 48 | /// Consumes this encoder returning the underlying writer. 
49 | /// 50 | /// Note that this may discard internal state of this encoder, so care should be taken 51 | /// to avoid losing resources when this is called. 52 | pub fn into_inner(self) -> $inner { 53 | self.inner.into_inner() 54 | } 55 | } 56 | 57 | impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { 58 | fn poll_write( 59 | self: std::pin::Pin<&mut Self>, 60 | cx: &mut std::task::Context<'_>, 61 | buf: &[u8], 62 | ) -> std::task::Poll> { 63 | self.project().inner.poll_write(cx, buf) 64 | } 65 | 66 | fn poll_flush( 67 | self: std::pin::Pin<&mut Self>, 68 | cx: &mut std::task::Context<'_>, 69 | ) -> std::task::Poll> { 70 | self.project().inner.poll_flush(cx) 71 | } 72 | 73 | fn poll_close( 74 | self: std::pin::Pin<&mut Self>, 75 | cx: &mut std::task::Context<'_>, 76 | ) -> std::task::Poll> { 77 | self.project().inner.poll_close(cx) 78 | } 79 | } 80 | 81 | impl<$inner: futures_io::AsyncRead> futures_io::AsyncRead for $name<$inner> { 82 | fn poll_read( 83 | self: std::pin::Pin<&mut Self>, 84 | cx: &mut std::task::Context<'_>, 85 | buf: &mut [u8] 86 | ) -> std::task::Poll> { 87 | self.get_pin_mut().poll_read(cx, buf) 88 | } 89 | 90 | fn poll_read_vectored( 91 | self: std::pin::Pin<&mut Self>, 92 | cx: &mut std::task::Context<'_>, 93 | bufs: &mut [futures_io::IoSliceMut<'_>] 94 | ) -> std::task::Poll> { 95 | self.get_pin_mut().poll_read_vectored(cx, bufs) 96 | } 97 | } 98 | 99 | impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncBufRead for $name<$inner> { 100 | fn poll_fill_buf( 101 | self: std::pin::Pin<&mut Self>, 102 | cx: &mut std::task::Context<'_> 103 | ) -> std::task::Poll> { 104 | self.get_pin_mut().poll_fill_buf(cx) 105 | } 106 | 107 | fn consume(self: std::pin::Pin<&mut Self>, amt: usize) { 108 | self.get_pin_mut().consume(amt) 109 | } 110 | } 111 | 112 | const _: () = { 113 | fn _assert() { 114 | use crate::util::{_assert_send, _assert_sync}; 115 | use core::pin::Pin; 116 | use futures_io::AsyncWrite; 117 | 118 | 
_assert_send::<$name>>>(); 119 | _assert_sync::<$name>>>(); 120 | } 121 | }; 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/futures/write/macros/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod decoder; 3 | #[macro_use] 4 | mod encoder; 5 | -------------------------------------------------------------------------------- /src/futures/write/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types which operate over [`AsyncWrite`](futures_io::AsyncWrite) streams, both encoders and 2 | //! decoders for various formats. 3 | 4 | #[macro_use] 5 | mod macros; 6 | mod generic; 7 | 8 | mod buf_write; 9 | mod buf_writer; 10 | 11 | use self::{ 12 | buf_write::AsyncBufWrite, 13 | buf_writer::BufWriter, 14 | generic::{Decoder, Encoder}, 15 | }; 16 | 17 | algos!(futures::write); 18 | -------------------------------------------------------------------------------- /src/lz4.rs: -------------------------------------------------------------------------------- 1 | //! This module contains lz4-specific types for async-compression. 2 | 3 | pub use lz4::liblz4::BlockSize; 4 | use lz4::{ 5 | liblz4::{BlockChecksum, FrameType, LZ4FFrameInfo, LZ4FPreferences}, 6 | BlockMode, ContentChecksum, 7 | }; 8 | 9 | /// lz4 compression parameters builder. This is a stable wrapper around lz4's own encoder 10 | /// params type, to abstract over different versions of the lz4 library. 11 | /// 12 | /// See the [lz4 documentation](https://github.com/lz4/lz4/blob/dev/doc/lz4frame_manual.html) 13 | /// for more information on these parameters. 
14 | /// 15 | /// # Examples 16 | /// 17 | /// ``` 18 | /// use async_compression::lz4; 19 | /// 20 | /// let params = lz4::EncoderParams::default() 21 | /// .block_size(lz4::BlockSize::Max1MB) 22 | /// .content_checksum(true); 23 | /// ``` 24 | #[derive(Clone, Debug, Default)] 25 | pub struct EncoderParams { 26 | block_size: Option, 27 | block_checksum: Option, 28 | content_checksum: Option, 29 | } 30 | 31 | impl EncoderParams { 32 | /// Sets input block size. 33 | pub fn block_size(mut self, block_size: BlockSize) -> Self { 34 | self.block_size = Some(block_size); 35 | self 36 | } 37 | 38 | /// Add a 32-bit checksum of frame's decompressed data. 39 | pub fn content_checksum(mut self, enable: bool) -> Self { 40 | self.content_checksum = Some(if enable { 41 | ContentChecksum::ChecksumEnabled 42 | } else { 43 | ContentChecksum::NoChecksum 44 | }); 45 | self 46 | } 47 | 48 | /// Each block followed by a checksum of block's compressed data. 49 | pub fn block_checksum(mut self, enable: bool) -> Self { 50 | self.block_checksum = Some(if enable { 51 | BlockChecksum::BlockChecksumEnabled 52 | } else { 53 | BlockChecksum::NoBlockChecksum 54 | }); 55 | self 56 | } 57 | 58 | pub(crate) fn as_lz4(&self) -> LZ4FPreferences { 59 | let block_size_id = self.block_size.clone().unwrap_or(BlockSize::Default); 60 | let content_checksum_flag = self 61 | .content_checksum 62 | .clone() 63 | .unwrap_or(ContentChecksum::NoChecksum); 64 | let block_checksum_flag = self 65 | .block_checksum 66 | .clone() 67 | .unwrap_or(BlockChecksum::NoBlockChecksum); 68 | 69 | LZ4FPreferences { 70 | frame_info: LZ4FFrameInfo { 71 | block_size_id, 72 | block_mode: BlockMode::Linked, 73 | content_checksum_flag, 74 | frame_type: FrameType::Frame, 75 | content_size: 0, 76 | dict_id: 0, 77 | block_checksum_flag, 78 | }, 79 | compression_level: 0, 80 | auto_flush: 0, 81 | favor_dec_speed: 0, 82 | reserved: [0; 3], 83 | } 84 | } 85 | } 86 | 
--------------------------------------------------------------------------------
/src/tokio/bufread/generic/decoder.rs:
--------------------------------------------------------------------------------
use core::{
    pin::Pin,
    task::{Context, Poll},
};
use std::io::{IoSlice, Result};

use crate::{codec::Decode, util::PartialBuffer};
use futures_core::ready;
use pin_project_lite::pin_project;
use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf};

/// Where the decoder is in the decode → flush → (next member) lifecycle.
#[derive(Debug)]
enum State {
    Decoding,
    Flushing,
    Done,
    Next,
}

pin_project! {
    #[derive(Debug)]
    pub struct Decoder<R, D> {
        #[pin]
        reader: R,
        decoder: D,
        state: State,
        multiple_members: bool,
    }
}

impl<R: AsyncBufRead, D: Decode> Decoder<R, D> {
    pub fn new(reader: R, decoder: D) -> Self {
        Self {
            reader,
            decoder,
            state: State::Decoding,
            multiple_members: false,
        }
    }
}

impl<R, D> Decoder<R, D> {
    pub fn get_ref(&self) -> &R {
        &self.reader
    }

    pub fn get_mut(&mut self) -> &mut R {
        &mut self.reader
    }

    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().reader
    }

    pub fn into_inner(self) -> R {
        self.reader
    }

    /// When enabled, reaching the end of a compressed member resets the decoder
    /// and expects either EOF or another member to follow in the stream.
    pub fn multiple_members(&mut self, enabled: bool) {
        self.multiple_members = enabled;
    }
}

impl<R: AsyncBufRead, D: Decode> Decoder<R, D> {
    fn do_poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        output: &mut PartialBuffer<&mut [u8]>,
    ) -> Poll<Result<()>> {
        let mut this = self.project();

        // On the first pass we feed an empty slice so the decoder can flush any
        // internally-buffered output before we await fresh input.
        let mut first = true;

        loop {
            *this.state = match this.state {
                State::Decoding => {
                    let input = if first {
                        &[][..]
                    } else {
                        ready!(this.reader.as_mut().poll_fill_buf(cx))?
                    };

                    if input.is_empty() && !first {
                        // Avoid attempting to reinitialise the decoder if the reader
                        // has returned EOF.
86 | *this.multiple_members = false; 87 | 88 | State::Flushing 89 | } else { 90 | let mut input = PartialBuffer::new(input); 91 | let res = this.decoder.decode(&mut input, output).or_else(|err| { 92 | // ignore the first error, occurs when input is empty 93 | // but we need to run decode to flush 94 | if first { 95 | Ok(false) 96 | } else { 97 | Err(err) 98 | } 99 | }); 100 | 101 | if !first { 102 | let len = input.written().len(); 103 | this.reader.as_mut().consume(len); 104 | } 105 | 106 | first = false; 107 | 108 | if res? { 109 | State::Flushing 110 | } else { 111 | State::Decoding 112 | } 113 | } 114 | } 115 | 116 | State::Flushing => { 117 | if this.decoder.finish(output)? { 118 | if *this.multiple_members { 119 | this.decoder.reinit()?; 120 | State::Next 121 | } else { 122 | State::Done 123 | } 124 | } else { 125 | State::Flushing 126 | } 127 | } 128 | 129 | State::Done => State::Done, 130 | 131 | State::Next => { 132 | let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; 133 | if input.is_empty() { 134 | State::Done 135 | } else { 136 | State::Decoding 137 | } 138 | } 139 | }; 140 | 141 | if let State::Done = *this.state { 142 | return Poll::Ready(Ok(())); 143 | } 144 | if output.unwritten().is_empty() { 145 | return Poll::Ready(Ok(())); 146 | } 147 | } 148 | } 149 | } 150 | 151 | impl AsyncRead for Decoder { 152 | fn poll_read( 153 | self: Pin<&mut Self>, 154 | cx: &mut Context<'_>, 155 | buf: &mut ReadBuf<'_>, 156 | ) -> Poll> { 157 | if buf.remaining() == 0 { 158 | return Poll::Ready(Ok(())); 159 | } 160 | 161 | let mut output = PartialBuffer::new(buf.initialize_unfilled()); 162 | match self.do_poll_read(cx, &mut output)? 
{ 163 | Poll::Pending if output.written().is_empty() => Poll::Pending, 164 | _ => { 165 | let len = output.written().len(); 166 | buf.advance(len); 167 | Poll::Ready(Ok(())) 168 | } 169 | } 170 | } 171 | } 172 | 173 | impl AsyncWrite for Decoder { 174 | fn poll_write( 175 | mut self: Pin<&mut Self>, 176 | cx: &mut Context<'_>, 177 | buf: &[u8], 178 | ) -> Poll> { 179 | self.get_pin_mut().poll_write(cx, buf) 180 | } 181 | 182 | fn poll_write_vectored( 183 | mut self: Pin<&mut Self>, 184 | cx: &mut Context<'_>, 185 | mut bufs: &[IoSlice<'_>], 186 | ) -> Poll> { 187 | self.get_pin_mut().poll_write_vectored(cx, bufs) 188 | } 189 | 190 | fn is_write_vectored(&self) -> bool { 191 | self.get_ref().is_write_vectored() 192 | } 193 | 194 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 195 | self.get_pin_mut().poll_flush(cx) 196 | } 197 | 198 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 199 | self.get_pin_mut().poll_shutdown(cx) 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/tokio/bufread/generic/encoder.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | pin::Pin, 3 | task::{Context, Poll}, 4 | }; 5 | use std::io::{IoSlice, Result}; 6 | 7 | use crate::{codec::Encode, util::PartialBuffer}; 8 | use futures_core::ready; 9 | use pin_project_lite::pin_project; 10 | use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; 11 | 12 | #[derive(Debug)] 13 | enum State { 14 | Encoding, 15 | Flushing, 16 | Done, 17 | } 18 | 19 | pin_project! 
{ 20 | #[derive(Debug)] 21 | pub struct Encoder { 22 | #[pin] 23 | reader: R, 24 | encoder: E, 25 | state: State, 26 | } 27 | } 28 | 29 | impl Encoder { 30 | pub fn new(reader: R, encoder: E) -> Self { 31 | Self { 32 | reader, 33 | encoder, 34 | state: State::Encoding, 35 | } 36 | } 37 | 38 | pub fn with_capacity(reader: R, encoder: E, _cap: usize) -> Self { 39 | Self::new(reader, encoder) 40 | } 41 | } 42 | 43 | impl Encoder { 44 | pub fn get_ref(&self) -> &R { 45 | &self.reader 46 | } 47 | 48 | pub fn get_mut(&mut self) -> &mut R { 49 | &mut self.reader 50 | } 51 | 52 | pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { 53 | self.project().reader 54 | } 55 | 56 | pub(crate) fn get_encoder_ref(&self) -> &E { 57 | &self.encoder 58 | } 59 | 60 | pub fn into_inner(self) -> R { 61 | self.reader 62 | } 63 | } 64 | impl Encoder { 65 | fn do_poll_read( 66 | self: Pin<&mut Self>, 67 | cx: &mut Context<'_>, 68 | output: &mut PartialBuffer<&mut [u8]>, 69 | ) -> Poll> { 70 | let mut this = self.project(); 71 | 72 | loop { 73 | *this.state = match this.state { 74 | State::Encoding => { 75 | let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; 76 | if input.is_empty() { 77 | State::Flushing 78 | } else { 79 | let mut input = PartialBuffer::new(input); 80 | this.encoder.encode(&mut input, output)?; 81 | let len = input.written().len(); 82 | this.reader.as_mut().consume(len); 83 | State::Encoding 84 | } 85 | } 86 | 87 | State::Flushing => { 88 | if this.encoder.finish(output)? 
{ 89 | State::Done 90 | } else { 91 | State::Flushing 92 | } 93 | } 94 | 95 | State::Done => State::Done, 96 | }; 97 | 98 | if let State::Done = *this.state { 99 | return Poll::Ready(Ok(())); 100 | } 101 | if output.unwritten().is_empty() { 102 | return Poll::Ready(Ok(())); 103 | } 104 | } 105 | } 106 | } 107 | 108 | impl AsyncRead for Encoder { 109 | fn poll_read( 110 | self: Pin<&mut Self>, 111 | cx: &mut Context<'_>, 112 | buf: &mut ReadBuf<'_>, 113 | ) -> Poll> { 114 | if buf.remaining() == 0 { 115 | return Poll::Ready(Ok(())); 116 | } 117 | 118 | let mut output = PartialBuffer::new(buf.initialize_unfilled()); 119 | match self.do_poll_read(cx, &mut output)? { 120 | Poll::Pending if output.written().is_empty() => Poll::Pending, 121 | _ => { 122 | let len = output.written().len(); 123 | buf.advance(len); 124 | Poll::Ready(Ok(())) 125 | } 126 | } 127 | } 128 | } 129 | 130 | impl AsyncWrite for Encoder { 131 | fn poll_write( 132 | mut self: Pin<&mut Self>, 133 | cx: &mut Context<'_>, 134 | buf: &[u8], 135 | ) -> Poll> { 136 | self.get_pin_mut().poll_write(cx, buf) 137 | } 138 | 139 | fn poll_write_vectored( 140 | mut self: Pin<&mut Self>, 141 | cx: &mut Context<'_>, 142 | mut bufs: &[IoSlice<'_>], 143 | ) -> Poll> { 144 | self.get_pin_mut().poll_write_vectored(cx, bufs) 145 | } 146 | 147 | fn is_write_vectored(&self) -> bool { 148 | self.get_ref().is_write_vectored() 149 | } 150 | 151 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 152 | self.get_pin_mut().poll_flush(cx) 153 | } 154 | 155 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 156 | self.get_pin_mut().poll_shutdown(cx) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/tokio/bufread/generic/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub use self::{decoder::Decoder, encoder::Encoder}; 5 | 
-------------------------------------------------------------------------------- /src/tokio/bufread/macros/decoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! decoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will 7 | /// read compressed data from an underlying stream and emit a stream of uncompressed data. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::tokio::bufread::Decoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: tokio::io::AsyncBufRead> $name<$inner> { 16 | /// Creates a new decoder which will read compressed data from the given stream and 17 | /// emit a uncompressed stream. 18 | pub fn new(read: $inner) -> $name<$inner> { 19 | $name { 20 | inner: crate::tokio::bufread::Decoder::new(read, crate::codec::$name::new()), 21 | } 22 | } 23 | 24 | $($($inherent_methods)*)* 25 | } 26 | 27 | impl<$inner> $name<$inner> { 28 | /// Configure multi-member/frame decoding, if enabled this will reset the decoder state 29 | /// when reaching the end of a compressed member/frame and expect either EOF or another 30 | /// compressed member/frame to follow it in the stream. 31 | pub fn multiple_members(&mut self, enabled: bool) { 32 | self.inner.multiple_members(enabled); 33 | } 34 | 35 | /// Acquires a reference to the underlying reader that this decoder is wrapping. 36 | pub fn get_ref(&self) -> &$inner { 37 | self.inner.get_ref() 38 | } 39 | 40 | /// Acquires a mutable reference to the underlying reader that this decoder is 41 | /// wrapping. 42 | /// 43 | /// Note that care must be taken to avoid tampering with the state of the reader which 44 | /// may otherwise confuse this decoder. 
45 | pub fn get_mut(&mut self) -> &mut $inner { 46 | self.inner.get_mut() 47 | } 48 | 49 | /// Acquires a pinned mutable reference to the underlying reader that this decoder is 50 | /// wrapping. 51 | /// 52 | /// Note that care must be taken to avoid tampering with the state of the reader which 53 | /// may otherwise confuse this decoder. 54 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 55 | self.project().inner.get_pin_mut() 56 | } 57 | 58 | /// Consumes this decoder returning the underlying reader. 59 | /// 60 | /// Note that this may discard internal state of this decoder, so care should be taken 61 | /// to avoid losing resources when this is called. 62 | pub fn into_inner(self) -> $inner { 63 | self.inner.into_inner() 64 | } 65 | } 66 | 67 | impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncRead for $name<$inner> { 68 | fn poll_read( 69 | self: std::pin::Pin<&mut Self>, 70 | cx: &mut std::task::Context<'_>, 71 | buf: &mut tokio::io::ReadBuf<'_>, 72 | ) -> std::task::Poll> { 73 | self.project().inner.poll_read(cx, buf) 74 | } 75 | } 76 | 77 | impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { 78 | fn poll_write( 79 | self: std::pin::Pin<&mut Self>, 80 | cx: &mut std::task::Context<'_>, 81 | buf: &[u8], 82 | ) -> std::task::Poll> { 83 | self.get_pin_mut().poll_write(cx, buf) 84 | } 85 | 86 | fn poll_flush( 87 | self: std::pin::Pin<&mut Self>, 88 | cx: &mut std::task::Context<'_>, 89 | ) -> std::task::Poll> { 90 | self.get_pin_mut().poll_flush(cx) 91 | } 92 | 93 | fn poll_shutdown( 94 | self: std::pin::Pin<&mut Self>, 95 | cx: &mut std::task::Context<'_>, 96 | ) -> std::task::Poll> { 97 | self.get_pin_mut().poll_shutdown(cx) 98 | } 99 | 100 | fn poll_write_vectored( 101 | self: std::pin::Pin<&mut Self>, 102 | cx: &mut std::task::Context<'_>, 103 | bufs: &[std::io::IoSlice<'_>], 104 | ) -> std::task::Poll> { 105 | self.get_pin_mut().poll_write_vectored(cx, bufs) 106 | } 107 | 108 | fn 
is_write_vectored(&self) -> bool { 109 | self.get_ref().is_write_vectored() 110 | } 111 | } 112 | 113 | const _: () = { 114 | fn _assert() { 115 | use crate::util::{_assert_send, _assert_sync}; 116 | use core::pin::Pin; 117 | use tokio::io::AsyncBufRead; 118 | 119 | _assert_send::<$name>>>(); 120 | _assert_sync::<$name>>>(); 121 | } 122 | }; 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/tokio/bufread/macros/encoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! encoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will 7 | /// read uncompressed data from an underlying stream and emit a stream of compressed data. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::tokio::bufread::Encoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: tokio::io::AsyncBufRead> $name<$inner> { 16 | $( 17 | /// Creates a new encoder which will read uncompressed data from the given stream 18 | /// and emit a compressed stream. 19 | /// 20 | $($inherent_methods)* 21 | )* 22 | } 23 | 24 | impl<$inner> $name<$inner> { 25 | /// Acquires a reference to the underlying reader that this encoder is wrapping. 26 | pub fn get_ref(&self) -> &$inner { 27 | self.inner.get_ref() 28 | } 29 | 30 | /// Acquires a mutable reference to the underlying reader that this encoder is 31 | /// wrapping. 32 | /// 33 | /// Note that care must be taken to avoid tampering with the state of the reader which 34 | /// may otherwise confuse this encoder. 35 | pub fn get_mut(&mut self) -> &mut $inner { 36 | self.inner.get_mut() 37 | } 38 | 39 | /// Acquires a pinned mutable reference to the underlying reader that this encoder is 40 | /// wrapping. 
41 | /// 42 | /// Note that care must be taken to avoid tampering with the state of the reader which 43 | /// may otherwise confuse this encoder. 44 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 45 | self.project().inner.get_pin_mut() 46 | } 47 | 48 | /// Consumes this encoder returning the underlying reader. 49 | /// 50 | /// Note that this may discard internal state of this encoder, so care should be taken 51 | /// to avoid losing resources when this is called. 52 | pub fn into_inner(self) -> $inner { 53 | self.inner.into_inner() 54 | } 55 | } 56 | 57 | impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncRead for $name<$inner> { 58 | fn poll_read( 59 | self: std::pin::Pin<&mut Self>, 60 | cx: &mut std::task::Context<'_>, 61 | buf: &mut tokio::io::ReadBuf<'_>, 62 | ) -> std::task::Poll> { 63 | self.project().inner.poll_read(cx, buf) 64 | } 65 | } 66 | 67 | impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { 68 | fn poll_write( 69 | self: std::pin::Pin<&mut Self>, 70 | cx: &mut std::task::Context<'_>, 71 | buf: &[u8], 72 | ) -> std::task::Poll> { 73 | self.get_pin_mut().poll_write(cx, buf) 74 | } 75 | 76 | fn poll_flush( 77 | self: std::pin::Pin<&mut Self>, 78 | cx: &mut std::task::Context<'_>, 79 | ) -> std::task::Poll> { 80 | self.get_pin_mut().poll_flush(cx) 81 | } 82 | 83 | fn poll_shutdown( 84 | self: std::pin::Pin<&mut Self>, 85 | cx: &mut std::task::Context<'_>, 86 | ) -> std::task::Poll> { 87 | self.get_pin_mut().poll_shutdown(cx) 88 | } 89 | 90 | fn poll_write_vectored( 91 | self: std::pin::Pin<&mut Self>, 92 | cx: &mut std::task::Context<'_>, 93 | bufs: &[std::io::IoSlice<'_>], 94 | ) -> std::task::Poll> { 95 | self.get_pin_mut().poll_write_vectored(cx, bufs) 96 | } 97 | 98 | fn is_write_vectored(&self) -> bool { 99 | self.get_ref().is_write_vectored() 100 | } 101 | } 102 | 103 | const _: () = { 104 | fn _assert() { 105 | use crate::util::{_assert_send, _assert_sync}; 106 | use 
core::pin::Pin; 107 | use tokio::io::AsyncBufRead; 108 | 109 | _assert_send::<$name>>>(); 110 | _assert_sync::<$name>>>(); 111 | } 112 | }; 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/tokio/bufread/macros/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod decoder; 3 | #[macro_use] 4 | mod encoder; 5 | -------------------------------------------------------------------------------- /src/tokio/bufread/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types which operate over [`AsyncBufRead`](::tokio::io::AsyncBufRead) streams, both encoders and 2 | //! decoders for various formats. 3 | 4 | #[macro_use] 5 | mod macros; 6 | mod generic; 7 | 8 | pub(crate) use generic::{Decoder, Encoder}; 9 | 10 | algos!(tokio::bufread); 11 | -------------------------------------------------------------------------------- /src/tokio/mod.rs: -------------------------------------------------------------------------------- 1 | //! Implementations for IO traits exported by [`tokio` v1.x](::tokio). 2 | 3 | pub mod bufread; 4 | pub mod write; 5 | -------------------------------------------------------------------------------- /src/tokio/write/buf_write.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | pub(crate) trait AsyncBufWrite { 8 | /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if 9 | /// it is full. 10 | /// 11 | /// On success, returns `Poll::Ready(Ok(buf))`. 12 | /// 13 | /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and 14 | /// arranges for the current task context (`cx`) to receive a notification when the object 15 | /// becomes readable or is closed. 
16 | fn poll_partial_flush_buf( 17 | self: Pin<&mut Self>, 18 | cx: &mut Context<'_>, 19 | ) -> Poll>; 20 | 21 | /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be 22 | /// written out to the underlying IO when possible. 23 | /// 24 | /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to 25 | /// function properly. This function does not perform any I/O, it simply informs this object 26 | /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should 27 | /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't 28 | /// called before calling it. 29 | /// 30 | /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. 31 | fn produce(self: Pin<&mut Self>, amt: usize); 32 | } 33 | -------------------------------------------------------------------------------- /src/tokio/write/generic/mod.rs: -------------------------------------------------------------------------------- 1 | mod decoder; 2 | mod encoder; 3 | 4 | pub use self::{decoder::Decoder, encoder::Encoder}; 5 | -------------------------------------------------------------------------------- /src/tokio/write/macros/decoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! decoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will 7 | /// take in compressed data and write it uncompressed to an underlying stream. 
8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::tokio::write::Decoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: tokio::io::AsyncWrite> $name<$inner> { 16 | /// Creates a new decoder which will take in compressed data and write it uncompressed 17 | /// to the given stream. 18 | pub fn new(read: $inner) -> $name<$inner> { 19 | $name { 20 | inner: crate::tokio::write::Decoder::new(read, crate::codec::$name::new()), 21 | } 22 | } 23 | 24 | $($($inherent_methods)*)* 25 | } 26 | 27 | impl<$inner> $name<$inner> { 28 | /// Acquires a reference to the underlying reader that this decoder is wrapping. 29 | pub fn get_ref(&self) -> &$inner { 30 | self.inner.get_ref() 31 | } 32 | 33 | /// Acquires a mutable reference to the underlying reader that this decoder is 34 | /// wrapping. 35 | /// 36 | /// Note that care must be taken to avoid tampering with the state of the reader which 37 | /// may otherwise confuse this decoder. 38 | pub fn get_mut(&mut self) -> &mut $inner { 39 | self.inner.get_mut() 40 | } 41 | 42 | /// Acquires a pinned mutable reference to the underlying reader that this decoder is 43 | /// wrapping. 44 | /// 45 | /// Note that care must be taken to avoid tampering with the state of the reader which 46 | /// may otherwise confuse this decoder. 47 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 48 | self.project().inner.get_pin_mut() 49 | } 50 | 51 | /// Consumes this decoder returning the underlying reader. 52 | /// 53 | /// Note that this may discard internal state of this decoder, so care should be taken 54 | /// to avoid losing resources when this is called. 
55 | pub fn into_inner(self) -> $inner { 56 | self.inner.into_inner() 57 | } 58 | } 59 | 60 | impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { 61 | fn poll_write( 62 | self: std::pin::Pin<&mut Self>, 63 | cx: &mut std::task::Context<'_>, 64 | buf: &[u8], 65 | ) -> std::task::Poll> { 66 | self.project().inner.poll_write(cx, buf) 67 | } 68 | 69 | fn poll_flush( 70 | self: std::pin::Pin<&mut Self>, 71 | cx: &mut std::task::Context<'_>, 72 | ) -> std::task::Poll> { 73 | self.project().inner.poll_flush(cx) 74 | } 75 | 76 | fn poll_shutdown( 77 | self: std::pin::Pin<&mut Self>, 78 | cx: &mut std::task::Context<'_>, 79 | ) -> std::task::Poll> { 80 | self.project().inner.poll_shutdown(cx) 81 | } 82 | } 83 | 84 | impl<$inner: tokio::io::AsyncRead> tokio::io::AsyncRead for $name<$inner> { 85 | fn poll_read( 86 | self: std::pin::Pin<&mut Self>, 87 | cx: &mut std::task::Context<'_>, 88 | buf: &mut tokio::io::ReadBuf<'_>, 89 | ) -> std::task::Poll> { 90 | self.get_pin_mut().poll_read(cx, buf) 91 | } 92 | } 93 | 94 | impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncBufRead for $name<$inner> { 95 | fn poll_fill_buf( 96 | self: std::pin::Pin<&mut Self>, 97 | cx: &mut std::task::Context<'_> 98 | ) -> std::task::Poll> { 99 | self.get_pin_mut().poll_fill_buf(cx) 100 | } 101 | 102 | fn consume(self: std::pin::Pin<&mut Self>, amt: usize) { 103 | self.get_pin_mut().consume(amt) 104 | } 105 | } 106 | 107 | const _: () = { 108 | fn _assert() { 109 | use crate::util::{_assert_send, _assert_sync}; 110 | use core::pin::Pin; 111 | use tokio::io::AsyncWrite; 112 | 113 | _assert_send::<$name>>>(); 114 | _assert_sync::<$name>>>(); 115 | } 116 | }; 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/tokio/write/macros/encoder.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
encoder { 2 | ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { 3 | pin_project_lite::pin_project! { 4 | $(#[$attr])* 5 | /// 6 | /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will 7 | /// take in uncompressed data and write it compressed to an underlying stream. 8 | #[derive(Debug)] 9 | pub struct $name<$inner> { 10 | #[pin] 11 | inner: crate::tokio::write::Encoder<$inner, crate::codec::$name>, 12 | } 13 | } 14 | 15 | impl<$inner: tokio::io::AsyncWrite> $name<$inner> { 16 | $( 17 | /// Creates a new encoder which will take in uncompressed data and write it 18 | /// compressed to the given stream. 19 | /// 20 | $($inherent_methods)* 21 | )* 22 | } 23 | 24 | impl<$inner> $name<$inner> { 25 | /// Acquires a reference to the underlying writer that this encoder is wrapping. 26 | pub fn get_ref(&self) -> &$inner { 27 | self.inner.get_ref() 28 | } 29 | 30 | /// Acquires a mutable reference to the underlying writer that this encoder is 31 | /// wrapping. 32 | /// 33 | /// Note that care must be taken to avoid tampering with the state of the writer which 34 | /// may otherwise confuse this encoder. 35 | pub fn get_mut(&mut self) -> &mut $inner { 36 | self.inner.get_mut() 37 | } 38 | 39 | /// Acquires a pinned mutable reference to the underlying writer that this encoder is 40 | /// wrapping. 41 | /// 42 | /// Note that care must be taken to avoid tampering with the state of the writer which 43 | /// may otherwise confuse this encoder. 44 | pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { 45 | self.project().inner.get_pin_mut() 46 | } 47 | 48 | /// Consumes this encoder returning the underlying writer. 49 | /// 50 | /// Note that this may discard internal state of this encoder, so care should be taken 51 | /// to avoid losing resources when this is called. 
52 | pub fn into_inner(self) -> $inner { 53 | self.inner.into_inner() 54 | } 55 | } 56 | 57 | impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { 58 | fn poll_write( 59 | self: std::pin::Pin<&mut Self>, 60 | cx: &mut std::task::Context<'_>, 61 | buf: &[u8], 62 | ) -> std::task::Poll> { 63 | self.project().inner.poll_write(cx, buf) 64 | } 65 | 66 | fn poll_flush( 67 | self: std::pin::Pin<&mut Self>, 68 | cx: &mut std::task::Context<'_>, 69 | ) -> std::task::Poll> { 70 | self.project().inner.poll_flush(cx) 71 | } 72 | 73 | fn poll_shutdown( 74 | self: std::pin::Pin<&mut Self>, 75 | cx: &mut std::task::Context<'_>, 76 | ) -> std::task::Poll> { 77 | self.project().inner.poll_shutdown(cx) 78 | } 79 | } 80 | 81 | impl<$inner: tokio::io::AsyncRead> tokio::io::AsyncRead for $name<$inner> { 82 | fn poll_read( 83 | self: std::pin::Pin<&mut Self>, 84 | cx: &mut std::task::Context<'_>, 85 | buf: &mut tokio::io::ReadBuf<'_>, 86 | ) -> std::task::Poll> { 87 | self.get_pin_mut().poll_read(cx, buf) 88 | } 89 | } 90 | 91 | impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncBufRead for $name<$inner> { 92 | fn poll_fill_buf( 93 | self: std::pin::Pin<&mut Self>, 94 | cx: &mut std::task::Context<'_> 95 | ) -> std::task::Poll> { 96 | self.get_pin_mut().poll_fill_buf(cx) 97 | } 98 | 99 | fn consume(self: std::pin::Pin<&mut Self>, amt: usize) { 100 | self.get_pin_mut().consume(amt) 101 | } 102 | } 103 | 104 | const _: () = { 105 | fn _assert() { 106 | use crate::util::{_assert_send, _assert_sync}; 107 | use core::pin::Pin; 108 | use tokio::io::AsyncWrite; 109 | 110 | _assert_send::<$name>>>(); 111 | _assert_sync::<$name>>>(); 112 | } 113 | }; 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/tokio/write/macros/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod decoder; 3 | #[macro_use] 4 | mod encoder; 5 | 
-------------------------------------------------------------------------------- /src/tokio/write/mod.rs: -------------------------------------------------------------------------------- 1 | //! Types which operate over [`AsyncWrite`](tokio::io::AsyncWrite) streams, both encoders and 2 | //! decoders for various formats. 3 | 4 | #[macro_use] 5 | mod macros; 6 | mod generic; 7 | 8 | mod buf_write; 9 | mod buf_writer; 10 | 11 | use self::{ 12 | buf_write::AsyncBufWrite, 13 | buf_writer::BufWriter, 14 | generic::{Decoder, Encoder}, 15 | }; 16 | 17 | algos!(tokio::write); 18 | -------------------------------------------------------------------------------- /src/unshared.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] // unused without any features 2 | 3 | use core::fmt::{self, Debug}; 4 | 5 | /// Wraps a type and only allows unique borrowing, the main use case is to wrap a `!Sync` type and 6 | /// implement `Sync` for it as this type blocks having multiple shared references to the inner 7 | /// value. 8 | /// 9 | /// # Safety 10 | /// 11 | /// We must be careful when accessing `inner`, there must be no way to create a shared reference to 12 | /// it from a shared reference to an `Unshared`, as that would allow creating shared references on 13 | /// multiple threads. 14 | /// 15 | /// As an example deriving or implementing `Clone` is impossible, two threads could attempt to 16 | /// clone a shared `Unshared` reference which would result in accessing the same inner value 17 | /// concurrently. 
18 | pub struct Unshared { 19 | inner: T, 20 | } 21 | 22 | impl Unshared { 23 | pub fn new(inner: T) -> Self { 24 | Unshared { inner } 25 | } 26 | 27 | pub fn get_mut(&mut self) -> &mut T { 28 | &mut self.inner 29 | } 30 | } 31 | 32 | /// Safety: See comments on main docs for `Unshared` 33 | unsafe impl Sync for Unshared {} 34 | 35 | impl Debug for Unshared { 36 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 37 | f.debug_struct(core::any::type_name::()).finish() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | pub fn _assert_send() {} 2 | pub fn _assert_sync() {} 3 | 4 | #[derive(Debug, Default)] 5 | pub struct PartialBuffer> { 6 | buffer: B, 7 | index: usize, 8 | } 9 | 10 | impl> PartialBuffer { 11 | pub(crate) fn new(buffer: B) -> Self { 12 | Self { buffer, index: 0 } 13 | } 14 | 15 | pub(crate) fn written(&self) -> &[u8] { 16 | &self.buffer.as_ref()[..self.index] 17 | } 18 | 19 | pub(crate) fn unwritten(&self) -> &[u8] { 20 | &self.buffer.as_ref()[self.index..] 21 | } 22 | 23 | pub(crate) fn advance(&mut self, amount: usize) { 24 | self.index += amount; 25 | } 26 | 27 | pub(crate) fn get_mut(&mut self) -> &mut B { 28 | &mut self.buffer 29 | } 30 | 31 | pub(crate) fn into_inner(self) -> B { 32 | self.buffer 33 | } 34 | 35 | pub(crate) fn reset(&mut self) { 36 | self.index = 0; 37 | } 38 | } 39 | 40 | impl + AsMut<[u8]>> PartialBuffer { 41 | pub(crate) fn unwritten_mut(&mut self) -> &mut [u8] { 42 | &mut self.buffer.as_mut()[self.index..] 
43 | } 44 | 45 | pub(crate) fn copy_unwritten_from>( 46 | &mut self, 47 | other: &mut PartialBuffer, 48 | ) -> usize { 49 | let len = std::cmp::min(self.unwritten().len(), other.unwritten().len()); 50 | 51 | self.unwritten_mut()[..len].copy_from_slice(&other.unwritten()[..len]); 52 | 53 | self.advance(len); 54 | other.advance(len); 55 | len 56 | } 57 | } 58 | 59 | impl + Default> PartialBuffer { 60 | pub(crate) fn take(&mut self) -> Self { 61 | std::mem::replace(self, Self::new(B::default())) 62 | } 63 | } 64 | 65 | impl + AsMut<[u8]>> From for PartialBuffer { 66 | fn from(buffer: B) -> Self { 67 | Self::new(buffer) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/zstd.rs: -------------------------------------------------------------------------------- 1 | //! This module contains zstd-specific types for async-compression. 2 | 3 | /// A compression parameter for zstd. This is a stable wrapper around zstd's own `CParameter` 4 | /// type, to abstract over different versions of the zstd library. 5 | /// 6 | /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more 7 | /// information on these parameters. 
8 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 9 | pub struct CParameter(libzstd::stream::raw::CParameter); 10 | 11 | impl CParameter { 12 | /// Window size in bytes (as a power of two) 13 | pub fn window_log(value: u32) -> Self { 14 | Self(libzstd::stream::raw::CParameter::WindowLog(value)) 15 | } 16 | 17 | /// Size of the initial probe table in 4-byte entries (as a power of two) 18 | pub fn hash_log(value: u32) -> Self { 19 | Self(libzstd::stream::raw::CParameter::HashLog(value)) 20 | } 21 | 22 | /// Size of the multi-probe table in 4-byte entries (as a power of two) 23 | pub fn chain_log(value: u32) -> Self { 24 | Self(libzstd::stream::raw::CParameter::ChainLog(value)) 25 | } 26 | 27 | /// Number of search attempts (as a power of two) 28 | pub fn search_log(value: u32) -> Self { 29 | Self(libzstd::stream::raw::CParameter::SearchLog(value)) 30 | } 31 | 32 | /// Minimum size of matches searched for 33 | pub fn min_match(value: u32) -> Self { 34 | Self(libzstd::stream::raw::CParameter::MinMatch(value)) 35 | } 36 | 37 | /// Strategy-dependent length modifier 38 | pub fn target_length(value: u32) -> Self { 39 | Self(libzstd::stream::raw::CParameter::TargetLength(value)) 40 | } 41 | 42 | /// Enable long-distance matching mode to look for and emit long-distance references. 43 | /// 44 | /// This increases the default window size. 
45 | pub fn enable_long_distance_matching(value: bool) -> Self { 46 | Self(libzstd::stream::raw::CParameter::EnableLongDistanceMatching(value)) 47 | } 48 | 49 | /// Size of the long-distance matching table (as a power of two) 50 | pub fn ldm_hash_log(value: u32) -> Self { 51 | Self(libzstd::stream::raw::CParameter::LdmHashLog(value)) 52 | } 53 | 54 | /// Minimum size of long-distance matches searched for 55 | pub fn ldm_min_match(value: u32) -> Self { 56 | Self(libzstd::stream::raw::CParameter::LdmMinMatch(value)) 57 | } 58 | 59 | /// Size of each bucket in the LDM hash table for collision resolution (as a power of two) 60 | pub fn ldm_bucket_size_log(value: u32) -> Self { 61 | Self(libzstd::stream::raw::CParameter::LdmBucketSizeLog(value)) 62 | } 63 | 64 | /// Frequency of using the LDM hash table (as a power of two) 65 | pub fn ldm_hash_rate_log(value: u32) -> Self { 66 | Self(libzstd::stream::raw::CParameter::LdmHashRateLog(value)) 67 | } 68 | 69 | /// Emit the size of the content (default: true). 70 | pub fn content_size_flag(value: bool) -> Self { 71 | Self(libzstd::stream::raw::CParameter::ContentSizeFlag(value)) 72 | } 73 | 74 | /// Emit a checksum (default: false). 75 | pub fn checksum_flag(value: bool) -> Self { 76 | Self(libzstd::stream::raw::CParameter::ChecksumFlag(value)) 77 | } 78 | 79 | /// Emit a dictionary ID when using a custom dictionary (default: true). 80 | pub fn dict_id_flag(value: bool) -> Self { 81 | Self(libzstd::stream::raw::CParameter::DictIdFlag(value)) 82 | } 83 | 84 | /// Number of threads to spawn. 85 | /// 86 | /// If set to 0, compression functions will block; if set to 1 or more, compression will 87 | /// run in background threads and `flush` pushes bytes through the compressor. 88 | /// 89 | /// # Panics 90 | /// 91 | /// This parameter requires feature `zstdmt` to be enabled, otherwise it will cause a panic 92 | /// when used in `ZstdEncoder::with_quality_and_params()` calls. 
93 | // 94 | // TODO: make this a normal feature guarded fn on next breaking release 95 | #[cfg_attr(docsrs, doc(cfg(feature = "zstdmt")))] 96 | pub fn nb_workers(value: u32) -> Self { 97 | Self(libzstd::stream::raw::CParameter::NbWorkers(value)) 98 | } 99 | 100 | /// Number of bytes given to each worker. 101 | /// 102 | /// If set to 0, zstd selects a job size based on compression parameters. 103 | pub fn job_size(value: u32) -> Self { 104 | Self(libzstd::stream::raw::CParameter::JobSize(value)) 105 | } 106 | 107 | pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::CParameter { 108 | self.0 109 | } 110 | } 111 | 112 | /// A decompression parameter for zstd. This is a stable wrapper around zstd's own `DParameter` 113 | /// type, to abstract over different versions of the zstd library. 114 | /// 115 | /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more 116 | /// information on these parameters. 117 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 118 | pub struct DParameter(libzstd::stream::raw::DParameter); 119 | 120 | impl DParameter { 121 | /// Maximum window size in bytes (as a power of two) 122 | pub fn window_log_max(value: u32) -> Self { 123 | Self(libzstd::stream::raw::DParameter::WindowLogMax(value)) 124 | } 125 | 126 | pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::DParameter { 127 | self.0 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /tests/artifacts/dictionary-rust: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nullus157/async-compression/aa03267e075d43a6a201a6d7fe573f682f7a65c9/tests/artifacts/dictionary-rust -------------------------------------------------------------------------------- /tests/artifacts/dictionary-rust-other: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Nullus157/async-compression/aa03267e075d43a6a201a6d7fe573f682f7a65c9/tests/artifacts/dictionary-rust-other
--------------------------------------------------------------------------------
/tests/artifacts/lib.rs.zst:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nullus157/async-compression/aa03267e075d43a6a201a6d7fe573f682f7a65c9/tests/artifacts/lib.rs.zst
--------------------------------------------------------------------------------
/tests/artifacts/long-window-size-lib.rs.zst:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nullus157/async-compression/aa03267e075d43a6a201a6d7fe573f682f7a65c9/tests/artifacts/long-window-size-lib.rs.zst
--------------------------------------------------------------------------------
/tests/brotli.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(brotli);
--------------------------------------------------------------------------------
/tests/bzip2.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(bzip2);
--------------------------------------------------------------------------------
/tests/deflate.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(deflate);
--------------------------------------------------------------------------------
/tests/gzip.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(gzip);

#[allow(unused)]
use utils::{algos::gzip::sync, InputStream};

#[cfg(feature = "futures-io")]
use utils::algos::gzip::futures::bufread;

/// Gzip-compress `data` with a header that carries a filename, a comment and
/// an extra field, so the decoder tests below exercise the header-skipping
/// paths of the gzip decoder.
//
// Fix: the dumped signature read `-> Vec {` — the `<u8>` element type was
// stripped during extraction; restored to `Vec<u8>`.
#[allow(unused)]
fn compress_with_header(data: &[u8]) -> Vec<u8> {
    use flate2::{Compression, GzBuilder};
    use std::io::Write;

    let mut bytes = Vec::new();
    {
        let mut gz = GzBuilder::new()
            .filename("hello_world.txt")
            .comment("test file, please delete")
            .extra(vec![1, 2, 3, 4])
            .write(&mut bytes, Compression::fast());

        gz.write_all(data).unwrap();
    }

    bytes
}

#[test]
#[ntest::timeout(1000)]
#[cfg(feature = "futures-io")]
fn gzip_bufread_decompress_with_extra_header() {
    let bytes = compress_with_header(&[1, 2, 3, 4, 5, 6]);

    let input = InputStream::from(vec![bytes]);
    let output = bufread::decompress(bufread::from(&input));

    assert_eq!(output, &[1, 2, 3, 4, 5, 6][..]);
}

#[test]
#[ntest::timeout(1000)]
#[cfg(feature = "futures-io")]
fn gzip_bufread_chunks_decompress_with_extra_header() {
    let bytes = compress_with_header(&[1, 2, 3, 4, 5, 6]);

    // Feed the compressed stream two bytes at a time to exercise header
    // parsing across chunk boundaries.
    let input = InputStream::from(bytes.chunks(2));
    let output = bufread::decompress(bufread::from(&input));

    assert_eq!(output, &[1, 2, 3, 4, 5, 6][..]);
}
--------------------------------------------------------------------------------
/tests/lz4.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(lz4);
--------------------------------------------------------------------------------
/tests/lzma.rs:
--------------------------------------------------------------------------------
#[macro_use]
mod utils;

test_cases!(lzma);
--------------------------------------------------------------------------------
/tests/proptest.proptest-regressions:
--------------------------------------------------------------------------------
# Seeds for failure cases proptest has generated in the past.
It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 56136f76bf926382b1bf5a24304af3826b03e674763b5c99d41496e40f56c1c3 # shrinks to ref input = [0, 0, 0, 0, 0, 0, 0], chunk_size = 1 8 | cc e6c96e0924384950a67cc69e8ce9d7da5a11873cac62705241d7176ba9b20875 # shrinks to ref input = InputStream([]), level = Precise(11) 9 | -------------------------------------------------------------------------------- /tests/proptest.rs: -------------------------------------------------------------------------------- 1 | use async_compression::Level; 2 | 3 | use ::proptest::{ 4 | arbitrary::any, 5 | prop_oneof, 6 | strategy::{Just, Strategy}, 7 | }; 8 | 9 | mod utils; 10 | 11 | #[allow(dead_code)] 12 | fn any_level() -> impl Strategy { 13 | prop_oneof![ 14 | Just(Level::Fastest), 15 | Just(Level::Best), 16 | Just(Level::Default), 17 | any::().prop_map(Level::Precise), 18 | ] 19 | } 20 | 21 | #[allow(unused_macros)] 22 | macro_rules! io_tests { 23 | ($impl:ident, $variant:ident) => { 24 | mod $impl { 25 | mod bufread { 26 | use crate::utils::{algos::$variant::{$impl::{read, bufread}, sync}, InputStream}; 27 | use proptest::{prelude::{any, ProptestConfig}, proptest}; 28 | use std::iter::FromIterator; 29 | 30 | proptest! 
{ 31 | #[test] 32 | fn compress(ref input in any::()) { 33 | let compressed = bufread::compress(bufread::from(input)); 34 | let output = sync::decompress(&compressed); 35 | assert_eq!(output, input.bytes()); 36 | } 37 | 38 | #[test] 39 | fn decompress( 40 | ref bytes in any::>(), 41 | chunk_size in 1..20usize, 42 | ) { 43 | let compressed = sync::compress(bytes); 44 | let input = InputStream::from(Vec::from_iter(compressed.chunks(chunk_size).map(Vec::from))); 45 | let output = bufread::decompress(bufread::from(&input)); 46 | assert_eq!(&output, bytes); 47 | } 48 | } 49 | 50 | proptest! { 51 | #![proptest_config(ProptestConfig::with_cases(32))] 52 | 53 | #[test] 54 | fn compress_with_level( 55 | ref input in any::(), 56 | level in crate::any_level(), 57 | ) { 58 | let encoder = bufread::Encoder::with_quality(bufread::from(input), level); 59 | let compressed = read::to_vec(encoder); 60 | let output = sync::decompress(&compressed); 61 | assert_eq!(output, input.bytes()); 62 | } 63 | } 64 | } 65 | 66 | mod write { 67 | use crate::utils::{algos::$variant::{$impl::write, sync}, InputStream}; 68 | use proptest::{prelude::{any, ProptestConfig}, proptest}; 69 | 70 | proptest! { 71 | #[test] 72 | fn compress( 73 | ref input in any::(), 74 | limit in 1..20usize, 75 | ) { 76 | let compressed = write::compress(input.as_ref(), limit); 77 | let output = sync::decompress(&compressed); 78 | assert_eq!(output, input.bytes()); 79 | } 80 | } 81 | 82 | proptest! 
{ 83 | #![proptest_config(ProptestConfig::with_cases(32))] 84 | 85 | #[test] 86 | fn compress_with_level( 87 | ref input in any::(), 88 | limit in 1..20usize, 89 | level in crate::any_level(), 90 | ) { 91 | let compressed = write::to_vec( 92 | input.as_ref(), 93 | |input| Box::pin(write::Encoder::with_quality(input, level)), 94 | limit, 95 | ); 96 | let output = sync::decompress(&compressed); 97 | assert_eq!(output, input.bytes()); 98 | } 99 | } 100 | } 101 | } 102 | } 103 | } 104 | 105 | #[allow(unused_macros)] 106 | macro_rules! tests { 107 | ($variant:ident) => { 108 | mod $variant { 109 | #[cfg(feature = "futures-io")] 110 | io_tests!(futures, $variant); 111 | 112 | #[cfg(feature = "tokio")] 113 | io_tests!(tokio, $variant); 114 | } 115 | }; 116 | } 117 | 118 | mod proptest { 119 | #[cfg(feature = "brotli")] 120 | tests!(brotli); 121 | 122 | #[cfg(feature = "bzip2")] 123 | tests!(bzip2); 124 | 125 | #[cfg(feature = "deflate")] 126 | tests!(deflate); 127 | 128 | #[cfg(feature = "gzip")] 129 | tests!(gzip); 130 | 131 | #[cfg(feature = "lz4")] 132 | tests!(lz4); 133 | 134 | #[cfg(feature = "lzma")] 135 | tests!(lzma); 136 | 137 | #[cfg(feature = "xz")] 138 | tests!(xz); 139 | 140 | #[cfg(feature = "zlib")] 141 | tests!(zlib); 142 | 143 | #[cfg(feature = "zstd")] 144 | tests!(zstd); 145 | } 146 | -------------------------------------------------------------------------------- /tests/utils/impls.rs: -------------------------------------------------------------------------------- 1 | pub mod sync { 2 | use std::io::Read; 3 | 4 | pub fn to_vec(mut read: impl Read) -> Vec { 5 | let mut output = vec![]; 6 | read.read_to_end(&mut output).unwrap(); 7 | output 8 | } 9 | } 10 | 11 | #[cfg(feature = "futures-io")] 12 | pub mod futures { 13 | pub mod bufread { 14 | pub use futures::io::AsyncBufRead; 15 | 16 | use crate::utils::{InputStream, TrackEof}; 17 | use futures::stream::{StreamExt as _, TryStreamExt as _}; 18 | 19 | pub fn from(input: &InputStream) -> impl AsyncBufRead 
{ 20 | // By using the stream here we ensure that each chunk will require a separate 21 | // read/poll_fill_buf call to process to help test reading multiple chunks. 22 | TrackEof::new(input.stream().map(Ok).into_async_read()) 23 | } 24 | } 25 | 26 | pub mod read { 27 | use crate::utils::{block_on, pin_mut}; 28 | use futures::io::{copy_buf, AsyncRead, AsyncReadExt, BufReader, Cursor}; 29 | 30 | pub fn to_vec(read: impl AsyncRead) -> Vec { 31 | // TODO: https://github.com/rust-lang-nursery/futures-rs/issues/1510 32 | // All current test cases are < 100kB 33 | let mut output = Cursor::new(vec![0; 102_400]); 34 | pin_mut!(read); 35 | let len = block_on(copy_buf(BufReader::with_capacity(2, read), &mut output)).unwrap(); 36 | let mut output = output.into_inner(); 37 | output.truncate(len as usize); 38 | output 39 | } 40 | 41 | pub fn poll_read(reader: impl AsyncRead, output: &mut [u8]) -> std::io::Result { 42 | pin_mut!(reader); 43 | block_on(reader.read(output)) 44 | } 45 | } 46 | 47 | pub mod write { 48 | use crate::utils::{block_on, Pin, TrackClosed}; 49 | use futures::io::{AsyncWrite, AsyncWriteExt as _}; 50 | use futures_test::io::AsyncWriteTestExt as _; 51 | 52 | pub fn to_vec( 53 | input: &[Vec], 54 | create_writer: impl for<'a> FnOnce( 55 | &'a mut (dyn AsyncWrite + Unpin), 56 | ) -> Pin>, 57 | limit: usize, 58 | ) -> Vec { 59 | let mut output = Vec::new(); 60 | { 61 | let mut test_writer = TrackClosed::new( 62 | (&mut output) 63 | .limited_write(limit) 64 | .interleave_pending_write(), 65 | ); 66 | { 67 | let mut writer = create_writer(&mut test_writer); 68 | for chunk in input { 69 | block_on(writer.write_all(chunk)).unwrap(); 70 | block_on(writer.flush()).unwrap(); 71 | } 72 | block_on(writer.close()).unwrap(); 73 | } 74 | assert!(test_writer.is_closed()); 75 | } 76 | output 77 | } 78 | } 79 | } 80 | 81 | #[cfg(feature = "tokio")] 82 | pub mod tokio { 83 | pub mod bufread { 84 | use crate::utils::{InputStream, TrackEof}; 85 | use bytes::Bytes; 86 | use 
futures::stream::StreamExt; 87 | pub use tokio::io::AsyncBufRead; 88 | use tokio_util::io::StreamReader; 89 | 90 | pub fn from(input: &InputStream) -> impl AsyncBufRead { 91 | // By using the stream here we ensure that each chunk will require a separate 92 | // read/poll_fill_buf call to process to help test reading multiple chunks. 93 | TrackEof::new(StreamReader::new( 94 | input.stream().map(Bytes::from).map(std::io::Result::Ok), 95 | )) 96 | } 97 | } 98 | 99 | pub mod read { 100 | use crate::utils::{block_on, pin_mut, tokio_ext::copy_buf}; 101 | use std::io::Cursor; 102 | use tokio::io::{AsyncRead, AsyncReadExt, BufReader}; 103 | 104 | pub fn to_vec(read: impl AsyncRead) -> Vec { 105 | let mut output = Cursor::new(vec![0; 102_400]); 106 | pin_mut!(read); 107 | let len = block_on(copy_buf(BufReader::with_capacity(2, read), &mut output)).unwrap(); 108 | let mut output = output.into_inner(); 109 | output.truncate(len as usize); 110 | output 111 | } 112 | 113 | pub fn poll_read(reader: impl AsyncRead, output: &mut [u8]) -> std::io::Result { 114 | pin_mut!(reader); 115 | block_on(reader.read(output)) 116 | } 117 | } 118 | 119 | pub mod write { 120 | use crate::utils::{ 121 | block_on, tokio_ext::AsyncWriteTestExt as _, track_closed::TrackClosed, Pin, 122 | }; 123 | use std::io::Cursor; 124 | use tokio::io::{AsyncWrite, AsyncWriteExt as _}; 125 | 126 | pub fn to_vec( 127 | input: &[Vec], 128 | create_writer: impl for<'a> FnOnce( 129 | &'a mut (dyn AsyncWrite + Unpin), 130 | ) -> Pin>, 131 | limit: usize, 132 | ) -> Vec { 133 | let mut output = Cursor::new(Vec::new()); 134 | { 135 | let mut test_writer = TrackClosed::new( 136 | (&mut output) 137 | .limited_write(limit) 138 | .interleave_pending_write(), 139 | ); 140 | { 141 | let mut writer = create_writer(&mut test_writer); 142 | for chunk in input { 143 | block_on(writer.write_all(chunk)).unwrap(); 144 | block_on(writer.flush()).unwrap(); 145 | } 146 | block_on(writer.shutdown()).unwrap(); 147 | } 148 | 
assert!(test_writer.is_closed()); 149 | } 150 | output.into_inner() 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /tests/utils/input_stream.rs: -------------------------------------------------------------------------------- 1 | use futures::stream::Stream; 2 | use futures_test::stream::StreamTestExt as _; 3 | use proptest_derive::Arbitrary; 4 | 5 | #[derive(Arbitrary, Debug, Clone)] 6 | pub struct InputStream(Vec>); 7 | 8 | impl InputStream { 9 | pub fn new(input: Vec>) -> Self { 10 | InputStream(input) 11 | } 12 | 13 | pub fn as_ref(&self) -> &[Vec] { 14 | &self.0 15 | } 16 | 17 | pub fn stream(&self) -> impl Stream> { 18 | // The resulting stream here will interleave empty chunks before and after each chunk, and 19 | // then interleave a `Poll::Pending` between each yielded chunk, that way we test the 20 | // handling of these two conditions in every point of the tested stream. 21 | futures::stream::iter( 22 | self.0 23 | .clone() 24 | .into_iter() 25 | .flat_map(|bytes| vec![vec![], bytes]) 26 | .chain(Some(vec![])), 27 | ) 28 | .interleave_pending() 29 | } 30 | 31 | pub fn bytes(&self) -> Vec { 32 | self.0.iter().flatten().cloned().collect() 33 | } 34 | 35 | pub fn len(&self) -> usize { 36 | self.0.iter().map(Vec::len).sum() 37 | } 38 | } 39 | 40 | impl From for InputStream 41 | where 42 | I: IntoIterator, 43 | I::Item: Into>, 44 | { 45 | fn from(input: I) -> InputStream { 46 | Self::new(input.into_iter().map(|b| b.into()).collect()) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tests/utils/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code, unused_imports, unused_macros)] // Different tests use a different subset of functions 2 | 3 | mod input_stream; 4 | #[cfg(feature = "tokio")] 5 | mod tokio_ext; 6 | mod track_closed; 7 | mod track_eof; 8 | #[macro_use] 9 | mod test_cases; 10 | 
pub mod algos;
pub mod impls;

pub use self::{input_stream::InputStream, track_closed::TrackClosed, track_eof::TrackEof};
pub use async_compression::Level;
pub use futures::{executor::block_on, pin_mut, stream::Stream};
pub use std::{future::Future, io::Result, iter::FromIterator, pin::Pin};

/// A two-chunk input stream carrying the bytes 1..=6.
pub fn one_to_six_stream() -> InputStream {
    InputStream::new(vec![vec![1, 2, 3], vec![4, 5, 6]])
}

/// The flattened byte sequence produced by [`one_to_six_stream`].
pub fn one_to_six() -> &'static [u8] {
    &[1, 2, 3, 4, 5, 6]
}
--------------------------------------------------------------------------------
/tests/utils/tokio_ext/copy_buf.rs:
--------------------------------------------------------------------------------
use core::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use futures::ready;
use tokio::io::{AsyncBufRead, AsyncWrite};

/// Copy everything from `reader` into `writer`, resolving to the number of
/// bytes copied (tokio counterpart of `futures::io::copy_buf`).
//
// Fix: the generic parameter lists were stripped during extraction
// (`copy_buf(reader: R, ...)` with undeclared `R`/`W`, `Poll {`,
// `std::io::Result;`); restored throughout this file.
pub fn copy_buf<R, W>(reader: R, writer: &mut W) -> CopyBuf<'_, R, W>
where
    R: AsyncBufRead + Unpin,
    W: AsyncWrite + Unpin + ?Sized,
{
    CopyBuf {
        reader,
        writer,
        amt: 0,
    }
}

/// Future returned by [`copy_buf`]; tracks the running byte count in `amt`.
#[derive(Debug)]
pub struct CopyBuf<'a, R, W: ?Sized> {
    reader: R,
    writer: &'a mut W,
    amt: u64,
}

impl<R, W> Future for CopyBuf<'_, R, W>
where
    R: AsyncBufRead + Unpin,
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = std::io::Result<u64>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;
        loop {
            let buffer = ready!(Pin::new(&mut this.reader).poll_fill_buf(cx))?;
            if buffer.is_empty() {
                // Source exhausted: flush buffered output before reporting the total.
                ready!(Pin::new(&mut this.writer).poll_flush(cx))?;
                return Poll::Ready(Ok(this.amt));
            }

            let i = ready!(Pin::new(&mut this.writer).poll_write(cx, buffer))?;
            if i == 0 {
                // A zero-length write would loop forever; surface it as an error.
                return Poll::Ready(Err(std::io::ErrorKind::WriteZero.into()));
            }
            this.amt += i as u64;
            Pin::new(&mut this.reader).consume(i);
        }
    }
}
--------------------------------------------------------------------------------
/tests/utils/tokio_ext/interleave_pending.rs:
--------------------------------------------------------------------------------
use std::{
    pin::Pin,
    task::{Context, Poll},
};

/// Writer wrapper that yields `Poll::Pending` (after scheduling a wakeup)
/// before every real operation on the inner writer, so tests exercise the
/// pending/wakeup paths of the code under test.
//
// Fix: restored the stripped generic parameter lists (`InterleavePending<T>`,
// the `impl<W: ...>` header, and the `Poll<std::io::Result<...>>` returns).
pub struct InterleavePending<T> {
    inner: T,
    // True when the previous call returned `Pending`; the next call is let through.
    pended: bool,
}

impl<T> InterleavePending<T> {
    pub(crate) fn new(inner: T) -> Self {
        Self {
            inner,
            pended: false,
        }
    }
}

impl<W: tokio::io::AsyncWrite + Unpin> tokio::io::AsyncWrite for InterleavePending<W> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_write(cx, buf);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_flush(cx);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_shutdown(cx);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }
}
--------------------------------------------------------------------------------
/tests/utils/tokio_ext/limited.rs:
--------------------------------------------------------------------------------
use std::{
    pin::Pin,
    task::{Context, Poll},
};

/// Writer wrapper that caps every `poll_write` at `limit` bytes, forcing
/// callers to handle short writes.
#[derive(Debug)]
pub struct Limited<Io> {
    io: Io,
    limit: usize,
}

impl<Io> Limited<Io> {
    pub(crate)
fn new(io: Io, limit: usize) -> Limited { 14 | Limited { io, limit } 15 | } 16 | } 17 | 18 | impl tokio::io::AsyncWrite for Limited { 19 | fn poll_write( 20 | mut self: Pin<&mut Self>, 21 | cx: &mut Context<'_>, 22 | buf: &[u8], 23 | ) -> Poll> { 24 | let limit = self.limit; 25 | Pin::new(&mut self.io).poll_write(cx, &buf[..std::cmp::min(limit, buf.len())]) 26 | } 27 | 28 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 29 | Pin::new(&mut self.io).poll_flush(cx) 30 | } 31 | 32 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 33 | Pin::new(&mut self.io).poll_shutdown(cx) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /tests/utils/tokio_ext/mod.rs: -------------------------------------------------------------------------------- 1 | mod copy_buf; 2 | mod interleave_pending; 3 | mod limited; 4 | 5 | pub use copy_buf::copy_buf; 6 | 7 | pub trait AsyncWriteTestExt: tokio::io::AsyncWrite { 8 | fn interleave_pending_write(self) -> interleave_pending::InterleavePending 9 | where 10 | Self: Sized + Unpin, 11 | { 12 | interleave_pending::InterleavePending::new(self) 13 | } 14 | 15 | fn limited_write(self, limit: usize) -> limited::Limited 16 | where 17 | Self: Sized + Unpin, 18 | { 19 | limited::Limited::new(self, limit) 20 | } 21 | } 22 | 23 | impl AsyncWriteTestExt for T {} 24 | -------------------------------------------------------------------------------- /tests/utils/track_closed.rs: -------------------------------------------------------------------------------- 1 | #[cfg_attr(not(feature = "all-implementations"), allow(unused))] 2 | use std::{ 3 | io::Result, 4 | pin::Pin, 5 | task::{Context, Poll}, 6 | }; 7 | 8 | pub struct TrackClosed { 9 | inner: W, 10 | closed: bool, 11 | } 12 | 13 | impl TrackClosed { 14 | pub fn new(inner: W) -> Self { 15 | Self { 16 | inner, 17 | closed: false, 18 | } 19 | } 20 | 21 | pub fn is_closed(&self) -> bool { 22 | self.closed 23 
| } 24 | } 25 | 26 | #[cfg(feature = "futures-io")] 27 | impl futures::io::AsyncWrite for TrackClosed { 28 | fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { 29 | assert!(!self.closed); 30 | Pin::new(&mut self.inner).poll_write(cx, buf) 31 | } 32 | 33 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 34 | assert!(!self.closed); 35 | Pin::new(&mut self.inner).poll_flush(cx) 36 | } 37 | 38 | fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 39 | assert!(!self.closed); 40 | match Pin::new(&mut self.inner).poll_close(cx) { 41 | Poll::Ready(Ok(())) => { 42 | self.closed = true; 43 | Poll::Ready(Ok(())) 44 | } 45 | other => other, 46 | } 47 | } 48 | 49 | fn poll_write_vectored( 50 | mut self: Pin<&mut Self>, 51 | cx: &mut Context, 52 | bufs: &[std::io::IoSlice], 53 | ) -> Poll> { 54 | assert!(!self.closed); 55 | Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) 56 | } 57 | } 58 | 59 | #[cfg(feature = "tokio")] 60 | impl tokio::io::AsyncWrite for TrackClosed { 61 | fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { 62 | assert!(!self.closed); 63 | Pin::new(&mut self.inner).poll_write(cx, buf) 64 | } 65 | 66 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 67 | assert!(!self.closed); 68 | Pin::new(&mut self.inner).poll_flush(cx) 69 | } 70 | 71 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 72 | assert!(!self.closed); 73 | match Pin::new(&mut self.inner).poll_shutdown(cx) { 74 | Poll::Ready(Ok(())) => { 75 | self.closed = true; 76 | Poll::Ready(Ok(())) 77 | } 78 | other => other, 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /tests/utils/track_eof.rs: -------------------------------------------------------------------------------- 1 | #[cfg_attr(not(feature = "all-implementations"), allow(unused))] 2 | use std::{ 3 | io::Result, 4 | pin::Pin, 5 | 
task::{Context, Poll}, 6 | }; 7 | 8 | pub struct TrackEof { 9 | inner: R, 10 | eof: bool, 11 | } 12 | 13 | impl TrackEof { 14 | pub fn new(inner: R) -> Self { 15 | Self { inner, eof: false } 16 | } 17 | 18 | pub fn project(self: Pin<&mut Self>) -> (Pin<&mut R>, &mut bool) { 19 | let Self { inner, eof } = Pin::into_inner(self); 20 | (Pin::new(inner), eof) 21 | } 22 | } 23 | 24 | #[cfg(feature = "futures-io")] 25 | impl futures::io::AsyncRead for TrackEof { 26 | fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { 27 | let (inner, eof) = self.project(); 28 | assert!(!*eof); 29 | match inner.poll_read(cx, buf) { 30 | Poll::Ready(Ok(0)) => { 31 | if !buf.is_empty() { 32 | *eof = true; 33 | } 34 | Poll::Ready(Ok(0)) 35 | } 36 | other => other, 37 | } 38 | } 39 | } 40 | 41 | #[cfg(feature = "futures-io")] 42 | impl futures::io::AsyncBufRead for TrackEof { 43 | fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 44 | let (inner, eof) = self.project(); 45 | assert!(!*eof); 46 | match inner.poll_fill_buf(cx) { 47 | Poll::Ready(Ok(buf)) => { 48 | if buf.is_empty() { 49 | *eof = true; 50 | } 51 | Poll::Ready(Ok(buf)) 52 | } 53 | other => other, 54 | } 55 | } 56 | 57 | fn consume(self: Pin<&mut Self>, amt: usize) { 58 | self.project().0.consume(amt) 59 | } 60 | } 61 | 62 | #[cfg(feature = "tokio")] 63 | impl tokio::io::AsyncRead for TrackEof { 64 | fn poll_read( 65 | self: Pin<&mut Self>, 66 | cx: &mut Context, 67 | buf: &mut tokio::io::ReadBuf, 68 | ) -> Poll> { 69 | let (inner, eof) = self.project(); 70 | assert!(!*eof); 71 | let len = buf.filled().len(); 72 | match inner.poll_read(cx, buf) { 73 | Poll::Ready(Ok(())) => { 74 | if buf.filled().len() == len && buf.remaining() > 0 { 75 | *eof = true; 76 | } 77 | Poll::Ready(Ok(())) 78 | } 79 | other => other, 80 | } 81 | } 82 | } 83 | 84 | #[cfg(feature = "tokio")] 85 | impl tokio::io::AsyncBufRead for TrackEof { 86 | fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 
87 | let (inner, eof) = self.project(); 88 | assert!(!*eof); 89 | match inner.poll_fill_buf(cx) { 90 | Poll::Ready(Ok(buf)) => { 91 | if buf.is_empty() { 92 | *eof = true; 93 | } 94 | Poll::Ready(Ok(buf)) 95 | } 96 | other => other, 97 | } 98 | } 99 | 100 | fn consume(self: Pin<&mut Self>, amt: usize) { 101 | self.project().0.consume(amt) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /tests/xz.rs: -------------------------------------------------------------------------------- 1 | #[allow(unused)] 2 | use futures::{executor::block_on, io::AsyncReadExt}; 3 | 4 | #[macro_use] 5 | mod utils; 6 | 7 | test_cases!(xz); 8 | 9 | #[allow(unused)] 10 | use utils::{algos::xz::sync, InputStream}; 11 | 12 | #[cfg(feature = "futures-io")] 13 | use utils::algos::xz::futures::{bufread, read}; 14 | 15 | #[test] 16 | #[ntest::timeout(1000)] 17 | #[cfg(feature = "futures-io")] 18 | fn bufread_multiple_members_with_padding() { 19 | let compressed = [ 20 | sync::compress(&[1, 2, 3, 4, 5, 6]), 21 | vec![0, 0, 0, 0], 22 | sync::compress(&[6, 5, 4, 3, 2, 1]), 23 | vec![0, 0, 0, 0], 24 | ] 25 | .join(&[][..]); 26 | 27 | let input = InputStream::from(vec![compressed]); 28 | 29 | let mut decoder = bufread::Decoder::new(bufread::from(&input)); 30 | decoder.multiple_members(true); 31 | let output = read::to_vec(decoder); 32 | 33 | assert_eq!(output, &[1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1][..]); 34 | } 35 | 36 | #[test] 37 | #[ntest::timeout(1000)] 38 | #[cfg(feature = "futures-io")] 39 | fn bufread_multiple_members_with_invalid_padding() { 40 | let compressed = [ 41 | sync::compress(&[1, 2, 3, 4, 5, 6]), 42 | vec![0, 0, 0], 43 | sync::compress(&[6, 5, 4, 3, 2, 1]), 44 | vec![0, 0, 0, 0], 45 | ] 46 | .join(&[][..]); 47 | 48 | let input = InputStream::from(vec![compressed]); 49 | 50 | let mut decoder = bufread::Decoder::new(bufread::from(&input)); 51 | decoder.multiple_members(true); 52 | 53 | let mut output = Vec::new(); 54 | 
assert!(block_on(decoder.read_to_end(&mut output)).is_err()); 55 | } 56 | -------------------------------------------------------------------------------- /tests/zlib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod utils; 3 | 4 | test_cases!(zlib); 5 | -------------------------------------------------------------------------------- /tests/zstd-dict.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(windows))] 2 | 3 | use tokio::io::AsyncWriteExt as _; 4 | 5 | #[tokio::test] 6 | async fn trained_zstd_decode_no_dict() { 7 | let compressed = include_bytes!("./artifacts/lib.rs.zst"); 8 | 9 | let mut decoder = async_compression::tokio::write::ZstdDecoder::new(Vec::new()); 10 | decoder.write_all(compressed).await.unwrap_err(); 11 | } 12 | 13 | #[tokio::test] 14 | async fn trained_zstd_decode_with_dict() { 15 | let source = include_bytes!("./artifacts/lib.rs"); 16 | let dict = include_bytes!("./artifacts/dictionary-rust"); 17 | let compressed = include_bytes!("./artifacts/lib.rs.zst"); 18 | 19 | let mut decoder = 20 | async_compression::tokio::write::ZstdDecoder::with_dict(Vec::new(), dict).unwrap(); 21 | decoder.write_all(compressed).await.unwrap(); 22 | decoder.shutdown().await.unwrap(); 23 | 24 | assert_eq!(decoder.into_inner(), source); 25 | } 26 | 27 | #[tokio::test] 28 | async fn trained_zstd_decode_with_wrong_dict() { 29 | let dict = include_bytes!("./artifacts/dictionary-rust-other"); 30 | let compressed = include_bytes!("./artifacts/lib.rs.zst"); 31 | 32 | let mut decoder = 33 | async_compression::tokio::write::ZstdDecoder::with_dict(Vec::new(), dict).unwrap(); 34 | decoder.write_all(compressed).await.unwrap_err(); 35 | } 36 | -------------------------------------------------------------------------------- /tests/zstd-window-size.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(windows))] 2 | 3 | use 
async_compression::zstd::DParameter; 4 | use tokio::io::AsyncWriteExt as _; 5 | 6 | #[tokio::test] 7 | async fn zstd_decode_large_window_size_default() { 8 | let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); 9 | 10 | // Default decoder should throw with an error, window size maximum is too low. 11 | let mut decoder = async_compression::tokio::write::ZstdDecoder::new(Vec::new()); 12 | decoder.write_all(compressed).await.unwrap_err(); 13 | } 14 | 15 | #[tokio::test] 16 | async fn zstd_decode_large_window_size_explicit_small_window_size() { 17 | let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); 18 | 19 | // Short window decoder should throw with an error, window size maximum is too low. 20 | let mut decoder = async_compression::tokio::write::ZstdDecoder::with_params( 21 | Vec::new(), 22 | &[DParameter::window_log_max(16)], 23 | ); 24 | decoder.write_all(compressed).await.unwrap_err(); 25 | } 26 | 27 | #[tokio::test] 28 | async fn zstd_decode_large_window_size_explicit_large_window_size() { 29 | let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); 30 | let source = include_bytes!("./artifacts/lib.rs"); 31 | 32 | // Long window decoder should succeed as the window size is large enough to decompress the given input. 33 | let mut long_window_size_decoder = async_compression::tokio::write::ZstdDecoder::with_params( 34 | Vec::new(), 35 | &[DParameter::window_log_max(31)], 36 | ); 37 | // Long window size decoder should successfully decode the given input data. 
38 | long_window_size_decoder 39 | .write_all(compressed) 40 | .await 41 | .unwrap(); 42 | long_window_size_decoder.shutdown().await.unwrap(); 43 | 44 | assert_eq!(long_window_size_decoder.into_inner(), source); 45 | } 46 | -------------------------------------------------------------------------------- /tests/zstd.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod utils; 3 | 4 | test_cases!(zstd); 5 | --------------------------------------------------------------------------------