├── .github └── workflows │ ├── asan.yml │ ├── build.yml │ ├── coverage.yml │ ├── lint.yml │ ├── miri.yml │ └── test.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── codecov.yml ├── mfio-derive ├── Cargo.toml └── src │ └── lib.rs ├── mfio-netfs ├── Cargo.toml ├── README.md ├── examples │ └── net_sample.rs └── src │ ├── bin │ └── mfio-netfs-server.rs │ ├── lib.rs │ └── net │ ├── client.rs │ ├── mod.rs │ └── server.rs ├── mfio-rt ├── Cargo.toml ├── README.md └── src │ ├── __doctest.rs │ ├── lib.rs │ ├── native │ ├── impls │ │ ├── deferred.rs │ │ ├── io_uring │ │ │ ├── file.rs │ │ │ ├── mod.rs │ │ │ ├── tcp_listener.rs │ │ │ └── tcp_stream.rs │ │ ├── iocp │ │ │ ├── file.rs │ │ │ ├── mod.rs │ │ │ ├── tcp_listener.rs │ │ │ └── tcp_stream.rs │ │ ├── mio │ │ │ ├── file.rs │ │ │ ├── mod.rs │ │ │ ├── tcp_listener.rs │ │ │ └── tcp_stream.rs │ │ ├── mod.rs │ │ ├── thread.rs │ │ ├── unix_extra.rs │ │ └── windows_extra.rs │ └── mod.rs │ ├── test_suite.rs │ ├── util.rs │ ├── util │ └── stream.rs │ └── virt │ └── mod.rs ├── mfio ├── Cargo.toml ├── benches │ └── main.rs ├── examples │ └── sample.rs └── src │ ├── backend │ ├── fd.rs │ ├── handle.rs │ ├── integrations │ │ ├── async_io.rs │ │ ├── mod.rs │ │ ├── null.rs │ │ └── tokio.rs │ ├── mod.rs │ └── windows.rs │ ├── error.rs │ ├── futures_compat.rs │ ├── io │ ├── mod.rs │ ├── opaque.rs │ └── packet │ │ ├── mod.rs │ │ ├── output.rs │ │ └── view.rs │ ├── lib.rs │ ├── poller.rs │ ├── sample.rs │ ├── stdeq.rs │ ├── traits.rs │ └── util.rs └── version-hack ├── Cargo.toml └── src └── lib.rs /.github/workflows/asan.yml: -------------------------------------------------------------------------------- 1 | name: Test with ASAN 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | 9 | jobs: 10 | test-asan: 11 | runs-on: ${{ matrix.os }} 12 | env: 13 | RUSTFLAGS: -Zsanitizer=address -C debuginfo=2 ${{ matrix.rustflags }} 14 | RUSTDOCFLAGS: -Zsanitizer=address -C debuginfo=2 ${{ matrix.rustflags }} 15 | CARGO_BUILD_RUSTFLAGS: -C debuginfo=2 16 | ASAN_OPTIONS: symbolize=1 detect_leaks=0 17 | timeout-minutes: 20 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | # TODO: enable windows, macos 22 | os: [ubuntu-latest] 23 | toolchain: ["nightly-2024-12-12"] 24 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 25 | steps: 26 | - uses: actions/checkout@v2 27 | - uses: actions-rs/toolchain@v1 28 | with: 29 | toolchain: ${{ matrix.toolchain }} 30 | override: true 31 | 32 | - name: Get rustc target 33 | run: | 34 | echo "RUSTC_TARGET=$(rustc -vV | sed -n 's|host: ||p')" >> $GITHUB_OUTPUT 35 | id: target 36 | - name: Install llvm 37 | run: sudo apt update && sudo apt install llvm-19 38 | - run: rustup component add rust-src 39 | - name: Run all tests 40 | run: cargo -Zbuild-std test --verbose --target ${{ steps.target.outputs.RUSTC_TARGET }} 41 | 42 | test-asan-lite: 43 | runs-on: ${{ matrix.os }} 44 | env: 45 | RUSTFLAGS: -Zsanitizer=address ${{ matrix.rustflags }} 46 | RUSTDOCFLAGS: -Zsanitizer=address ${{ matrix.rustflags }} 47 | ASAN_OPTIONS: symbolize=1 detect_leaks=0 48 | timeout-minutes: 20 49 | strategy: 50 | fail-fast: false 51 | matrix: 52 | # TODO: enable windows, macos 53 | os: [ubuntu-latest] 54 | toolchain: ["nightly-2024-12-12"] 55 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 56 | steps: 57 | - uses: actions/checkout@v2 58 | - uses: actions-rs/toolchain@v1 59 | with: 60 | toolchain: ${{ 
matrix.toolchain }} 61 | override: true 62 | 63 | - name: Get rustc target 64 | run: | 65 | echo "RUSTC_TARGET=$(rustc -vV | sed -n 's|host: ||p')" >> $GITHUB_OUTPUT 66 | id: target 67 | - name: Install llvm 68 | run: sudo apt update && sudo apt install llvm-19 69 | - name: Run all tests 70 | run: cargo test --verbose --target ${{ steps.target.outputs.RUSTC_TARGET }} 71 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | 9 | jobs: 10 | build-base: 11 | runs-on: ${{ matrix.os }} 12 | env: 13 | RUSTFLAGS: ${{ matrix.rustflags }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: [macos-latest, ubuntu-latest, windows-latest] 18 | toolchain: ["1.84", "stable"] 19 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 20 | steps: 21 | - uses: actions/checkout@v2 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: ${{ matrix.toolchain }} 25 | override: true 26 | 27 | - name: Build without examples 28 | run: cargo build --verbose 29 | 30 | build-nightly: 31 | runs-on: ${{ matrix.os }} 32 | env: 33 | RUSTFLAGS: ${{ matrix.rustflags }} 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | os: [macos-latest, ubuntu-latest, windows-latest] 38 | toolchain: ["1.84", "stable", "nightly-2024-11-22"] 39 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 40 | steps: 41 | - uses: actions/checkout@v2 42 | - uses: actions-rs/toolchain@v1 43 | with: 44 | toolchain: ${{ matrix.toolchain }} 45 | override: true 46 | 47 | - name: Build 48 | run: cargo build --workspace --all-features --verbose 49 | 50 | - name: Build examples 51 | run: cargo build --workspace --all-features --examples --verbose 52 | 53 | build-cross-aarch64: 54 | runs-on: ubuntu-latest 55 | env: 56 | RUSTFLAGS: ${{ matrix.rustflags }} 57 | strategy: 58 | fail-fast: false 59 | matrix: 60 | toolchain: ["1.84", "stable"] 61 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 62 | steps: 63 | - uses: actions/checkout@v2 64 | - uses: actions-rs/toolchain@v1 65 | with: 66 | toolchain: ${{ matrix.toolchain }} 67 | target: aarch64-unknown-linux-gnu 68 | override: true 69 | - uses: actions-rs/cargo@v1 70 | with: 71 | use-cross: true 72 | command: build 73 | args: --target aarch64-unknown-linux-gnu --verbose 74 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: Test coverage 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | 9 | jobs: 10 | test-coverage: 11 | runs-on: ubuntu-latest 12 | timeout-minutes: 60 13 | steps: 14 | - uses: actions/checkout@v2 15 | with: 16 | path: 'mfio-repo' 17 | - uses: actions-rs/toolchain@v1 18 | with: 19 | profile: minimal 20 | toolchain: nightly-2024-11-22 21 | override: true 22 | components: llvm-tools-preview 23 | - run: cargo install grcov 24 | - name: Run tests with coverage 25 | run: | 26 | cd mfio-repo 27 | export CARGO_INCREMENTAL=0 28 | export RUSTFLAGS="-Cinstrument-coverage -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" 29 | 
export RUSTDOCFLAGS="-Cpanic=abort" 30 | export LLVM_PROFILE_FILE="your_name-%p-%m.profraw" 31 | cargo build --workspace --exclude mfio-derive --all-features 32 | cargo test --workspace --exclude mfio-derive --all-features 33 | grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info 34 | bash <(curl -s https://codecov.io/bash) -f lcov.info -t ${{ secrets.CODECOV_TOKEN }}; 35 | 36 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | 9 | jobs: 10 | lint: 11 | runs-on: ${{ matrix.os }} 12 | env: 13 | RUSTFLAGS: ${{ matrix.rustflags }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: [macos-latest, ubuntu-latest, windows-latest] 18 | features: ["--all-features", ""] 19 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 20 | steps: 21 | - uses: actions/checkout@v2 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: stable 25 | override: true 26 | 27 | - run: rustup component add clippy 28 | - name: Check formatting 29 | run: cargo fmt -- --check 30 | - uses: actions-rs/clippy-check@v1 31 | with: 32 | token: ${{ secrets.GITHUB_TOKEN }} 33 | args: --all-targets ${{ matrix.features }} 34 | -------------------------------------------------------------------------------- /.github/workflows/miri.yml: -------------------------------------------------------------------------------- 1 | name: Miri test 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | DEFAULT_MIRI_FLAGS: -Zmiri-ignore-leaks -Zmiri-symbolic-alignment-check -Zmiri-retag-fields=all -Zmiri-symbolic-alignment-check -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-tree-borrows 9 | 10 | jobs: 11 | miri: 12 | runs-on: ubuntu-latest 13 | env: 14 | RUSTFLAGS: --cfg tokio_unstable ${{matrix.rustflags}} 15 | strategy: 16 | matrix: 17 | toolchain: ["nightly-2024-11-22"] 18 | seed: [1, 2, 3, 4, 5, 6, 7, 8] 19 | rustflags: ["", "--cfg mfio_assume_linear_types"] 20 | steps: 21 | - uses: actions/checkout@v2 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: ${{ matrix.toolchain }} 25 | override: true 26 | - run: rustup component add miri 27 | - name: Run miri 28 | run: | 29 | MIRIFLAGS="-Zmiri-seed=${{matrix.seed}} ${{env.DEFAULT_MIRI_FLAGS}}" cargo miri test 30 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | CARGO_NET_GIT_FETCH_WITH_CLI: true 8 | 9 | jobs: 10 | 11 | test: 12 | runs-on: ${{ matrix.os }} 13 | env: 14 | RUSTFLAGS: ${{ matrix.rustflags }} 15 | timeout-minutes: 20 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | os: [macos-latest, ubuntu-latest, windows-latest] 20 | toolchain: ["1.84", "stable"] 21 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 22 | steps: 23 | - uses: actions/checkout@v2 24 | - uses: actions-rs/toolchain@v1 25 | with: 26 | toolchain: ${{ matrix.toolchain }} 27 | override: true 28 | 29 | - name: Run all tests 30 | run: cargo test --verbose 31 | 32 | test-all-features: 
33 | runs-on: ${{ matrix.os }} 34 | env: 35 | RUSTFLAGS: ${{ matrix.rustflags }} 36 | timeout-minutes: 20 37 | strategy: 38 | fail-fast: false 39 | matrix: 40 | os: [macos-latest, ubuntu-latest, windows-latest] 41 | toolchain: ["1.84", "stable", "nightly-2024-11-22"] 42 | rustflags: ["--cfg mfio_assume_linear_types --cfg tokio_unstable", "--cfg tokio_unstable"] 43 | steps: 44 | - uses: actions/checkout@v2 45 | - uses: actions-rs/toolchain@v1 46 | with: 47 | toolchain: ${{ matrix.toolchain }} 48 | override: true 49 | 50 | - name: Run all tests 51 | run: cargo test --verbose 52 | 53 | 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | 2 | [profile.bench] 3 | debug = true 4 | 5 | [workspace] 6 | resolver = "2" 7 | 8 | members = [ 9 | "mfio", 10 | "mfio-rt", 11 | "mfio-derive", 12 | "mfio-netfs", 13 | "version-hack" 14 | ] 15 | 16 | default-members = [ 17 | "mfio", 18 | "mfio-rt", 19 | "mfio-derive", 20 | "mfio-netfs", 21 | "version-hack" 22 | ] 23 | 24 | [patch.crates-io] 25 | abi_stable = { version = "0.11", git = "https://github.com/h33p/abi_stable_crates", branch = "c-unwind" } 26 | abi_stable_derive = { version = "0.11", git = "https://github.com/h33p/abi_stable_crates", branch = "c-unwind" } 27 | cglue = { version = "0.3", git = "https://github.com/h33p/cglue" } 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023-2024 Aurimas Blažulionis <0x60@pm.me> 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mfio 2 | 3 | [![Build and test]][workflows] [![Rustc 1.74]][rust] [![codecov]][codecov-link] 4 | 5 | [Build and test]: https://github.com/memflow/mfio/actions/workflows/build.yml/badge.svg 6 | [workflows]: https://github.com/memflow/mfio/actions/workflows/build.yml 7 | [MIT licensed]: https://img.shields.io/badge/license-MIT-blue.svg 8 | [Rustc 1.74]: https://img.shields.io/badge/rustc-1.74+-lightgray.svg 9 | [rust]: https://blog.rust-lang.org/2023/11/16/Rust-1.74.0.html 10 | [codecov]: https://codecov.io/gh/memflow/mfio/branch/main/graph/badge.svg?token=IJ1K4QPAIM 11 | [codecov-link]: https://codecov.io/gh/memflow/mfio 12 | 13 | ## Framework for Async I/O Systems 14 | 15 | mfio is a one-stop shop for custom async I/O systems. It allows you to go wild, beyond typical OS 16 | APIs. Originally built for memflow, it aims to make the following aspects of an I/O as simple as 17 | possible: 18 | 19 | 1. Async 20 | 2. Automatic batching (vectoring) 21 | 3. Fragmentation 22 | 4. Partial success 23 | 5. Lack of color (full sync support) 24 | 6. I/O directly to the stack 25 | 7. Using without standard library 26 | 27 | mfio achieves all this by building everything around two key, but tiny traits: `PacketIo`, and 28 | `IoBackend`. Backends implement these traits, which then allow asynchronous futures to be driven to 29 | completion. Then, high level abstractions are used to pass data to the I/O system, in the form of 30 | packets. These packets can have multiple views to non-overlapping segments of data, and different 31 | views may be processed differently. The end result is an incredibly flexible I/O system with 32 | unmatched potential for efficient concurrency. 33 | 34 | ## Crates 35 | 36 | The project is split into several crates: 37 | 38 | | Crate | Purpose | Status | 39 | |-------------------------------------|-------------------------|--------------| 40 | | [mfio](mfio/src/lib.rs) | Core building blocks | Beta | 41 | | [mfio-rt](mfio-rt/src/lib.rs) | Runtime building blocks | Alpha | 42 | | [mfio-netfs](mfio-netfs/src/lib.rs) | Network filesystem PoC | Experimental | 43 | 44 | What each status means: 45 | 46 | - Beta - API is subject to change, however, has proven to be relatively reliable. 47 | - Alpha - API is incomplete, however, no serious bugs have been observed. 48 | - Experimental - Incomplete, and serious bugs have been observed. DO NOT run in production. 49 | 50 | ## no_std 51 | 52 | `mfio` and `mfio-rt` do not require `std`, albeit `alloc` crate is still required. You can disable 53 | standard library requirements as follows: 54 | 55 | ```toml 56 | mfio = { version = "0.1", default-features = false } 57 | mfio-rt = { version = "0.1", default-features = false, features = ["virt"] } 58 | ``` 59 | 60 | This will add both `mfio` and `mfio-rt` as dependencies in no\_std mode. Many features will be 61 | disabled, such as native polling handles, native runtime, and anything else that depends on running 62 | operating system. 63 | 64 | However, the remaining blocks should be sufficient to build any non-blocking I/O systems. Custom 65 | polling handles may be implemented on arbitrary types to enable cooperative operations on any 66 | operating system. 
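## Example

A minimal sketch of driving the in-memory `virt` runtime from `mfio-rt`, adapted from the test
scaffolding in `mfio-rt/src/__doctest.rs`. The `mfio_rt::virt::VirtRt` import path and the exact
`OpenOptions` combinations shown here are assumptions rather than documented guarantees:

```rust
use mfio::backend::IoBackendExt;
use mfio::traits::*;
use mfio_rt::virt::VirtRt;
use mfio_rt::{DirHandle, Fs, OpenOptions};

fn main() {
    // The in-memory runtime needs no operating system support.
    let fs = VirtRt::new();

    fs.block_on(async {
        let cd = fs.current_dir();

        // Create a file and write a few bytes at offset 0.
        let fh = cd
            .open_file("hello.txt", OpenOptions::new().create_new(true).write(true))
            .await
            .unwrap();
        fh.write_all(0, b"Hello, mfio!").await.unwrap();
        drop(fh);

        // Reopen the file and read everything back, again from offset 0.
        let fh = cd
            .open_file("hello.txt", OpenOptions::new().read(true))
            .await
            .unwrap();
        let mut buf = vec![];
        fh.read_to_end(0, &mut buf).await.unwrap();
        assert_eq!(&buf, b"Hello, mfio!");
    });
}
```

The same `mfio::traits` extension methods work unchanged against the native runtime (`NativeRt`)
or the network filesystem proxy in `mfio-netfs`.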
67 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: 60% # TODO: up this to at least 75% 6 | threshold: 1% 7 | -------------------------------------------------------------------------------- /mfio-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mfio-derive" 3 | version = "0.1.0" 4 | rust-version = "1.72" 5 | edition = "2021" 6 | authors = ["Aurimas Blažulionis <0x60@pm.me>"] 7 | license = "MIT" 8 | repository = "https://github.com/memflow/mfio" 9 | documentation = "https://docs.rs/mfio-derive" 10 | description = "mfio derive macros" 11 | 12 | [lib] 13 | proc-macro = true 14 | 15 | [dependencies] 16 | syn = { version = "1", features = ["full", "extra-traits"] } 17 | proc-macro2 = "1" 18 | quote = "1" 19 | -------------------------------------------------------------------------------- /mfio-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use quote::*; 3 | use syn::*; 4 | 5 | #[proc_macro_derive(SyncIoRead)] 6 | pub fn io_read(item: TokenStream) -> TokenStream { 7 | let st = parse_macro_input!(item as ItemStruct); 8 | 9 | let ident = &st.ident; 10 | let type_gens = st.generics.split_for_impl().1; 11 | let impl_bounds = &st.generics.params; 12 | let where_bounds = st.generics.where_clause.as_ref().map(|v| &v.predicates); 13 | 14 | let impl_comma = if !impl_bounds.trailing_punct() && !impl_bounds.is_empty() { 15 | Some(token::Comma::default()) 16 | } else { 17 | None 18 | }; 19 | 20 | quote! { 21 | impl<#impl_bounds #impl_comma __Pos: 'static> mfio::traits::sync::SyncIoRead<__Pos> for #ident #type_gens where #ident #type_gens: mfio::traits::IoRead<__Pos> + mfio::backend::IoBackend, #where_bounds {} 22 | }.into() 23 | } 24 | 25 | #[proc_macro_derive(SyncIoWrite)] 26 | pub fn io_write(item: TokenStream) -> TokenStream { 27 | let st = parse_macro_input!(item as ItemStruct); 28 | 29 | let ident = &st.ident; 30 | let type_gens = st.generics.split_for_impl().1; 31 | let impl_bounds = &st.generics.params; 32 | let where_bounds = st.generics.where_clause.as_ref().map(|v| &v.predicates); 33 | 34 | let impl_comma = if !impl_bounds.trailing_punct() && !impl_bounds.is_empty() { 35 | Some(token::Comma::default()) 36 | } else { 37 | None 38 | }; 39 | 40 | quote! 
{ 41 | impl<#impl_bounds #impl_comma __Pos: 'static> mfio::traits::sync::SyncIoWrite<__Pos> for #ident #type_gens where #ident #type_gens: mfio::traits::IoWrite<__Pos> + mfio::backend::IoBackend, #where_bounds {} 42 | }.into() 43 | } 44 | -------------------------------------------------------------------------------- /mfio-netfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mfio-netfs" 3 | version = "0.1.0" 4 | rust-version = "1.74" 5 | edition = "2021" 6 | authors = ["Aurimas Blažulionis <0x60@pm.me>"] 7 | license = "MIT" 8 | repository = "https://github.com/memflow/mfio" 9 | description = "mfio based network filesystem" 10 | documentation = "https://docs.rs/mfio-netfs" 11 | keywords = [ "mfio", "async", "network", "filesystem" ] 12 | categories = [ "asynchronous", "network-programming", "filesystem" ] 13 | readme = "README.md" 14 | 15 | [[bin]] 16 | name = "mfio-netfs-server" 17 | test = false 18 | bench = false 19 | required-features = ["bin-deps"] 20 | 21 | [dependencies] 22 | mfio = { version = "0.1", path = "../mfio", default-features = false } 23 | mfio-rt = { version = "0.1", path = "../mfio-rt", default-features = false } 24 | cglue = { version = "0.3", features = ["task"], default-features = false } 25 | 26 | log = "0.4" 27 | bytemuck = { version = "1", features = ["derive"] } 28 | serde = { version = "1", default-features = false, features = ["derive", "alloc"] } 29 | futures = { version = "0.3", default-features = false, features = ["async-await"] } 30 | debug-ignore = "1" 31 | postcard = { version = "1", features = ["alloc"], default-features = false } 32 | slab = { version = "0.4", default-features = false } 33 | 34 | flume = { version = "0.10", optional = true } 35 | async-mutex = { version = "1", optional = true } 36 | parking_lot = { version = "0.12", optional = true } 37 | tracing = { version = "0.1", optional = true } 38 | 39 | clap = { version = "4", features = ["cargo", "derive"], optional = true } 40 | anyhow = { version = "1", optional = true } 41 | env_logger = { version = "0.10", optional = true } 42 | 43 | tokio = { version = "1.24", features = ["rt", "net", "macros"], optional = true } 44 | 45 | [dev-dependencies] 46 | mfio-rt = { version = "0.1", path = "../mfio-rt", features = ["test_suite"], default-features = false } 47 | env_logger = "0.10" 48 | criterion = { version = "0.5", git = "https://github.com/h33p/criterion.rs", branch = "tput2", features = ["async_tokio", "async_smol", "async_futures"] } 49 | rand = "0.8" 50 | tracing-subscriber = "0.3" 51 | 52 | [target.'cfg(not(miri))'.dev-dependencies] 53 | tokio = { version = "1", features = ["rt", "macros"] } 54 | 55 | [features] 56 | default = ["std"] 57 | std = ["mfio/std", "mfio-rt/std", "mfio-rt/native", "flume", "async-mutex", "parking_lot", "tracing", "futures/std"] 58 | bin-deps = ["clap", "tokio", "anyhow", "env_logger", "mfio/tokio"] 59 | -------------------------------------------------------------------------------- /mfio-netfs/README.md: -------------------------------------------------------------------------------- 1 | # mfio-netfs 2 | 3 | # Network filesystem sample for mfio 4 | 5 | This crate is currently just an example showing how a relatively simple filesystem proxy could 6 | be implemented using mfio's TCP streams. 7 | 8 | Please do not use this in production, because the library does close to no error checking, so 9 | data corruption is likely to happen. 
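For orientation, the bundled `examples/net_sample.rs` boils down to the following: it wraps a
native runtime in a `NetworkFs` client and reads a file through a locally running
`mfio-netfs-server` (which binds to `0.0.0.0:4446` by default):

```rust
use mfio::backend::*;
use mfio::error::Result;
use mfio::traits::*;
use mfio_netfs::NetworkFs;
use mfio_rt::*;

fn main() -> Result<()> {
    // Proxy all filesystem requests of a native runtime over TCP.
    let rt = NativeRt::default();
    let rt = NetworkFs::with_fs("127.0.0.1:4446".parse().unwrap(), rt.into(), true)?;

    rt.block_on(async {
        let file = rt
            .open(Path::new("Cargo.toml"), OpenOptions::new().read(true))
            .await?;

        let mut buf = vec![];
        file.read_to_end(0, &mut buf).await?;
        println!("{}", String::from_utf8_lossy(&buf));

        Ok(())
    })
}
```

The server side can be started with something like
`cargo run --bin mfio-netfs-server --features bin-deps` (the binary is gated behind the
`bin-deps` feature).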
10 | -------------------------------------------------------------------------------- /mfio-netfs/examples/net_sample.rs: -------------------------------------------------------------------------------- 1 | use mfio::backend::*; 2 | use mfio::error::Result; 3 | use mfio::traits::*; 4 | use mfio_netfs::NetworkFs; 5 | use mfio_rt::*; 6 | 7 | fn main() -> Result<()> { 8 | let rt = NativeRt::default(); 9 | let rt = NetworkFs::with_fs("127.0.0.1:4446".parse().unwrap(), rt.into(), true)?; 10 | 11 | rt.block_on(async { 12 | let file = rt 13 | .open(Path::new("Cargo.toml"), OpenOptions::new().read(true)) 14 | .await?; 15 | 16 | let mut buf = vec![]; 17 | file.read_to_end(0, &mut buf).await?; 18 | 19 | let data = String::from_utf8_lossy(&buf); 20 | println!("{data}"); 21 | 22 | Ok(()) 23 | }) 24 | } 25 | -------------------------------------------------------------------------------- /mfio-netfs/src/bin/mfio-netfs-server.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use log::*; 3 | use mfio::backend::*; 4 | use mfio_rt::{NativeRt, Tcp}; 5 | use std::net::SocketAddr; 6 | 7 | #[derive(Parser, Debug)] 8 | struct Args { 9 | #[arg(short, long, default_value = "0.0.0.0:4446")] 10 | bind: SocketAddr, 11 | } 12 | 13 | fn main() -> anyhow::Result<()> { 14 | env_logger::init(); 15 | 16 | println!( 17 | "mfio-netfs is a dangerous PoC that violates memory safety. Do not run in production!" 18 | ); 19 | warn!("mfio-netfs is a dangerous PoC that violates memory safety. Do not run in production!"); 20 | info!("Grep for 'memunsafe' to see details."); 21 | 22 | let args = Args::parse(); 23 | 24 | let fs = NativeRt::default(); 25 | 26 | fs.block_on(async { 27 | let listener = fs.bind(args.bind).await?; 28 | info!("Bound to {}", args.bind); 29 | mfio_netfs::server(&fs, listener).await; 30 | Ok(()) 31 | }) 32 | } 33 | -------------------------------------------------------------------------------- /mfio-netfs/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # mfio-netfs 2 | //! 3 | //! # Network filesystem sample for mfio 4 | //! 5 | //! This crate is currently just an example showing how a relatively simple filesystem proxy could 6 | //! be implemented using mfio's TCP streams. 7 | //! 8 | //! Please do not use this in production, because the library does close to no error checking, so 9 | //! data corruption is likely to happen. 10 | 11 | #![cfg_attr(not(feature = "std"), no_std)] 12 | 13 | extern crate alloc; 14 | 15 | #[cfg(feature = "std")] 16 | mod net; 17 | 18 | #[cfg(feature = "std")] 19 | pub use net::client::*; 20 | #[cfg(feature = "std")] 21 | pub use net::server::*; 22 | -------------------------------------------------------------------------------- /mfio-netfs/src/net/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod server; 3 | 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use bytemuck::{Pod, Zeroable}; 7 | use core::mem::MaybeUninit; 8 | use core::num::{NonZeroI32, NonZeroU16}; 9 | use mfio::io::{ 10 | BoundPacketView, FullPacket, OpaqueStore, OutputFunction, PacketIo, PacketOutput, PacketView, 11 | }; 12 | use mfio::tarc::BaseArc; 13 | use mfio_rt::{DirEntry, DirOp, Metadata, OpenOptions}; 14 | 15 | // SAFETY: memunsafe 16 | // We cannot have safe implementation of this, because malformed data may lead to invalid tag. 17 | // This may lead to incorrect jumps in pattern matching. 
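// In practice this means a peer-controlled discriminant byte is cast straight into the enums
// below and matched on without any further validation.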
18 | unsafe impl Zeroable for Request {} 19 | unsafe impl Pod for Request {} 20 | unsafe impl Zeroable for Response {} 21 | unsafe impl Pod for Response {} 22 | 23 | #[repr(u8, C)] 24 | #[derive(Debug, Clone, Copy)] 25 | enum Request { 26 | Read { 27 | file_id: u32, 28 | packet_id: u32, 29 | pos: u64, 30 | len: u64, 31 | }, 32 | Write { 33 | file_id: u32, 34 | packet_id: u32, 35 | pos: u64, 36 | len: u64, 37 | }, 38 | FileClose { 39 | file_id: u32, 40 | }, 41 | ReadDir { 42 | // If OpenDir results in 16-bit value there is no reason to have 32-bits for read streams. 43 | // If we end up bottlenecked, we will first be bottlenecked by OpenDir. 44 | stream_id: u16, 45 | count: u16, 46 | }, 47 | Fs { 48 | req_id: u32, 49 | dir_id: u16, 50 | req_len: u16, 51 | }, 52 | } 53 | 54 | #[derive(Serialize, Deserialize, Debug, Clone)] 55 | enum FsRequest { 56 | Path, 57 | ReadDir, 58 | OpenDir { path: String }, 59 | OpenFile { path: String, options: OpenOptions }, 60 | Metadata { path: String }, 61 | DirOp(DirOp), 62 | } 63 | 64 | #[repr(u8, C)] 65 | #[derive(Debug, Clone, Copy)] 66 | enum Response { 67 | Read { 68 | err: Option, 69 | packet_id: u32, 70 | idx: u64, 71 | len: u64, 72 | }, 73 | Write { 74 | err: Option, 75 | packet_id: u32, 76 | idx: u64, 77 | len: u64, 78 | }, 79 | ReadDir { 80 | stream_id: u16, 81 | // Highest bit being set means the stream is closing. 82 | len: u32, 83 | }, 84 | Fs { 85 | req_id: u32, 86 | resp_len: u16, 87 | }, 88 | } 89 | 90 | type ReadDirResponse = Result; 91 | 92 | #[derive(Serialize, Deserialize, Debug, Clone)] 93 | enum FsResponse { 94 | Path { 95 | path: Result, 96 | }, 97 | ReadDir { 98 | stream_id: Result, 99 | }, 100 | OpenDir { 101 | dir_id: Result, 102 | }, 103 | OpenFile { 104 | file_id: Result, 105 | }, 106 | Metadata { 107 | metadata: Result, 108 | }, 109 | DirOp(Option), 110 | } 111 | 112 | use mfio::error::Error; 113 | use mfio::io::{NoPos, Packet, PacketPerms, Read, ReboundPacket}; 114 | use parking_lot::Mutex; 115 | use slab::Slab; 116 | 117 | #[derive(Clone, Copy, Debug)] 118 | enum Key { 119 | Header(u64), 120 | Obj(u64), 121 | } 122 | 123 | impl From for Key { 124 | fn from(raw: u64) -> Self { 125 | if raw & 1 != 0 { 126 | Self::Obj(raw >> 1) 127 | } else { 128 | Self::Header(raw >> 1) 129 | } 130 | } 131 | } 132 | 133 | impl Key { 134 | fn idx(self) -> u64 { 135 | match self { 136 | Self::Header(v) => v, 137 | Self::Obj(v) => v, 138 | } 139 | } 140 | 141 | fn key(self) -> u64 { 142 | match self { 143 | Self::Header(v) => v << 1, 144 | Self::Obj(v) => (v << 1) | 1, 145 | } 146 | } 147 | } 148 | 149 | enum RoutedData { 150 | Pkt(ReboundPacket), 151 | Bytes { buffer: BaseArc> }, 152 | None, 153 | } 154 | 155 | struct RoutedObj { 156 | obj: RoutedData, 157 | header: BaseArc>, 158 | } 159 | 160 | impl RoutedObj { 161 | fn header_pkt(&self) -> BaseArc> { 162 | self.header.clone().transpose().into_base().unwrap() 163 | //Packet::from(bytemuck::bytes_of(&this.header)).tag(Key::Header(this.idx).key()) 164 | } 165 | 166 | fn data_pkt(&self) -> Option> { 167 | match &self.obj { 168 | RoutedData::Pkt(p) => Some(p.unbound()), 169 | RoutedData::Bytes { buffer, .. 
} => Some(PacketView::from_arc(buffer.clone(), 0)), 170 | RoutedData::None => None, 171 | } 172 | } 173 | 174 | pub fn is_fully_processed(&self) -> bool { 175 | if self.header.rc() > 0 { 176 | return false; 177 | } 178 | 179 | let obj = &self.obj; 180 | match obj { 181 | RoutedData::Pkt(p) => p.ranges().is_empty() && p.packets_in_flight() == 0, 182 | RoutedData::Bytes { buffer } => { 183 | let rc = buffer.rc(); 184 | rc == 0 185 | } 186 | RoutedData::None => true, 187 | } 188 | } 189 | } 190 | 191 | pub struct HeaderRouterState { 192 | handles: Slab>, 193 | spares: Vec>, 194 | } 195 | 196 | impl Default for HeaderRouterState { 197 | fn default() -> Self { 198 | Self { 199 | handles: Default::default(), 200 | spares: vec![], 201 | } 202 | } 203 | } 204 | 205 | pub struct HeaderRouter<'a, T: Copy, Perms: PacketPerms, Io> { 206 | io: &'a Io, 207 | state: BaseArc>>, 208 | output: BaseArc>, 209 | } 210 | 211 | impl<'a, T: bytemuck::Pod + Send + Sync, Io: PacketIo> HeaderRouter<'a, T, Read, Io> { 212 | pub fn new(io: &'a Io) -> Self { 213 | let state = BaseArc::new(Mutex::new(HeaderRouterState::default())); 214 | 215 | let output = BaseArc::new(OutputFunction::new({ 216 | let state = state.clone(); 217 | move |view, err| { 218 | let key = Key::from(view.tag()); 219 | let mut state = state.lock(); 220 | let handle = state.handles.get_mut(key.idx() as usize).unwrap(); 221 | match key { 222 | Key::Header(_) => {} 223 | Key::Obj(_) => { 224 | // SAFETY: we are not moving the data 225 | let obj = &mut handle.obj; 226 | if let RoutedData::Pkt(p) = obj { 227 | p.on_processed(view, err); 228 | } 229 | } 230 | } 231 | 232 | if handle.is_fully_processed() { 233 | state.handles.remove(key.idx() as usize); 234 | } 235 | } 236 | })) 237 | .transpose() 238 | .into_base() 239 | .unwrap(); 240 | 241 | Self { io, state, output } 242 | } 243 | 244 | fn send(&self, header: impl FnOnce(usize) -> T, in_obj: RoutedData) -> usize { 245 | let state = &mut *self.state.lock(); 246 | let entry = state.handles.vacant_entry(); 247 | let idx = entry.key(); 248 | let header = header(idx); 249 | 250 | let mut obj = state 251 | .spares 252 | .pop() 253 | .map(|v| { 254 | let hdr = v.header; 255 | 256 | unsafe { hdr.reset_err() }; 257 | let header = bytemuck::bytes_of(&header); 258 | // SAFETY: we are converting a slice of initialized bytes to a slice of 259 | // uninitialized bytes. 260 | let header = unsafe { core::mem::transmute::<&[u8], &[MaybeUninit]>(header) }; 261 | unsafe { hdr.simple_slice_mut().unwrap() }.copy_from_slice(header); 262 | 263 | RoutedObj { 264 | obj: v.obj, 265 | header: hdr, 266 | } 267 | }) 268 | .unwrap_or_else(|| RoutedObj { 269 | obj: RoutedData::None, 270 | header: FullPacket::new(header).into(), 271 | }); 272 | 273 | obj.obj = in_obj; 274 | 275 | { 276 | let pkt = obj.header_pkt(); 277 | let pv = PacketView::from_arc(pkt, Key::Header(idx as u64).key()); 278 | let bpv = unsafe { pv.bind(Some(self.output.clone().into())) }; 279 | self.io.send_io(NoPos::new(), bpv); 280 | 281 | if let Some(mut pv) = obj.data_pkt() { 282 | pv.set_tag(Key::Obj(idx as u64).key()); 283 | let bpv = unsafe { pv.bind(Some(self.output.clone().into())) }; 284 | self.io.send_io(NoPos::new(), bpv); 285 | } 286 | } 287 | 288 | entry.insert(obj); 289 | 290 | idx 291 | } 292 | 293 | /// Sends a packet, returns key for completion processing. 294 | /// 295 | /// The returned key needs to be used once results are received from server side marking state 296 | /// of success of various ranges of packets. 
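    /// Feed those results back through [`Self::pkt_result`]; once every range has been accounted
    /// for, the slot behind the returned key is recycled.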
297 | pub fn send_pkt(&self, header: impl FnOnce(usize) -> T, pkt: BoundPacketView) -> usize { 298 | self.send(header, RoutedData::Pkt(pkt.into())) 299 | } 300 | 301 | pub fn send_bytes>>>>( 302 | &self, 303 | header: impl FnOnce(usize) -> T, 304 | buffer: P, 305 | ) { 306 | self.send( 307 | header, 308 | RoutedData::Bytes { 309 | buffer: buffer.heap().transpose().into_base().unwrap(), 310 | }, 311 | ); 312 | } 313 | 314 | pub fn send_hdr(&self, header: impl FnOnce(usize) -> T) { 315 | self.send(header, RoutedData::None); 316 | } 317 | 318 | pub fn pkt_result(&self, idx: usize, start: u64, len: u64, res: Option) -> bool { 319 | let mut state = self.state.lock(); 320 | let handle = if let Some(v) = state.handles.get_mut(idx) { 321 | v 322 | } else { 323 | return true; 324 | }; 325 | 326 | let obj = &mut handle.obj; 327 | 328 | // TODO: do we warn if this condition doesn't match? 329 | if let RoutedData::Pkt(p) = obj { 330 | p.range_result(start, len, res); 331 | } 332 | 333 | if handle.is_fully_processed() { 334 | let mut obj = state.handles.remove(idx); 335 | obj.obj = RoutedData::None; 336 | state.spares.push(obj); 337 | true 338 | } else { 339 | false 340 | } 341 | } 342 | } 343 | -------------------------------------------------------------------------------- /mfio-rt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mfio-rt" 3 | version = "0.1.0" 4 | rust-version = "1.74" 5 | edition = "2021" 6 | authors = ["Aurimas Blažulionis <0x60@pm.me>"] 7 | license = "MIT" 8 | repository = "https://github.com/memflow/mfio" 9 | description = "mfio based async runtime" 10 | documentation = "https://docs.rs/mfio-rt" 11 | keywords = [ "mfio", "async", "runtime", "io_uring", "iocp" ] 12 | categories = [ "asynchronous", "network-programming", "no-std", "os", "filesystem" ] 13 | readme = "README.md" 14 | 15 | [package.metadata.docs.rs] 16 | all-features = true 17 | rustdoc-args = ["--cfg", "docsrs"] 18 | 19 | [dependencies] 20 | mfio = { version = "0.1", path = "../mfio", default-features = false } 21 | futures = { version = "0.3", default-features = false, features = ["async-await"] } 22 | once_cell = { version = "1", default-features = false } 23 | log = "0.4" 24 | serde = { version = "1", features = ["derive", "alloc"], default-features = false } 25 | 26 | typed-path = { version = "0.7", default-features = false } 27 | 28 | # native rt deps 29 | tracing = { version = "0.1", optional = true } 30 | flume = { version = "0.10", optional = true } 31 | parking_lot = { version = "0.12", optional = true } 32 | oneshot = { version = "0.1", optional = true } 33 | tempdir = { version = "0.3", optional = true } 34 | pathdiff = { version = "0.2", optional = true } 35 | async-semaphore = { version = "1", optional = true } 36 | slab = { version = "0.4", default-features = false } 37 | 38 | 39 | [target.'cfg(windows)'.dependencies] 40 | windows = { version = "0.51", features = ["Win32_System_IO", "Win32_Foundation", "Win32_System_WindowsProgramming", "Win32_Storage_FileSystem", "Win32_Networking_WinSock"] } 41 | force-send-sync = "1" 42 | 43 | [target.'cfg(unix)'.dependencies] 44 | mio = { version = "0.8", optional = true, features = ["os-poll", "os-ext", "net"] } 45 | sharded-slab = "0.1" 46 | nix = "0.26" 47 | 48 | [target.'cfg(target_os = "linux")'.dependencies] 49 | io-uring = { version = "0.6", optional = true } 50 | 51 | [dev-dependencies] 52 | mfio = { version = "0.1", path = "../mfio", default-features = false, features = ["tokio", 
"async-io"] } 53 | pollster = { version = "0.3.0", features = ["macro"] } 54 | criterion = { version = "0.5", git = "https://github.com/h33p/criterion.rs", branch = "tput2", features = ["async_tokio", "async_smol", "async_futures"] } 55 | rand = "0.8" 56 | flume = "0.10" 57 | smol = "1" 58 | env_logger = "0.10" 59 | tempdir = "0.3" 60 | pathdiff = "0.2" 61 | async-semaphore = "1" 62 | 63 | [target.'cfg(not(miri))'.dev-dependencies] 64 | tokio = { version = "1", features = ["rt", "rt-multi-thread", "fs", "io-util"] } 65 | 66 | [target.'cfg(target_os = "linux")'.dev-dependencies] 67 | rio = "0.9" 68 | # We need git version to compile on alpine 69 | glommio = { version = "0.8", git = "https://github.com/DataDog/glommio", rev = "517326bb2b63b6f6ddcf5deec7a283ee510f44df" } 70 | 71 | [features] 72 | default = ["mio", "io-uring", "iocp", "native", "std", "virt"] 73 | native = ["oneshot", "parking_lot", "flume", "tracing", "std"] 74 | virt = [] 75 | virt-sync = [] 76 | std = ["mfio/std", "once_cell/std"] 77 | # technically iocp depends on native, but let's be in-line with other backends 78 | iocp = [] 79 | test_suite = ["tempdir", "pathdiff", "async-semaphore"] 80 | __bench = ["mfio/tokio", "mfio/async-io"] 81 | -------------------------------------------------------------------------------- /mfio-rt/README.md: -------------------------------------------------------------------------------- 1 | # mfio-rt 2 | 3 | ## mfio Backed Runtime 4 | 5 | This crate aims to provide building blocks for mfio backed asynchronous runtimes. The traits 6 | have the option to not rely on the standard library. This makes the system great for `no_std` 7 | embedded environments or kernel-side code. 8 | 9 | `native` feature (depends on `std`) enables native implementations of the runtime through 10 | `NativeRt` structure. 11 | 12 | `virt` feature enables a virtual in-memory runtime through `VirtRt` structure. 13 | 14 | Custom runtimes may be implemented by implementing `IoBackend`, and any of the runtime 15 | traits, such as `Fs` or `Tcp`. 16 | -------------------------------------------------------------------------------- /mfio-rt/src/__doctest.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "native")] 2 | use crate::NativeRt; 3 | #[cfg(all(any(miri, test, feature = "virt"), not(feature = "native")))] 4 | use crate::{virt::VirtRt, DirHandleExt}; 5 | #[cfg(any(miri, test, feature = "native", feature = "virt"))] 6 | use core::future::Future; 7 | #[cfg(any(miri, test, feature = "native", feature = "virt"))] 8 | use mfio::backend::IoBackendExt; 9 | 10 | #[cfg(feature = "native")] 11 | pub fn run_each<'a, Func: Fn(&'a NativeRt) -> F, F: Future>(func: Func) { 12 | for (_, fs) in NativeRt::builder().enable_all().build_each() { 13 | if let Ok(fs) = fs { 14 | let fs = &fs; 15 | // SAFETY: there isn't. The doctests shouldn't move the fs handle though. 
16 | let fs: &'a NativeRt = unsafe { &(*(fs as *const _)) }; 17 | fs.block_on(func(fs)); 18 | } 19 | } 20 | } 21 | 22 | #[cfg(all(any(miri, test, feature = "virt"), not(feature = "native")))] 23 | pub fn run_each<'a, Func: Fn(&'a VirtRt) -> F, F: Future>(func: Func) { 24 | use crate::{DirHandle, Fs, OpenOptions}; 25 | use mfio::traits::*; 26 | 27 | const FILES: &[(&str, &str)] = &[ 28 | ("Cargo.toml", include_str!("../Cargo.toml")), 29 | ("src/lib.rs", include_str!("lib.rs")), 30 | ]; 31 | 32 | let fs = &VirtRt::new(); 33 | 34 | fs.block_on(async { 35 | let cd = fs.current_dir(); 36 | cd.create_dir("src").await.unwrap(); 37 | for (p, data) in FILES { 38 | let fh = cd 39 | .open_file(p, OpenOptions::new().create_new(true).write(true)) 40 | .await 41 | .unwrap(); 42 | fh.write_all(0, data.as_bytes()).await.unwrap(); 43 | } 44 | }); 45 | 46 | // SAFETY: there isn't. The doctests shouldn't move the fs handle though. 47 | let fs: &'a VirtRt = unsafe { &(*(fs as *const _)) }; 48 | fs.block_on(func(fs)); 49 | } 50 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/deferred.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/memflow/mfio/24541b08da010e51a0d09e2eed7ef386da14ec94/mfio-rt/src/native/impls/deferred.rs -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/io_uring/file.rs: -------------------------------------------------------------------------------- 1 | use std::os::fd::AsRawFd; 2 | 3 | use io_uring::{opcode, squeue::Entry, types::Fixed}; 4 | use parking_lot::Mutex; 5 | 6 | use core::mem::MaybeUninit; 7 | 8 | use mfio::io::{Read as RdPerm, Write as WrPerm, *}; 9 | use mfio::tarc::BaseArc; 10 | 11 | use super::{IoUringState, Key, Operation, RawBox}; 12 | 13 | trait IntoOp: PacketPerms { 14 | fn into_op(fd: u32, pos: u64, pkt: BoundPacketView) -> (Entry, Operation); 15 | } 16 | 17 | impl IntoOp for RdPerm { 18 | fn into_op(fd: u32, pos: u64, pkt: BoundPacketView) -> (Entry, Operation) { 19 | let len = pkt.len(); 20 | let pkt = pkt.try_alloc(); 21 | 22 | let (buf, raw_box, pkt) = match pkt { 23 | Ok(pkt) => (pkt.as_ptr(), RawBox::null(), Ok(pkt)), 24 | Err(pkt) => { 25 | let mut buf: Vec> = Vec::with_capacity(len as usize); 26 | unsafe { buf.set_len(len as usize) }; 27 | let mut buf = buf.into_boxed_slice(); 28 | let buf_ptr = buf.as_mut_ptr(); 29 | let buf = Box::into_raw(buf); 30 | let pkt = unsafe { pkt.transfer_data(buf_ptr as *mut ()) }; 31 | (buf_ptr as *const u8, RawBox(buf), Err(pkt)) 32 | } 33 | }; 34 | 35 | let entry = opcode::Write::new(Fixed(fd), buf, len as u32) 36 | .offset(pos) 37 | .build(); 38 | 39 | (entry, Operation::FileWrite(pkt, raw_box)) 40 | } 41 | } 42 | 43 | impl IntoOp for WrPerm { 44 | fn into_op(fd: u32, pos: u64, pkt: BoundPacketView) -> (Entry, Operation) { 45 | let len = pkt.len(); 46 | let pkt = pkt.try_alloc(); 47 | 48 | let (buf, raw_box) = match &pkt { 49 | Ok(pkt) => (pkt.as_ptr().cast(), RawBox::null()), 50 | Err(_) => { 51 | let mut buf = Vec::with_capacity(len as usize); 52 | unsafe { buf.set_len(len as usize) }; 53 | let mut buf = buf.into_boxed_slice(); 54 | let buf_ptr = buf.as_mut_ptr(); 55 | let buf = Box::into_raw(buf); 56 | (buf_ptr, RawBox(buf)) 57 | } 58 | }; 59 | 60 | let buf: *mut MaybeUninit = buf; 61 | 62 | let entry = opcode::Read::new(Fixed(fd), buf.cast(), len as u32) 63 | .offset(pos) 64 | .build(); 65 | 66 | (entry, Operation::FileRead(pkt, raw_box)) 67 
| } 68 | } 69 | 70 | impl PacketIo for FileWrapper { 71 | fn send_io(&self, pos: u64, packet: BoundPacketView) { 72 | let mut state = self.state.lock(); 73 | let state = &mut *state; 74 | 75 | // TODO: handle size limitations??? 76 | let (ring_entry, ops_entry) = Perms::into_op(Key::File(self.idx).key() as _, pos, packet); 77 | 78 | state.all_ssub += 1; 79 | state.push_handle().try_push_op(ring_entry, ops_entry); 80 | } 81 | } 82 | 83 | pub struct FileWrapper { 84 | idx: usize, 85 | state: BaseArc>, 86 | } 87 | 88 | impl FileWrapper { 89 | pub(super) fn new(idx: usize, state: BaseArc>) -> Self { 90 | Self { idx, state } 91 | } 92 | } 93 | 94 | impl Drop for FileWrapper { 95 | fn drop(&mut self) { 96 | let mut state = self.state.lock(); 97 | let v = state.files.remove(self.idx); 98 | 99 | log::trace!("Dropping {} {}", self.idx, v.as_raw_fd()); 100 | 101 | let r = state 102 | .ring 103 | .submitter() 104 | .register_files_update(Key::File(self.idx).key() as _, &[-1]) 105 | .unwrap(); 106 | 107 | log::trace!("{r} {}", self.state.strong_count(),); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/io_uring/tcp_listener.rs: -------------------------------------------------------------------------------- 1 | use std::os::fd::AsRawFd; 2 | 3 | use core::pin::Pin; 4 | use core::task::{Context, Poll}; 5 | use futures::Stream; 6 | 7 | use io_uring::{opcode, types::Fixed}; 8 | use parking_lot::Mutex; 9 | 10 | use nix::sys::socket::{AddressFamily, SockaddrLike, SockaddrStorage}; 11 | 12 | use mfio::error::State; 13 | use mfio::tarc::BaseArc; 14 | 15 | use super::{IoUringState, Operation, TcpStream, TmpAddr}; 16 | use crate::util::{from_io_error, io_err, Key}; 17 | use crate::TcpListenerHandle; 18 | 19 | use std::net::{self, SocketAddr, SocketAddrV4, SocketAddrV6}; 20 | 21 | pub struct ListenerInner { 22 | fd: net::TcpListener, 23 | } 24 | 25 | impl From for ListenerInner { 26 | fn from(fd: net::TcpListener) -> Self { 27 | Self { fd } 28 | } 29 | } 30 | 31 | pub struct TcpListener { 32 | idx: usize, 33 | state: BaseArc>, 34 | accept_idx: Option, 35 | } 36 | 37 | impl TcpListener { 38 | pub(super) fn register_listener( 39 | state_arc: &BaseArc>, 40 | listener: net::TcpListener, 41 | ) -> Self { 42 | let fd = listener.as_raw_fd(); 43 | 44 | let state = &mut *state_arc.lock(); 45 | let entry = state.listeners.vacant_entry(); 46 | let key = Key::TcpListener(entry.key()); 47 | let listener = ListenerInner::from(listener); 48 | 49 | log::trace!( 50 | "Register listener={:?} state={:?}: key={key:?}", 51 | listener.fd.as_raw_fd(), 52 | state_arc.as_ptr(), 53 | ); 54 | 55 | entry.insert(listener); 56 | 57 | IoUringState::register_fd(&state.ring.submitter(), fd, key); 58 | 59 | TcpListener { 60 | idx: key.idx(), 61 | state: state_arc.clone(), 62 | accept_idx: None, 63 | } 64 | } 65 | } 66 | 67 | impl Drop for TcpListener { 68 | fn drop(&mut self) { 69 | let mut state = self.state.lock(); 70 | let v = state.listeners.remove(self.idx); 71 | 72 | log::trace!("Dropping {} {}", self.idx, v.fd.as_raw_fd()); 73 | 74 | let r = state 75 | .ring 76 | .submitter() 77 | .register_files_update(Key::TcpListener(self.idx).key() as _, &[-1]) 78 | .unwrap(); 79 | 80 | log::trace!("{r} {}", self.state.strong_count(),); 81 | } 82 | } 83 | 84 | impl TcpListenerHandle for TcpListener { 85 | type StreamHandle = TcpStream; 86 | 87 | fn local_addr(&self) -> mfio::error::Result { 88 | let state = self.state.lock(); 89 | let listener = state 90 | .listeners 91 | 
.get(self.idx) 92 | .ok_or_else(|| io_err(State::NotFound))?; 93 | listener.fd.local_addr().map_err(from_io_error) 94 | } 95 | } 96 | 97 | impl Stream for TcpListener { 98 | type Item = (TcpStream, SocketAddr); 99 | 100 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 101 | // SAFETY: we are not moving out of this future 102 | let this = unsafe { self.get_unchecked_mut() }; 103 | 104 | let backend = &mut *this.state.lock(); 105 | 106 | loop { 107 | if let Some(idx) = this.accept_idx { 108 | if let Some(conn) = backend.connections.get_mut(idx) { 109 | match conn.res.take() { 110 | Some(v) => { 111 | let v = v.ok().zip(conn.tmp_addr.take()); 112 | let _ = backend.connections.remove(idx); 113 | this.accept_idx = None; 114 | if let Some((stream, TmpAddr { domain, addr })) = v { 115 | let addr_len = addr.1; 116 | let addr = addr.0; 117 | 118 | if addr_len > addr.len() { 119 | log::error!("Resulting address didn't fit! ({addr_len} vs {}). This shouldn't happen", addr.len()); 120 | } 121 | 122 | // If anything is wrong here, we should panic. 123 | let addr = match domain { 124 | AddressFamily::Inet => { 125 | let addr = addr.as_sockaddr_in().unwrap(); 126 | SocketAddr::V4(SocketAddrV4::new( 127 | addr.ip().into(), 128 | addr.port(), 129 | )) 130 | } 131 | AddressFamily::Inet6 => { 132 | let addr = addr.as_sockaddr_in6().unwrap(); 133 | SocketAddr::V6(SocketAddrV6::new( 134 | addr.ip(), 135 | addr.port(), 136 | addr.flowinfo(), 137 | addr.scope_id(), 138 | )) 139 | } 140 | _ => unreachable!("invalid state reached {domain:?}"), 141 | }; 142 | 143 | return Poll::Ready(Some((stream, addr))); 144 | } else { 145 | continue; 146 | } 147 | } 148 | None => { 149 | conn.waker = Some(cx.waker().clone()); 150 | return Poll::Pending; 151 | } 152 | } 153 | } else { 154 | this.accept_idx = None; 155 | continue; 156 | } 157 | } else { 158 | let idx = backend.connections.insert(cx.waker().clone().into()); 159 | this.accept_idx = Some(idx); 160 | // The invariant here is that we have an entry within connections - if we didn't, we 161 | // would have returned in the previous block. 162 | let conn = backend.connections.get_mut(idx).unwrap(); 163 | 164 | let local_addr = { 165 | let listener = backend 166 | .listeners 167 | .get(this.idx) 168 | .ok_or_else(|| io_err(State::NotFound)) 169 | .unwrap(); 170 | listener.fd.local_addr().map_err(from_io_error).unwrap() 171 | }; 172 | 173 | let (domain, storage) = match local_addr { 174 | SocketAddr::V4(_) => ( 175 | AddressFamily::Inet, 176 | SockaddrStorage::from(SocketAddrV4::new(0.into(), 0)), 177 | ), 178 | SocketAddr::V6(_) => ( 179 | AddressFamily::Inet6, 180 | SockaddrStorage::from(SocketAddrV6::new(0.into(), 0, 0, 0)), 181 | ), 182 | }; 183 | 184 | let len = storage.len(); 185 | 186 | conn.tmp_addr = Some(TmpAddr { 187 | domain, 188 | addr: Box::pin((storage, len)), 189 | }); 190 | let (addr, _, len_ptr) = conn 191 | .tmp_addr 192 | .as_mut() 193 | .map(|TmpAddr { addr, .. 
}| { 194 | ( 195 | addr.0.as_ptr() as *mut _, 196 | addr.0.len(), 197 | &mut addr.1 as *mut u32, 198 | ) 199 | }) 200 | .unwrap(); 201 | 202 | let entry = opcode::Accept::new( 203 | Fixed(Key::TcpListener(this.idx).key() as _), 204 | addr, 205 | len_ptr, 206 | ) 207 | .build(); 208 | 209 | backend 210 | .push_handle() 211 | .try_push_op(entry, Operation::TcpGetSock(idx)); 212 | 213 | break Poll::Pending; 214 | } 215 | } 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/io_uring/tcp_stream.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::net::{self, SocketAddr, ToSocketAddrs}; 3 | use std::os::fd::{AsRawFd, FromRawFd, OwnedFd}; 4 | 5 | use core::future::Future; 6 | use core::mem::MaybeUninit; 7 | use core::pin::Pin; 8 | use core::task::{Context, Poll}; 9 | 10 | use io_uring::{ 11 | opcode, 12 | squeue::Flags, 13 | types::{Fd, Fixed}, 14 | }; 15 | use parking_lot::Mutex; 16 | 17 | use nix::libc::{iovec, msghdr}; 18 | use nix::sys::socket::{self, SockaddrLike, SockaddrStorage}; 19 | 20 | use mfio::error::State; 21 | use mfio::io::{Read as RdPerm, Write as WrPerm, *}; 22 | use mfio::tarc::BaseArc; 23 | 24 | use super::super::unix_extra::new_for_addr; 25 | use super::{DeferredPackets, IoUringPushHandle, IoUringState, Operation, TmpAddr}; 26 | use crate::util::{from_io_error, io_err, stream::StreamBuf, Key}; 27 | use crate::{Shutdown, TcpStreamHandle}; 28 | 29 | use once_cell::sync::Lazy; 30 | 31 | static IOV_MAX: Lazy = Lazy::new(|| { 32 | nix::unistd::sysconf(nix::unistd::SysconfVar::IOV_MAX) 33 | .ok() 34 | .flatten() 35 | .unwrap_or(1024) as _ 36 | }); 37 | 38 | pub struct StreamInner { 39 | fd: net::TcpStream, 40 | stream: StreamBuf, 41 | in_read: bool, 42 | in_write: usize, 43 | recv_msg: Box, 44 | read_queue: Vec>, 45 | write_queue: Vec>, 46 | } 47 | 48 | unsafe impl Send for StreamInner {} 49 | unsafe impl Sync for StreamInner {} 50 | 51 | impl Drop for StreamInner { 52 | fn drop(&mut self) { 53 | // For some reason we need to do this, because otherwise there is a second or-so delay 54 | // before the other end receives the shutdown. 
55 | if let Err(e) = socket::shutdown(self.fd.as_raw_fd(), socket::Shutdown::Both) { 56 | log::warn!("Could not shutdown stream: {e:?}"); 57 | } 58 | } 59 | } 60 | 61 | impl From for StreamInner { 62 | fn from(fd: net::TcpStream) -> Self { 63 | Self { 64 | fd, 65 | stream: StreamBuf::default(), 66 | in_read: false, 67 | in_write: 0, 68 | recv_msg: empty_msg().into(), 69 | read_queue: Default::default(), 70 | write_queue: Default::default(), 71 | } 72 | } 73 | } 74 | 75 | fn empty_msg() -> msghdr { 76 | unsafe { MaybeUninit::zeroed().assume_init() } 77 | } 78 | 79 | impl StreamInner { 80 | pub fn on_read(&mut self, res: io::Result, deferred_pkts: &mut DeferredPackets) { 81 | self.in_read = false; 82 | self.stream.on_read(res, Some(deferred_pkts)) 83 | } 84 | 85 | pub fn on_write(&mut self, res: io::Result, deferred_pkts: &mut DeferredPackets) { 86 | self.in_write -= 1; 87 | self.stream.on_write(res, Some(deferred_pkts)) 88 | } 89 | 90 | #[tracing::instrument(skip(self, push_handle, deferred_pkts))] 91 | pub(super) fn on_queue( 92 | &mut self, 93 | idx: usize, 94 | push_handle: &mut IoUringPushHandle, 95 | deferred_pkts: &mut DeferredPackets, 96 | ) { 97 | log::trace!( 98 | "Do ops file={:?} (to read={} to write={})", 99 | self.fd.as_raw_fd(), 100 | self.stream.read_ops(), 101 | self.stream.write_ops() 102 | ); 103 | 104 | if (!self.read_queue.is_empty() || self.stream.read_ops() > 0) && !self.in_read { 105 | let rd_span = 106 | tracing::span!(tracing::Level::TRACE, "read", ops = self.stream.read_ops()); 107 | let _span = rd_span.enter(); 108 | for op in self.read_queue.drain(..) { 109 | self.stream.queue_read(op, Some(deferred_pkts)); 110 | } 111 | let queue = self.stream.read_queue(); 112 | if !queue.is_empty() { 113 | self.in_read = true; 114 | let msg = &mut *self.recv_msg; 115 | // Limit iov read to IOV_MAX, because we don't want to have the operation fail. 116 | msg.msg_iovlen = core::cmp::min(queue.len() as usize, *IOV_MAX as usize) as _; 117 | msg.msg_iov = queue.as_mut_ptr() as *mut iovec; 118 | let entry = opcode::RecvMsg::new(Fixed(Key::Stream(idx).key() as _), msg).build(); 119 | push_handle.try_push_op(entry, Operation::StreamRead(idx)) 120 | } 121 | } 122 | 123 | if (!self.write_queue.is_empty() || self.stream.write_ops() > 0) && self.in_write == 0 { 124 | let wr_span = tracing::span!( 125 | tracing::Level::TRACE, 126 | "write", 127 | ops = self.stream.write_ops() 128 | ); 129 | let _span = wr_span.enter(); 130 | for op in self.write_queue.drain(..) { 131 | self.stream.queue_write(op, Some(deferred_pkts)); 132 | } 133 | let queue = self.stream.write_queue(); 134 | if !queue.is_empty() { 135 | // FIXME: investigate why processing more than 3 chunks leads to out-of-order 136 | // transfer of data - OOO does not happen only if we add link flag to the tail as 137 | // well, which is wrong - adding link flag to the tail likely interupts other logic 138 | // slowing us down. Taking 3 chunks seems to work fine, but 2 should be good 139 | // enough. 140 | // 141 | // Performance wise, a better improvement here would be to enable submission of new 142 | // writes, before all chunks complete - this would maximize the throughput. 
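                // One writev submission per IOV_MAX-sized chunk of the queue, capped at two
                // submissions per round (see the note above).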
143 | let target_height = core::cmp::min((queue.len() + *IOV_MAX - 1) / *IOV_MAX, 2); 144 | let mut tailed = false; 145 | for queue in queue.chunks(*IOV_MAX).take(target_height) { 146 | debug_assert!(!tailed); 147 | self.in_write += 1; 148 | let entry = opcode::Writev::new( 149 | Fixed(Key::Stream(idx).key() as _), 150 | queue.as_ptr() as *mut iovec, 151 | queue.len() as _, 152 | ) 153 | .offset(!0u64) 154 | .build(); 155 | 156 | push_handle.try_push_op( 157 | if self.in_write < target_height { 158 | entry.flags(Flags::IO_HARDLINK) 159 | } else { 160 | tailed = true; 161 | entry 162 | }, 163 | Operation::StreamWrite(idx), 164 | ); 165 | } 166 | } 167 | } 168 | } 169 | 170 | pub fn cancel_all_ops(&mut self) { 171 | self.stream 172 | .on_read(Err(io::ErrorKind::Interrupted.into()), None) 173 | } 174 | } 175 | 176 | trait IntoOp: PacketPerms { 177 | fn push_op( 178 | stream: &mut StreamInner, 179 | pkt: BoundPacketView, 180 | deferred_pkts: &mut DeferredPackets, 181 | ); 182 | } 183 | 184 | impl IntoOp for RdPerm { 185 | fn push_op( 186 | stream: &mut StreamInner, 187 | pkt: BoundPacketView, 188 | deferred_pkts: &mut DeferredPackets, 189 | ) { 190 | if stream.in_write == 0 { 191 | stream.stream.queue_write(pkt, Some(deferred_pkts)); 192 | } else { 193 | stream.write_queue.push(pkt); 194 | } 195 | } 196 | } 197 | 198 | impl IntoOp for WrPerm { 199 | fn push_op( 200 | stream: &mut StreamInner, 201 | pkt: BoundPacketView, 202 | deferred_pkts: &mut DeferredPackets, 203 | ) { 204 | if !stream.in_read { 205 | stream.stream.queue_read(pkt, Some(deferred_pkts)); 206 | } else { 207 | stream.read_queue.push(pkt); 208 | } 209 | } 210 | } 211 | 212 | impl PacketIo for TcpStream { 213 | fn send_io(&self, _: NoPos, packet: BoundPacketView) { 214 | let mut state = self.state.lock(); 215 | let state = &mut *state; 216 | 217 | let stream = state.streams.get_mut(self.idx).unwrap(); 218 | 219 | Perms::push_op(stream, packet, &mut state.deferred_pkts); 220 | } 221 | } 222 | 223 | pub struct TcpStream { 224 | idx: usize, 225 | state: BaseArc>, 226 | } 227 | 228 | impl TcpStream { 229 | pub(super) fn new(idx: usize, state: BaseArc>) -> Self { 230 | Self { idx, state } 231 | } 232 | 233 | pub(super) fn tcp_connect<'a, A: ToSocketAddrs + Send + 'a>( 234 | backend: &'a BaseArc>, 235 | addrs: A, 236 | ) -> TcpConnectFuture<'a, A> { 237 | TcpConnectFuture { 238 | backend, 239 | addrs: addrs.to_socket_addrs().ok(), 240 | idx: None, 241 | } 242 | } 243 | } 244 | 245 | impl Drop for TcpStream { 246 | fn drop(&mut self) { 247 | let mut state = self.state.lock(); 248 | let v = state.streams.remove(self.idx); 249 | 250 | log::trace!("Dropping {} {}", self.idx, v.fd.as_raw_fd()); 251 | 252 | let r = state 253 | .ring 254 | .submitter() 255 | .register_files_update(Key::Stream(self.idx).key() as _, &[-1]) 256 | .unwrap(); 257 | 258 | log::trace!("{r} {}", self.state.strong_count(),); 259 | } 260 | } 261 | 262 | impl TcpStreamHandle for TcpStream { 263 | fn local_addr(&self) -> mfio::error::Result { 264 | let state = self.state.lock(); 265 | let stream = state 266 | .streams 267 | .get(self.idx) 268 | .ok_or_else(|| io_err(State::NotFound))?; 269 | stream.fd.local_addr().map_err(from_io_error) 270 | } 271 | 272 | fn peer_addr(&self) -> mfio::error::Result { 273 | let state = self.state.lock(); 274 | let stream = state 275 | .streams 276 | .get(self.idx) 277 | .ok_or_else(|| io_err(State::NotFound))?; 278 | stream.fd.peer_addr().map_err(from_io_error) 279 | } 280 | 281 | fn shutdown(&self, how: Shutdown) -> 
mfio::error::Result<()> { 282 | let state = self.state.lock(); 283 | let stream = state 284 | .streams 285 | .get(self.idx) 286 | .ok_or_else(|| io_err(State::NotFound))?; 287 | stream.fd.shutdown(how.into()).map_err(from_io_error) 288 | } 289 | } 290 | 291 | pub struct TcpConnectFuture<'a, A: ToSocketAddrs + 'a> { 292 | backend: &'a BaseArc>, 293 | addrs: Option, 294 | idx: Option, 295 | } 296 | 297 | impl<'a, A: ToSocketAddrs + 'a> Future for TcpConnectFuture<'a, A> { 298 | type Output = mfio::error::Result; 299 | 300 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 301 | // SAFETY: we are not moving out of this future 302 | let this = unsafe { self.get_unchecked_mut() }; 303 | 304 | let backend = &mut *this.backend.lock(); 305 | 306 | if let Some(idx) = this.idx { 307 | if let Some(conn) = backend.connections.get_mut(idx) { 308 | match conn.res.take() { 309 | Some(Ok(stream)) => { 310 | let _ = backend.connections.remove(idx); 311 | return Poll::Ready(Ok(stream)); 312 | } 313 | Some(Err(_)) => { 314 | conn.waker = Some(cx.waker().clone()); 315 | } 316 | None => { 317 | conn.waker = Some(cx.waker().clone()); 318 | return Poll::Pending; 319 | } 320 | } 321 | } else { 322 | return Poll::Ready(Err(io_err(State::NotFound))); 323 | } 324 | } 325 | 326 | // Push new op to the ring if we've got an address for it 327 | loop { 328 | if let Some(addr) = this.addrs.as_mut().and_then(|v| v.next()) { 329 | let &mut idx = this 330 | .idx 331 | .get_or_insert_with(|| backend.connections.insert(cx.waker().clone().into())); 332 | 333 | // The invariant here is that we have an entry within connections - if we didn't, we 334 | // would have returned in the previous block. 335 | let conn = backend.connections.get_mut(idx).unwrap(); 336 | 337 | let Ok((domain, fd)) = new_for_addr(addr, false) else { 338 | continue; 339 | }; 340 | let fd = unsafe { OwnedFd::from_raw_fd(fd) }; 341 | 342 | let (addr, len) = { 343 | let stor = SockaddrStorage::from(addr); 344 | conn.tmp_addr = Some(TmpAddr { 345 | domain, 346 | addr: Box::pin((stor, 0)), 347 | }); 348 | conn.tmp_addr 349 | .as_ref() 350 | .map(|v| (v.addr.0.as_ptr(), v.addr.0.len())) 351 | .unwrap() 352 | }; 353 | 354 | let entry = opcode::Connect::new(Fd(fd.as_raw_fd()), addr, len).build(); 355 | 356 | conn.fd = Some(fd); 357 | 358 | backend 359 | .push_handle() 360 | .try_push_op(entry, Operation::TcpGetSock(idx)); 361 | 362 | break Poll::Pending; 363 | } else { 364 | if let Some(idx) = this.idx { 365 | backend.connections.remove(idx); 366 | } 367 | 368 | break Poll::Ready(Err(io_err(State::Exhausted))); 369 | } 370 | } 371 | } 372 | } 373 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/iocp/file.rs: -------------------------------------------------------------------------------- 1 | use std::os::windows::io::AsRawHandle; 2 | 3 | use parking_lot::Mutex; 4 | 5 | use core::mem::MaybeUninit; 6 | 7 | use mfio::io::*; 8 | use mfio::tarc::BaseArc; 9 | 10 | use ::windows::Win32::Foundation::HANDLE; 11 | 12 | use ::windows::Win32::System::IO::{OVERLAPPED, OVERLAPPED_0, OVERLAPPED_0_0}; 13 | 14 | use super::{IocpState, Operation, OperationHeader, OperationMode, RawBox}; 15 | 16 | trait IntoOp: PacketPerms { 17 | fn into_op(pkt: BoundPacketView) -> OperationMode; 18 | } 19 | 20 | impl IntoOp for Read { 21 | fn into_op(pkt: BoundPacketView) -> OperationMode { 22 | let len = pkt.len(); 23 | let pkt = pkt.try_alloc(); 24 | 25 | let (raw_box, pkt) = match pkt { 26 | Ok(pkt) => (RawBox::null(), Ok(pkt)), 
27 | Err(pkt) => { 28 | let mut buf: Vec> = Vec::with_capacity(len as usize); 29 | unsafe { buf.set_len(len as usize) }; 30 | let mut buf = buf.into_boxed_slice(); 31 | let buf_ptr = buf.as_mut_ptr(); 32 | let buf = Box::into_raw(buf); 33 | let pkt = unsafe { pkt.transfer_data(buf_ptr as *mut ()) }; 34 | (RawBox(buf), Err(pkt)) 35 | } 36 | }; 37 | 38 | OperationMode::FileWrite(pkt, raw_box) 39 | } 40 | } 41 | 42 | impl IntoOp for Write { 43 | fn into_op(pkt: BoundPacketView) -> OperationMode { 44 | let len = pkt.len(); 45 | let pkt = pkt.try_alloc(); 46 | 47 | let raw_box = match &pkt { 48 | Ok(_) => RawBox::null(), 49 | Err(_) => { 50 | let mut buf = Vec::with_capacity(len as usize); 51 | unsafe { buf.set_len(len as usize) }; 52 | let buf = buf.into_boxed_slice(); 53 | let buf = Box::into_raw(buf); 54 | RawBox(buf) 55 | } 56 | }; 57 | 58 | OperationMode::FileRead(pkt, raw_box) 59 | } 60 | } 61 | 62 | impl PacketIo for FileWrapper { 63 | fn send_io(&self, pos: u64, packet: BoundPacketView) { 64 | log::trace!("Send io @ {pos:x}"); 65 | let mut state = self.state.lock(); 66 | let state = &mut *state; 67 | 68 | let hdr = OperationHeader { 69 | overlapped: OVERLAPPED { 70 | Internal: 0, 71 | InternalHigh: 0, 72 | Anonymous: OVERLAPPED_0 { 73 | Anonymous: OVERLAPPED_0_0 { 74 | Offset: (pos & (!0 >> 32)) as u32, 75 | OffsetHigh: (pos >> 32) as u32, 76 | }, 77 | }, 78 | hEvent: HANDLE(state.event.as_raw_handle() as _), 79 | }, 80 | idx: !0, 81 | handle: HANDLE(state.files.get(self.idx).unwrap().as_raw_handle() as _), 82 | }; 83 | 84 | let operation = Operation { 85 | header: hdr.into(), 86 | mode: Perms::into_op(packet), 87 | }; 88 | 89 | let _ = unsafe { state.try_submit_op(operation) }; 90 | } 91 | } 92 | 93 | pub struct FileWrapper { 94 | idx: usize, 95 | state: BaseArc>, 96 | } 97 | 98 | impl FileWrapper { 99 | pub(super) fn new(idx: usize, state: BaseArc>) -> Self { 100 | Self { idx, state } 101 | } 102 | } 103 | 104 | impl Drop for FileWrapper { 105 | fn drop(&mut self) { 106 | let mut state = self.state.lock(); 107 | let v = state.files.remove(self.idx); 108 | log::trace!("Dropping {} {:?}", self.idx, v.as_raw_handle()); 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/iocp/tcp_listener.rs: -------------------------------------------------------------------------------- 1 | use core::pin::Pin; 2 | use core::task::{Context, Poll}; 3 | use futures::stream::Stream; 4 | use mfio::error::State; 5 | use mfio::tarc::BaseArc; 6 | use parking_lot::Mutex; 7 | use std::net::{self, SocketAddr, SocketAddrV4, SocketAddrV6}; 8 | use std::os::windows::io::{AsRawHandle, AsRawSocket}; 9 | 10 | use super::{IocpState, Operation, OperationHeader, OperationMode, TcpGetSock, TcpStream, TmpAddr}; 11 | use crate::util::{from_io_error, io_err, Key}; 12 | use crate::TcpListenerHandle; 13 | 14 | use ::windows::Win32::Foundation::HANDLE; 15 | use ::windows::Win32::Networking::WinSock::{ 16 | GetAcceptExSockaddrs, AF_INET, AF_INET6, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, SOCKET, 17 | }; 18 | use ::windows::Win32::System::IO::OVERLAPPED; 19 | 20 | pub struct ListenerInner { 21 | pub(super) socket: net::TcpListener, 22 | } 23 | 24 | impl From for ListenerInner { 25 | fn from(socket: net::TcpListener) -> Self { 26 | Self { socket } 27 | } 28 | } 29 | 30 | pub struct TcpListener { 31 | idx: usize, 32 | state: BaseArc>, 33 | accept_idx: Option, 34 | local_addr: Option, 35 | } 36 | 37 | impl TcpListener { 38 | pub(super) fn register_listener( 39 | 
state_arc: &BaseArc>, 40 | listener: net::TcpListener, 41 | ) -> Self { 42 | let handle = HANDLE(listener.as_raw_socket() as _); 43 | let state = &mut *state_arc.lock(); 44 | let entry = state.listeners.vacant_entry(); 45 | let key = Key::TcpListener(entry.key()); 46 | let listener = ListenerInner::from(listener); 47 | 48 | log::trace!( 49 | "Register listener={:?} state={:?}: key={key:?}", 50 | listener.socket.as_raw_socket(), 51 | state_arc.as_ptr() 52 | ); 53 | 54 | entry.insert(listener); 55 | 56 | IocpState::register_handle(state.iocp, handle, key.key()).unwrap(); 57 | 58 | TcpListener { 59 | idx: key.idx(), 60 | state: state_arc.clone(), 61 | accept_idx: None, 62 | local_addr: None, 63 | } 64 | } 65 | } 66 | 67 | impl Drop for TcpListener { 68 | fn drop(&mut self) { 69 | let mut state = self.state.lock(); 70 | let v = state.listeners.remove(self.idx); 71 | 72 | log::trace!("Dropping {} {}", self.idx, v.socket.as_raw_socket()); 73 | } 74 | } 75 | 76 | impl TcpListenerHandle for TcpListener { 77 | type StreamHandle = TcpStream; 78 | 79 | fn local_addr(&self) -> mfio::error::Result { 80 | let state = self.state.lock(); 81 | let listener = state 82 | .listeners 83 | .get(self.idx) 84 | .ok_or_else(|| io_err(State::NotFound))?; 85 | listener.socket.local_addr().map_err(from_io_error) 86 | } 87 | } 88 | 89 | impl Stream for TcpListener { 90 | type Item = (TcpStream, SocketAddr); 91 | 92 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 93 | // SAFETY: we are not moving out of this future 94 | let this = unsafe { self.get_unchecked_mut() }; 95 | 96 | let state = &mut *this.state.lock(); 97 | 98 | loop { 99 | if let Some(idx) = this.accept_idx { 100 | if let Some(conn) = state.connections.get_mut(idx) { 101 | match conn.res.take() { 102 | Some(v) => { 103 | let v = v.ok().zip(conn.tmp_addr.take()); 104 | let _ = state.connections.remove(idx); 105 | this.accept_idx = None; 106 | if let Some((socket_idx, TmpAddr { addr })) = v { 107 | let mut local_addr: *mut SOCKADDR = core::ptr::null_mut(); 108 | let mut local_sockaddr_length = 0; 109 | let mut remote_addr: *mut SOCKADDR = core::ptr::null_mut(); 110 | let mut remote_sockaddr_length = 0; 111 | 112 | unsafe { 113 | GetAcceptExSockaddrs( 114 | addr.as_ptr().cast(), 115 | 0, 116 | TmpAddr::ADDR_LENGTH as _, 117 | TmpAddr::ADDR_LENGTH as _, 118 | &mut local_addr, 119 | &mut local_sockaddr_length, 120 | &mut remote_addr, 121 | &mut remote_sockaddr_length, 122 | ) 123 | }; 124 | 125 | // If anything is wrong here, we should panic. 
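// Byte-order note for the conversion below: Winsock stores `sin_port` /
// `sin6_port` in network byte order, so a host-order port would need to go
// through `u16::from_be`, the same way `CSockAddr::to_socket_addr` does in
// `windows_extra.rs`.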
126 | let addr = match unsafe { &*remote_addr.cast::() } 127 | .sa_family 128 | { 129 | AF_INET => { 130 | let addr = unsafe { &*remote_addr.cast::() }; 131 | SocketAddr::V4(SocketAddrV4::new( 132 | unsafe { addr.sin_addr.S_un.S_addr }.into(), 133 | addr.sin_port, 134 | )) 135 | } 136 | AF_INET6 => { 137 | let addr = unsafe { &*remote_addr.cast::() }; 138 | SocketAddr::V6(SocketAddrV6::new( 139 | unsafe { addr.sin6_addr.u.Word }.into(), 140 | addr.sin6_port, 141 | addr.sin6_flowinfo, 142 | unsafe { addr.Anonymous.sin6_scope_id }, 143 | )) 144 | } 145 | _ => unreachable!("invalid state reached {addr:?}"), 146 | }; 147 | 148 | let stream = TcpStream::new(socket_idx, this.state.clone()); 149 | 150 | return Poll::Ready(Some((stream, addr))); 151 | } else { 152 | continue; 153 | } 154 | } 155 | None => { 156 | conn.waker = Some(cx.waker().clone()); 157 | return Poll::Pending; 158 | } 159 | } 160 | } else { 161 | this.accept_idx = None; 162 | continue; 163 | } 164 | } else { 165 | if this.local_addr.is_none() { 166 | this.local_addr = state 167 | .listeners 168 | .get(this.idx) 169 | .and_then(|v| v.socket.local_addr().ok()); 170 | } 171 | 172 | let handle = HANDLE( 173 | state 174 | .listeners 175 | .get(this.idx) 176 | .unwrap() 177 | .socket 178 | .as_raw_socket() as _, 179 | ); 180 | 181 | // FIXME: If socket is gone, what shall we do here? 182 | let Some(local_addr) = this.local_addr else { 183 | return Poll::Ready(None); 184 | }; 185 | 186 | let Ok(sock) = TcpGetSock::new_for_accept(local_addr, state, cx.waker().clone()) 187 | else { 188 | return Poll::Ready(None); 189 | }; 190 | 191 | let idx = state.connections.insert(sock); 192 | 193 | this.accept_idx = Some(idx); 194 | // The invariant here is that we have an entry within connections - if we didn't, we 195 | // would have returned in the previous block. 196 | let conn = state.connections.get_mut(idx).unwrap(); 197 | 198 | let in_sock = SOCKET( 199 | state 200 | .streams 201 | .get(conn.socket_idx.unwrap()) 202 | .unwrap() 203 | .socket 204 | .as_raw_socket() as _, 205 | ); 206 | 207 | let hdr = OperationHeader { 208 | overlapped: OVERLAPPED { 209 | hEvent: HANDLE(state.event.as_raw_handle() as _), 210 | ..Default::default() 211 | }, 212 | idx: !0, 213 | handle, 214 | }; 215 | 216 | let operation = Operation { 217 | header: hdr.into(), 218 | mode: OperationMode::TcpAccept(TcpAccept { 219 | in_sock, 220 | conn_id: idx, 221 | tmp_addr: Default::default(), 222 | }), 223 | }; 224 | 225 | match unsafe { state.try_submit_op(operation) } { 226 | Ok(()) => { 227 | // Go to the next iteration of this loop, checking result. 
228 | continue; 229 | } 230 | Err(_) => return Poll::Pending, 231 | } 232 | } 233 | } 234 | } 235 | } 236 | 237 | pub(super) struct TcpAccept { 238 | pub in_sock: SOCKET, 239 | pub conn_id: usize, 240 | pub tmp_addr: TmpAddr, 241 | } 242 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/mio/tcp_listener.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::net::SocketAddr; 3 | use std::os::fd::{AsRawFd, RawFd}; 4 | 5 | use core::pin::Pin; 6 | use core::task::{Context, Poll, Waker}; 7 | 8 | use mio::{event::Source, unix::SourceFd, Interest, Registry, Token}; 9 | 10 | use mfio::error::State; 11 | use mfio::tarc::BaseArc; 12 | 13 | use super::TcpStream; 14 | use super::{BlockTrack, Key, MioState}; 15 | use crate::util::{from_io_error, io_err}; 16 | use crate::TcpListenerHandle; 17 | 18 | use futures::Stream; 19 | use mio::net; 20 | 21 | pub struct ListenerInner { 22 | fd: net::TcpListener, 23 | track: BlockTrack, 24 | poll_waker: Option, 25 | } 26 | 27 | impl AsRawFd for ListenerInner { 28 | fn as_raw_fd(&self) -> RawFd { 29 | self.fd.as_raw_fd() 30 | } 31 | } 32 | 33 | impl Source for ListenerInner { 34 | // Required methods 35 | fn register( 36 | &mut self, 37 | registry: &Registry, 38 | token: Token, 39 | interests: Interest, 40 | ) -> io::Result<()> { 41 | // TODO: do we need to not do this on error? 42 | self.track.cur_interests = Some(interests); 43 | registry.register(&mut SourceFd(&self.fd.as_raw_fd()), token, interests) 44 | } 45 | fn reregister( 46 | &mut self, 47 | registry: &Registry, 48 | token: Token, 49 | interests: Interest, 50 | ) -> io::Result<()> { 51 | self.track.cur_interests = Some(interests); 52 | registry.reregister(&mut SourceFd(&self.fd.as_raw_fd()), token, interests) 53 | } 54 | fn deregister(&mut self, registry: &Registry) -> io::Result<()> { 55 | self.track.cur_interests = None; 56 | registry.deregister(&mut SourceFd(&self.fd.as_raw_fd())) 57 | } 58 | } 59 | 60 | impl From for ListenerInner { 61 | fn from(fd: net::TcpListener) -> Self { 62 | Self { 63 | fd, 64 | track: Default::default(), 65 | //read: read::, 66 | //write: write::, 67 | poll_waker: None, 68 | } 69 | } 70 | } 71 | 72 | impl ListenerInner { 73 | pub fn update_interests(&mut self, key: usize, registry: &Registry) -> std::io::Result<()> { 74 | let expected_interests = self.track.expected_interests(); 75 | 76 | if self.track.cur_interests != expected_interests { 77 | if let Some(i) = expected_interests { 78 | if self.track.cur_interests.is_some() { 79 | self.reregister(registry, Token(key), i)?; 80 | } else { 81 | self.register(registry, Token(key), i)?; 82 | } 83 | } else { 84 | self.deregister(registry)?; 85 | } 86 | } 87 | 88 | Ok(()) 89 | } 90 | 91 | #[tracing::instrument(skip(self))] 92 | pub fn do_ops(&mut self, read: bool, write: bool) { 93 | log::trace!( 94 | "Do ops file={:?} read={read} write={write}", 95 | self.fd.as_raw_fd(), 96 | ); 97 | 98 | if let Some(waker) = self.poll_waker.take() { 99 | waker.wake(); 100 | } 101 | } 102 | } 103 | 104 | pub struct TcpListener { 105 | idx: usize, 106 | state: BaseArc, 107 | } 108 | 109 | impl TcpListener { 110 | pub(super) fn register_listener(state: &BaseArc, listener: net::TcpListener) -> Self { 111 | // TODO: make this portable 112 | let fd = listener.as_raw_fd(); 113 | super::set_nonblock(fd).unwrap(); 114 | 115 | let entry = state.listeners.vacant_entry().unwrap(); 116 | let key = Key::TcpListener(entry.key()); 117 | let listener 
= ListenerInner::from(listener); 118 | 119 | log::trace!( 120 | "Register listener={:?} state={:?}: key={key:?}", 121 | listener.as_raw_fd(), 122 | state.as_ptr() 123 | ); 124 | 125 | entry.insert(listener.into()); 126 | 127 | TcpListener { 128 | idx: key.idx(), 129 | state: state.clone(), 130 | } 131 | } 132 | } 133 | 134 | impl Drop for TcpListener { 135 | fn drop(&mut self) { 136 | let mut listener = self.state.listeners.take(self.idx).unwrap(); 137 | // TODO: what to do on error? 138 | let _ = self 139 | .state 140 | .poll 141 | .lock() 142 | .registry() 143 | .deregister(listener.get_mut()); 144 | } 145 | } 146 | 147 | impl TcpListenerHandle for TcpListener { 148 | type StreamHandle = TcpStream; 149 | 150 | fn local_addr(&self) -> mfio::error::Result { 151 | let listener = self 152 | .state 153 | .listeners 154 | .get(self.idx) 155 | .ok_or_else(|| io_err(State::NotFound))?; 156 | let listener = listener.lock(); 157 | listener.fd.local_addr().map_err(from_io_error) 158 | } 159 | } 160 | 161 | impl Stream for TcpListener { 162 | type Item = (TcpStream, SocketAddr); 163 | 164 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 165 | let this = unsafe { self.get_unchecked_mut() }; 166 | 167 | if let Some(inner) = this.state.listeners.get(this.idx) { 168 | let mut inner = inner.lock(); 169 | match inner.fd.accept() { 170 | Err(e) if e.kind() == io::ErrorKind::WouldBlock => { 171 | inner.track.read_blocked = true; 172 | inner.poll_waker = Some(cx.waker().clone()); 173 | this.state.opqueue.lock().push(Key::TcpListener(this.idx)); 174 | Poll::Pending 175 | } 176 | Ok((stream, addr)) => { 177 | log::trace!("Accept {addr} {}", stream.as_raw_fd()); 178 | let stream = TcpStream::register_stream(&this.state, stream); 179 | Poll::Ready(Some((stream, addr))) 180 | } 181 | Err(e) => { 182 | log::error!("Polling error: {e}"); 183 | let mut listener = this.state.listeners.take(this.idx).unwrap(); 184 | // TODO: what to do on error? 
185 | let _ = this 186 | .state 187 | .poll 188 | .lock() 189 | .registry() 190 | .deregister(listener.get_mut()); 191 | Poll::Ready(None) 192 | } 193 | } 194 | } else { 195 | Poll::Ready(None) 196 | } 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/mio/tcp_stream.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::{IoSlice, IoSliceMut}; 3 | use std::net::SocketAddr; 4 | use std::net::ToSocketAddrs; 5 | use std::os::fd::{AsRawFd, RawFd}; 6 | 7 | use core::future::Future; 8 | use core::pin::Pin; 9 | use core::task::{Context, Poll, Waker}; 10 | 11 | use mio::{event::Source, unix::SourceFd, Interest, Registry, Token}; 12 | 13 | use mfio::error::State; 14 | use mfio::io::{Read as RdPerm, Write as WrPerm, *}; 15 | use mfio::tarc::BaseArc; 16 | 17 | use super::super::unix_extra::set_nonblock; 18 | use super::{BlockTrack, Key, MioState}; 19 | use crate::util::{from_io_error, io_err, stream::StreamBuf}; 20 | use crate::{Shutdown, TcpStreamHandle}; 21 | 22 | use mio::net; 23 | 24 | pub struct StreamInner { 25 | fd: net::TcpStream, 26 | stream: StreamBuf, 27 | track: BlockTrack, 28 | poll_waker: Option, 29 | } 30 | 31 | impl AsRawFd for StreamInner { 32 | fn as_raw_fd(&self) -> RawFd { 33 | self.fd.as_raw_fd() 34 | } 35 | } 36 | 37 | impl Source for StreamInner { 38 | // Required methods 39 | fn register( 40 | &mut self, 41 | registry: &Registry, 42 | token: Token, 43 | interests: Interest, 44 | ) -> io::Result<()> { 45 | // TODO: do we need to not do this on error? 46 | self.track.cur_interests = Some(interests); 47 | registry.register(&mut SourceFd(&self.fd.as_raw_fd()), token, interests) 48 | } 49 | fn reregister( 50 | &mut self, 51 | registry: &Registry, 52 | token: Token, 53 | interests: Interest, 54 | ) -> io::Result<()> { 55 | self.track.cur_interests = Some(interests); 56 | registry.reregister(&mut SourceFd(&self.fd.as_raw_fd()), token, interests) 57 | } 58 | fn deregister(&mut self, registry: &Registry) -> io::Result<()> { 59 | self.track.cur_interests = None; 60 | registry.deregister(&mut SourceFd(&self.fd.as_raw_fd())) 61 | } 62 | } 63 | 64 | impl From for StreamInner { 65 | fn from(fd: net::TcpStream) -> Self { 66 | Self { 67 | fd, 68 | stream: StreamBuf::default(), 69 | track: Default::default(), 70 | //read: read::, 71 | //write: write::, 72 | poll_waker: None, 73 | } 74 | } 75 | } 76 | 77 | impl StreamInner { 78 | fn read(mut stream: &net::TcpStream, iov: &mut [IoSliceMut]) -> io::Result { 79 | use std::io::Read; 80 | stream.read_vectored(iov) 81 | } 82 | 83 | fn write(mut stream: &net::TcpStream, iov: &[IoSlice]) -> io::Result { 84 | use std::io::Write; 85 | stream.write_vectored(iov) 86 | } 87 | 88 | pub fn update_interests(&mut self, key: usize, registry: &Registry) -> std::io::Result<()> { 89 | let expected_interests = self.track.expected_interests(); 90 | 91 | if self.track.cur_interests != expected_interests { 92 | if let Some(i) = expected_interests { 93 | if self.track.cur_interests.is_some() { 94 | self.reregister(registry, Token(key), i)?; 95 | } else { 96 | self.register(registry, Token(key), i)?; 97 | } 98 | } else { 99 | self.deregister(registry)?; 100 | } 101 | } 102 | 103 | Ok(()) 104 | } 105 | 106 | pub fn cancel_all_ops(&mut self) { 107 | self.stream 108 | .on_read(Err(io::ErrorKind::Interrupted.into()), None); 109 | self.stream 110 | .on_write(Err(io::ErrorKind::Interrupted.into()), None); 111 | } 112 | 113 | 
#[tracing::instrument(skip(self))] 114 | pub fn do_ops(&mut self, read: bool, write: bool) { 115 | log::trace!( 116 | "Do ops file={:?} read={read} write={write} (to read={} to write={})", 117 | self.fd.as_raw_fd(), 118 | self.stream.read_ops(), 119 | self.stream.write_ops() 120 | ); 121 | 122 | if let Some(waker) = self.poll_waker.take() { 123 | waker.wake(); 124 | } 125 | 126 | if read || !self.track.read_blocked { 127 | while self.stream.read_ops() > 0 { 128 | let rd_span = 129 | tracing::span!(tracing::Level::TRACE, "read", ops = self.stream.read_ops()); 130 | let _span = rd_span.enter(); 131 | self.track.read_blocked = false; 132 | let queue = self.stream.read_queue(); 133 | if !queue.is_empty() { 134 | let res = Self::read(&self.fd, queue); 135 | 136 | if res 137 | .as_ref() 138 | .err() 139 | .map(|e| e.kind() != io::ErrorKind::WouldBlock) 140 | .unwrap_or(true) 141 | { 142 | self.stream.on_read(res, None); 143 | } else { 144 | tracing::event!(tracing::Level::INFO, "read blocked"); 145 | self.track.read_blocked = true; 146 | break; 147 | } 148 | } 149 | } 150 | } 151 | 152 | if write || !self.track.write_blocked { 153 | while self.stream.write_ops() > 0 { 154 | let wr_span = tracing::span!( 155 | tracing::Level::TRACE, 156 | "write", 157 | ops = self.stream.write_ops() 158 | ); 159 | let _span = wr_span.enter(); 160 | self.track.write_blocked = false; 161 | let queue = self.stream.write_queue(); 162 | if !queue.is_empty() { 163 | let res = Self::write(&self.fd, queue); 164 | 165 | if res 166 | .as_ref() 167 | .err() 168 | .map(|e| e.kind() != io::ErrorKind::WouldBlock) 169 | .unwrap_or(true) 170 | { 171 | self.stream.on_write(res, None); 172 | } else { 173 | tracing::event!(tracing::Level::INFO, "write blocked"); 174 | self.track.write_blocked = true; 175 | break; 176 | } 177 | } 178 | } 179 | } 180 | } 181 | 182 | pub fn on_queue(&mut self) { 183 | self.track.update_queued = false; 184 | self.do_ops(true, true); 185 | } 186 | } 187 | 188 | trait IntoOp: PacketPerms { 189 | fn push_op(stream: &mut StreamInner, pkt: BoundPacketView); 190 | } 191 | 192 | impl IntoOp for RdPerm { 193 | fn push_op(stream: &mut StreamInner, pkt: BoundPacketView) { 194 | stream.stream.queue_write(pkt, None); 195 | // we would normally attempt the operation right here, but that leads to overly high 196 | // syscall count. 197 | //stream.do_ops(false, false); 198 | } 199 | } 200 | 201 | impl IntoOp for WrPerm { 202 | fn push_op(stream: &mut StreamInner, pkt: BoundPacketView) { 203 | stream.stream.queue_read(pkt, None); 204 | // we would normally attempt the operation right here, but that leads to overly high 205 | // syscall count. 
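// Instead, `send_io` marks the stream in the backend's `opqueue`; the mio
// event loop later calls `on_queue`, which runs `do_ops` and flushes the
// pending operations in one batch per wakeup.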
206 | //stream.do_ops(true, false); 207 | } 208 | } 209 | 210 | impl PacketIo for TcpStream { 211 | fn send_io(&self, _: NoPos, packet: BoundPacketView) { 212 | let streams = self.state.streams.read(); 213 | let stream = streams.get(self.idx).unwrap(); 214 | let stream = &mut *stream.lock(); 215 | 216 | Perms::push_op(stream, packet); 217 | 218 | // This will trigger change in interests in the mio loop 219 | if !stream.track.update_queued { 220 | stream.track.update_queued = true; 221 | self.state.opqueue.lock().push(Key::Stream(self.idx)); 222 | } 223 | } 224 | } 225 | 226 | pub struct TcpStream { 227 | idx: usize, 228 | state: BaseArc, 229 | } 230 | 231 | impl TcpStream { 232 | pub(super) fn new(idx: usize, state: BaseArc) -> Self { 233 | Self { idx, state } 234 | } 235 | 236 | pub(super) fn register_stream(state: &BaseArc, stream: net::TcpStream) -> Self { 237 | // TODO: make this portable 238 | let fd = stream.as_raw_fd(); 239 | set_nonblock(fd).unwrap(); 240 | 241 | let streams = state.streams.read(); 242 | let entry = streams.vacant_entry().unwrap(); 243 | // 2N mapping, to accomodate for streams 244 | let key = Key::Stream(entry.key()); 245 | let stream = StreamInner::from(stream); 246 | 247 | log::trace!( 248 | "Register stream={:?} state={:?}: key={key:?}", 249 | stream.as_raw_fd(), 250 | state.as_ptr() 251 | ); 252 | 253 | entry.insert(stream.into()); 254 | 255 | TcpStream::new(key.idx(), state.clone()) 256 | } 257 | 258 | pub(super) fn tcp_connect<'a, A: ToSocketAddrs + Send + 'a>( 259 | backend: &'a BaseArc, 260 | addrs: A, 261 | ) -> TcpConnectFuture<'a, A> { 262 | TcpConnectFuture { 263 | backend, 264 | addrs: addrs.to_socket_addrs().ok(), 265 | idx: None, 266 | } 267 | } 268 | } 269 | 270 | impl Drop for TcpStream { 271 | fn drop(&mut self) { 272 | log::trace!("Drop {}", self.idx); 273 | let mut stream = self.state.streams.read().take(self.idx).unwrap(); 274 | // TODO: what to do on error? 
275 | let _ = self 276 | .state 277 | .poll 278 | .lock() 279 | .registry() 280 | .deregister(stream.get_mut()); 281 | } 282 | } 283 | 284 | impl TcpStreamHandle for TcpStream { 285 | fn local_addr(&self) -> mfio::error::Result { 286 | let streams = self.state.streams.read(); 287 | let stream = streams 288 | .get(self.idx) 289 | .ok_or_else(|| io_err(State::NotFound))?; 290 | let stream = stream.lock(); 291 | stream.fd.local_addr().map_err(from_io_error) 292 | } 293 | 294 | fn peer_addr(&self) -> mfio::error::Result { 295 | let streams = self.state.streams.read(); 296 | let stream = streams 297 | .get(self.idx) 298 | .ok_or_else(|| io_err(State::NotFound))?; 299 | let stream = stream.lock(); 300 | stream.fd.peer_addr().map_err(from_io_error) 301 | } 302 | 303 | fn shutdown(&self, how: Shutdown) -> mfio::error::Result<()> { 304 | let streams = self.state.streams.read(); 305 | let stream = streams 306 | .get(self.idx) 307 | .ok_or_else(|| io_err(State::NotFound))?; 308 | let stream = stream.lock(); 309 | stream.fd.shutdown(how.into()).map_err(from_io_error) 310 | } 311 | } 312 | 313 | pub struct TcpConnectFuture<'a, A: ToSocketAddrs + 'a> { 314 | backend: &'a BaseArc, 315 | addrs: Option, 316 | idx: Option, 317 | } 318 | 319 | impl<'a, A: ToSocketAddrs + 'a> Future for TcpConnectFuture<'a, A> { 320 | type Output = mfio::error::Result; 321 | 322 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 323 | // SAFETY: we are not moving out of this future 324 | let this = unsafe { self.get_unchecked_mut() }; 325 | 326 | loop { 327 | if let Some(idx) = this.idx.take() { 328 | if let Some(stream) = this.backend.streams.read().get(idx) { 329 | let mut stream = stream.lock(); 330 | if stream.fd.peer_addr().is_ok() { 331 | let wrapper = TcpStream::new(idx, this.backend.clone()); 332 | 333 | let ret = match stream.fd.take_error() { 334 | Ok(Some(e)) => Err(e), 335 | Err(e) => Err(e), 336 | Ok(None) => Ok(wrapper), 337 | }; 338 | 339 | // We want to continue to the next address if we were not successful 340 | if let Ok(ret) = ret { 341 | break Poll::Ready(Ok(ret)); 342 | } 343 | } else { 344 | stream.track.write_blocked = true; 345 | 346 | if stream 347 | .update_interests( 348 | Key::Stream(idx).key(), 349 | this.backend.poll.lock().registry(), 350 | ) 351 | .is_err() 352 | { 353 | let _ = TcpStream::new(idx, this.backend.clone()); 354 | continue; 355 | } 356 | stream.poll_waker = Some(cx.waker().clone()); 357 | this.idx = Some(idx); 358 | break Poll::Pending; 359 | } 360 | } else { 361 | break Poll::Ready(Err(io_err(State::NotFound))); 362 | } 363 | } else if let Some(addr) = this.addrs.as_mut().and_then(|v| v.next()) { 364 | let stream = net::TcpStream::connect(addr); 365 | 366 | if let Ok(stream) = stream { 367 | let streams = this.backend.streams.read(); 368 | let entry = streams.vacant_entry().unwrap(); 369 | // 2N mapping, to accomodate for streams 370 | let key = Key::Stream(entry.key()); 371 | let stream = StreamInner::from(stream); 372 | 373 | log::trace!( 374 | "Connect stream={:?} state={:?}: key={key:?}", 375 | stream.as_raw_fd(), 376 | this.backend.as_ptr() 377 | ); 378 | 379 | entry.insert(stream.into()); 380 | this.idx = Some(key.idx()); 381 | } 382 | } else { 383 | break Poll::Ready(Err(io_err(State::Exhausted))); 384 | } 385 | } 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(unix)] 2 | mod unix_extra; 
3 | 4 | #[cfg(windows)] 5 | mod windows_extra; 6 | 7 | pub mod thread; 8 | 9 | #[cfg(all(not(miri), target_os = "linux", feature = "io-uring"))] 10 | pub mod io_uring; 11 | 12 | #[cfg(all(not(miri), target_os = "windows", feature = "iocp"))] 13 | pub mod iocp; 14 | 15 | #[cfg(all(not(miri), unix, feature = "mio"))] 16 | pub mod mio; 17 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/unix_extra.rs: -------------------------------------------------------------------------------- 1 | use nix::libc; 2 | use nix::sys::socket::*; 3 | use std::net::SocketAddr; 4 | use std::os::fd::RawFd; 5 | 6 | // From mio 7 | #[allow(dead_code)] 8 | pub(crate) fn new_for_addr( 9 | address: SocketAddr, 10 | nonblock: bool, 11 | ) -> std::io::Result<(AddressFamily, RawFd)> { 12 | let domain = match address { 13 | SocketAddr::V4(_) => AddressFamily::Inet, 14 | SocketAddr::V6(_) => AddressFamily::Inet6, 15 | }; 16 | new_socket(domain, SockType::Stream, nonblock).map(|v| (domain, v)) 17 | } 18 | 19 | /// Create a new non-blocking socket. 20 | pub(crate) fn new_socket( 21 | domain: AddressFamily, 22 | socket_type: SockType, 23 | nonblock: bool, 24 | ) -> std::io::Result { 25 | #[cfg(any( 26 | target_os = "android", 27 | target_os = "dragonfly", 28 | target_os = "freebsd", 29 | target_os = "illumos", 30 | target_os = "linux", 31 | target_os = "netbsd", 32 | target_os = "openbsd", 33 | ))] 34 | let flags = SockFlag::SOCK_CLOEXEC.union(SockFlag::from_bits_truncate(if nonblock { 35 | libc::SOCK_NONBLOCK 36 | } else { 37 | 0 38 | })); 39 | #[cfg(not(any( 40 | target_os = "android", 41 | target_os = "dragonfly", 42 | target_os = "freebsd", 43 | target_os = "illumos", 44 | target_os = "linux", 45 | target_os = "netbsd", 46 | target_os = "openbsd", 47 | )))] 48 | let flags = SockFlag::empty(); 49 | 50 | let fd = socket(domain, socket_type, flags, None)?; 51 | 52 | // Mimick `libstd` and set `SO_NOSIGPIPE` on apple systems. 53 | #[cfg(any( 54 | target_os = "ios", 55 | target_os = "macos", 56 | target_os = "tvos", 57 | target_os = "watchos", 58 | ))] 59 | if let Err(e) = nix::errno::Errno::result(unsafe { 60 | libc::setsockopt( 61 | fd, 62 | libc::SOL_SOCKET, 63 | libc::SO_NOSIGPIPE, 64 | &1 as *const libc::c_int as *const libc::c_void, 65 | core::mem::size_of::() as libc::socklen_t, 66 | ) 67 | }) { 68 | let _ = nix::unistd::close(fd); 69 | return Err(e.into()); 70 | } 71 | 72 | // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC. 
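// On those targets the equivalent behaviour is applied after `socket()`
// returns when `nonblock` is requested: `O_NONBLOCK` via `set_nonblock` and
// `FD_CLOEXEC` via `fcntl(F_SETFD)`, closing the fd again if either call fails.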
73 | #[cfg(any( 74 | target_os = "ios", 75 | target_os = "macos", 76 | target_os = "tvos", 77 | target_os = "watchos", 78 | ))] 79 | if nonblock { 80 | use nix::fcntl::*; 81 | 82 | if let Err(e) = set_nonblock(fd) { 83 | let _ = nix::unistd::close(fd); 84 | return Err(e.into()); 85 | } 86 | 87 | if let Err(e) = fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)) { 88 | let _ = nix::unistd::close(fd); 89 | return Err(e.into()); 90 | } 91 | } 92 | 93 | Ok(fd) 94 | } 95 | 96 | pub(crate) fn set_nonblock(fd: RawFd) -> std::result::Result<(), nix::errno::Errno> { 97 | use nix::fcntl::*; 98 | 99 | let flags = fcntl(fd, FcntlArg::F_GETFL)?; 100 | fcntl( 101 | fd, 102 | FcntlArg::F_SETFL(OFlag::from_bits_truncate(flags).union(OFlag::O_NONBLOCK)), 103 | )?; 104 | 105 | Ok(()) 106 | } 107 | -------------------------------------------------------------------------------- /mfio-rt/src/native/impls/windows_extra.rs: -------------------------------------------------------------------------------- 1 | use core::ffi::c_void; 2 | use once_cell::sync::OnceCell; 3 | use std::io; 4 | 5 | use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; 6 | use std::sync::Once; 7 | 8 | use windows::core::GUID; 9 | use windows::Win32::Foundation::{TRUE, WIN32_ERROR}; 10 | use windows::Win32::Networking::WinSock::{ 11 | bind, closesocket, ioctlsocket, socket, WSAGetLastError, WSAIoctl, AF_INET, AF_INET6, FIONBIO, 12 | LPFN_CONNECTEX, SIO_GET_EXTENSION_FUNCTION_POINTER, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, 13 | SOCKET, WINSOCK_SOCKET_TYPE, WSAID_CONNECTEX, 14 | }; 15 | use windows::Win32::System::IO::OVERLAPPED; 16 | 17 | #[repr(C)] 18 | #[derive(Clone, Copy)] 19 | pub(crate) union CSockAddr { 20 | pub generic: SOCKADDR, 21 | pub ipv4: SOCKADDR_IN, 22 | pub ipv6: SOCKADDR_IN6, 23 | } 24 | 25 | impl CSockAddr { 26 | pub fn addr_len(&self) -> usize { 27 | match unsafe { self.generic.sa_family } { 28 | AF_INET => core::mem::size_of::(), 29 | AF_INET6 => core::mem::size_of::(), 30 | _ => unreachable!(), 31 | } 32 | } 33 | 34 | pub fn to_socket_addr(self) -> SocketAddr { 35 | match unsafe { self.generic.sa_family } { 36 | AF_INET => SocketAddr::V4(SocketAddrV4::new( 37 | u32::from_be(unsafe { self.ipv4.sin_addr.S_un.S_addr }).into(), 38 | u16::from_be(unsafe { self.ipv4.sin_port }), 39 | )), 40 | AF_INET6 => SocketAddr::V6(SocketAddrV6::new( 41 | unsafe { self.ipv6.sin6_addr.u.Word }.into(), 42 | u16::from_be(unsafe { self.ipv6.sin6_port }), 43 | u32::from_be(unsafe { self.ipv6.sin6_flowinfo }), 44 | unsafe { self.ipv6.Anonymous.sin6_scope_id }, 45 | )), 46 | _ => unreachable!(), 47 | } 48 | } 49 | } 50 | 51 | impl From for CSockAddr { 52 | fn from(a: SocketAddr) -> Self { 53 | match a { 54 | SocketAddr::V4(a) => Self { ipv4: a.into() }, 55 | SocketAddr::V6(a) => Self { ipv6: a.into() }, 56 | } 57 | } 58 | } 59 | 60 | /// Initialise the network stack for Windows. 61 | fn init() { 62 | static INIT: Once = Once::new(); 63 | INIT.call_once(|| { 64 | // Let standard library call `WSAStartup` for us, we can't do it 65 | // ourselves because otherwise using any type in `std::net` would panic 66 | // when it tries to call `WSAStartup` a second time. 67 | drop(std::net::UdpSocket::bind("127.0.0.1:0")); 68 | }); 69 | } 70 | 71 | /// Create a new non-blocking socket. 72 | pub(crate) fn new_ip_socket( 73 | addr: SocketAddr, 74 | socket_type: WINSOCK_SOCKET_TYPE, 75 | ) -> io::Result { 76 | let domain = match addr { 77 | SocketAddr::V4(..) => AF_INET, 78 | SocketAddr::V6(..) 
=> AF_INET6, 79 | }; 80 | 81 | new_socket(domain.0.into(), socket_type) 82 | } 83 | 84 | pub(crate) fn new_socket(domain: u32, socket_type: WINSOCK_SOCKET_TYPE) -> io::Result { 85 | init(); 86 | 87 | let socket = unsafe { socket(domain as i32, socket_type, 0) }; 88 | 89 | let ret = unsafe { ioctlsocket(socket, FIONBIO, &mut 1) }; 90 | 91 | if ret != 0 { 92 | let _ = unsafe { closesocket(socket) }; 93 | return Err(io::Error::from_raw_os_error(ret)); 94 | } 95 | 96 | Ok(socket as SOCKET) 97 | } 98 | 99 | unsafe fn get_wsa_fn(socket: SOCKET, guid: GUID) -> ::windows::core::Result<*const ()> { 100 | init(); 101 | 102 | let mut ptr = core::ptr::null(); 103 | let mut ret = 0; 104 | let r = WSAIoctl( 105 | socket, 106 | SIO_GET_EXTENSION_FUNCTION_POINTER, 107 | Some((&guid as *const GUID).cast()), 108 | core::mem::size_of::() as _, 109 | Some(&mut ptr as *mut _ as *mut c_void), 110 | core::mem::size_of_val(&ptr) as _, 111 | &mut ret, 112 | None, 113 | None, 114 | ); 115 | 116 | if r != 0 { 117 | // TODO: is this correct? 118 | Err(WIN32_ERROR(WSAGetLastError().0 as _).into()) 119 | } else { 120 | Ok(ptr) 121 | } 122 | } 123 | 124 | pub(crate) fn bind_any(socket: SOCKET, dst_addr_type: SocketAddr) -> ::windows::core::Result<()> { 125 | let addr = match dst_addr_type { 126 | SocketAddr::V4(..) => CSockAddr { 127 | ipv4: SOCKADDR_IN { 128 | sin_family: AF_INET, 129 | // INADDR_ANY is just zeroes 130 | ..Default::default() 131 | }, 132 | }, 133 | SocketAddr::V6(..) => CSockAddr { 134 | ipv6: SOCKADDR_IN6 { 135 | sin6_family: AF_INET6, 136 | // in6addr_any is just zeroes 137 | ..Default::default() 138 | }, 139 | }, 140 | }; 141 | 142 | if unsafe { 143 | bind( 144 | socket, 145 | (&addr as *const CSockAddr).cast(), 146 | addr.addr_len() as _, 147 | ) 148 | } != 0 149 | { 150 | // TODO: is this correct? 151 | Err(WIN32_ERROR(unsafe { WSAGetLastError() }.0 as _).into()) 152 | } else { 153 | Ok(()) 154 | } 155 | } 156 | 157 | pub(crate) unsafe fn connect_ex( 158 | socket: SOCKET, 159 | name: *const SOCKADDR, 160 | name_len: i32, 161 | send_buffer: *const c_void, 162 | send_data_length: u32, 163 | bytes_sent: *mut u32, 164 | overlapped: *mut OVERLAPPED, 165 | ) -> ::windows::core::Result<()> { 166 | static CONNECT_EX: OnceCell = OnceCell::new(); 167 | 168 | let connect_ex = CONNECT_EX.get_or_try_init(|| unsafe { 169 | get_wsa_fn(socket, WSAID_CONNECTEX).map(|v| core::mem::transmute(v)) 170 | })?; 171 | 172 | let connect_ex = connect_ex.unwrap(); 173 | 174 | log::trace!("ConnectEx @ {connect_ex:?} ({socket:?}, {name:?}, {name_len}, {send_buffer:?}, {send_data_length}, {bytes_sent:?}, {overlapped:?})"); 175 | 176 | if connect_ex( 177 | socket, 178 | name, 179 | name_len, 180 | send_buffer, 181 | send_data_length, 182 | bytes_sent, 183 | overlapped, 184 | ) == TRUE 185 | { 186 | Ok(()) 187 | } else { 188 | // TODO: is this correct? 
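// (`WSAGetLastError` yields a WSA* code that lives in the regular Win32
// system error space, so wrapping it in `WIN32_ERROR` and converting into a
// `windows::core::Error` should produce a meaningful error value.)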
189 | Err(WIN32_ERROR(WSAGetLastError().0 as _).into()) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /mfio-rt/src/util.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), allow(dead_code))] 2 | 3 | use crate::{Component, Path, PathBuf}; 4 | use alloc::{boxed::Box, vec, vec::Vec}; 5 | use core::mem::MaybeUninit; 6 | use mfio::error::{Error, Location, State, Subject, INTERNAL_ERROR}; 7 | use mfio::io::*; 8 | 9 | #[cfg(feature = "std")] 10 | pub mod stream; 11 | 12 | /// Compute path difference 13 | /// 14 | /// This was taken from `pathdiff` crate, but made compatible with `typed-path` paths. 15 | pub fn diff_paths(path: P, base: B) -> Option 16 | where 17 | P: AsRef, 18 | B: AsRef, 19 | { 20 | let path = path.as_ref(); 21 | let base = base.as_ref(); 22 | 23 | if path.is_absolute() != base.is_absolute() { 24 | if path.is_absolute() { 25 | Some(PathBuf::from(path)) 26 | } else { 27 | None 28 | } 29 | } else { 30 | let mut ita = path.components(); 31 | let mut itb = base.components(); 32 | let mut comps: Vec = vec![]; 33 | loop { 34 | match (ita.next(), itb.next()) { 35 | (None, None) => break, 36 | (Some(a), None) => { 37 | comps.push(a); 38 | comps.extend(ita.by_ref()); 39 | break; 40 | } 41 | (None, _) => comps.push(Component::ParentDir), 42 | (Some(a), Some(b)) if comps.is_empty() && a == b => (), 43 | (Some(a), Some(Component::CurDir)) => comps.push(a), 44 | (Some(_), Some(Component::ParentDir)) => return None, 45 | (Some(a), Some(_)) => { 46 | comps.push(Component::ParentDir); 47 | for _ in itb { 48 | comps.push(Component::ParentDir); 49 | } 50 | comps.push(a); 51 | comps.extend(ita.by_ref()); 52 | break; 53 | } 54 | } 55 | } 56 | 57 | Some( 58 | comps 59 | .iter() 60 | .map(|c| { 61 | #[cfg(feature = "std")] 62 | let r = c.as_os_str(); 63 | #[cfg(not(feature = "std"))] 64 | let r: &[u8] = c.as_ref(); 65 | r 66 | }) 67 | .collect(), 68 | ) 69 | } 70 | } 71 | 72 | pub fn path_filename_str(path: &Path) -> Option<&str> { 73 | let filename = path.file_name()?; 74 | #[cfg(feature = "std")] 75 | let filename = filename.to_str()?; 76 | #[cfg(not(feature = "std"))] 77 | let filename = core::str::from_utf8(filename).ok()?; 78 | 79 | Some(filename) 80 | } 81 | 82 | pub fn io_err(state: State) -> Error { 83 | Error { 84 | code: INTERNAL_ERROR, 85 | location: Location::Backend, 86 | subject: Subject::Io, 87 | state, 88 | } 89 | } 90 | 91 | #[cfg(feature = "std")] 92 | pub fn from_io_error(err: std::io::Error) -> Error { 93 | io_err(err.kind().into()) 94 | } 95 | 96 | #[derive(Default)] 97 | pub struct DeferredPackets { 98 | packets: Vec<(AnyPacket, Option)>, 99 | } 100 | 101 | impl Drop for DeferredPackets { 102 | fn drop(&mut self) { 103 | self.flush(); 104 | } 105 | } 106 | 107 | impl DeferredPackets { 108 | pub fn ok(&mut self, p: impl Into) { 109 | self.packets.push((p.into(), None)); 110 | } 111 | 112 | pub fn error(&mut self, p: impl Into, err: Error) { 113 | self.packets.push((p.into(), Some(err))) 114 | } 115 | 116 | pub fn flush(&mut self) { 117 | self.packets 118 | .drain(0..) 
119 | .filter_map(|(p, e)| Some(p).zip(e)) 120 | .for_each(|(p, e)| p.error(e)); 121 | } 122 | } 123 | 124 | #[repr(transparent)] 125 | pub struct RawBox(pub(crate) *mut [MaybeUninit]); 126 | 127 | impl RawBox { 128 | #[allow(dead_code)] 129 | pub fn null() -> Self { 130 | Self(unsafe { core::mem::MaybeUninit::zeroed().assume_init() }) 131 | } 132 | } 133 | 134 | unsafe impl Send for RawBox {} 135 | unsafe impl Sync for RawBox {} 136 | 137 | impl Drop for RawBox { 138 | fn drop(&mut self) { 139 | if !self.0.is_null() { 140 | let _ = unsafe { Box::from_raw(self.0) }; 141 | } 142 | } 143 | } 144 | 145 | #[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)] 146 | pub enum Key { 147 | File(usize), 148 | Stream(usize), 149 | TcpListener(usize), 150 | } 151 | 152 | const NUM_KEYS: usize = 3; 153 | 154 | impl From for Key { 155 | fn from(raw: usize) -> Self { 156 | let idx = raw / NUM_KEYS; 157 | match raw % NUM_KEYS { 158 | 0 => Self::File(idx), 159 | 1 => Self::Stream(idx), 160 | 2 => Self::TcpListener(idx), 161 | _ => unreachable!(), 162 | } 163 | } 164 | } 165 | 166 | impl Key { 167 | pub fn idx(self) -> usize { 168 | match self { 169 | Self::File(v) => v, 170 | Self::Stream(v) => v, 171 | Self::TcpListener(v) => v, 172 | } 173 | } 174 | 175 | pub fn key(self) -> usize { 176 | match self { 177 | Self::File(v) => v * NUM_KEYS, 178 | Self::Stream(v) => v * NUM_KEYS + 1, 179 | Self::TcpListener(v) => v * NUM_KEYS + 2, 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /mfio/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mfio" 3 | version = "0.1.0" 4 | rust-version = "1.84" 5 | edition = "2021" 6 | authors = ["Aurimas Blažulionis <0x60@pm.me>"] 7 | license = "MIT" 8 | repository = "https://github.com/memflow/mfio" 9 | documentation = "https://docs.rs/mfio" 10 | description = "Flexible completion I/O primitives" 11 | keywords = [ "mfio", "memflow", "async", "completion", "io" ] 12 | categories = [ "asynchronous", "no-std", "concurrency" ] 13 | readme = "../README.md" 14 | 15 | [package.metadata.docs.rs] 16 | features = ["default", "tokio", "async-io"] 17 | rustdoc-args = ["--cfg", "docsrs"] 18 | 19 | [lib] 20 | bench = false 21 | 22 | [[bench]] 23 | name = "main" 24 | harness = false 25 | 26 | [dependencies] 27 | futures = { version = "0.3", default-features = false } 28 | parking_lot = { version = "0.12", optional = true } 29 | pin-project = "1.0" 30 | tarc = { version = "~0.1.6", default-features = false, features = ["unwind"] } 31 | bytemuck = "1" 32 | cglue = { version = "~0.3.5", features = ["task"], default-features = false } 33 | abi_stable = { version = "0.11", default-features = false, optional = true, features = ["rust_1_84"] } 34 | #abi_stable22 = { package = "abi_stable", version = "0.11", features = ["rust_1_84"] } 35 | mfio-derive = { version = "0.1", path = "../mfio-derive" } 36 | http = { version = "0.2", optional = true } 37 | log = "0.4" 38 | rangemap = "1" 39 | num = { version = "0.4", default-features = false } 40 | atomic-traits = { version = "0.3", default-features = false } 41 | # This is only needed when std feature is disabled, but we can't do negative bounds 42 | spin = "0.9" 43 | serde_json = { version = "1.0.79", features = ["raw_value"] } 44 | 45 | 46 | [target.'cfg(unix)'.dependencies] 47 | nix = { version = "0.26", features = ["poll"] } 48 | async-io = { version = "2", optional = true } 49 | 50 | [target.'cfg(all(unix, 
not(miri)))'.dependencies] 51 | tokio = { version = "1", optional = true, features = ["net"] } 52 | 53 | [target.'cfg(windows)'.dependencies] 54 | windows-sys = { version = "0.48", features = ["Win32_System_Threading", "Win32_Foundation", "Win32_Security"] } 55 | 56 | [dev-dependencies] 57 | tokio = { version = "1.24", features = ["rt", "macros", "rt-multi-thread"] } 58 | smol = "1" 59 | criterion = { version = "0.5", git = "https://github.com/h33p/criterion.rs", branch = "tput2", features = ["async_tokio", "async_smol", "async_futures"] } 60 | pollster = "0.2" 61 | bytemuck = { version = "1", features = ["derive"] } 62 | 63 | [features] 64 | default = ["std", "http", "cglue/unwind_abi_ext", "cglue/abi_stable11", "cglue/abi_stable"] 65 | std = ["parking_lot", "futures/std"] 66 | cglue-trait = [] 67 | cglue-trait-layout-checks = ["cglue/layout_checks", "cglue-trait", "abi_stable", "cglue/abi_stable11"] 68 | -------------------------------------------------------------------------------- /mfio/examples/sample.rs: -------------------------------------------------------------------------------- 1 | use mfio::backend::*; 2 | use mfio::io::*; 3 | use std::time::{Duration, Instant}; 4 | 5 | use sample::*; 6 | 7 | mod sample { 8 | include!("../src/sample.rs"); 9 | } 10 | 11 | fn black_box(dummy: T) -> T { 12 | unsafe { 13 | let ret = std::ptr::read_volatile(&dummy); 14 | std::mem::forget(dummy); 15 | ret 16 | } 17 | } 18 | 19 | fn bench(size: usize, iters: usize) -> Duration { 20 | let handle = SampleIo::default(); 21 | 22 | handle.block_on(async { 23 | let bufs = (0..size) 24 | .map(|_| Packet::::new_buf(1)) 25 | .collect::>(); 26 | 27 | let start = Instant::now(); 28 | 29 | for _ in 0..iters { 30 | for b in &bufs { 31 | unsafe { b.reset_err() }; 32 | let pv = PacketView::from_arc_ref(b, 0); 33 | let bpv = unsafe { pv.bind(None) }; 34 | handle.send_io(0, bpv); 35 | } 36 | 37 | for b in &bufs { 38 | black_box(&**b).await; 39 | } 40 | } 41 | 42 | start.elapsed() 43 | }) 44 | } 45 | 46 | fn main() { 47 | let mut args = std::env::args().skip(1); 48 | let size = args.next(); 49 | let size = size.as_deref().unwrap_or("256").parse().unwrap(); 50 | let iters = args.next(); 51 | let iters: usize = iters.as_deref().unwrap_or("100000").parse().unwrap(); 52 | 53 | let time = bench(size, iters / size); 54 | println!("Time: {time:?}"); 55 | } 56 | -------------------------------------------------------------------------------- /mfio/src/backend/fd.rs: -------------------------------------------------------------------------------- 1 | //! File descriptor based waker. 2 | //! 3 | //! [`FdWaker`] allows one to wake a runtime by pushing a write operation to the underlying file 4 | //! descriptor, be it a pipe, eventfd, or anything else that can be pollable for readability. 5 | //! 6 | //! Create a [`FdWakerOwner`] from a [`AsRawFd`] object to allow controlling the waker properties. 7 | 8 | use core::mem::ManuallyDrop; 9 | use core::sync::atomic::{AtomicU8, Ordering}; 10 | use core::task::{RawWaker, RawWakerVTable, Waker}; 11 | use std::fs::File; 12 | use std::io::{ErrorKind, Write}; 13 | use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd}; 14 | use tarc::{Arc, BaseArc}; 15 | 16 | /// Owner of [`FdWaker`]s. 17 | /// 18 | /// When this type gets dropped, the underlying file descriptor gets closed and released. This 19 | /// effectively breaks all remaining wakers, however, the references to them stay valid. 
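/// Waking through such a stale waker is a no-op: the shared flag word records that the
/// descriptor was closed, so no write is attempted.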
20 | /// 21 | /// # Examples 22 | /// 23 | /// Poll for the pipe to become readable: 24 | /// 25 | /// ``` 26 | /// # #[cfg(miri)] 27 | /// # fn main() { } 28 | /// # #[cfg(not(miri))] 29 | /// # fn main() { 30 | /// use mfio::backend::fd::FdWakerOwner; 31 | /// use nix::poll::*; 32 | /// 33 | /// let (wake_read, wake_write) = nix::unistd::pipe().unwrap(); 34 | /// 35 | /// let waker_owner = FdWakerOwner::from(wake_write); 36 | /// 37 | /// std::thread::spawn({ 38 | /// let waker = waker_owner.clone().into_waker(); 39 | /// move || { 40 | /// std::thread::sleep(std::time::Duration::from_millis(500)); 41 | /// waker.wake(); 42 | /// } 43 | /// }); 44 | /// 45 | /// let mut fd = [PollFd::new(wake_read, PollFlags::POLLIN)]; 46 | /// assert_ne!(0, poll(&mut fd[..], 5000).unwrap()); 47 | /// 48 | /// // Let's verify that we did indeed get woken up. 49 | /// assert!(fd[0].revents().unwrap().contains(PollFlags::POLLIN)); 50 | /// # } 51 | /// ``` 52 | #[repr(transparent)] 53 | pub struct FdWakerOwner(FdWaker); 54 | 55 | impl Drop for FdWakerOwner { 56 | fn drop(&mut self) { 57 | self.0.close() 58 | } 59 | } 60 | 61 | impl From for FdWakerOwner { 62 | fn from(fd: F) -> Self { 63 | Self(FdWaker( 64 | BaseArc::new(FdWakerInner { 65 | fd: ManuallyDrop::new(fd), 66 | flags: Default::default(), 67 | }) 68 | .into_raw(), 69 | )) 70 | } 71 | } 72 | 73 | impl core::ops::Deref for FdWakerOwner { 74 | type Target = FdWaker; 75 | 76 | fn deref(&self) -> &Self::Target { 77 | &self.0 78 | } 79 | } 80 | 81 | /// An eventfd/pipe backed waker. 82 | /// 83 | /// This waker simply writes a 8 byte value (little endian 1) to the provided file descriptor upon 84 | /// wakeup. Thus, this waking mechanism is not limited to just eventfd or pipes. 85 | #[repr(transparent)] 86 | pub struct FdWaker(*const FdWakerInner); 87 | 88 | unsafe impl Send for FdWaker {} 89 | unsafe impl Sync for FdWaker {} 90 | 91 | impl Clone for FdWaker { 92 | fn clone(&self) -> Self { 93 | unsafe { 94 | BaseArc::increment_strong_count(self.0); 95 | } 96 | Self(self.0) 97 | } 98 | } 99 | 100 | impl Drop for FdWaker { 101 | fn drop(&mut self) { 102 | unsafe { 103 | BaseArc::decrement_strong_count(self.0); 104 | } 105 | } 106 | } 107 | 108 | impl FdWaker { 109 | pub fn flags(&self) -> Arc { 110 | unsafe { 111 | BaseArc::increment_strong_count(self.0); 112 | } 113 | unsafe { BaseArc::from_raw(self.0) }.transpose() 114 | } 115 | 116 | pub fn wake_by_ref(&self) { 117 | let inner = unsafe { &*self.0 }; 118 | let flags = inner.flags.fetch_or(0b1, Ordering::AcqRel); 119 | log::trace!("Flags {flags:b}"); 120 | if flags & 0b111 == 0 { 121 | let mut f = unsafe { File::from_raw_fd(inner.fd.as_raw_fd()) }; 122 | match f.write_all(&1u64.to_ne_bytes()) { 123 | Ok(()) => (), 124 | Err(e) if e.kind() == ErrorKind::BrokenPipe => (), 125 | Err(e) => panic!("Could not wake the waker up ({e:?})"), 126 | } 127 | let _ = f.into_raw_fd(); 128 | } 129 | } 130 | 131 | pub fn close(&self) { 132 | let inner = unsafe { &*self.0 }; 133 | if inner.flags.fetch_or(0b100, Ordering::AcqRel) & 0b100 == 0 { 134 | // SAFETY: we are attesting exclusive access to the 135 | let fd = unsafe { &mut (*self.0.cast_mut()).fd }; 136 | unsafe { ManuallyDrop::drop(fd) } 137 | } 138 | } 139 | 140 | pub fn into_raw_waker(self) -> RawWaker { 141 | let data: *const () = unsafe { core::mem::transmute(self) }; 142 | let vtbl = &RawWakerVTable::new( 143 | Self::raw_clone, 144 | Self::raw_wake, 145 | Self::raw_wake_by_ref, 146 | Self::raw_drop, 147 | ); 148 | RawWaker::new(data, vtbl) 149 | } 150 | 151 
| pub fn into_waker(self) -> Waker { 152 | unsafe { Waker::from_raw(self.into_raw_waker()) } 153 | } 154 | 155 | unsafe fn raw_wake(data: *const ()) { 156 | let waker = core::ptr::read((&data as *const _) as *const Self); 157 | waker.wake_by_ref() 158 | } 159 | 160 | unsafe fn raw_wake_by_ref(data: *const ()) { 161 | let waker: &Self = &*((&data as *const _) as *const Self); 162 | waker.wake_by_ref() 163 | } 164 | 165 | unsafe fn raw_clone(data: *const ()) -> RawWaker { 166 | let waker: &Self = &*((&data as *const _) as *const Self); 167 | waker.clone().into_raw_waker() 168 | } 169 | 170 | unsafe fn raw_drop(data: *const ()) { 171 | core::ptr::drop_in_place((&data as *const _) as *const Self as *mut Self) 172 | } 173 | } 174 | 175 | struct FdWakerInner { 176 | fd: ManuallyDrop, 177 | flags: AtomicU8, 178 | } 179 | 180 | impl Drop for FdWakerInner { 181 | fn drop(&mut self) { 182 | if *self.flags.get_mut() & 0b100 == 0 { 183 | unsafe { ManuallyDrop::drop(&mut self.fd) } 184 | } 185 | } 186 | } 187 | 188 | impl AsRef for FdWakerInner { 189 | fn as_ref(&self) -> &AtomicU8 { 190 | &self.flags 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /mfio/src/backend/handle.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::{AtomicU8, Ordering}; 2 | use core::task::{RawWaker, RawWakerVTable, Waker}; 3 | 4 | use std::os::windows::io::{AsRawHandle, FromRawHandle, OwnedHandle, RawHandle}; 5 | use tarc::{Arc, BaseArc}; 6 | 7 | use windows_sys::Win32::System::Threading::{CreateEventA, ResetEvent, SetEvent}; 8 | 9 | #[repr(transparent)] 10 | pub struct EventWakerOwner(EventWaker); 11 | 12 | impl Drop for EventWakerOwner { 13 | fn drop(&mut self) { 14 | self.0.close() 15 | } 16 | } 17 | 18 | impl EventWakerOwner { 19 | pub fn new() -> Option { 20 | let handle = unsafe { CreateEventA(core::ptr::null(), 1, 0, core::ptr::null()) }; 21 | 22 | let handle = if handle == 0 { 23 | return None; 24 | } else { 25 | unsafe { OwnedHandle::from_raw_handle(handle as _) } 26 | }; 27 | 28 | Some(Self(EventWaker( 29 | BaseArc::new(EventWakerInner { 30 | handle, 31 | flags: Default::default(), 32 | }) 33 | .into_raw(), 34 | ))) 35 | } 36 | 37 | pub fn clear(&self) -> Option<()> { 38 | let inner = unsafe { &*self.0 .0 }; 39 | if unsafe { ResetEvent(inner.handle.as_raw_handle() as _) } != 0 { 40 | Some(()) 41 | } else { 42 | None 43 | } 44 | } 45 | } 46 | 47 | impl core::ops::Deref for EventWakerOwner { 48 | type Target = EventWaker; 49 | 50 | fn deref(&self) -> &Self::Target { 51 | &self.0 52 | } 53 | } 54 | 55 | /// An Event backed waker. 56 | /// 57 | /// This waker simply signals an event handle, which can be awaited for externally. 
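///
/// A shared `AtomicU8` tracks the waker state: `wake_by_ref` sets bit 0 and only calls
/// `SetEvent` if none of the low three flag bits were already set, while `close` sets bit 2 so
/// that later wakeups become no-ops without touching the event handle.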
58 | #[repr(transparent)] 59 | pub struct EventWaker(*const EventWakerInner); 60 | 61 | unsafe impl Send for EventWaker {} 62 | unsafe impl Sync for EventWaker {} 63 | 64 | impl Clone for EventWaker { 65 | fn clone(&self) -> Self { 66 | unsafe { 67 | BaseArc::increment_strong_count(self.0); 68 | } 69 | Self(self.0) 70 | } 71 | } 72 | 73 | impl Drop for EventWaker { 74 | fn drop(&mut self) { 75 | unsafe { 76 | BaseArc::decrement_strong_count(self.0); 77 | } 78 | } 79 | } 80 | 81 | impl EventWaker { 82 | pub fn flags(&self) -> Arc { 83 | unsafe { 84 | BaseArc::increment_strong_count(self.0); 85 | } 86 | unsafe { BaseArc::from_raw(self.0) }.transpose() 87 | } 88 | 89 | pub fn wake_by_ref(&self) { 90 | let inner = unsafe { &*self.0 }; 91 | let flags = inner.flags.fetch_or(0b1, Ordering::AcqRel); 92 | log::trace!("Flags {flags:b}"); 93 | if flags & 0b111 == 0 { 94 | let ret = unsafe { SetEvent(inner.handle.as_raw_handle() as _) }; 95 | assert_ne!(0, ret); 96 | } 97 | } 98 | 99 | pub fn close(&self) { 100 | let inner = unsafe { &*self.0 }; 101 | inner.flags.fetch_or(0b100, Ordering::AcqRel); 102 | } 103 | 104 | pub fn into_raw_waker(self) -> RawWaker { 105 | let data: *const () = unsafe { core::mem::transmute(self) }; 106 | let vtbl = &RawWakerVTable::new( 107 | Self::raw_clone, 108 | Self::raw_wake, 109 | Self::raw_wake_by_ref, 110 | Self::raw_drop, 111 | ); 112 | RawWaker::new(data, vtbl) 113 | } 114 | 115 | pub fn into_waker(self) -> Waker { 116 | unsafe { Waker::from_raw(self.into_raw_waker()) } 117 | } 118 | 119 | unsafe fn raw_wake(data: *const ()) { 120 | let waker = core::ptr::read((&data as *const _) as *const Self); 121 | waker.wake_by_ref() 122 | } 123 | 124 | unsafe fn raw_wake_by_ref(data: *const ()) { 125 | let waker: &Self = &*((&data as *const _) as *const Self); 126 | waker.wake_by_ref() 127 | } 128 | 129 | unsafe fn raw_clone(data: *const ()) -> RawWaker { 130 | let waker: &Self = &*((&data as *const _) as *const Self); 131 | waker.clone().into_raw_waker() 132 | } 133 | 134 | unsafe fn raw_drop(data: *const ()) { 135 | core::ptr::drop_in_place((&data as *const _) as *const Self as *mut Self) 136 | } 137 | } 138 | 139 | impl AsRawHandle for EventWaker { 140 | fn as_raw_handle(&self) -> RawHandle { 141 | unsafe { &*self.0 }.handle.as_raw_handle() 142 | } 143 | } 144 | 145 | struct EventWakerInner { 146 | handle: OwnedHandle, 147 | flags: AtomicU8, 148 | } 149 | 150 | impl AsRef for EventWakerInner { 151 | fn as_ref(&self) -> &AtomicU8 { 152 | &self.flags 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /mfio/src/backend/integrations/async_io.rs: -------------------------------------------------------------------------------- 1 | //! `async-io` 2.0 integration. 2 | //! 3 | //! We technically support `async-io` 1, however, the system had a 4 | //! [limitation](https://github.com/smol-rs/async-io/issues/132) that was only resolved in version 5 | //! 2. 6 | 7 | use async_io::Async; 8 | use std::os::fd::BorrowedFd; 9 | 10 | use super::super::*; 11 | use super::{BorrowingFn, Integration}; 12 | 13 | /// async-io integration. 14 | /// 15 | /// Unlike [`Null`], this integration supports backends with polling handles, however, only 16 | /// async-io based runtimes are supported, such as smol and async_std. 17 | /// 18 | /// Internally, this uses async-io's [`Async`] to wait for readiness of the polling FD, which means 19 | /// only unix platforms are supported. 
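///
/// Each poll first drives the wrapped future; while it stays pending, the backend's polling
/// handle is registered for readability and/or writability according to its current
/// `PollingFlags`, and the task is re-polled once the handle becomes ready.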
20 | /// 21 | /// # Examples 22 | /// 23 | /// Using the integration with smol: 24 | /// 25 | /// ``` 26 | /// # mod sample { 27 | /// # include!("../../sample.rs"); 28 | /// # } 29 | /// # use sample::SampleIo; 30 | /// use mfio::prelude::v1::*; 31 | /// 32 | /// # #[cfg(all(unix, not(miri)))] 33 | /// smol::block_on(async { 34 | /// let mut handle = SampleIo::new(vec![1, 2, 3, 4]); 35 | /// 36 | /// // Run the integration. Prefer to use `run_with_mut`, so that panics can be avoided. 37 | /// AsyncIo::run_with_mut(&mut handle, |handle| async move { 38 | /// // Read value 39 | /// let val = handle.read(0).await.unwrap(); 40 | /// assert_eq!(1u8, val); 41 | /// }) 42 | /// .await 43 | /// }); 44 | /// # #[cfg(not(all(unix, not(miri))))] 45 | /// # fn main() {} 46 | /// ``` 47 | #[derive(Clone, Copy, Default)] 48 | pub struct AsyncIo; 49 | 50 | impl Integration for AsyncIo { 51 | type Impl<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn> = 52 | AsyncIoImpl<'a, B, Func, Func::Fut<'a>>; 53 | 54 | fn run_with<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn>( 55 | backend: B, 56 | func: Func, 57 | ) -> Self::Impl<'a, B, Func> { 58 | Self::Impl { 59 | backend, 60 | state: AsyncIoState::Initial(func), 61 | } 62 | } 63 | } 64 | 65 | enum AsyncIoState<'a, Func, F> { 66 | Initial(Func), 67 | Loaded( 68 | WithBackend<'a, F>, 69 | Option<(Async>, &'a PollingFlags, Waker)>, 70 | ), 71 | Finished, 72 | } 73 | 74 | #[doc(hidden)] 75 | pub struct AsyncIoImpl<'a, B: LinksIoBackend + 'a, Func, F> { 76 | backend: B, 77 | state: AsyncIoState<'a, Func, F>, 78 | } 79 | 80 | impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn> 81 | AsyncIoImpl<'a, B, Func, Func::Fut<'a>> 82 | { 83 | pub async fn run(backend: B, func: Func) -> as Future>::Output { 84 | AsyncIo::run_with(backend, func).await 85 | } 86 | } 87 | 88 | impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn> Future 89 | for AsyncIoImpl<'a, B, Func, Func::Fut<'a>> 90 | { 91 | type Output = as Future>::Output; 92 | 93 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 94 | let this = unsafe { self.get_unchecked_mut() }; 95 | 96 | loop { 97 | match &mut this.state { 98 | AsyncIoState::Initial(_) => { 99 | let func = if let AsyncIoState::Initial(func) = 100 | core::mem::replace(&mut this.state, AsyncIoState::Finished) 101 | { 102 | func 103 | } else { 104 | unreachable!() 105 | }; 106 | // SAFETY: the backend reference is pinned 107 | let backend: &'a B::Link = 108 | unsafe { &*(this.backend.get_mut() as *const B::Link) }; 109 | let fut = func.call(backend); 110 | let (fut, h) = backend.with_backend(fut); 111 | this.state = AsyncIoState::Loaded( 112 | fut, 113 | h.map( 114 | |PollingHandle { 115 | handle, 116 | cur_flags, 117 | waker, 118 | .. 119 | }| { 120 | let handle = unsafe { BorrowedFd::borrow_raw(handle) }; 121 | ( 122 | Async::new_nonblocking(handle) 123 | .expect("Could not register the IO resource"), 124 | cur_flags, 125 | waker, 126 | ) 127 | }, 128 | ), 129 | ); 130 | } 131 | AsyncIoState::Loaded(wb, fd) => { 132 | break loop { 133 | if let Poll::Ready(v) = unsafe { Pin::new_unchecked(&mut *wb) }.poll(cx) { 134 | break Poll::Ready(v); 135 | } 136 | 137 | if let Some((fd, p, _)) = fd { 138 | let (read, write) = p.get(); 139 | // TODO: what to do when read = write = false? 
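// If either direction the backend currently cares about reports ready, loop
// again and poll the wrapped backend future; otherwise stay Pending and let
// async-io's reactor wake us once the FD becomes readable/writable.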
140 | let mut ret = Some(Poll::Pending); 141 | if read && fd.poll_readable(cx).is_ready() { 142 | ret = None 143 | } 144 | if write && fd.poll_writable(cx).is_ready() { 145 | ret = None 146 | } 147 | if let Some(ret) = ret { 148 | break ret; 149 | } 150 | } 151 | }; 152 | } 153 | AsyncIoState::Finished => unreachable!(), 154 | } 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /mfio/src/backend/integrations/mod.rs: -------------------------------------------------------------------------------- 1 | //! Integrates `mfio` backends with other async runtimes. 2 | //! 3 | //! Integrations allow `mfio` backend objects to be used in other runtimes with true cooperation. 4 | //! 5 | //! Note that the current integration use is rather limited, mainly, both `tokio` and `async-io` 6 | //! require unix platforms, since async equivalents for windows raw handle polling is not exposed 7 | //! at the moment. 8 | 9 | #[cfg(all(unix, feature = "std", feature = "async-io"))] 10 | #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "std", feature = "async-io"))))] 11 | pub mod async_io; 12 | pub mod null; 13 | #[cfg(all(unix, not(miri), feature = "std", feature = "tokio"))] 14 | #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "std", feature = "tokio"))))] 15 | pub mod tokio; 16 | 17 | use super::{IoBackend, LinksIoBackend, RefLink}; 18 | use core::future::Future; 19 | use core::marker::PhantomData; 20 | 21 | pub trait BorrowingFn { 22 | type Fut<'a>: Future 23 | where 24 | B: 'a; 25 | fn call(self, arg: &B) -> Self::Fut<'_>; 26 | } 27 | 28 | impl FnOnce(&'a B) -> F, F: Future> BorrowingFn for Func { 29 | type Fut<'a> 30 | = F 31 | where 32 | B: 'a; 33 | 34 | fn call(self, arg: &B) -> Self::Fut<'_> { 35 | self(arg) 36 | } 37 | } 38 | 39 | /// Wrapper to convert `FnOnce(&'1 _)` to `for<'a> FnOnce(&'a _)`. 40 | /// 41 | /// This wrapper is needed to walk around issues with closures not defaulting to using 42 | /// higher-ranked type bounds. See: . 43 | /// 44 | /// This wrapper is used in `run_with_mut` to accept a non-hrtb closure without complex trickery. 45 | pub struct UnsafeHrtb<'a, B: ?Sized, Func: FnOnce(&'a B) -> F, F: Future> { 46 | func: Func, 47 | _phantom: PhantomData F>, 48 | } 49 | 50 | impl<'a, B: ?Sized, Func: FnOnce(&'a B) -> F, F: Future> UnsafeHrtb<'a, B, Func, F> { 51 | unsafe fn new(func: Func) -> Self { 52 | Self { 53 | func, 54 | _phantom: PhantomData, 55 | } 56 | } 57 | } 58 | 59 | impl<'a, B: ?Sized, Func: FnOnce(&'a B) -> F, F: Future> BorrowingFn 60 | for UnsafeHrtb<'a, B, Func, F> 61 | { 62 | type Fut<'b> 63 | = F 64 | where 65 | B: 'b; 66 | 67 | fn call<'b>(self, arg: &'b B) -> Self::Fut<'b> { 68 | let arg: &'a B = unsafe { &*(arg as *const B) }; 69 | (self.func)(arg) 70 | } 71 | } 72 | 73 | pub trait Integration: Copy + Default { 74 | type Impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn>: Future< 75 | Output = as Future>::Output, 76 | >; 77 | 78 | fn run_with<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn>( 79 | link: B, 80 | func: Func, 81 | ) -> Self::Impl<'a, B, Func>; 82 | 83 | fn run_with_mut<'a, B: IoBackend + 'a, Func: FnOnce(&'a B) -> F, F: Future + 'a>( 84 | link: &'a mut B, 85 | func: Func, 86 | ) -> Self::Impl<'a, RefLink<'a, B>, UnsafeHrtb<'a, B, Func, F>> { 87 | // SAFETY: we know we will be passing the backend link to this closure, therefore we can 88 | // safely cast the function to be HRTB. 
89 | let func = unsafe { UnsafeHrtb::new(func) }; 90 | Self::run_with(RefLink(link), func) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /mfio/src/backend/integrations/null.rs: -------------------------------------------------------------------------------- 1 | //! A null integration. 2 | //! 3 | //! This integration assumes external wakers, and no backend dependence on cooperative handles. 4 | //! This works great whenever the I/O backend runs on a separate thread, however, this usually 5 | //! leads to severe latency penalty due to cross-thread synchronization. 6 | 7 | use super::super::*; 8 | use super::{BorrowingFn, Integration}; 9 | 10 | /// Minimal integration. 11 | /// 12 | /// This integration works in all async runtimes, however, it does not support the backend's 13 | /// `PollingHandle`. If the backend returns `Some(handle)`, then this integration panics. 14 | /// 15 | /// # Examples 16 | /// 17 | /// Running with `pollster`: 18 | /// 19 | /// ``` 20 | /// # mod sample { 21 | /// # include!("../../sample.rs"); 22 | /// # } 23 | /// # use sample::SampleIo; 24 | /// use mfio::prelude::v1::*; 25 | /// 26 | /// pollster::block_on(async { 27 | /// let mut handle = SampleIo::new(vec![1, 2, 3, 4]); 28 | /// 29 | /// // Run the integration. Prefer to use `run_with_mut`, so that panics can be avoided. 30 | /// Null::run_with_mut(&mut handle, |handle| async move { 31 | /// // Read value 32 | /// let val: u8 = handle.read(0).await.unwrap(); 33 | /// assert_eq!(1, val); 34 | /// }) 35 | /// .await 36 | /// }); 37 | /// ``` 38 | #[derive(Clone, Copy, Default)] 39 | pub struct Null; 40 | 41 | impl Integration for Null { 42 | type Impl<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn> = 43 | NullImpl<'a, B, Func, Func::Fut<'a>>; 44 | 45 | fn run_with<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn>( 46 | backend: B, 47 | func: Func, 48 | ) -> Self::Impl<'a, B, Func> { 49 | Self::Impl { 50 | backend, 51 | state: NullState::Initial(func), 52 | } 53 | } 54 | } 55 | 56 | enum NullState<'a, Func, F> { 57 | Initial(Func), 58 | Loaded(WithBackend<'a, F>), 59 | Finished, 60 | } 61 | 62 | #[doc(hidden)] 63 | pub struct NullImpl<'a, B: LinksIoBackend + 'a, Func, F> { 64 | backend: B, 65 | state: NullState<'a, Func, F>, 66 | } 67 | 68 | impl<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn> 69 | NullImpl<'a, B, Func, Func::Fut<'a>> 70 | { 71 | pub async fn run(backend: B, func: Func) -> as Future>::Output { 72 | Null::run_with(backend, func).await 73 | } 74 | } 75 | 76 | impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn> Future 77 | for NullImpl<'a, B, Func, Func::Fut<'a>> 78 | { 79 | type Output = as Future>::Output; 80 | 81 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 82 | let this = unsafe { self.get_unchecked_mut() }; 83 | 84 | loop { 85 | match &mut this.state { 86 | NullState::Initial(_) => { 87 | let func = if let NullState::Initial(func) = 88 | core::mem::replace(&mut this.state, NullState::Finished) 89 | { 90 | func 91 | } else { 92 | unreachable!() 93 | }; 94 | // SAFETY: the backend reference is pinned 95 | let backend: &'a B::Link = 96 | unsafe { &*(this.backend.get_mut() as *const B::Link) }; 97 | let fut = func.call(backend); 98 | let (fut, h) = backend.with_backend(fut); 99 | assert!( 100 | h.is_none(), 101 | "Null future cannot be used when backend exports a FD!" 
102 | ); 103 | this.state = NullState::Loaded(fut); 104 | } 105 | NullState::Loaded(wb) => break unsafe { Pin::new_unchecked(wb) }.poll(cx), 106 | NullState::Finished => unreachable!(), 107 | } 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /mfio/src/backend/integrations/tokio.rs: -------------------------------------------------------------------------------- 1 | //! `tokio` integration. 2 | 3 | use std::os::fd::RawFd; 4 | use tokio::io::{unix::AsyncFd, Interest}; 5 | 6 | use super::super::*; 7 | use super::{BorrowingFn, Integration}; 8 | 9 | /// Tokio integration. 10 | /// 11 | /// Unlike [`Null`], this integration supports backends with polling handles, however, only tokio 12 | /// runtime is supported. 13 | /// 14 | /// Internally, this uses tokio's [`AsyncFd`] to wait for readiness of the polling handle, which 15 | /// means only unix platforms are supported. 16 | /// 17 | /// # Examples 18 | /// 19 | /// ``` 20 | /// # mod sample { 21 | /// # include!("../../sample.rs"); 22 | /// # } 23 | /// # use sample::SampleIo; 24 | /// use mfio::prelude::v1::*; 25 | /// 26 | /// #[tokio::main] 27 | /// # #[cfg(all(unix, not(miri)))] 28 | /// async fn main() { 29 | /// let mut handle = SampleIo::new(vec![1, 2, 3, 4]); 30 | /// 31 | /// // Run the integration. Prefer to use `run_with_mut`, so that panics can be avoided. 32 | /// Tokio::run_with_mut(&mut handle, |handle| async move { 33 | /// // Read value 34 | /// let val = handle.read(0).await.unwrap(); 35 | /// assert_eq!(1u8, val); 36 | /// }) 37 | /// .await 38 | /// } 39 | /// # #[cfg(not(all(unix, not(miri))))] 40 | /// # fn main() {} 41 | /// ``` 42 | #[derive(Clone, Copy, Default)] 43 | pub struct Tokio; 44 | 45 | fn into_tokio(flags: &PollingFlags) -> Interest { 46 | match flags.get() { 47 | (true, true) => Interest::READABLE.add(Interest::WRITABLE), 48 | (false, true) => Interest::WRITABLE, 49 | (true, false) => Interest::READABLE, 50 | (false, false) => panic!("Polling flags incompatible!"), 51 | } 52 | } 53 | 54 | impl Integration for Tokio { 55 | type Impl<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn> = 56 | TokioImpl<'a, B, Func, Func::Fut<'a>>; 57 | 58 | fn run_with<'a, B: LinksIoBackend + 'a, Func: for<'b> BorrowingFn>( 59 | backend: B, 60 | func: Func, 61 | ) -> Self::Impl<'a, B, Func> { 62 | Self::Impl { 63 | backend, 64 | state: TokioState::Initial(func), 65 | } 66 | } 67 | } 68 | 69 | enum TokioState<'a, Func, F> { 70 | Initial(Func), 71 | Loaded( 72 | WithBackend<'a, F>, 73 | Option<(AsyncFd, &'a PollingFlags, Waker)>, 74 | ), 75 | Finished, 76 | } 77 | 78 | #[doc(hidden)] 79 | pub struct TokioImpl<'a, B: LinksIoBackend + 'a, Func, F> { 80 | backend: B, 81 | state: TokioState<'a, Func, F>, 82 | } 83 | 84 | impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn> TokioImpl<'a, B, Func, Func::Fut<'a>> { 85 | pub async fn run(backend: B, func: Func) -> as Future>::Output { 86 | Tokio::run_with(backend, func).await 87 | } 88 | } 89 | 90 | impl<'a, B: LinksIoBackend + 'a, Func: BorrowingFn> Future 91 | for TokioImpl<'a, B, Func, Func::Fut<'a>> 92 | { 93 | type Output = as Future>::Output; 94 | 95 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 96 | let this = unsafe { self.get_unchecked_mut() }; 97 | 98 | loop { 99 | match &mut this.state { 100 | TokioState::Initial(_) => { 101 | let func = if let TokioState::Initial(func) = 102 | core::mem::replace(&mut this.state, TokioState::Finished) 103 | { 104 | func 105 | } else { 106 | unreachable!() 107 | }; 
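// With the user closure extracted, set up the backend future below and, if the
// backend exposes a polling handle, register it with tokio's reactor as an
// `AsyncFd` using the widest interest set the backend will ever request.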
108 | // SAFETY: the backend reference is pinned 109 | let backend: &'a B::Link = 110 | unsafe { &*(this.backend.get_mut() as *const B::Link) }; 111 | let fut = func.call(backend); 112 | let (fut, h) = backend.with_backend(fut); 113 | this.state = TokioState::Loaded( 114 | fut, 115 | h.map( 116 | |PollingHandle { 117 | handle, 118 | cur_flags, 119 | waker, 120 | max_flags, 121 | }| { 122 | ( 123 | AsyncFd::with_interest(handle, into_tokio(&max_flags)) 124 | .expect("Could not register the IO resource"), 125 | cur_flags, 126 | waker, 127 | ) 128 | }, 129 | ), 130 | ); 131 | } 132 | TokioState::Loaded(wb, fd) => { 133 | break loop { 134 | if let Poll::Ready(v) = unsafe { Pin::new_unchecked(&mut *wb) }.poll(cx) { 135 | break Poll::Ready(v); 136 | } 137 | if let Some((fd, p, _)) = fd { 138 | let (read, write) = p.get(); 139 | // TODO: what to do when read = write = false? 140 | let mut ret = Some(Poll::Pending); 141 | if read { 142 | if let Poll::Ready(Ok(mut guard)) = fd.poll_read_ready(cx) { 143 | // We clear the ready flag, because the backend is expected to consume 144 | // all I/O until it blocks without waking anything. 145 | guard.clear_ready(); 146 | ret = None; 147 | } 148 | } 149 | if write { 150 | if let Poll::Ready(Ok(mut guard)) = fd.poll_write_ready(cx) { 151 | // We clear the ready flag, because the backend is expected to consume 152 | // all I/O until it blocks without waking anything. 153 | guard.clear_ready(); 154 | ret = None; 155 | } 156 | } 157 | if let Some(ret) = ret { 158 | break ret; 159 | } 160 | } 161 | }; 162 | } 163 | TokioState::Finished => unreachable!(), 164 | } 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /mfio/src/backend/windows.rs: -------------------------------------------------------------------------------- 1 | use core::task::{RawWaker, RawWakerVTable}; 2 | use std::os::windows::io::{AsRawHandle, OwnedHandle}; 3 | use tarc::BaseArc; 4 | use windows_sys::Win32::System::Threading::SetEvent; 5 | 6 | /// A windows event backed waker. 
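///
/// Waking signals the wrapped event object through `SetEvent`, releasing
/// whatever is blocked waiting on that handle. The pointer is a raw
/// `BaseArc`-allocated handle whose strong count is managed manually by the
/// `Clone`/`Drop` impls below.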
7 | #[repr(transparent)] 8 | pub struct EventWaker(*const OwnedHandle); 9 | 10 | impl Clone for EventWaker { 11 | fn clone(&self) -> Self { 12 | unsafe { 13 | BaseArc::increment_strong_count(self.0); 14 | } 15 | Self(self.0) 16 | } 17 | } 18 | 19 | impl Drop for EventWaker { 20 | fn drop(&mut self) { 21 | unsafe { 22 | BaseArc::decrement_strong_count(self.0); 23 | } 24 | } 25 | } 26 | 27 | impl From> for EventWaker { 28 | fn from(handle: BaseArc) -> Self { 29 | Self(handle.into_raw()) 30 | } 31 | } 32 | 33 | impl EventWaker { 34 | fn wake_by_ref(&self) { 35 | let handle = unsafe { &*self.0 }.as_raw_handle(); 36 | unsafe { 37 | SetEvent(handle as _); 38 | } 39 | } 40 | 41 | pub fn into_raw_waker(self) -> RawWaker { 42 | let data: *const () = unsafe { core::mem::transmute(self) }; 43 | let vtbl = &RawWakerVTable::new( 44 | Self::raw_clone, 45 | Self::raw_wake, 46 | Self::raw_wake_by_ref, 47 | Self::raw_drop, 48 | ); 49 | RawWaker::new(data, vtbl) 50 | } 51 | 52 | unsafe fn raw_wake(data: *const ()) { 53 | let waker = core::ptr::read((&data as *const _) as *const Self); 54 | waker.wake_by_ref() 55 | } 56 | 57 | unsafe fn raw_wake_by_ref(data: *const ()) { 58 | let waker: &Self = &*((&data as *const _) as *const Self); 59 | waker.wake_by_ref() 60 | } 61 | 62 | unsafe fn raw_clone(data: *const ()) -> RawWaker { 63 | let waker: &Self = &*((&data as *const _) as *const Self); 64 | waker.clone().into_raw_waker() 65 | } 66 | 67 | unsafe fn raw_drop(data: *const ()) { 68 | core::ptr::drop_in_place((&data as *const _) as *const Self as *mut Self) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /mfio/src/error.rs: -------------------------------------------------------------------------------- 1 | //! mfio's error types 2 | //! 3 | //! Errors in mfio area meant to be both descriptive and easy to pass across FFI-boundary. Hence we 4 | //! opt to an integer describing many states. 5 | 6 | use cglue::result::IntError; 7 | use core::num::{NonZeroI32, NonZeroU8}; 8 | 9 | pub type Result = core::result::Result; 10 | 11 | /// Error code 12 | /// 13 | /// This code represents an HTTP client/server error, shifted by 399. This means, that `Code(1)` 14 | /// represents `HTTP` code `400`. If `http` feature is enabled, you can freely transform from 15 | /// `Code` to `http::StatusCode`. 16 | #[repr(transparent)] 17 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] 18 | pub struct Code(NonZeroU8); 19 | 20 | #[cfg(not(feature = "http"))] 21 | impl core::fmt::Display for Code { 22 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 23 | write!(f, "{}", self.0) 24 | } 25 | } 26 | 27 | const HTTP_SHIFT: usize = 399; 28 | 29 | /// HTTP 500 error. 
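///
/// With the shift of 399, this is `Code(101)`, i.e. `INTERNAL_ERROR.http_code() == 500`.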
30 | pub const INTERNAL_ERROR: Code = 31 | Code(unsafe { NonZeroU8::new_unchecked((500 - HTTP_SHIFT) as u8) }); 32 | 33 | impl Code { 34 | pub const fn http_code(&self) -> usize { 35 | self.0.get() as usize + HTTP_SHIFT 36 | } 37 | 38 | pub const fn from_http_const(code: usize) -> Self { 39 | if code >= 400 && code < 600 { 40 | Code(unsafe { NonZeroU8::new_unchecked((code - HTTP_SHIFT) as u8) }) 41 | } else { 42 | panic!("Invalid code provided") 43 | } 44 | } 45 | 46 | pub fn from_http(code: usize) -> Option { 47 | if (400..600).contains(&code) { 48 | NonZeroU8::new((code - HTTP_SHIFT) as u8).map(Code) 49 | } else { 50 | None 51 | } 52 | } 53 | } 54 | 55 | #[cfg(feature = "http")] 56 | mod http { 57 | use super::*; 58 | use ::http::StatusCode; 59 | 60 | impl core::fmt::Display for Code { 61 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 62 | write!(f, "{}", StatusCode::from(*self)) 63 | } 64 | } 65 | 66 | impl core::convert::TryFrom for Code { 67 | type Error = (); 68 | fn try_from(code: StatusCode) -> core::result::Result { 69 | Self::from_http(code.as_u16() as usize).ok_or(()) 70 | } 71 | } 72 | 73 | impl core::convert::TryFrom for Error { 74 | type Error = (); 75 | fn try_from(code: StatusCode) -> core::result::Result { 76 | Code::try_from(code).map(|code| Error { 77 | code, 78 | subject: Subject::Other, 79 | state: State::Other, 80 | location: Location::Other, 81 | }) 82 | } 83 | } 84 | 85 | impl From for StatusCode { 86 | fn from(code: Code) -> Self { 87 | Self::from_u16(code.0.get() as u16 + 399).unwrap() 88 | } 89 | } 90 | 91 | impl From for StatusCode { 92 | fn from(Error { code, .. }: Error) -> Self { 93 | Self::from(code) 94 | } 95 | } 96 | } 97 | 98 | /// mfio's error type. 99 | /// 100 | /// This type consists of 4 distinct pieces: 101 | /// 102 | /// - `code`, representing equivalent HTTP status code, which may not be descriptive, and often 103 | /// falls back to `INTERNAL_ERROR`, representing HTTP code 500. 104 | /// - `subject`, represents what errored out. 105 | /// - `state`, represents what kind of error state was reached. 106 | /// - `location`, where in the program the error occured. 107 | #[repr(C)] 108 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] 109 | pub struct Error { 110 | pub code: Code, 111 | pub subject: Subject, 112 | pub state: State, 113 | pub location: Location, 114 | } 115 | 116 | impl core::fmt::Display for Error { 117 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 118 | write!( 119 | f, 120 | "{}: {} in {} state at {}", 121 | self.code, self.subject, self.state, self.location 122 | ) 123 | } 124 | } 125 | 126 | #[cfg(feature = "std")] 127 | impl std::error::Error for Error {} 128 | 129 | impl IntError for Error { 130 | fn into_int_err(self) -> NonZeroI32 { 131 | NonZeroI32::new(i32::from_ne_bytes([ 132 | self.code.0.get(), 133 | self.subject as u8, 134 | self.state as u8, 135 | self.location as u8, 136 | ])) 137 | .unwrap() 138 | } 139 | 140 | fn from_int_err(err: NonZeroI32) -> Self { 141 | let [code, subject, state, location] = err.get().to_ne_bytes(); 142 | 143 | let code = Code(NonZeroU8::new(code).unwrap()); 144 | 145 | Self { 146 | code, 147 | subject: subject.into(), 148 | state: state.into(), 149 | location: location.into(), 150 | } 151 | } 152 | } 153 | 154 | /// Shorthand for building mfio error structure. 155 | /// 156 | /// All enum variants act as if they are imported, therefore in this macro they are to be used 157 | /// without specifying the type. 
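///
/// # Example
///
/// A small illustration (the chosen variants are arbitrary):
///
/// ```
/// use mfio::mferr;
///
/// // Three-argument form defaults to HTTP code 500.
/// let err = mferr!(Io, Unreadable, Backend);
/// assert_eq!(err.code.http_code(), 500);
///
/// // Four-argument form sets an explicit HTTP code.
/// let not_found = mferr!(404, File, NotFound, Filesystem);
/// assert_eq!(not_found.code.http_code(), 404);
/// ```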
158 | #[macro_export] 159 | macro_rules! mferr { 160 | ($code:expr, $subject:ident, $state:ident, $location:ident) => { 161 | $crate::error::Error { 162 | code: { 163 | const CODE: $crate::error::Code = $crate::error::Code::from_http_const($code); 164 | CODE 165 | }, 166 | subject: $crate::error::Subject::$subject, 167 | state: $crate::error::State::$state, 168 | location: $crate::error::Location::$location, 169 | } 170 | }; 171 | ($subject:ident, $state:ident, $location:ident) => { 172 | $crate::mferr!(500, $subject, $state, $location) 173 | }; 174 | } 175 | 176 | macro_rules! ienum { 177 | ( 178 | $(#[$meta:meta])* 179 | pub enum $ident:ident { 180 | $($variant:ident,)* 181 | } 182 | ) => { 183 | $(#[$meta])* 184 | #[repr(u8)] 185 | #[non_exhaustive] 186 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] 187 | pub enum $ident { 188 | $($variant),* 189 | } 190 | 191 | impl From for $ident { 192 | fn from(val: u8) -> Self { 193 | if val < $ident::Other as u8 { 194 | unsafe { core::mem::transmute(val) } 195 | } else { 196 | $ident::Other 197 | } 198 | } 199 | } 200 | 201 | impl $ident { 202 | pub const fn to_str(&self) -> &'static str { 203 | match self { 204 | $(Self::$variant => stringify!($variant),)* 205 | } 206 | } 207 | } 208 | 209 | impl AsRef for $ident { 210 | fn as_ref(&self) -> &str { 211 | self.to_str() 212 | } 213 | } 214 | 215 | impl core::ops::Deref for $ident { 216 | type Target = str; 217 | 218 | fn deref(&self) -> &str { 219 | self.to_str() 220 | } 221 | } 222 | 223 | impl core::fmt::Display for $ident { 224 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 225 | write!(f, "{}", self.to_str()) 226 | } 227 | } 228 | }; 229 | } 230 | 231 | ienum! { 232 | /// Describes the error subject. 233 | /// 234 | /// While [`Location`] points to a module where the error originated from, Subject attempts to 235 | /// narrow the error down to the main actor that was involved in the creation of the error. 236 | pub enum Subject { 237 | Argument, 238 | Data, 239 | Path, 240 | File, 241 | Io, 242 | Directory, 243 | Memory, 244 | Size, 245 | Bounds, 246 | Position, 247 | Offset, 248 | Address, 249 | Connection, 250 | Architecture, 251 | Response, 252 | Abi, 253 | Api, 254 | Process, 255 | Value, 256 | Library, 257 | Binary, 258 | Input, 259 | Output, 260 | Plugin, 261 | Target, 262 | Feature, 263 | Module, 264 | Export, 265 | Import, 266 | Section, 267 | Backend, 268 | Entry, 269 | Operation, 270 | Other, 271 | } 272 | } 273 | 274 | ienum! { 275 | /// Describes the state of the error subject. 276 | /// 277 | /// State allows to specify what caused the [`Subject`] to fail. 278 | pub enum State { 279 | Invalid, 280 | Unreadable, 281 | Uninitialized, 282 | Unsupported, 283 | Unavailable, 284 | NotImplemented, 285 | Partial, 286 | Outside, 287 | Exhausted, 288 | Read, 289 | Write, 290 | Create, 291 | Append, 292 | Seek, 293 | Map, 294 | Load, 295 | AlreadyExists, 296 | NotFound, 297 | PermissionDenied, 298 | Interrupted, 299 | Rejected, 300 | Refused, 301 | NotReady, 302 | Aborted, 303 | NotConnected, 304 | BrokenPipe, 305 | Timeout, 306 | Nop, 307 | UnexpectedEof, 308 | InUse, 309 | Corrupted, 310 | Removed, 311 | Other, 312 | } 313 | } 314 | 315 | ienum! { 316 | /// Describes the error origin. 317 | /// 318 | /// The Origin specifies general location where the error originates from - be it a module, 319 | /// crate, or subsystem. It is not meant to be descriptive in terms of error handling. For 320 | /// better locality, check the [`Subject`]. 
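///
/// For example, an error raised while resolving a path in the filesystem layer
/// would typically pair `Location::Filesystem` with `Subject::Path`.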
321 | pub enum Location { 322 | Backend, 323 | Memory, 324 | Client, 325 | Core, 326 | Filesystem, 327 | Application, 328 | ThirdParty, 329 | Network, 330 | Ffi, 331 | Plugin, 332 | Library, 333 | Stdlib, 334 | Other, 335 | } 336 | } 337 | 338 | #[cfg(feature = "std")] 339 | impl From for Error { 340 | fn from(err: std::io::Error) -> Self { 341 | Self { 342 | code: INTERNAL_ERROR, 343 | subject: Subject::Io, 344 | state: err.kind().into(), 345 | location: Location::Other, 346 | } 347 | } 348 | } 349 | 350 | #[cfg(feature = "std")] 351 | impl From for State { 352 | fn from(kind: std::io::ErrorKind) -> Self { 353 | use std::io::ErrorKind::*; 354 | match kind { 355 | NotFound => State::NotFound, 356 | PermissionDenied => State::PermissionDenied, 357 | ConnectionRefused => State::Refused, 358 | ConnectionReset => State::Interrupted, 359 | ConnectionAborted => State::Aborted, 360 | NotConnected => State::NotConnected, 361 | AddrInUse => State::InUse, 362 | AddrNotAvailable => State::Unavailable, 363 | BrokenPipe => State::BrokenPipe, 364 | AlreadyExists => State::AlreadyExists, 365 | WouldBlock => State::NotReady, 366 | InvalidInput => State::Invalid, 367 | InvalidData => State::Invalid, 368 | TimedOut => State::Timeout, 369 | WriteZero => State::Nop, 370 | Interrupted => State::Interrupted, 371 | Unsupported => State::Unsupported, 372 | UnexpectedEof => State::UnexpectedEof, 373 | OutOfMemory => State::Exhausted, 374 | Other => State::Other, 375 | _ => State::Other, 376 | } 377 | } 378 | } 379 | 380 | #[cfg(feature = "std")] 381 | impl From for Error { 382 | fn from(kind: std::io::ErrorKind) -> Self { 383 | Error { 384 | code: INTERNAL_ERROR, 385 | location: Location::Other, 386 | subject: Subject::Io, 387 | state: kind.into(), 388 | } 389 | } 390 | } 391 | 392 | impl From for Error { 393 | fn from(_: core::convert::Infallible) -> Self { 394 | unreachable!() 395 | } 396 | } 397 | 398 | /// Allows easy specification of different error properties. 399 | pub trait ErrorSpecify: Sized { 400 | fn code(self, code: Code) -> Self; 401 | fn subject(self, subject: Subject) -> Self; 402 | fn state(self, state: State) -> Self; 403 | fn location(self, location: Location) -> Self; 404 | } 405 | 406 | impl ErrorSpecify for Error { 407 | fn code(self, code: Code) -> Self { 408 | Self { code, ..self } 409 | } 410 | 411 | fn subject(self, subject: Subject) -> Self { 412 | Self { subject, ..self } 413 | } 414 | 415 | fn state(self, state: State) -> Self { 416 | Self { state, ..self } 417 | } 418 | 419 | fn location(self, location: Location) -> Self { 420 | Self { location, ..self } 421 | } 422 | } 423 | 424 | impl ErrorSpecify for Result { 425 | fn code(self, code: Code) -> Self { 426 | match self { 427 | Err(e) => Err(e.code(code)), 428 | v => v, 429 | } 430 | } 431 | 432 | fn subject(self, subject: Subject) -> Self { 433 | match self { 434 | Err(e) => Err(e.subject(subject)), 435 | v => v, 436 | } 437 | } 438 | 439 | fn state(self, state: State) -> Self { 440 | match self { 441 | Err(e) => Err(e.state(state)), 442 | v => v, 443 | } 444 | } 445 | 446 | fn location(self, location: Location) -> Self { 447 | match self { 448 | Err(e) => Err(e.location(location)), 449 | v => v, 450 | } 451 | } 452 | } 453 | -------------------------------------------------------------------------------- /mfio/src/futures_compat.rs: -------------------------------------------------------------------------------- 1 | //! Provides compatibility with `futures` traits. 
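//!
//! The [`Compat`] wrapper below keeps the in-flight mfio future across `poll_*`
//! calls, so a read or write that returned `Pending` is resumed, not restarted,
//! on the next poll of the corresponding `futures` trait method.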
2 | 3 | use crate::io::*; 4 | use crate::stdeq::{self, AsyncIoFut}; 5 | use crate::util::PosShift; 6 | use core::future::Future; 7 | use core::pin::Pin; 8 | use core::task::{Context, Poll}; 9 | #[cfg(not(mfio_assume_linear_types))] 10 | use futures::io::AsyncRead; 11 | use futures::io::{AsyncSeek, AsyncWrite}; 12 | use std::io::{Result, SeekFrom}; 13 | 14 | /// Container for intermediate values. 15 | /// 16 | /// Currently, reading and writing is not cancel safe. Meaning, cancelling the I/O operation and 17 | /// issuing a new one would continue the previous operation, and sync the results to the currently 18 | /// provided buffer. Note that the types of operations are handled separately so they do not mix 19 | /// and it is okay to cancel a read to issue a write. 20 | /// 21 | /// If you wish to cancel the operation, do drop the entire `Compat` object. However, be warned 22 | /// that `mfio` may panic, since it does not yet support cancellation at all. 23 | /// 24 | /// Note that at the time of writing, `AsyncRead` is not supported when `mfio_assume_linear_types` 25 | /// config is set. 26 | pub struct Compat<'a, Io: ?Sized> { 27 | io: &'a Io, 28 | #[cfg(not(mfio_assume_linear_types))] 29 | read: Option>, 30 | write: Option>, 31 | } 32 | 33 | /// Bridges mfio with futures. 34 | /// 35 | /// # Examples 36 | /// 37 | /// Read from mfio object through futures traits. 38 | /// 39 | /// ```rust 40 | /// # mod sample { 41 | /// # include!("sample.rs"); 42 | /// # } 43 | /// # use sample::SampleIo; 44 | /// # fn work() -> mfio::error::Result<()> { 45 | /// # mfio::linear_types_switch!( 46 | /// # Linear => { Ok(()) } 47 | /// # Standard => {{ 48 | /// use futures::io::{AsyncReadExt, Cursor}; 49 | /// use mfio::backend::*; 50 | /// use mfio::futures_compat::FuturesCompat; 51 | /// use mfio::stdeq::SeekableRef; 52 | /// 53 | /// let mem = vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]; 54 | /// let handle = SampleIo::new(mem.clone()); 55 | /// 56 | /// handle.block_on(async { 57 | /// let mut buf = Cursor::new(vec![0; mem.len()]); 58 | /// 59 | /// let handle = SeekableRef::from(&handle); 60 | /// futures::io::copy(handle.compat(), &mut buf).await?; 61 | /// assert_eq!(mem, buf.into_inner()); 62 | /// 63 | /// Ok(()) 64 | /// }) 65 | /// # }} 66 | /// # ) 67 | /// # } 68 | /// # work().unwrap(); 69 | /// ``` 70 | /// 71 | /// Write using futures traits. 
72 | /// 73 | /// ```rust 74 | /// # mod sample { 75 | /// # include!("sample.rs"); 76 | /// # } 77 | /// # use sample::SampleIo; 78 | /// # fn work() -> mfio::error::Result<()> { 79 | /// use futures::io::AsyncWriteExt; 80 | /// use mfio::backend::*; 81 | /// use mfio::futures_compat::FuturesCompat; 82 | /// use mfio::stdeq::SeekableRef; 83 | /// use mfio::traits::IoRead; 84 | /// 85 | /// let mut mem = vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]; 86 | /// let handle = SampleIo::new(mem.clone()); 87 | /// 88 | /// handle.block_on(async { 89 | /// let handle = SeekableRef::from(&handle); 90 | /// handle.compat().write_all(&[9, 9, 9]).await?; 91 | /// 92 | /// handle.read_all(0, &mut mem[..5]).await.unwrap(); 93 | /// assert_eq!(&mem[..5], &[9, 9, 9, 2, 3]); 94 | /// 95 | /// Ok(()) 96 | /// }) 97 | /// # } 98 | /// # work().unwrap(); 99 | /// ``` 100 | pub trait FuturesCompat { 101 | fn compat(&self) -> Compat { 102 | Compat { 103 | io: self, 104 | #[cfg(not(mfio_assume_linear_types))] 105 | read: None, 106 | write: None, 107 | } 108 | } 109 | } 110 | 111 | // StreamPos is needed for all I/O traits, so we use it to make sure rust gives better diagnostics. 112 | impl> FuturesCompat for Io {} 113 | 114 | // Currently we cannot guarantee that the user won't swap the buffer when using linear types. 115 | // FIXME: always allocate an intermediary and sync in `Compat`. This way we could also retain the 116 | // buffer, so that's nice. 117 | #[cfg(not(mfio_assume_linear_types))] 118 | #[cfg_attr(docsrs, doc(cfg(not(mfio_assume_linear_types))))] 119 | impl<'a, Io: ?Sized + stdeq::AsyncRead> AsyncRead for Compat<'a, Io> 120 | where 121 | u64: PosShift, 122 | { 123 | fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { 124 | let this = unsafe { self.get_unchecked_mut() }; 125 | 126 | loop { 127 | if let Some(read) = this.read.as_mut() { 128 | // Update the sync handle. This is how we hack around the lifetimes of input buffer. 129 | // SAFETY: AsyncIoFut will only use the sync object if, and only if the buffer is 130 | // to be written in this poll. 131 | read.sync = Some(unsafe { &mut *(buf as *mut _) }); 132 | 133 | let read = unsafe { Pin::new_unchecked(read) }; 134 | 135 | break read.poll(cx).map(|v| { 136 | this.read = None; 137 | v.map_err(|_| std::io::ErrorKind::Other.into()) 138 | }); 139 | } else { 140 | // SAFETY: on mfio_assume_linear_types, this is unsafe. Without the switch this is 141 | // safe, because the buffer is stored in a sync variable that is only used whenever 142 | // the I/O completes. That is processed in this poll function, and we update the 143 | // sync at every iteration of the loop. 144 | let buf = unsafe { &mut *(buf as *mut _) }; 145 | this.read = Some(stdeq::AsyncRead::read(this.io, buf)); 146 | } 147 | } 148 | } 149 | } 150 | 151 | impl<'a, Io: ?Sized + stdeq::AsyncWrite> AsyncWrite for Compat<'a, Io> 152 | where 153 | u64: PosShift, 154 | { 155 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { 156 | let this = unsafe { self.get_unchecked_mut() }; 157 | 158 | loop { 159 | if let Some(write) = this.write.as_mut() { 160 | let write = unsafe { Pin::new_unchecked(write) }; 161 | 162 | break write.poll(cx).map(|v| { 163 | this.write = None; 164 | v.map_err(|_| std::io::ErrorKind::Other.into()) 165 | }); 166 | } else { 167 | // SAFETY: on mfio_assume_linear_types, this is unsafe. 
Without the switch this is 168 | // safe, because the buffer is transferred to an intermediate one before this 169 | // function returns.. 170 | let buf = unsafe { &*(buf as *const _) }; 171 | this.write = Some(stdeq::AsyncWrite::write(this.io, buf)); 172 | } 173 | } 174 | } 175 | 176 | fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { 177 | // Completion of every request currently implies we've flushed. 178 | // TODO: improve semantics 179 | Poll::Ready(Ok(())) 180 | } 181 | 182 | fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { 183 | // We currently imply that we can just close on drop. 184 | // TODO: improve semantics 185 | Poll::Ready(Ok(())) 186 | } 187 | } 188 | 189 | impl<'a, Io: ?Sized + stdeq::StreamPos> AsyncSeek for Compat<'a, Io> { 190 | fn poll_seek(self: Pin<&mut Self>, _: &mut Context<'_>, pos: SeekFrom) -> Poll> { 191 | let this = unsafe { self.get_unchecked_mut() }; 192 | Poll::Ready(stdeq::std_seek(this.io, pos)) 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /mfio/src/io/opaque.rs: -------------------------------------------------------------------------------- 1 | /// Represents types that can be made opaque. 2 | /// 3 | /// This trait enables optimal object storage outcomes, based on the promises and assumptions the 4 | /// caller is able to make. What does this mean? 5 | /// 6 | /// Say the client needs to send a packet reference to the I/O processor. In regular async, the 7 | /// caller is not able to know how long the packet reference will have been sent for to the 8 | /// processor. Therefore, in order to avoid undefined behavior, the reference must be `'static`, 9 | /// i.e. heap allocated. 10 | /// 11 | /// However, this ignores the assumptions the client has made to their stack. What if the caller 12 | /// could prove, that the sent out packet will not outlive the caller's stack? In this case, the 13 | /// caller could skip heap allocation and send out a reference to a packet being stored on the 14 | /// stack instead. Of course, it is currently impossible to prove this, because rust futures are 15 | /// inert, and they may be cancelled at arbitrary points, making the caller unable to be 16 | /// deterministically sure that the stack location will be valid throughout the call. However, if a 17 | /// client were to assume this was the case, there would then be a significant performance uplift 18 | /// possible. 19 | /// 20 | /// This assumption of type linearity is less of a low level decision, rather than a high level one 21 | /// made by the programmer looking at the whole program. Therefore, this trait does not allow for 22 | /// definite promises of "this will 100% be on the stack, and the stack will be valid throughout", 23 | /// instead, it allows the programmer to describe a promise going something like the following: 24 | /// 25 | /// "We can promise, that under type linearity assumption, we are able to store a packet on the 26 | /// stack, and give out a reference that will be valid until the I/O processor manually 27 | /// relinquishes ownership of said references." 28 | /// 29 | /// The decision, whether type linearity is being assumed or not, is left at a project-wide level, 30 | /// usually controlled by `#[cfg(...)]` switches. Implementors of this trait are then able to 31 | /// define 2 different codepaths, one for the "100% safe Rust" way, and the other, for the more 32 | /// expanded, albeit potentially unsound, lifetime rules. 
33 | /// 34 | /// In the end, the implementor of an abstraction would promise that an object can be stored on the 35 | /// stack, if it can be done this way, but still have the flexibility of performing fully static 36 | /// heap allocations. 37 | /// 38 | /// # Safety 39 | /// 40 | /// Implementation should adhere to the lifetime requirements, most notably, the fact that 41 | /// both `StackReq` and `Opaque` types must be valid for `'static` lifetime, if 42 | /// `#[cfg(mfio_assume_linear_types)]` config switch is not enabled. 43 | pub unsafe trait OpaqueStore { 44 | type ConstHdr; 45 | type Opaque<'a>: 'a 46 | where 47 | Self: 'a; 48 | type StackReq<'a>: 'a 49 | where 50 | Self: 'a; 51 | type HeapReq: Into> 52 | where 53 | Self: 'static; 54 | 55 | /// Request for object to be stored on the stack. 56 | /// 57 | /// A sound implementation will not actually store the object on the stack, however, lifetime 58 | /// bounds are attached to introduce limits necessary for a more efficient, albeit unsound, 59 | /// implementation that assumes type linearity. 60 | /// 61 | /// Taking the returned object, and storing it on stack allows to take a mutable reference to 62 | /// the stack location and convert it into opaque object. 63 | fn stack<'a>(self) -> Self::StackReq<'a> 64 | where 65 | Self: 'a; 66 | 67 | /// Request for object to be stored on the heap. 68 | /// 69 | /// The returned object may be turned into `Opaque<'static>` by calling [`Into::into`] on it. 70 | /// 71 | /// TODO: can we just directly go to `Opaque<'static>`? 72 | fn heap(self) -> Self::HeapReq 73 | where 74 | Self: 'static; 75 | 76 | fn stack_hdr<'a: 'b, 'b>(stack: &'b Self::StackReq<'a>) -> &'b Self::ConstHdr; 77 | 78 | fn stack_opaque<'a>(stack: &'a Self::StackReq<'a>) -> Self::Opaque<'a>; 79 | } 80 | -------------------------------------------------------------------------------- /mfio/src/poller.rs: -------------------------------------------------------------------------------- 1 | use core::future::Future; 2 | use core::mem; 3 | use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; 4 | 5 | pub trait ThreadLocal: Sized { 6 | /// Get handle to current thread. 7 | fn current() -> Self; 8 | } 9 | 10 | /// Parkable handle. 11 | /// 12 | /// This handle allows a thread to potentially be efficiently blocked. This is used in the polling 13 | /// implementation to wait for wakeups. 14 | pub trait ParkHandle: Sized { 15 | /// Park the current thread. 16 | fn park(&self); 17 | 18 | /// Unpark specified thread. 19 | fn unpark(&self); 20 | } 21 | 22 | pub trait Wakeable: ParkHandle + Clone { 23 | /// Convert self into opaque pointer. 24 | /// 25 | /// This requires `Self` to either be layout compatible with `*const ()` or heap allocated upon 26 | /// switch. 27 | fn into_opaque(self) -> *const (); 28 | 29 | /// Convert opaque pointer into `Self`. 30 | /// 31 | /// # Safety 32 | /// 33 | /// This function is safe if the `data` argument is a valid park handle created by 34 | /// `Self::into_opaque`. 35 | unsafe fn from_opaque(data: *const ()) -> Self; 36 | 37 | /// Create a raw waker out of `self`. 38 | /// 39 | /// This will clone self and build a `RawWaker` with vtable built from this trait's waker 40 | /// functions. 41 | /// 42 | /// `ParkHandle::waker` depends on this method building the correct waker, thus overloading 43 | /// this blanket function needs to be done with great care. 
44 | unsafe fn raw_waker(&self) -> RawWaker { 45 | let data = self.clone().into_opaque(); 46 | RawWaker::new( 47 | data, 48 | &RawWakerVTable::new( 49 | Self::clone_waker, 50 | Self::wake, 51 | Self::wake_by_ref, 52 | Self::drop_waker, 53 | ), 54 | ) 55 | } 56 | 57 | /// Create a waker out of `self` 58 | /// 59 | /// This function will clone self and build a `Waker` object. 60 | /// 61 | /// The default implementation relies on `Self::raw_waker` method being correct. 62 | fn waker(&self) -> Waker { 63 | unsafe { Waker::from_raw(self.raw_waker()) } 64 | } 65 | 66 | unsafe fn clone_waker(data: *const ()) -> RawWaker { 67 | let waker = Self::from_opaque(data); 68 | let ret = waker.raw_waker(); 69 | mem::forget(waker); 70 | ret 71 | } 72 | 73 | unsafe fn wake(data: *const ()) { 74 | let waker = Self::from_opaque(data); 75 | waker.unpark(); 76 | } 77 | 78 | unsafe fn wake_by_ref(data: *const ()) { 79 | let waker = Self::from_opaque(data); 80 | waker.unpark(); 81 | mem::forget(waker); 82 | } 83 | 84 | unsafe fn drop_waker(data: *const ()) { 85 | let _ = Self::from_opaque(data); 86 | } 87 | } 88 | 89 | impl ThreadLocal for *const () { 90 | fn current() -> Self { 91 | core::ptr::null() 92 | } 93 | } 94 | 95 | impl ParkHandle for *const () { 96 | fn park(&self) { 97 | core::hint::spin_loop() 98 | } 99 | 100 | fn unpark(&self) {} 101 | } 102 | 103 | impl Wakeable for *const () { 104 | fn into_opaque(self) -> *const () { 105 | self 106 | } 107 | 108 | unsafe fn from_opaque(data: *const ()) -> Self { 109 | data 110 | } 111 | } 112 | 113 | /// Block the thread until the future is ready with current thread's parking handle. 114 | /// 115 | /// This allows one to use custom thread parking mechanisms in `no_std` environments. 116 | /// 117 | /// # Example 118 | /// 119 | /// ```no_run 120 | /// use std::thread::Thread; 121 | /// 122 | /// let my_fut = async {}; 123 | /// //let result = mfio::poller::block_on_t::(my_fut); 124 | /// ``` 125 | pub fn block_on_t(fut: F) -> F::Output { 126 | let handle = T::current(); 127 | let waker = handle.waker(); 128 | block_on_handle(fut, &handle, &waker) 129 | } 130 | 131 | /// Block the thread until the future is ready with given parking handle. 132 | /// 133 | /// This allows one to use custom thread parking mechanisms in `no_std` environments. 134 | /// 135 | /// # Example 136 | /// 137 | /// ```no_run 138 | /// use std::thread::Thread; 139 | /// 140 | /// let my_fut = async {}; 141 | /// //let result = mfio::poller::block_on_handle::(my_fut); 142 | /// ``` 143 | pub fn block_on_handle( 144 | mut fut: F, 145 | handle: &T, 146 | waker: &Waker, 147 | ) -> F::Output { 148 | // Pin the future so that it can be polled. 149 | // SAFETY: We shadow `fut` so that it cannot be used again. The future is now pinned to the stack and will not be 150 | // moved until the end of this scope. This is, incidentally, exactly what the `pin_mut!` macro from `pin_utils` 151 | // does. 152 | let mut fut = unsafe { core::pin::Pin::new_unchecked(&mut fut) }; 153 | 154 | let mut context = Context::from_waker(waker); 155 | 156 | // Poll the future to completion 157 | loop { 158 | match fut.as_mut().poll(&mut context) { 159 | Poll::Pending => handle.park(), 160 | Poll::Ready(item) => break item, 161 | } 162 | } 163 | } 164 | 165 | /// Block the thread until the future is ready. 
166 | /// 167 | /// # Example 168 | /// 169 | /// ```no_run 170 | /// let my_fut = async {}; 171 | /// //let result = mfio::poller::block_on(my_fut); 172 | /// ``` 173 | pub fn block_on(fut: F) -> F::Output { 174 | #[cfg(feature = "std")] 175 | return block_on_t::(fut); 176 | #[cfg(not(feature = "std"))] 177 | return block_on_t::<*const (), _>(fut); 178 | } 179 | 180 | #[cfg(feature = "std")] 181 | pub use std_impl::LocalThread; 182 | 183 | #[cfg(feature = "std")] 184 | mod std_impl { 185 | use super::*; 186 | use std::sync::{Arc, Condvar, Mutex}; 187 | 188 | #[derive(Default)] 189 | struct Signal { 190 | signaled: Mutex, 191 | cond: Condvar, 192 | } 193 | 194 | impl Signal { 195 | fn wait(&self) { 196 | let mut signaled = self 197 | .cond 198 | .wait_while(self.signaled.lock().unwrap(), |signaled| !*signaled) 199 | .unwrap(); 200 | *signaled = false; 201 | } 202 | 203 | fn wake(&self) { 204 | let mut signaled = self.signaled.lock().unwrap(); 205 | // Only one thread will be waiting 206 | self.cond.notify_one(); 207 | *signaled = true; 208 | } 209 | } 210 | 211 | thread_local! { 212 | static ACCESS: Arc = Arc::new(Signal::default()); 213 | } 214 | 215 | #[derive(Clone)] 216 | pub struct LocalThread(Arc); 217 | 218 | impl ThreadLocal for LocalThread { 219 | fn current() -> Self { 220 | LocalThread(ACCESS.with(Clone::clone)) 221 | } 222 | } 223 | 224 | impl ParkHandle for LocalThread { 225 | fn park(&self) { 226 | self.0.wait(); 227 | } 228 | 229 | fn unpark(&self) { 230 | self.0.wake(); 231 | } 232 | } 233 | 234 | impl Wakeable for LocalThread { 235 | fn into_opaque(self) -> *const () { 236 | // SAFETY: `Thread` internal layout is an Arc to inner type, which is represented as a 237 | // single pointer. The only thing we do with the pointer is transmute it back to 238 | // ThreadWaker in the waker functions. If for whatever reason Thread layout will change to 239 | // contain multiple fields, this will still be safe, because the compiler will simply 240 | // refuse to compile the program. 
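// NOTE: the single-pointer assumption holds here because `LocalThread` wraps an
// `Arc<Signal>`, which is represented as one pointer; `from_opaque` reverses the
// transmute without touching the reference count.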
241 | unsafe { mem::transmute::<_, *const ()>(self) } 242 | } 243 | 244 | unsafe fn from_opaque(data: *const ()) -> Self { 245 | mem::transmute(data) 246 | } 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /mfio/src/sample.rs: -------------------------------------------------------------------------------- 1 | use mfio::backend::*; 2 | use mfio::error::{Error, Location, State, Subject, INTERNAL_ERROR}; 3 | use mfio::io::*; 4 | use mfio::locks::Mutex; 5 | use core::sync::atomic::{AtomicBool, Ordering}; 6 | use core::task::Waker; 7 | 8 | #[allow(unused)] 9 | use mfio_derive::*; 10 | 11 | use tarc::{Arc, BaseArc}; 12 | 13 | use std::collections::VecDeque; 14 | 15 | type Address = u64; 16 | 17 | 18 | #[derive(Default)] 19 | pub(crate) struct Event { 20 | waker: Mutex>, 21 | signaled: AtomicBool, 22 | } 23 | 24 | impl Event { 25 | pub fn signal(&self) { 26 | if !self.signaled.swap(true, Ordering::Release) { 27 | if let Some(waker) = self.waker.lock().take() { 28 | waker.wake(); 29 | } 30 | } 31 | } 32 | 33 | pub async fn wait(&self) { 34 | let mut yielded = false; 35 | core::future::poll_fn(|cx| { 36 | if !yielded && !self.signaled.swap(false, Ordering::Acquire) { 37 | yielded = true; 38 | let mut guard = self.waker.lock(); 39 | // Check after locking to avoid race conditions 40 | if self.signaled.swap(false, Ordering::Acquire) { 41 | core::task::Poll::Ready(()) 42 | } else { 43 | *guard = Some(cx.waker().clone()); 44 | core::task::Poll::Pending 45 | } 46 | } else { 47 | core::task::Poll::Ready(()) 48 | } 49 | }) 50 | .await; 51 | } 52 | } 53 | 54 | struct IoThreadHandle { 55 | input: Mutex)>>, 56 | event: Arc, 57 | } 58 | 59 | impl Default for IoThreadHandle { 60 | fn default() -> Self { 61 | Self { 62 | input: Default::default(), 63 | event: Default::default(), 64 | } 65 | } 66 | } 67 | 68 | impl PacketIo for IoThreadHandle { 69 | fn send_io(&self, address: Address, packet: BoundPacketView) { 70 | let mut input = self.input.lock(); 71 | input.push_back((address, packet)); 72 | self.event.signal(); 73 | } 74 | } 75 | 76 | struct VolatileMem { 77 | buf: *mut u8, 78 | len: u64, 79 | } 80 | 81 | impl VolatileMem { 82 | fn read(&self, pos: u64, dest: BoundPacketView) { 83 | if pos >= self.len { 84 | dest.error(Error { 85 | code: INTERNAL_ERROR, 86 | location: Location::Backend, 87 | subject: Subject::Address, 88 | state: State::Outside, 89 | }); 90 | return; 91 | } 92 | let dest = if self.len < dest.len() || pos > self.len - dest.len() { 93 | let (a, b) = dest.split_at(self.len - pos); 94 | b.error(Error { 95 | code: INTERNAL_ERROR, 96 | location: Location::Backend, 97 | subject: Subject::Address, 98 | state: State::Outside, 99 | }); 100 | a 101 | } else { 102 | dest 103 | }; 104 | unsafe { 105 | let _ = dest.transfer_data(self.buf.add(pos as usize).cast()); 106 | } 107 | } 108 | 109 | fn write(&self, pos: u64, src: BoundPacketView) { 110 | if pos >= self.len { 111 | src.error(Error { 112 | code: INTERNAL_ERROR, 113 | location: Location::Backend, 114 | subject: Subject::Address, 115 | state: State::Outside, 116 | }); 117 | return; 118 | } 119 | let src = if pos > self.len - src.len() { 120 | let (a, b) = src.split_at(self.len - pos); 121 | b.error(Error { 122 | code: INTERNAL_ERROR, 123 | location: Location::Backend, 124 | subject: Subject::Address, 125 | state: State::Outside, 126 | }); 127 | a 128 | } else { 129 | src 130 | }; 131 | unsafe { 132 | let _ = src.transfer_data(self.buf.add(pos as usize).cast()); 133 | } 134 | } 135 | } 136 | 137 
| impl From> for VolatileMem { 138 | fn from(buf: Vec) -> Self { 139 | let len = buf.len() as u64; 140 | 141 | let buf = Box::leak(buf.into_boxed_slice()); 142 | 143 | let buf = buf.as_mut_ptr(); 144 | 145 | Self { buf, len } 146 | } 147 | } 148 | 149 | unsafe impl Send for VolatileMem {} 150 | unsafe impl Sync for VolatileMem {} 151 | 152 | impl Drop for VolatileMem { 153 | fn drop(&mut self) { 154 | unsafe { 155 | let _ = Box::from_raw(core::slice::from_raw_parts_mut(self.buf, self.len as usize)); 156 | } 157 | } 158 | } 159 | 160 | pub struct IoThreadState { 161 | read_io: BaseArc>, 162 | write_io: BaseArc>, 163 | backend: BackendContainer, 164 | } 165 | 166 | impl IoThreadState { 167 | fn new(mem: &Arc) -> Self { 168 | let read_io: BaseArc> = Default::default(); 169 | let write_io: BaseArc> = Default::default(); 170 | 171 | let read = { 172 | let mem = mem.clone(); 173 | let read_io = read_io.clone(); 174 | 175 | async move { 176 | loop { 177 | let proc_inp = |(addr, buf): (Address, BoundPacketView)| { 178 | mem.read(addr, buf); 179 | }; 180 | 181 | // try_pop here many elems 182 | { 183 | let mut input = read_io.input.lock(); 184 | 185 | while let Some(inp) = input.pop_front() { 186 | proc_inp(inp); 187 | } 188 | } 189 | 190 | read_io.event.wait().await; 191 | } 192 | } 193 | }; 194 | 195 | let write = { 196 | let mem = mem.clone(); 197 | let write_io = write_io.clone(); 198 | 199 | async move { 200 | loop { 201 | let proc_inp = |(pos, buf): (Address, BoundPacketView)| { 202 | mem.write(pos, buf); 203 | }; 204 | 205 | // try_pop here many elems 206 | { 207 | let mut input = write_io.input.lock(); 208 | 209 | while let Some(inp) = input.pop_front() { 210 | proc_inp(inp); 211 | } 212 | } 213 | 214 | write_io.event.wait().await; 215 | } 216 | } 217 | }; 218 | 219 | let future = async move { 220 | tokio::join!(read, write); 221 | }; 222 | 223 | let backend = BackendContainer::new_dyn(future); 224 | 225 | Self { 226 | read_io, 227 | write_io, 228 | backend, 229 | } 230 | } 231 | } 232 | 233 | #[derive(SyncIoRead, SyncIoWrite)] 234 | pub struct SampleIo { 235 | mem: Arc, 236 | thread_state: IoThreadState, 237 | } 238 | 239 | impl Clone for SampleIo { 240 | fn clone(&self) -> Self { 241 | let mem = self.mem.clone(); 242 | let thread_state = IoThreadState::new(&mem); 243 | 244 | Self { mem, thread_state } 245 | } 246 | } 247 | 248 | impl Default for SampleIo { 249 | fn default() -> Self { 250 | Self::new(vec![0; 0x100000]) 251 | } 252 | } 253 | 254 | impl SampleIo { 255 | pub fn new(mem: Vec) -> Self { 256 | let mem = Arc::new(mem.into()); 257 | 258 | let thread_state = IoThreadState::new(&mem); 259 | 260 | Self { mem, thread_state } 261 | } 262 | } 263 | 264 | impl PacketIo for SampleIo { 265 | fn send_io(&self, param: Address, view: BoundPacketView) { 266 | self.thread_state.write_io.send_io(param, view) 267 | } 268 | } 269 | 270 | impl PacketIo for SampleIo { 271 | fn send_io(&self, param: Address, view: BoundPacketView) { 272 | self.thread_state.read_io.send_io(param, view) 273 | } 274 | } 275 | 276 | impl IoBackend for SampleIo { 277 | fn polling_handle(&self) -> Option { 278 | None 279 | } 280 | 281 | fn get_backend(&self) -> BackendHandle { 282 | self.thread_state.backend.acquire(None) 283 | } 284 | } 285 | -------------------------------------------------------------------------------- /mfio/src/stdeq.rs: -------------------------------------------------------------------------------- 1 | //! `std::io` equivalent Read/Write traits. 
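//!
//! Rough shape of the intended flow (illustrative only; the backend construction
//! and exact trait bounds are assumed, not taken from this module):
//!
//! ```ignore
//! // Wrap any `PacketIo` backend in `Seekable` to get a shared stream position.
//! let io = Seekable::<_, u64>::from(backend);
//! io.set_pos(4);                        // StreamPos: move the cursor
//! let mut buf = [0u8; 8];
//! let read = io.read(&mut buf).await?;  // AsyncRead: positioned I/O at the cursor
//! assert!(read <= buf.len());           // the cursor advanced by `read` bytes
//! ```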
2 | 3 | use crate as mfio; 4 | use crate::error::Result; 5 | use crate::io::*; 6 | use crate::locks::Mutex; 7 | use crate::std_prelude::*; 8 | use crate::traits::*; 9 | use crate::util::{PosShift, UsizeMath}; 10 | use core::future::Future; 11 | use core::pin::Pin; 12 | use core::task::{Context, Poll}; 13 | use mfio_derive::*; 14 | use num::ToPrimitive; 15 | 16 | pub trait StreamPos { 17 | fn set_pos(&self, pos: Param); 18 | 19 | fn get_pos(&self) -> Param; 20 | 21 | fn update_pos Param>(&self, f: F); 22 | 23 | fn end(&self) -> Option { 24 | None 25 | } 26 | } 27 | 28 | #[cfg(feature = "std")] 29 | pub fn std_seek( 30 | io: &(impl StreamPos + ?Sized), 31 | pos: std::io::SeekFrom, 32 | ) -> std::io::Result { 33 | match pos { 34 | std::io::SeekFrom::Start(val) => { 35 | io.set_pos(val); 36 | Ok(val) 37 | } 38 | std::io::SeekFrom::End(val) => { 39 | if let Some(end) = io.end() { 40 | let pos = if val < 0 { 41 | end.checked_sub((-val) as u64) 42 | .ok_or(std::io::ErrorKind::InvalidInput)? 43 | } else { 44 | end + val as u64 45 | }; 46 | io.set_pos(pos); 47 | Ok(pos) 48 | } else { 49 | Err(std::io::ErrorKind::Unsupported.into()) 50 | } 51 | } 52 | std::io::SeekFrom::Current(val) => { 53 | let pos = io.get_pos(); 54 | let pos = if val < 0 { 55 | pos.checked_sub((-val) as u64) 56 | .ok_or(std::io::ErrorKind::InvalidInput)? 57 | } else { 58 | pos + val as u64 59 | }; 60 | io.set_pos(pos); 61 | Ok(pos) 62 | } 63 | } 64 | } 65 | 66 | impl> PosShift for Param { 67 | fn add_pos(&mut self, out: usize, io: &Io) { 68 | self.add_assign(out); 69 | io.set_pos(*self); 70 | } 71 | 72 | fn add_io_pos(io: &Io, out: usize) { 73 | io.update_pos(|pos| pos.add(out)) 74 | } 75 | } 76 | 77 | pub trait AsyncRead: IoRead { 78 | fn read<'a>(&'a self, buf: &'a mut [u8]) -> AsyncIoFut<'a, Self, Write, Param, &'a mut [u8]>; 79 | fn read_to_end<'a>(&'a self, buf: &'a mut Vec) -> StdReadToEndFut<'a, Self, Param>; 80 | } 81 | 82 | impl + StreamPos, Param: 'static + Copy> AsyncRead for T { 83 | fn read<'a>(&'a self, buf: &'a mut [u8]) -> AsyncIoFut<'a, Self, Write, Param, &'a mut [u8]> { 84 | let len = buf.len(); 85 | let (pkt, sync) = <&'a mut [u8] as IntoPacket>::into_packet(buf); 86 | AsyncIoFut { 87 | io: self, 88 | len, 89 | fut: self.io(self.get_pos(), pkt), 90 | sync: Some(sync), 91 | } 92 | } 93 | 94 | fn read_to_end<'a>(&'a self, buf: &'a mut Vec) -> StdReadToEndFut<'a, Self, Param> { 95 | StdReadToEndFut { 96 | io: self, 97 | fut: >::read_to_end(self, self.get_pos(), buf), 98 | } 99 | } 100 | } 101 | 102 | impl> AsyncRead for T { 103 | fn read<'a>(&'a self, buf: &'a mut [u8]) -> AsyncIoFut<'a, Self, Write, NoPos, &'a mut [u8]> { 104 | let len = buf.len(); 105 | let (pkt, sync) = <&'a mut [u8] as IntoPacket>::into_packet(buf); 106 | AsyncIoFut { 107 | io: self, 108 | len, 109 | fut: self.io(NoPos::new(), pkt), 110 | sync: Some(sync), 111 | } 112 | } 113 | 114 | fn read_to_end<'a>(&'a self, buf: &'a mut Vec) -> StdReadToEndFut<'a, Self, NoPos> { 115 | StdReadToEndFut { 116 | io: self, 117 | fut: >::read_to_end(self, NoPos::new(), buf), 118 | } 119 | } 120 | } 121 | 122 | pub trait AsyncWrite: IoWrite { 123 | fn write<'a>(&'a self, buf: &'a [u8]) -> AsyncIoFut<'a, Self, Read, Param, &'a [u8]>; 124 | } 125 | 126 | impl + StreamPos, Param: Copy> AsyncWrite for T { 127 | fn write<'a>(&'a self, buf: &'a [u8]) -> AsyncIoFut<'a, Self, Read, Param, &'a [u8]> { 128 | let len = buf.len(); 129 | let (pkt, sync) = buf.into_packet(); 130 | AsyncIoFut { 131 | io: self, 132 | len, 133 | fut: self.io(self.get_pos(), pkt), 134 | sync: 
Some(sync), 135 | } 136 | } 137 | } 138 | 139 | impl> AsyncWrite for T { 140 | fn write<'a>(&'a self, buf: &'a [u8]) -> AsyncIoFut<'a, Self, Read, NoPos, &'a [u8]> { 141 | let len = buf.len(); 142 | let (pkt, sync) = buf.into_packet(); 143 | AsyncIoFut { 144 | io: self, 145 | len, 146 | fut: self.io(NoPos::new(), pkt), 147 | sync: Some(sync), 148 | } 149 | } 150 | } 151 | 152 | pub struct AsyncIoFut<'a, Io: ?Sized, Perms: PacketPerms, Param: 'a, Obj: IntoPacket<'a, Perms>> { 153 | io: &'a Io, 154 | fut: IoFut<'a, Io, Perms, Param, Obj::Target>, 155 | pub(crate) sync: Option, 156 | len: usize, 157 | } 158 | 159 | impl< 160 | 'a, 161 | Io: PacketIo, 162 | Perms: PacketPerms, 163 | Param: PosShift, 164 | Obj: IntoPacket<'a, Perms>, 165 | > Future for AsyncIoFut<'a, Io, Perms, Param, Obj> 166 | { 167 | type Output = Result; 168 | 169 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 170 | let this = unsafe { self.get_unchecked_mut() }; 171 | 172 | let fut = unsafe { Pin::new_unchecked(&mut this.fut) }; 173 | 174 | fut.poll(cx).map(|pkt| { 175 | let hdr = <>::Target as OpaqueStore>::stack_hdr(&pkt); 176 | // TODO: put this after error checking 177 | Obj::sync_back(hdr, this.sync.take().unwrap()); 178 | let progressed = core::cmp::min(hdr.error_clamp().to_usize().unwrap_or(!0), this.len); 179 | Param::add_io_pos(this.io, progressed); 180 | // TODO: actual error checking 181 | Ok(progressed) 182 | }) 183 | } 184 | } 185 | 186 | pub struct StdReadToEndFut<'a, Io: PacketIo, Param> { 187 | io: &'a Io, 188 | fut: ReadToEndFut<'a, Io, Param>, 189 | } 190 | 191 | impl<'a, Io: PacketIo, Param: PosShift> Future 192 | for StdReadToEndFut<'a, Io, Param> 193 | { 194 | type Output = Result<()>; 195 | 196 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 197 | let this = unsafe { self.get_unchecked_mut() }; 198 | 199 | match unsafe { Pin::new_unchecked(&mut this.fut) }.poll(cx) { 200 | Poll::Ready(Ok(r)) => { 201 | Param::add_io_pos(this.io, r); 202 | Poll::Ready(Ok(())) 203 | } 204 | Poll::Ready(Err(e)) => Poll::Ready(Err(e)), 205 | Poll::Pending => Poll::Pending, 206 | } 207 | } 208 | } 209 | 210 | #[macro_export] 211 | /// Implements `Read`+`Write`+`Seek` traits on compatible type. 212 | /// 213 | /// Implements `io::Seek` on type implementing `StreamPos`, `io::Write` on type implementing 214 | /// `AsyncWrite` and `io::Read` on type implementing `AsyncRead`. 215 | macro_rules! 
stdio_impl { 216 | (<$($lt2:lifetime,)* $($ty2:ident),*> $t:ident <$($lt:lifetime,)* $($ty:ident),*> @ $($tt:tt)*) => { 217 | impl<$($lt2,)* $($ty2),*> std::io::Seek for $t<$($lt,)* $($ty),*> where $($tt)* { 218 | fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { 219 | $crate::stdeq::std_seek(self, pos) 220 | } 221 | 222 | fn stream_position(&mut self) -> std::io::Result { 223 | Ok(self.get_pos()) 224 | } 225 | 226 | fn rewind(&mut self) -> std::io::Result<()> { 227 | self.set_pos(0); 228 | Ok(()) 229 | } 230 | } 231 | 232 | impl<$($lt2,)* $($ty2),*> std::io::Read for $t<$($lt,)* $($ty),*> where $t<$($lt,)* $($ty),*>: $crate::stdeq::AsyncRead + $crate::backend::IoBackend, $($tt)* { 233 | fn read(&mut self, buf: &mut [u8]) -> std::io::Result { 234 | use $crate::backend::IoBackendExt; 235 | self.block_on($crate::stdeq::AsyncRead::read(self, buf)).map_err(|_| std::io::ErrorKind::Other.into()) 236 | } 237 | 238 | fn read_to_end(&mut self, buf: &mut Vec) -> std::io::Result { 239 | use $crate::backend::IoBackendExt; 240 | let len = buf.len(); 241 | self.block_on($crate::stdeq::AsyncRead::read_to_end(self, buf)).map_err(|_| std::io::ErrorKind::Other)?; 242 | Ok(buf.len() - len) 243 | } 244 | } 245 | 246 | impl<$($lt2,)* $($ty2),*> std::io::Write for $t<$($lt,)* $($ty),*> where $t<$($lt,)* $($ty),*>: $crate::stdeq::AsyncWrite + $crate::backend::IoBackend, $($tt)* { 247 | fn write(&mut self, buf: &[u8]) -> std::io::Result { 248 | use $crate::backend::IoBackendExt; 249 | self.block_on(AsyncWrite::write(self, buf)).map_err(|_| std::io::ErrorKind::Other.into()) 250 | } 251 | 252 | fn flush(&mut self) -> std::io::Result<()> { 253 | Ok(()) 254 | } 255 | } 256 | }; 257 | ($t:ident @ $($tt:tt)*) => { 258 | $crate::stdio_impl!($t<> @ $($tt)*); 259 | } 260 | } 261 | 262 | #[derive(SyncIoWrite, SyncIoRead)] 263 | pub struct Seekable { 264 | pos: Mutex, 265 | handle: T, 266 | } 267 | 268 | impl From for Seekable { 269 | fn from(handle: T) -> Self { 270 | Self { 271 | pos: Default::default(), 272 | handle, 273 | } 274 | } 275 | } 276 | 277 | impl, Perms: PacketPerms, Param> PacketIo 278 | for Seekable 279 | { 280 | fn send_io(&self, param: Param, view: BoundPacketView) { 281 | self.handle.send_io(param, view) 282 | } 283 | } 284 | 285 | impl StreamPos for Seekable { 286 | fn get_pos(&self) -> Param { 287 | *self.pos.lock() 288 | } 289 | 290 | fn set_pos(&self, pos: Param) { 291 | *self.pos.lock() = pos; 292 | } 293 | 294 | fn update_pos Param>(&self, f: F) { 295 | let mut pos = self.pos.lock(); 296 | *pos = f(*pos); 297 | } 298 | } 299 | 300 | #[cfg(feature = "std")] 301 | stdio_impl!( Seekable @); 302 | 303 | #[derive(SyncIoWrite, SyncIoRead)] 304 | pub struct SeekableRef<'a, T, Param> { 305 | pos: Mutex, 306 | handle: &'a T, 307 | } 308 | 309 | impl<'a, T, Param: Default> From<&'a T> for SeekableRef<'a, T, Param> { 310 | fn from(handle: &'a T) -> Self { 311 | Self { 312 | pos: Default::default(), 313 | handle, 314 | } 315 | } 316 | } 317 | 318 | impl, Perms: PacketPerms, Param: core::fmt::Debug> PacketIo 319 | for SeekableRef<'_, T, Param> 320 | { 321 | fn send_io(&self, param: Param, view: BoundPacketView) { 322 | self.handle.send_io(param, view) 323 | } 324 | } 325 | 326 | impl StreamPos for SeekableRef<'_, T, Param> { 327 | fn get_pos(&self) -> Param { 328 | *self.pos.lock() 329 | } 330 | 331 | fn set_pos(&self, pos: Param) { 332 | *self.pos.lock() = pos; 333 | } 334 | 335 | fn update_pos Param>(&self, f: F) { 336 | let mut pos = self.pos.lock(); 337 | *pos = f(*pos); 338 | } 339 | } 340 | 
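// Invocation sketch for `stdio_impl!` (the `MyWrapper` type here is hypothetical and only
// illustrates the macro's `<impl generics> Type<type generics> @ where-bounds` shape; the
// real invocations for `Seekable` above and `SeekableRef` below follow the same pattern):
//
//     stdio_impl!(<T> MyWrapper<T> @ T: Sized);
//
// The expansion forwards `std::io::{Read, Write, Seek}` calls to the wrapper's
// `AsyncRead`/`AsyncWrite`/`StreamPos` implementations by blocking on its `IoBackend`, so
// the wrapper must provide those traits for the generated impls to compile.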
341 | #[cfg(feature = "std")] 342 | stdio_impl!(<'a, T> SeekableRef<'a, T, u64> @); 343 | 344 | #[derive(SyncIoWrite, SyncIoRead)] 345 | pub struct FakeSeek { 346 | handle: T, 347 | } 348 | 349 | impl From for FakeSeek { 350 | fn from(handle: T) -> Self { 351 | Self { handle } 352 | } 353 | } 354 | 355 | impl, Perms: PacketPerms, Param> PacketIo for FakeSeek { 356 | fn send_io(&self, param: Param, view: BoundPacketView) { 357 | self.handle.send_io(param, view) 358 | } 359 | } 360 | 361 | impl> StreamPos for FakeSeek { 362 | fn get_pos(&self) -> Param { 363 | !Param::default() 364 | } 365 | 366 | fn set_pos(&self, _: Param) {} 367 | 368 | fn update_pos Param>(&self, _: F) {} 369 | } 370 | 371 | #[cfg(feature = "std")] 372 | stdio_impl!( FakeSeek @); 373 | -------------------------------------------------------------------------------- /mfio/src/traits.rs: -------------------------------------------------------------------------------- 1 | //! Helper traits 2 | 3 | use crate::std_prelude::*; 4 | 5 | use crate::io::*; 6 | 7 | use crate::backend::{IoBackend, IoBackendExt}; 8 | use crate::error::Error; 9 | use crate::util::{CopyPos, UsizeMath}; 10 | use bytemuck::Pod; 11 | use core::future::Future; 12 | use core::mem::MaybeUninit; 13 | use core::pin::Pin; 14 | use core::task::{Context, Poll}; 15 | 16 | /// I/O read operations. 17 | pub trait IoRead: PacketIo { 18 | /// Forwards a read request to the I/O object. 19 | /// 20 | /// This is equivalent to `PacketIo::io`, but disambiguates packet permissions. 21 | fn read_raw<'a, T: PacketStore<'a, Write>>( 22 | &'a self, 23 | pos: Pos, 24 | packet: T, 25 | ) -> IoFut<'a, Self, Write, Pos, T> { 26 | self.io(pos, packet) 27 | } 28 | 29 | /// Read all data into the given object. 30 | fn read_all<'a, T: IntoPacket<'a, Write>>( 31 | &'a self, 32 | pos: Pos, 33 | packet: T, 34 | ) -> IoFullFut<'a, Self, Write, Pos, T> { 35 | let (packet, sync) = packet.into_packet(); 36 | IoFullFut { 37 | fut: self.io(pos, packet), 38 | sync: Some(sync), 39 | } 40 | } 41 | 42 | /// Reads data into a `Pod` struct. 43 | fn read_into<'a, T: Pod>( 44 | &'a self, 45 | pos: Pos, 46 | data: &'a mut MaybeUninit, 47 | ) -> IoFullFut<'a, Self, Write, Pos, &'a mut [MaybeUninit]> { 48 | let buf = unsafe { 49 | core::slice::from_raw_parts_mut( 50 | data as *mut MaybeUninit as *mut MaybeUninit, 51 | core::mem::size_of::(), 52 | ) 53 | }; 54 | self.read_all(pos, buf) 55 | } 56 | 57 | /// Reads data into a new `Pod` struct. 58 | fn read(&self, pos: Pos) -> IoReadFut { 59 | let pkt = FullPacket::<_, Write>::new_uninit(); 60 | IoReadFut(self.io(pos, pkt)) 61 | } 62 | 63 | /// Reads data into given buffer until a gap is reached. 64 | fn read_to_end<'a>(&'a self, pos: Pos, buf: &'a mut Vec) -> ReadToEndFut<'a, Self, Pos> 65 | where 66 | Pos: CopyPos, 67 | { 68 | let start_len = buf.len(); 69 | let start_cap = buf.capacity(); 70 | 71 | // Reserve enough for 32 bytes of data initially 72 | if start_cap - start_len < 32 { 73 | buf.reserve(32 - (start_cap - start_len)); 74 | } 75 | 76 | // Issue a read 77 | let data = buf.as_mut_ptr() as *mut MaybeUninit; 78 | // SAFETY: the data here is uninitialized, and we are getting exclusive access 79 | // to it. 
80 | let data = unsafe { 81 | core::slice::from_raw_parts_mut(data.add(start_len), buf.capacity() - start_len) 82 | }; 83 | 84 | let fut = Some(data.into_packet()).map(|(pkt, sync)| (self.io(pos.copy_pos(), pkt), sync)); 85 | 86 | ReadToEndFut { 87 | io: self, 88 | pos, 89 | buf, 90 | fut, 91 | start_len, 92 | start_cap, 93 | } 94 | } 95 | } 96 | 97 | impl IoRead for T where T: PacketIo {} 98 | 99 | /// I/O write operations. 100 | pub trait IoWrite: PacketIo { 101 | /// Forwards a write request to the I/O object. 102 | /// 103 | /// This is equivalent to `PacketIo::io`, but disambiguates packet permissions. 104 | fn write_raw<'a, T: PacketStore<'a, Read>>( 105 | &'a self, 106 | pos: Pos, 107 | packet: T, 108 | ) -> IoFut<'a, Self, Read, Pos, T> { 109 | self.io(pos, packet) 110 | } 111 | 112 | /// Writes all data in the given packet to destination. 113 | fn write_all<'a, T: IntoPacket<'a, Read>>( 114 | &'a self, 115 | pos: Pos, 116 | packet: T, 117 | ) -> IoFullFut<'a, Self, Read, Pos, T> { 118 | let (packet, sync) = packet.into_packet(); 119 | IoFullFut { 120 | fut: self.io(pos, packet), 121 | sync: Some(sync), 122 | } 123 | } 124 | 125 | /// Writes a pod object into to destination. 126 | fn write<'a, T>(&'a self, pos: Pos, data: &'a T) -> IoFullFut<'a, Self, Read, Pos, &'a [u8]> { 127 | let buf = unsafe { 128 | core::slice::from_raw_parts(data as *const T as *const u8, core::mem::size_of::()) 129 | }; 130 | self.write_all(pos, buf) 131 | } 132 | } 133 | 134 | impl IoWrite for T where T: PacketIo {} 135 | 136 | pub struct IoFullFut< 137 | 'a, 138 | Io: PacketIo, 139 | Perms: PacketPerms, 140 | Param: 'a, 141 | Obj: IntoPacket<'a, Perms>, 142 | > { 143 | fut: IoFut<'a, Io, Perms, Param, Obj::Target>, 144 | sync: Option, 145 | } 146 | 147 | impl<'a, Io: PacketIo, Perms: PacketPerms, Param, Obj: IntoPacket<'a, Perms>> Future 148 | for IoFullFut<'a, Io, Perms, Param, Obj> 149 | { 150 | type Output = Result<::StackReq<'a>, Error>; 151 | 152 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 153 | let this = unsafe { self.get_unchecked_mut() }; 154 | 155 | let fut = unsafe { Pin::new_unchecked(&mut this.fut) }; 156 | 157 | fut.poll(cx).map(|pkt| { 158 | let hdr = <>::Target as OpaqueStore>::stack_hdr(&pkt); 159 | // TODO: put this after error checking 160 | Obj::sync_back(hdr, this.sync.take().unwrap()); 161 | hdr.err_on_zero().map(|_| pkt) 162 | }) 163 | } 164 | } 165 | 166 | type UninitSlice<'a> = &'a mut [MaybeUninit]; 167 | 168 | #[allow(clippy::type_complexity)] 169 | pub struct ReadToEndFut<'a, Io: PacketIo, Param> { 170 | io: &'a Io, 171 | pos: Param, 172 | buf: &'a mut Vec, 173 | fut: Option<( 174 | IoFut<'a, Io, Write, Param, as IntoPacket<'a, Write>>::Target>, 175 | as IntoPacket<'a, Write>>::SyncHandle, 176 | )>, 177 | start_len: usize, 178 | start_cap: usize, 179 | } 180 | 181 | impl<'a, Io: PacketIo, Param: CopyPos + UsizeMath> Future 182 | for ReadToEndFut<'a, Io, Param> 183 | { 184 | type Output = Result; 185 | 186 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 187 | let this = unsafe { self.get_unchecked_mut() }; 188 | 189 | loop { 190 | let (fut, _) = this.fut.as_mut().expect("Poll called in invalid state"); 191 | let fut = unsafe { Pin::new_unchecked(fut) }; 192 | 193 | match fut.poll(cx) { 194 | Poll::Ready(pkt) => { 195 | // TODO: check into safety of this. We are technically unpinning a previously 196 | // pinned object. 
197 | let (_, sync) = this.fut.take().unwrap(); 198 | 199 | let hdr = < as IntoPacket<'a, Write>>::Target as OpaqueStore>::stack_hdr(&pkt); 200 | let len = Write::len(hdr); 201 | let clamp = hdr.error_clamp(); 202 | 203 | as IntoPacket<'a, Write>>::sync_back(hdr, sync); 204 | // SAFETY: all these bytes have been successfully read 205 | unsafe { 206 | this.buf 207 | .set_len(this.buf.len() + core::cmp::min(clamp, len) as usize) 208 | }; 209 | 210 | // We reached the end 211 | if clamp < len || clamp == 0 { 212 | let total_len = this.buf.len() - this.start_len; 213 | // TODO: figure out how to extract error on 0 read 214 | break Poll::Ready(Ok(total_len)); 215 | } else { 216 | // Double read size, but cap it to 2MB 217 | let reserve_len = 218 | core::cmp::min(this.buf.capacity() - this.start_cap, 0x20000); 219 | this.buf.reserve(reserve_len); 220 | 221 | // Issue a read 222 | let data = this.buf.as_mut_ptr() as *mut MaybeUninit; 223 | // SAFETY: the data here is uninitialized, and we are getting exclusive access 224 | // to it. 225 | let data = unsafe { 226 | core::slice::from_raw_parts_mut( 227 | data.add(this.buf.len()), 228 | this.buf.capacity() - this.buf.len(), 229 | ) 230 | }; 231 | 232 | this.fut = Some(data.into_packet()).map(|(pkt, sync)| { 233 | ( 234 | this.io.io( 235 | this.pos.copy_pos().add(this.buf.len() - this.start_len), 236 | pkt, 237 | ), 238 | sync, 239 | ) 240 | }); 241 | } 242 | } 243 | Poll::Pending => break Poll::Pending, 244 | } 245 | } 246 | } 247 | } 248 | 249 | pub struct IoReadFut<'a, Io: PacketIo, Param: 'a, T: 'static>( 250 | IoFut<'a, Io, Write, Param, FullPacket, Write>>, 251 | ); 252 | 253 | impl<'a, Io: PacketIo, Param, T: 'a> Future for IoReadFut<'a, Io, Param, T> { 254 | type Output = Result; 255 | 256 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 257 | let this = unsafe { self.get_unchecked_mut() }; 258 | 259 | let fut = unsafe { Pin::new_unchecked(&mut this.0) }; 260 | 261 | fut.poll(cx).map(|pkt| { 262 | pkt.err_on_zero() 263 | .map(|_| unsafe { core::ptr::read(pkt.simple_data_ptr().cast::()) }) 264 | }) 265 | } 266 | } 267 | 268 | pub mod sync { 269 | //! Synchronous I/O wrappers 270 | use super::*; 271 | 272 | // TODO: figure out how to expose these over cglue 273 | 274 | /// Synchronous I/O read operations. 275 | /// 276 | /// This trait simply wraps `PacketIo + IoBackend` types in order to not subject the user to 277 | /// async code. 278 | pub trait SyncIoRead: IoRead + IoBackend { 279 | fn read_all<'a>( 280 | &'a self, 281 | pos: Pos, 282 | packet: impl IntoPacket<'a, Write>, 283 | ) -> Result<(), Error> { 284 | self.block_on(IoRead::read_all(self, pos, packet)) 285 | .map(|_| ()) 286 | } 287 | 288 | fn read_into<'a, T: Pod>( 289 | &'a self, 290 | pos: Pos, 291 | data: &'a mut MaybeUninit, 292 | ) -> Result<(), Error> { 293 | self.block_on(IoRead::read_into(self, pos, data)) 294 | .map(|_| ()) 295 | } 296 | 297 | fn read(&self, pos: Pos) -> Result { 298 | self.block_on(IoRead::read(self, pos)) 299 | } 300 | 301 | fn read_to_end<'a>(&'a self, pos: Pos, buf: &'a mut Vec) -> Option 302 | where 303 | ReadToEndFut<'a, Self, Pos>: Future>, 304 | Pos: CopyPos, 305 | { 306 | self.block_on(IoRead::read_to_end(self, pos, buf)) 307 | } 308 | } 309 | 310 | /// Synchronous I/O write operations. 311 | /// 312 | /// This trait simply wraps `PacketIo + IoBackend` types in order to not subject the user to 313 | /// async code. 
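/// 
/// A minimal sketch of the blocking call style (illustrative; `io` stands for any handle
/// implementing `SyncIoWrite` with a `u64` position parameter — the concrete handle type and
/// the position type are assumptions, not part of this trait's definition):
/// 
/// ```ignore
/// // Blocks on the handle's backend until the whole buffer is written at offset 0.
/// io.write_all(0u64, &[1u8, 2, 3, 4][..]).unwrap();
/// // Writes a plain-old-data value at offset 4 in the same blocking fashion.
/// io.write(4u64, &0x0d15ea5e_u32).unwrap();
/// ```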
314 | pub trait SyncIoWrite: IoWrite + IoBackend { 315 | fn write_all<'a>( 316 | &'a self, 317 | pos: Pos, 318 | packet: impl IntoPacket<'a, Read>, 319 | ) -> Result<(), Error> { 320 | self.block_on(IoWrite::write_all(self, pos, packet)) 321 | .map(|_| ()) 322 | } 323 | 324 | fn write<'a, T>(&'a self, pos: Pos, data: &'a T) -> Result<(), Error> { 325 | self.block_on(IoWrite::write(self, pos, data)).map(|_| ()) 326 | } 327 | } 328 | } 329 | -------------------------------------------------------------------------------- /mfio/src/util.rs: -------------------------------------------------------------------------------- 1 | use crate::io::NoPos; 2 | 3 | pub(crate) trait UsizeMath { 4 | fn add_assign(&mut self, val: usize); 5 | fn add(self, val: usize) -> Self; 6 | } 7 | 8 | impl UsizeMath for usize { 9 | fn add_assign(&mut self, val: usize) { 10 | *self += val; 11 | } 12 | 13 | fn add(self, val: usize) -> Self { 14 | self + val 15 | } 16 | } 17 | 18 | impl UsizeMath for u64 { 19 | fn add_assign(&mut self, val: usize) { 20 | *self += val as u64; 21 | } 22 | 23 | fn add(self, val: usize) -> Self { 24 | self + val as u64 25 | } 26 | } 27 | 28 | impl UsizeMath for NoPos { 29 | fn add_assign(&mut self, _: usize) {} 30 | 31 | fn add(self, _: usize) -> Self { 32 | self 33 | } 34 | } 35 | 36 | // FIXME: this trait shouldn't be public 37 | pub trait CopyPos: Sized { 38 | fn copy_pos(&self) -> Self; 39 | } 40 | 41 | // This trait unifies implementations on NoPos (streams) with seekable I/O 42 | pub(crate) trait PosShift: CopyPos + UsizeMath { 43 | fn add_pos(&mut self, out: usize, io: &Io); 44 | fn add_io_pos(io: &Io, out: usize); 45 | } 46 | 47 | impl CopyPos for Param { 48 | fn copy_pos(&self) -> Self { 49 | *self 50 | } 51 | } 52 | 53 | impl CopyPos for NoPos { 54 | fn copy_pos(&self) -> Self { 55 | Self::new() 56 | } 57 | } 58 | 59 | impl PosShift for NoPos { 60 | fn add_pos(&mut self, _: usize, _: &Io) {} 61 | fn add_io_pos(_: &Io, _: usize) {} 62 | } 63 | -------------------------------------------------------------------------------- /version-hack/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "version-hack" 3 | version = "0.1.0" 4 | edition = "2018" 5 | 6 | [dependencies] 7 | enclose = "=1.2.0" 8 | 9 | [dev-dependencies] 10 | pollster = "=0.2.0" 11 | -------------------------------------------------------------------------------- /version-hack/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub fn add(left: usize, right: usize) -> usize { 2 | left + right 3 | } 4 | 5 | #[cfg(test)] 6 | mod tests { 7 | use super::*; 8 | 9 | #[test] 10 | fn it_works() { 11 | let result = add(2, 2); 12 | assert_eq!(result, 4); 13 | } 14 | } 15 | --------------------------------------------------------------------------------