├── .github └── workflows │ ├── release.yml │ ├── rust.yml │ └── xfstests.yml ├── .gitignore ├── .gitmodules ├── CHAGNELOG.md ├── CODEOWNERS ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-BSD-3-Clause ├── Makefile ├── README.md ├── build.rs ├── coverage_config_x86_64.json ├── deny.toml ├── docs ├── images │ ├── fuse-backend-architecture.drawio │ ├── fuse-backend-architecture.svg │ ├── overlayfs.drawio │ ├── overlayfs_dir.png │ ├── overlayfs_non_dir_file.png │ └── overlayfs_structs.png └── overlayfs.md ├── src ├── abi │ ├── fuse_abi_linux.rs │ ├── fuse_abi_macos.rs │ ├── mod.rs │ └── virtio_fs.rs ├── api │ ├── filesystem │ │ ├── async_io.rs │ │ ├── mod.rs │ │ ├── overlay.rs │ │ └── sync_io.rs │ ├── mod.rs │ ├── pseudo_fs.rs │ ├── server │ │ ├── async_io.rs │ │ ├── mod.rs │ │ └── sync_io.rs │ └── vfs │ │ ├── async_io.rs │ │ ├── mod.rs │ │ └── sync_io.rs ├── common │ ├── async_file.rs │ ├── async_runtime.rs │ ├── file_buf.rs │ ├── file_traits.rs │ ├── mod.rs │ └── mpmc.rs ├── lib.rs ├── overlayfs │ ├── config.rs │ ├── inode_store.rs │ ├── mod.rs │ ├── sync_io.rs │ └── utils.rs ├── passthrough │ ├── async_io.rs │ ├── config.rs │ ├── credentials.rs │ ├── file_handle.rs │ ├── inode_store.rs │ ├── mod.rs │ ├── mount_fd.rs │ ├── os_compat.rs │ ├── overlay.rs │ ├── statx.rs │ ├── sync_io.rs │ ├── util.rs │ └── xattrmap.rs └── transport │ ├── fs_cache_req_handler.rs │ ├── fusedev │ ├── fuse_t_session.rs │ ├── linux_session.rs │ ├── macos_session.rs │ └── mod.rs │ ├── mod.rs │ └── virtiofs │ └── mod.rs └── tests ├── example ├── macfuse.rs ├── mod.rs └── passthroughfs.rs ├── macfuse_smoke.rs ├── overlay ├── Cargo.toml └── src │ └── main.rs ├── passthrough ├── Cargo.toml └── src │ └── main.rs ├── scripts ├── unionmount_test_overlay.sh ├── xfstests_overlay.exclude ├── xfstests_overlay.sh ├── xfstests_pathr.exclude └── xfstests_pathr.sh └── smoke.rs /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 
| 3 | on: 4 | push: 5 | tags: 6 | - v[0-9]+.[0-9]+.[0-9]+ 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | release: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v2 17 | - name: Publish 18 | run: | 19 | sudo apt-get update && sudo apt-get install --yes sed 20 | VERSION=$(sed --regexp-extended --silent 's/^version = \"(.*)"$/\1/p' Cargo.toml) 21 | [ "$GITHUB_REF_NAME" == "v$VERSION" ] 22 | cargo publish --token ${{ secrets.CRATES_IO_TOKEN }} 23 | - name: Release 24 | uses: softprops/action-gh-release@v1 25 | with: 26 | generate_release_notes: true 27 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: '*' 6 | pull_request: 7 | branches: [ master ] 8 | workflow_dispatch: 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | RUST_BACKTRACE: 1 13 | 14 | jobs: 15 | CI: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | arch: [amd64, arm64, ppc64le, riscv64, s390x] 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Cache cargo 23 | uses: Swatinem/rust-cache@v2.2.0 24 | with: 25 | cache-on-failure: true 26 | key: ${{ runner.os }}-cargo-${{ matrix.arch }} 27 | 28 | - name: Install Rust 29 | uses: actions-rs/toolchain@v1 30 | with: 31 | profile: minimal 32 | toolchain: stable 33 | override: true 34 | 35 | - name: build and check 36 | run: | 37 | declare -A rust_target_map=( ["amd64"]="x86_64-unknown-linux-musl" ["arm64"]="aarch64-unknown-linux-musl" ["ppc64le"]="powerpc64le-unknown-linux-gnu" ["riscv64"]="riscv64gc-unknown-linux-gnu" ["s390x"]="s390x-unknown-linux-gnu") 38 | RUST_TARGET=${rust_target_map[${{ matrix.arch }}]} 39 | cargo install --version 0.2.5 cross 40 | rustup component add rustfmt clippy 41 | make -e RUST_TARGET=$RUST_TARGET -e CARGO=cross check 42 | - name: smoke 43 | if: ${{ matrix.arch == 'amd64' 
}} 44 | run: | 45 | echo user_allow_other | sudo tee --append /etc/fuse.conf 46 | CARGO_HOME=${HOME}/.cargo 47 | CARGO_BIN=$(which cargo) 48 | sudo -E CARGO=${CARGO_BIN} make test 49 | sudo -E CARGO=${CARGO_BIN} make smoke-all 50 | sudo chown -R $(id -u):$(id -g) "${HOME}/.cargo" 51 | 52 | Macos-CI: 53 | runs-on: macos-latest 54 | steps: 55 | - name: Install macfuse 56 | run: | 57 | brew install --cask macfuse 58 | wget https://github.com/macos-fuse-t/fuse-t/releases/download/1.0.24/fuse-t-macos-installer-1.0.24.pkg 59 | sudo installer -pkg fuse-t-macos-installer-1.0.24.pkg -target / 60 | - uses: actions/checkout@v3 61 | - name: build and check 62 | run: | 63 | make smoke-macos 64 | 65 | deny: 66 | name: Cargo Deny 67 | runs-on: ubuntu-latest 68 | timeout-minutes: 10 69 | steps: 70 | - uses: actions/checkout@v3 71 | - uses: EmbarkStudios/cargo-deny-action@v2 72 | -------------------------------------------------------------------------------- /.github/workflows/xfstests.yml: -------------------------------------------------------------------------------- 1 | name: xfstests 2 | 3 | on: 4 | push: 5 | branches: '*' 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUST_BACKTRACE: 1 12 | 13 | jobs: 14 | xfstests_on_passthrough: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | - name: Install Rust 20 | uses: actions-rs/toolchain@v1 21 | with: 22 | profile: minimal 23 | toolchain: stable 24 | override: true 25 | - name: Build passthrough binary 26 | run: | 27 | cd tests/passthrough 28 | cargo build --release 29 | sudo install -t /usr/sbin/ -m 700 ./target/release/passthrough 30 | - name: Setup and run xfstest 31 | run: | 32 | cd $GITHUB_WORKSPACE 33 | sudo ./tests/scripts/xfstests_pathr.sh 34 | 35 | xfstests_on_overlayfs: 36 | runs-on: ubuntu-latest 37 | steps: 38 | - name: Checkout 39 | uses: actions/checkout@v4 40 | - name: Install Rust 41 | uses: actions-rs/toolchain@v1 42 | 
with: 43 | profile: minimal 44 | toolchain: stable 45 | override: true 46 | - name: Build overlay binary 47 | run: | 48 | cd tests/overlay 49 | cargo build --release 50 | sudo install -t /usr/sbin/ -m 700 ./target/release/overlay 51 | - name: Setup and run xfstest 52 | run: | 53 | cd $GITHUB_WORKSPACE 54 | sudo ./tests/scripts/xfstests_overlay.sh 55 | 56 | unionmount_testsuite_on_overlayfs: 57 | runs-on: ubuntu-latest 58 | steps: 59 | - name: Checkout 60 | uses: actions/checkout@v4 61 | - name: Install Rust 62 | uses: actions-rs/toolchain@v1 63 | with: 64 | profile: minimal 65 | toolchain: stable 66 | override: true 67 | - name: Build overlay binary 68 | run: | 69 | cd tests/overlay 70 | cargo build --release 71 | sudo install -t /usr/sbin/ -m 700 ./target/release/overlay 72 | - name: Setup and run unionmount testsuite 73 | run: | 74 | cd $GITHUB_WORKSPACE 75 | sudo ./tests/scripts/unionmount_test_overlay.sh -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | **/*.rs.bk 3 | **/Cargo.lock 4 | **/.vscode 5 | /.idea 6 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rust-vmm-ci"] 2 | path = rust-vmm-ci 3 | url = https://github.com/rust-vmm/rust-vmm-ci.git 4 | -------------------------------------------------------------------------------- /CHAGNELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | ## [Unreleased] 3 | 4 | ## [0.12.0] 5 | ### Added 6 | - [156](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/156): Fuse OverlayFs implementation. 7 | - [166](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/166): Import latest improvement for passthroughfs from virtiofsd project. 
8 | - [169](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/169): Optimize implementation of lookup() for passthroughfs. 9 | - [170](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/170): abi: unify st_nlink. 10 | - [172](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/172): ptfs: refine implementation of seal_size_check(). 11 | - [173](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/173): ptfs: use BorrowedFd instead of RawFd when possible. 12 | - [174](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/174): ptfs: add support for new cache mode Metadata. 13 | - [177](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/177): fusedev: add clone_fuse_file method to FuseSession. 14 | 15 | ### Fixed 16 | - [125](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/125): fusedev: add fd-passthrough support. 17 | - [178](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/178): Fix batch forget can't handle too large msg. 18 | - [180](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/180): fix: fuse-t nobrowse. 19 | - [181](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/181): Fix CI: xfstests: restrict xfstests version. 20 | - [184](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/184): bugfix: read/write fd dropped unexpectedly. 21 | - [185](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/185): fix: fuse-t channel read bug during multiple reads 22 | 23 | ## [0.11.0] 24 | ### Added 25 | - [144](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/144): feat: implement fuse-t feature 26 | - [149](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/149): feat: add "persist" feature 27 | - [152](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/152): Add the ability to clean up the dentry cache, which can be used to clean up resources when the VFS is unmounted. 
28 | - [153](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/153): linux_session: Make allow_other mount option optional 29 | - [159](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/159): UID/GID remapping support 30 | - [162](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/162): abi: Disable unsupported flags and functionality on MacOS 31 | - [163](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/163): vfs: add method to export root pseudofs's reference 32 | - [167](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/167): CI: add xfstests for passthrough fs 33 | 34 | ### Fixed 35 | - [154](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/154): fuse: Ensure fd has same flags as read/write 36 | - [165](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/165): build: fix a build failure related to conditional compilation 37 | 38 | ## [0.10.5] 39 | ### Added 40 | - [138](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/138): linuxsession: support mount in given mount namespace 41 | - [141](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/141): linux_session: support set fusermount binary 42 | 43 | ### Fixed 44 | - [140](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/140): fuse: Ensure readdir returns same ino as lookup 45 | 46 | ## [0.10.4] 47 | 48 | ### Added 49 | - [135](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/135): ZeroCopyWriter pass through available bytes from inner writer 50 | 51 | ### Fixed 52 | - [133](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/133): https://github.com/cloud-hypervisor/fuse-backend-rs/pull/133 53 | 54 | ## [0.10.3] 55 | 56 | ### Added 57 | - [#115](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/115) 58 | * transport: pre-allocate VecDeque to avoid expending at runtime 59 | * passthroughfs: convert MultiKeyBTreeMap to InodeStore for InodeMap 60 | * passthroughfs: add config to specify entry and attr timeout for dir 61 | * 
passthroughfs: add config to control count mntid in altkey or not 62 | - [#119](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/119): Support non-privileged users 63 | - [#126 #127](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/126 https://github.com/cloud-hypervisor/fuse-backend-rs/pull/127): vfs: ensure entry attr st_ino consistency 64 | - [#131](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/131): implement stable unique inode for passthroughfs 65 | 66 | ### Fixed 67 | - [#120](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/120): api: forget and batch forget must not reply 68 | - [#123](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/123): fix possible IO hang due to string conversion failure 69 | - [#129](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/129): abi: st_nlink is u32 on aarch64 70 | 71 | ## [0.10.2] 72 | 73 | ### Fixed 74 | - [#105](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/105): abi: fix the conflict of PERFILE_DAX flag 75 | - [#106](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/106): bugfix: passthrough: refactor CFileHandle struct 76 | 77 | ## [0.10.1] 78 | 79 | ### Fixed 80 | - [#102](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/102): passthrough: reduce the memory footprint of file handle 81 | - [#103](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/103): vfs: correctly set attr.st_ino for lookup() 82 | 83 | ## [0.10.0] 84 | 85 | ### Added 86 | - [#96](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/96): async_runtime: add probe of io_uring support 87 | - [#88](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/88): add ability to disallow operations that could change file size 88 | 89 | ### Fixed 90 | - [#98](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/98): vfs: fix incorrect st_ino in entry.attr 91 | - [#93](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/93): fix BIG_WRITES doesn't work 92 | 93 | ### 
Removed 94 | - [#96](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/96): async_runtime: remove thread_local of Runtime 95 | - [#96](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/96): remove temporary tokio-uring module 96 | 97 | ### Changed 98 | - [#97](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/97): log: print some variables in hexadecimal 99 | - [#96](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/96): async_runtime: improved async file implementation 100 | - [#95](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/95): open file with O_APPEND cleared when writeback is enabled 101 | - [#90](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/90): open file for reading if writeback cache is enabled 102 | 103 | ### Upgraded 104 | - virtio-queue from 0.4 to 0.6 105 | - vhost from 0.4 to 0.5 106 | 107 | ## [0.9.6] 108 | - Fix no_opendir option handling 109 | 110 | ## [0.9.5] 111 | 112 | ### Changed 113 | - Update dependency 114 | - Fix a bug in fusedev 115 | - Add tokio-uring/tokio based async io framework 116 | 117 | ## [0.9.2] 118 | 119 | ### Added 120 | 121 | - [#77](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/77): Implement Sync for FileVolatileSlice 122 | 123 | ## [0.9.1] 124 | 125 | ### Fixed 126 | - [#74](https://github.com/cloud-hypervisor/fuse-backend-rs/pull/74): Fixed some issues about EINTR and EAGAIN handled incorrectly 127 | 128 | ## [v0.4.0] 129 | ### Added 130 | - MacOS support 131 | 132 | ### Changed 133 | - linux_abi renamed to fuse_abi 134 | - switch from epoll to mio for cross-platform support 135 | - VFS umount no longer evicts pseudofs inodes 136 | - virtiofs transport Reader/Writer takes generic typed memory argument 137 | 138 | ## [v0.3.0] 139 | ### Added 140 | - Optionally enable MAX_PAGES feature 141 | - Allow customizing the default FUSE features before creating a new vfs structure 142 | - Support more FUSE server APIs 143 | 144 | ### Changed 145 | - The FUSE server has no 
default FUSE feature set now. The default feature set is only 146 | defined in VfsOptions. Non VFS users have to define the default FUSE feature set in 147 | the init() method. 148 | 149 | ## [v0.2.0] 150 | ### Added 151 | - Enhance passthrough to reduce active fds by using file handle 152 | - implement From for std::io::Error 153 | - Use `vhost` crate from crates.io 154 | - Introduce readlinkat_proc_file helper 155 | - Update vm-memory to 0.7.0 156 | - Add @eryugey to CODEOWNERS file 157 | 158 | ### Fixed 159 | - Validate path components 160 | - Prevent ".." escape in do_lookup in passthroughfs 161 | - Prevent opening of special file in passthroughfs 162 | - Fix compile error in vfs async test 163 | - Record real root inode's ino of file system backends in vfs 164 | 165 | ### Deprecated 166 | 167 | ## [v0.1.2] 168 | - support KILLPRIV_v2 169 | - enhance vfs to support DAX window map/unmap operations 170 | 171 | ## [v0.1.1] 172 | - Set README.md file for crate 173 | - Add CHANGELOG.md 174 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Add the list of code owners here (using their GitHub username) 2 | * @bergwolf @imeoer @jiangliu @liubogithub @eryugey 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fuse-backend-rs" 3 | version = "0.12.1" 4 | keywords = ["fuse", "virtio", "virtio-fs", "vhost-user-fs"] 5 | categories = ["filesystem", "os::linux-apis"] 6 | description = "A rust library for Fuse(filesystem in userspace) servers and virtio-fs devices" 7 | authors = [ 8 | "Liu Bo ", 9 | "Liu Jiang ", 10 | "Peng Tao ", 11 | ] 12 | readme = "README.md" 13 | license = "Apache-2.0 AND BSD-3-Clause" 14 | edition = "2018" 15 | repository = 
"https://github.com/cloud-hypervisor/fuse-backend-rs" 16 | homepage = "https://github.com/cloud-hypervisor/" 17 | build = "build.rs" 18 | 19 | [dependencies] 20 | arc-swap = "1.5" 21 | async-trait = { version = "0.1.42", optional = true } 22 | bitflags = "1.1" 23 | dbs-snapshot = { version = "1.5.2", optional = true } 24 | io-uring = { version = "0.5.8", optional = true } 25 | lazy_static = "1.4" 26 | libc = "0.2.68" 27 | log = "0.4.6" 28 | mio = { version = "0.8", features = ["os-poll", "os-ext"] } 29 | nix = "0.24" 30 | radix_trie = "0.2.1" 31 | tokio = { version = "1", optional = true } 32 | tokio-uring = { version = "0.4.0", optional = true } 33 | vmm-sys-util = { version = "0.12.1", optional = true } 34 | vm-memory = { version = "0.14.1", features = ["backend-mmap"] } 35 | virtio-bindings = { version = "=0.2.4", optional = true } 36 | virtio-queue = { version = "0.12.0", optional = true } 37 | vhost = { version = "0.11.0", features = ["vhost-user","vhost-user-backend"], optional = true } 38 | versionize_derive = { version = "0.1.6", optional = true } 39 | versionize = { version = "0.2.0", optional = true } 40 | 41 | [target.'cfg(target_os = "macos")'.dependencies] 42 | core-foundation-sys = { version = ">=0.8", optional = true } 43 | 44 | [target.'cfg(target_os = "linux")'.dependencies] 45 | caps = { version = "0.5", optional = true } 46 | tokio-uring = { version = "0.4.0", optional = true } 47 | 48 | [dev-dependencies] 49 | tokio-test = "0.4.2" 50 | vmm-sys-util = "0.12.1" 51 | vm-memory = { version = "0.14.1", features = ["backend-mmap", "backend-bitmap"] } 52 | 53 | [features] 54 | default = ["fusedev"] 55 | async-io = [ 56 | "async-trait", 57 | "tokio-uring", 58 | "tokio/fs", 59 | "tokio/net", 60 | "tokio/sync", 61 | "tokio/rt", 62 | "tokio/macros", 63 | "io-uring", 64 | ] 65 | fusedev = ["vmm-sys-util", "caps", "core-foundation-sys"] 66 | virtiofs = ["virtio-queue", "caps", "vmm-sys-util"] 67 | vhost-user-fs = ["virtiofs", "vhost", "caps"] 68 | persist = 
["dbs-snapshot", "versionize", "versionize_derive"] 69 | fuse-t = [] 70 | 71 | [package.metadata.docs.rs] 72 | all-features = true 73 | targets = [ 74 | "x86_64-unknown-linux-gnu", 75 | "aarch64-unknown-linux-gnu", 76 | "aarch64-apple-darwin", 77 | ] 78 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /LICENSE-BSD-3-Clause: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The Chromium OS Authors. All rights reserved. 2 | // 3 | // Redistribution and use in source and binary forms, with or without 4 | // modification, are permitted provided that the following conditions are 5 | // met: 6 | // 7 | // * Redistributions of source code must retain the above copyright 8 | // notice, this list of conditions and the following disclaimer. 9 | // * Redistributions in binary form must reproduce the above 10 | // copyright notice, this list of conditions and the following disclaimer 11 | // in the documentation and/or other materials provided with the 12 | // distribution. 13 | // * Neither the name of Google Inc. nor the names of its 14 | // contributors may be used to endorse or promote products derived from 15 | // this software without specific prior written permission. 
16 | // 17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | current_dir := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) 2 | CARGO ?= $(shell which cargo) 3 | 4 | ifdef RUST_TARGET 5 | TARGET = --target ${RUST_TARGET} 6 | endif 7 | 8 | build: 9 | ${CARGO} build ${TARGET} --features="fusedev" 10 | ${CARGO} build ${TARGET} --features="virtiofs" 11 | ${CARGO} build ${TARGET} --features="vhost-user-fs" 12 | ${CARGO} build ${TARGET} --features="fusedev,async-io" 13 | ${CARGO} build ${TARGET} --features="virtiofs,async-io" 14 | ${CARGO} build ${TARGET} --features="vhost-user-fs,async-io" 15 | 16 | check: build 17 | ${CARGO} fmt -- --check 18 | ${CARGO} clippy ${TARGET} --features="fusedev" --no-default-features -- -Dwarnings 19 | ${CARGO} clippy ${TARGET} --features="virtiofs" --no-default-features -- -Dwarnings 20 | ${CARGO} clippy ${TARGET} --features="vhost-user-fs" --no-default-features -- -Dwarnings 21 | ${CARGO} clippy ${TARGET} --features="fusedev,virtiofs" --no-default-features -- -Dwarnings 22 | 
23 | test: 24 | ${CARGO} test ${TARGET} --features="fusedev" --no-default-features -- --nocapture --skip integration 25 | ${CARGO} test ${TARGET} --features="virtiofs" --no-default-features -- --nocapture --skip integration 26 | ${CARGO} test ${TARGET} --features="vhost-user-fs" --no-default-features -- --nocapture --skip integration 27 | ${CARGO} test ${TARGET} --features="fusedev,virtiofs" --no-default-features -- --nocapture --skip integration 28 | ${CARGO} test ${TARGET} --features="fusedev,async-io" --no-default-features -- --nocapture --skip integration 29 | ${CARGO} test ${TARGET} --features="virtiofs,async-io" --no-default-features -- --nocapture --skip integration 30 | ${CARGO} test ${TARGET} --features="vhost-user-fs,async-io" --no-default-features -- --nocapture --skip integration 31 | ${CARGO} test ${TARGET} --features="fusedev,virtiofs,async-io" --no-default-features -- --nocapture --skip integration 32 | ${CARGO} test ${TARGET} --features="fusedev,persist" --no-default-features -- --nocapture --skip integration 33 | ${CARGO} test ${TARGET} --all-features -- --nocapture --skip integration 34 | 35 | smoke: 36 | ${CARGO} test ${TARGET} --features="fusedev,persist" -- --nocapture 37 | 38 | smoke-all: smoke 39 | ${CARGO} test ${TARGET} --features="fusedev,persist" -- --nocapture --ignored 40 | 41 | build-macos: 42 | ${CARGO} build --features="fusedev" 43 | ${CARGO} build --features="fusedev,fuse-t" 44 | 45 | check-macos: build-macos 46 | ${CARGO} fmt -- --check 47 | ${CARGO} clippy --features="fusedev" -- -Dwarnings 48 | ${CARGO} test --features="fusedev" -- --nocapture --skip integration 49 | ${CARGO} clippy --features="fusedev,fuse-t" -- -Dwarnings 50 | ${CARGO} test --features="fusedev,fuse-t" -- --nocapture --skip integration 51 | 52 | smoke-macos: check-macos 53 | ${CARGO} test --features="fusedev,fuse-t" -- --nocapture 54 | 55 | docker-smoke: 56 | docker run --env RUST_BACKTRACE=1 --rm --privileged --volume ${current_dir}:/fuse-rs rust:1.68 sh -c 
"rustup component add clippy rustfmt; cd /fuse-rs; make smoke-all" 57 | 58 | testoverlay: 59 | 	cd tests/testoverlay && ${CARGO} build 60 | 61 | # Setup xfstests env and run. 62 | xfstests: 63 | 	./tests/scripts/xfstests.sh -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rust FUSE library for server, virtio-fs and vhost-user-fs 2 | 3 | ![Crates.io](https://img.shields.io/crates/l/fuse-backend-rs) 4 | [![Crates.io](https://img.shields.io/crates/v/fuse-backend-rs)](https://crates.io/crates/fuse-backend-rs) 5 | 6 | ## Design 7 | 8 | The fuse-backend-rs crate is a Rust library to implement Fuse daemons based on the 9 | [Linux FUSE device (/dev/fuse)](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) 10 | or the [virtiofs](https://stefanha.github.io/virtio/virtio-fs.html#x1-41500011) draft specification. 11 | 12 | Linux FUSE is a userspace filesystem framework, and the /dev/fuse device node is the interface for 13 | userspace filesystem daemons to communicate with the in-kernel fuse driver. 14 | 15 | And the virtio-fs specification extends the FUSE framework into the virtualization world, which uses 16 | the Virtio protocol to transfer FUSE requests and responses between the Fuse client and server. 17 | With virtio-fs, the Fuse client runs within the guest kernel and the Fuse server runs on the host 18 | userspace or hardware. 19 | 20 | So the fuse-rs crate is a library to communicate with the Linux FUSE clients, which includes: 21 | - ABI layer, which defines all data structures shared between linux Fuse framework and Fuse daemons. 22 | - API layer, defines the interfaces for Fuse daemons to implement a userspace file system. 23 | - Transport layer, which supports both the Linux Fuse device and virtio-fs protocol. 24 | - VFS/pseudo_fs, an abstraction layer to support multiple file systems by a single virtio-fs device. 
25 | - A sample passthrough file system implementation, which passes through files from daemons to clients. 26 | 27 | ![arch](docs/images/fuse-backend-architecture.svg) 28 | 29 | ## Examples 30 | 31 | ### Filesystem Drivers 32 | - [Virtual File System](https://github.com/cloud-hypervisor/fuse-backend-rs/tree/master/src/api/vfs) 33 | for an example of union file system. 34 | - [Pseudo File System](https://github.com/cloud-hypervisor/fuse-backend-rs/blob/master/src/api/pseudo_fs.rs) 35 | for an example of pseudo file system. 36 | - [Passthrough File System](https://github.com/cloud-hypervisor/fuse-backend-rs/tree/master/src/passthrough) 37 | for an example of passthrough (stacked) file system. 38 | - [Registry Accelerated File System](https://github.com/dragonflyoss/image-service/tree/master/rafs) 39 | for an example of readonly file system for container images. 40 | 41 | ### Fuse Servers 42 | - [Dragonfly Image Service fusedev Server](https://github.com/dragonflyoss/image-service/blob/master/src/bin/nydusd/fusedev.rs) 43 | for an example of implementing a fuse server based on the 44 | [fuse-backend-rs](https://crates.io/crates/fuse-backend-rs) crate. 45 | - [Dragonfly Image Service vhost-user-fs Server](https://github.com/dragonflyoss/image-service/blob/master/src/bin/nydusd/virtiofs.rs) 46 | for an example of implementing vhost-user-fs server based on the 47 | [fuse-backend-rs](https://crates.io/crates/fuse-backend-rs) crate. 48 | 49 | ### Fuse Server and Main Service Loop 50 | A sample fuse server based on the Linux Fuse device (/dev/fuse): 51 | 52 | ```rust 53 | use fuse_backend_rs::api::{server::Server, Vfs, VfsOptions}; 54 | use fuse_backend_rs::transport::fusedev::{FuseSession, FuseChannel}; 55 | 56 | struct FuseServer { 57 | server: Arc<Server<Arc<Vfs>>>, 58 | ch: FuseChannel, 59 | } 60 | 61 | impl FuseServer { 62 | fn svc_loop(&self) -> Result<()> { 63 | // Given error EBADF, it means kernel has shut down this session. 
64 | let _ebadf = std::io::Error::from_raw_os_error(libc::EBADF); 65 | loop { 66 | if let Some((reader, writer)) = self 67 | .ch 68 | .get_request() 69 | .map_err(|_| std::io::Error::from_raw_os_error(libc::EINVAL))? 70 | { 71 | if let Err(e) = self.server.handle_message(reader, writer, None, None) { 72 | match e { 73 | fuse_backend_rs::Error::EncodeMessage(_ebadf) => { 74 | break; 75 | } 76 | _ => { 77 | error!("Handling fuse message failed"); 78 | continue; 79 | } 80 | } 81 | } 82 | } else { 83 | info!("fuse server exits"); 84 | break; 85 | } 86 | } 87 | Ok(()) 88 | } 89 | } 90 | ``` 91 | 92 | ## License 93 | This project is licensed under 94 | - [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 95 | - [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) 96 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | #[cfg(target_os = "macos")] 3 | println!("cargo:rustc-link-lib=framework=DiskArbitration"); 4 | } 5 | -------------------------------------------------------------------------------- /coverage_config_x86_64.json: -------------------------------------------------------------------------------- 1 | {"coverage_score": 27.3, "exclude_path": "abi/linux_abi.rs", "crate_features": "fusedev"} 2 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # This template contains all of the possible sections and their default values 2 | 3 | # Note that all fields that take a lint level have these possible values: 4 | # * deny - An error will be produced and the check will fail 5 | # * warn - A warning will be produced, but the check will not fail 6 | # * allow - No warning or error will be produced, though in some cases a note 7 | # will be 8 | 9 | # The values provided 
in this template are the default values that will be used 10 | # when any section or field is not specified in your own configuration 11 | 12 | # If 1 or more target triples (and optionally, target_features) are specified, 13 | # only the specified targets will be checked when running `cargo deny check`. 14 | # This means, if a particular package is only ever used as a target specific 15 | # dependency, such as, for example, the `nix` crate only being used via the 16 | # `target_family = "unix"` configuration, that only having windows targets in 17 | # this list would mean the nix crate, as well as any of its exclusive 18 | # dependencies not shared by any other crates, would be ignored, as the target 19 | # list here is effectively saying which targets you are building for. 20 | [graph] 21 | targets = [ 22 | # The triple can be any string, but only the target triples built in to 23 | # rustc (as of 1.40) can be checked against actual config expressions 24 | #{ triple = "x86_64-unknown-linux-musl" }, 25 | # You can also specify which target_features you promise are enabled for a 26 | # particular target. target_features are currently not validated against 27 | # the actual valid features supported by the target architecture. 28 | #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, 29 | ] 30 | 31 | # This section is considered when running `cargo deny check advisories` 32 | # More documentation for the advisories section can be found here: 33 | # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html 34 | [advisories] 35 | # The path where the advisory database is cloned/fetched into 36 | db-path = "~/.cargo/advisory-db" 37 | # The url(s) of the advisory databases to use 38 | db-urls = ["https://github.com/rustsec/advisory-db"] 39 | version = 2 40 | # The lint level for crates that have been yanked from their source registry 41 | yanked = "warn" 42 | # A list of advisory IDs to ignore. 
Note that ignored advisories will still 43 | # output a note when they are encountered. 44 | ignore = [] 45 | # Threshold for security vulnerabilities, any vulnerability with a CVSS score 46 | # lower than the range specified will be ignored. Note that ignored advisories 47 | # will still output a note when they are encountered. 48 | # * None - CVSS Score 0.0 49 | # * Low - CVSS Score 0.1 - 3.9 50 | # * Medium - CVSS Score 4.0 - 6.9 51 | # * High - CVSS Score 7.0 - 8.9 52 | # * Critical - CVSS Score 9.0 - 10.0 53 | #severity-threshold = 54 | 55 | # This section is considered when running `cargo deny check licenses` 56 | # More documentation for the licenses section can be found here: 57 | # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html 58 | [licenses] 59 | # List of explictly allowed licenses 60 | # See https://spdx.org/licenses/ for list of possible licenses 61 | # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 62 | allow = [ 63 | "MIT", 64 | "Apache-2.0", 65 | "BSD-3-Clause", 66 | "Unicode-3.0", 67 | ] 68 | # The confidence threshold for detecting a license from license text. 69 | # The higher the value, the more closely the license text must be to the 70 | # canonical license text of a valid SPDX license file. 71 | # [possible values: any between 0.0 and 1.0]. 
72 | confidence-threshold = 0.8 73 | # Allow 1 or more licenses on a per-crate basis, so that particular licenses 74 | # aren't accepted for every possible crate as with the normal allow list 75 | exceptions = [ 76 | # Each entry is the crate and version constraint, and its specific allow 77 | # list 78 | #{ allow = ["Zlib"], name = "adler32", version = "*" }, 79 | ] 80 | 81 | # Some crates don't have (easily) machine readable licensing information, 82 | # adding a clarification entry for it allows you to manually specify the 83 | # licensing information 84 | #[[licenses.clarify]] 85 | # The name of the crate the clarification applies to 86 | #name = "ring" 87 | # The optional version constraint for the crate 88 | #version = "*" 89 | # The SPDX expression for the license requirements of the crate 90 | #expression = "MIT AND ISC AND OpenSSL" 91 | # One or more files in the crate's source used as the "source of truth" for 92 | # the license expression. If the contents match, the clarification will be used 93 | # when running the license check, otherwise the clarification will be ignored 94 | # and the crate will be checked normally, which may produce warnings or errors 95 | # depending on the rest of your configuration 96 | #license-files = [ 97 | # Each entry is a crate relative path, and the (opaque) hash of its contents 98 | #{ path = "LICENSE", hash = 0xbd0eed23 } 99 | #] 100 | 101 | [licenses.private] 102 | # If true, ignores workspace crates that aren't published, or are only 103 | # published to private registries 104 | ignore = false 105 | # One or more private registries that you might publish crates to, if a crate 106 | # is only published to private registries, and ignore is true, the crate will 107 | # not have its license(s) checked 108 | registries = [ 109 | #"https://sekretz.com/registry 110 | ] 111 | 112 | # This section is considered when running `cargo deny check bans`. 
113 | # More documentation about the 'bans' section can be found here: 114 | # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html 115 | [bans] 116 | # Lint level for when multiple versions of the same crate are detected 117 | multiple-versions = "warn" 118 | # Lint level for when a crate version requirement is `*` 119 | wildcards = "allow" 120 | # The graph highlighting used when creating dotgraphs for crates 121 | # with multiple versions 122 | # * lowest-version - The path to the lowest versioned duplicate is highlighted 123 | # * simplest-path - The path to the version with the fewest edges is highlighted 124 | # * all - Both lowest-version and simplest-path are used 125 | highlight = "all" 126 | # List of crates that are allowed. Use with care! 127 | allow = [ 128 | #{ name = "ansi_term", version = "=0.11.0" }, 129 | ] 130 | # List of crates to deny 131 | deny = [ 132 | # Each entry the name of a crate and a version range. If version is 133 | # not specified, all versions will be matched. 134 | #{ name = "ansi_term", version = "=0.11.0" }, 135 | # 136 | # Wrapper crates can optionally be specified to allow the crate when it 137 | # is a direct dependency of the otherwise banned crate 138 | #{ name = "ansi_term", version = "=0.11.0", wrappers = [] }, 139 | ] 140 | # Certain crates/versions that will be skipped when doing duplicate detection. 141 | skip = [ 142 | #{ name = "ansi_term", version = "=0.11.0" }, 143 | ] 144 | # Similarly to `skip` allows you to skip certain crates during duplicate 145 | # detection. Unlike skip, it also includes the entire tree of transitive 146 | # dependencies starting at the specified crate, up to a certain depth, which is 147 | # by default infinite 148 | skip-tree = [ 149 | #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, 150 | ] 151 | 152 | # This section is considered when running `cargo deny check sources`. 
153 | # More documentation about the 'sources' section can be found here: 154 | # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html 155 | [sources] 156 | # Lint level for what to happen when a crate from a crate registry that is not 157 | # in the allow list is encountered 158 | unknown-registry = "warn" 159 | # Lint level for what to happen when a crate from a git repository that is not 160 | # in the allow list is encountered 161 | unknown-git = "warn" 162 | # List of URLs for allowed crate registries. Defaults to the crates.io index 163 | # if not specified. If it is specified but empty, no registries are allowed. 164 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 165 | # List of URLs for allowed Git repositories 166 | allow-git = [] 167 | -------------------------------------------------------------------------------- /docs/images/fuse-backend-architecture.drawio: -------------------------------------------------------------------------------- 1 | 
5V1bk5s4Fv41Xfs0KXRBEo/p7nQmVZvaznY2k8lLirHVNhtsvEDf5tevsMFGFwNtEBd31VTGCHDjT+d856KjwwW6Wj1/jP3N8nM05+EFdObPF+j6AkKAIRH/y0ZediOsGFjEwTy/6DBwF/zN80EnH30I5jyRLkyjKEyDjTw4i9ZrPkulMT+Ooyf5svsolP/qxl9wbeBu5of66B/BPF3mo4B4hxO/82CxTIvfR3cnVn5xcf5LkqU/j55KQ+jDBbqKoyjdfVo9X/EwA6/AZXffzZGz+weL+TptcgMOgseP7pfPEfz5r/9c/fjiffA2v+Xf8uiHD/kPzh82fSkQiKOH9ZxnX+JcoMunZZDyu40/y84+iTkXY8t0FYojID4maRz92iMlfuPl3E+W27tBfnDrpymP19sR6GSj+TPwOOXPR38c2EMmZI1HK57GL+KS4gbCdrfkYobyWXg6zJlXiNSyNF3EzQf9XE4W+68+QCk+5GiakWXO94/L2fL676+/OPvuRldfnj5ZRhbtYdMwMiB5FDbEHAk2jBwNN8KIjhvE1BJunqfhxOdCJfPDKE6X0SJa++GHw+iljOThmn9G0SbH7788TV9yfvEf0khGlz8H6ffs9ndufvRn6cz1c/7N24OX4mAtfm/ppuzwz/K5w23bo+K++2id5g8CsTazoGpmUz9e8LQCPeTuLswgqxSAmId+GjzKLGeazPzW2ygQz7IXHOLI+kYokb8iiR7iGc/vUkRi/xgtpIRNQb1c4EkoucjT1AtQoKsXQLbUi06Cl4DCSwgxDThm4iVii84JnQJumMq4AU/nc9cz4AagNYEbD507J9E5qKFzycE5Ooc7PqzACTZkeNw1wbez1uOZXTvG2v6EolFNKNRo7lN0+XB/z+PkAhJ/lbFYKH7O5T/8Cyj+hHO3H16ke7yMpAjqSbEDCqQukm1uYSbKNhcaQoHCxHQOKdIgfVz9XPFVlHlA78V/36JsZkN+FwYCl2mgDGVDQ4FuaACjOsoYWUKZjYeJqu1MR/YCN6SXngICBmS1w57iUFgOCLCmZDdCo2o0a1wqpRIXNKiUibiwLVvgVmIqrMLoMMPFcS1msD1m5oQaGZSHyiy056Qaj2jv4Jbuapa+QG5F+kJhnNPyGdgKWXmK9XJx9rP7pCugh+GZbv2b+/M/YqE1hZJpwpQs/U32ccmffSEjAu0NjwPxODw+jN4WQ7BeC++DZ17k3TvSSs9R4DUxGbHEZGatHDYKPTetdO1oJVCS0RT3q5N67PM+eVnPzlcxjebSqJi2zOXe3R1EMcEpiukMp5i1QUHhhNdqMGypwe2WxvSIONMvoSiTiH3dIrSp0qLCwHetReYsnDuoeXtFGq5+7av/RB1oGkoXi079p+qM3DmZSe9gobQH6mwqBbsIbzDu1BMdW79kItyJvXoPpGCzXqgTGJIcDwm/5o9TgtUtqqr2Aa3BJLlej7iiEa379VPG0YJ6SEPqQWQoA1T53CXd+RbEaRDdJ1NSHqRwkmsoNjAnEW2tZUA9Er1JrvzZUsSh//vdX8/DDNszCEBd1jAALQpnuodaLyT6GvvrZCPYR4NY/Mr0wlDFcRWFUQbyOlrzLXBhqAz5YbDISh5nArtsPi4zzIKZH77PT6yC+XzLgaZJkXmxMfG8Yl6UuhGC9HkxToutchukm2XxPXP+KP69F/b5qPQLGPww5GG0iP2VogPSudvDidcqghbE7D6zjmw5lkvraBFKDramMaLkKRwie5oVZ0mrtmUFv4CIOL4HqMYG4gy8psQ5RKedBCqoqbdA2yZ5tre+j2P/pXTBJku/JqVvVrK4iCiVAUgpS6+5HuWV3AeB3T1Bp6lepHsuj7nncsbMojo61NGr5Pt1dArKKs3DDx5HV9HmpcgITtDLcT15VRoSUiwvSilC750h0kXEFtZ61rXAuvDWJ4g1ZrAR1ibnpQOkzYsaeFCDeW6ZOdw0M7eT8L4NnuvJ/ppw4CoNnoto1fWtDZ5Z
JKeYLMYuLAvlb847B8MaydwelSzwmMV1EP/steJKMLEqrpUYKuUxdy9Jylf92yk7m4OwvDkIGrwxz1BBa89B0J3ibf3DGXhkVMlvQGLwfLElH8EMtr6fSAJ7wi6ZWsczPNhkig4Zdqlq/WBdHqPO+rVcqCgyU/WmrfOFipNsnee8ztYxhPu3dWSKrtlAK2XNBZBOUgA9Zd9rP86WvgSyr3s8K4+LIWU2TPkvW4bJXJfsaNCOdNfSqbtjq0OvBg1CYMfRmdeUQdpGZ+0EY9C62PNLITWdddA2hdQuKPA0Jv4xOxJ29V0lQRxaJFaLlRrMDKlWr8/VexeOR02s86ehYPZV/HmK4hTtkRrmXjt3uDQPCbtAFkKHqVu3ds/axTYRcyp12B1+50bNjSUMtN0L2I5pHAM1H0nS9F9UCxVi1tsM9UvLI1KR86TlxmsMg1WMmt3aKSbi6IjJs/GmhUGWRhlWqv1BdS0QYaDqejvpD1dfa9pl4sfieVO1GZqB3s0dbazx+4iK+8+T392m/N55nrNd5cuIBOMs+L2pHAzsHOvV2jmFjsRDVht+DE+hRE+6V+Tbz3bjwb65cGVKnhkLJW3tPaBTJLG+OhDXUhaFDSkLekOZrsrnLinkHY8fR0BeEMg6MoK8K9V95iNgnS1zAYW5kIO1OSGGJhDWaKvQp3F3KAZUrnozdZwHRb2O3KEYWwIO6am995ef3o4kQyRH3S4wSHKvm/8MzdG2e/6czzxJshd2DE3JQNnhgbFBinvdSYN07R8ZZtBRezUOjRnWFX9smKldVcnQmNFJvHhB7YOP9R7Q0DG1wXdsGRmq6+e3m7u3Y2RcLJdWuvqM9OotMV33bxP+MI9u9B2gva/nMbnQwtOx6tXbZwMHxLD/iLgmu2tOCR+d+tqguVj3qM/3juv1D8wbVjLoSZKBTs74NpENOJhsdN6e7qRFP4Jln6V4l8/RonsPVF1vZ9GP6SHGN8Pe/977eNGRMb++Z3yE3h6Dsm9h8PaMTYXseXtMz4/d+kmSLgVYi+Xb8foYVYtGh/b7CvWZlsUisMP8vvXVa9Z49XpcDVUhmqKfe7psjGJDRnNh6eRdEK8uYVJKkgCtKWFSNr8q11vyZvT195K5GYFfw1x559XQfo2n+zVZ5m8Mr7QhjrxA4jI9G08NqSt7zcUNvRgEVnc8SYJoPTheWutbpq+NFsm3fvDS2yhkeF0t/fU6ezn6yPAiUK+46BcvvRNC3oH5q5/8GhwvVR8JNMhXn/oInBHtDWxafSl5KK9px2jwXztrx2i1vIXY2YGuORyQyg4K8qi6IerIe3Nqv2rfnL2vnVUaEWStXn/+5c9+8fVcE/rRRc1bf/oIvxikrfHq8n6Fr7x8Zdyn3kHUbJyZYbZttG/meoQ9Tnkx13gawHpNN3100wB2jIzTtiktyRd/h2pKa6Y/fWldMJwIvP3s1qS6E0fP3WmBsTttK9ZDSGY9anDkzTXatljPUFGXPPx1ZCI6SYO3AtB16gH0DJV11vBDg+RaR9PLu3PiJa7cOZQBZeaa0i7FNV/UEe0SItM7y2Opo89FK6+3Q7uGasOVH6zFyH0YPWkSPA6vsyZHa2LrDhhGFcD9zsQSw7A+GRrB8TFMxyn0zlmEso5YhNXRUUcscuzvvPa57LKInlEOVhs93feW6UPQu7IaPDyBDBLZNt1/NQA7MK8jdvDIaezQmULqa2Km9wa9ZX1kTA0YrGmjOIyjKC1PscBh+Tma8+yK/wM= -------------------------------------------------------------------------------- /docs/images/overlayfs_dir.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cloud-hypervisor/fuse-backend-rs/22cf6a6f5e857c641f92f8346f57574d87e9d059/docs/images/overlayfs_dir.png -------------------------------------------------------------------------------- /docs/images/overlayfs_non_dir_file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-hypervisor/fuse-backend-rs/22cf6a6f5e857c641f92f8346f57574d87e9d059/docs/images/overlayfs_non_dir_file.png -------------------------------------------------------------------------------- /docs/images/overlayfs_structs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-hypervisor/fuse-backend-rs/22cf6a6f5e857c641f92f8346f57574d87e9d059/docs/images/overlayfs_structs.png -------------------------------------------------------------------------------- /docs/overlayfs.md: -------------------------------------------------------------------------------- 1 | # Architecture of Overlay FS 2 | 3 | The implementation of userspace Overlay FS follows [the design of the kernel](https://docs.kernel.org/filesystems/overlayfs.html), 4 | but it is not a direct port. 5 | There are some differences between the kernel implementation and the userspace implementation due to FUSE limitations, 6 | it's under heavy development to make it more stable and more compatible. 7 | 8 | ## Basic Struct Definitions 9 | 10 | There're some important structs in the implementation of Overlay FS, they are: 11 | 12 | * `OverlayFs`: the main struct of the union FS, it's composed of multiple layers, normally one optional writable upper layer and many readonly lower layers. 13 | * `OverlayInode`: inode struct in OverlayFs, one OverlayInode is composed of many `RealInode` in each layer. 14 | * `RealInode`: wrapper for backend `inode` in one single layer. 
15 | * `HandleData`: opened file handle in OverlayFs, one `HandleData` corresponds to one OverlayInode and one optional `RealHandle` in some layer. 16 | * `RealHandle`: wrapper for backend opened file handle in one single layer. 17 | 18 | Also another trait named `Layer` is introduced to represent a single layer in OverlayFs, only filesystems which implement this trait can be used as a layer in OverlayFs. 19 | 20 | Relationships between these structs are illustrated in the following figure: 21 | 22 | ![OverlayFs Structs](./images/overlayfs_structs.png) 23 | 24 | ## Non-Directory File 25 | 26 | Following kernel Overlay semantics, OverlayFs uses the following rules to handle non-directory files: 27 | 28 | * If a file with same name exists in all layers, the topmost file will be chosen, any other files with same name in lower layers will be hidden. 29 | * If a file in lower filesystem is accessed in a way that requires write access, such as opening for write access, changing some metadata etc., 30 | the file is first copied from the lower filesystem to the upper filesystem (copy_up). 31 | 32 | ![OverlayFs Non-Directory File](./images/overlayfs_non_dir_file.png) 33 | 34 | ## Directory 35 | 36 | Following kernel Overlay semantics, OverlayFs uses the following rules to handle directories: 37 | 38 | * If a directory with same name exists in all layers, the union directory will merge all entries of directory in all layers. 39 | * If a directory is set as opaque, all entries in lower layers will be hidden. 40 | * The copy up logic is similar to non-directory file, any write access to a directory will trigger copy up. 41 | 42 | ![OverlayFs Directory](./images/overlayfs_dir.png) 43 | 44 | ## Whiteout 45 | 46 | A whiteout is a special file in OverlayFs, it indicates a deletion of a file or directory in lower layer. 47 | A whiteout is a device file with major number 0 and minor number 0, 48 | and the name of the whiteout file is the name of the file or directory to be deleted. 
49 | 50 | ## Opaque 51 | 52 | Opaque is a special flag for directory in OverlayFs, it indicates that all entries of directory in lower layers will be ignored. 53 | Opaque is implemented by setting one of these xattr to 'y': 54 | 55 | * `trusted.overlay.opaque` 56 | * `user.overlay.opaque` 57 | * `user.fuseoverlayfs.opaque` 58 | 59 | `user.fuseoverlayfs.opaque` is customized flag for our fuse-overlayfs. 60 | 61 | -------------------------------------------------------------------------------- /src/abi/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020 Alibaba Cloud. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | //! Fuse Application Binary Interfaces(ABI). 5 | 6 | /// Linux Fuse Application Binary Interfaces. 7 | #[cfg(target_os = "linux")] 8 | #[path = "fuse_abi_linux.rs"] 9 | pub mod fuse_abi; 10 | 11 | /// MacOS Fuse Application Binary Interfaces. 12 | #[cfg(target_os = "macos")] 13 | #[path = "fuse_abi_macos.rs"] 14 | pub mod fuse_abi; 15 | 16 | #[cfg(feature = "virtiofs")] 17 | pub mod virtio_fs; 18 | -------------------------------------------------------------------------------- /src/abi/virtio_fs.rs: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Intel Corporation 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | //! Fuse extension protocol messages to support virtio-fs. 6 | 7 | #![allow(missing_docs)] 8 | 9 | use bitflags::bitflags; 10 | use vm_memory::ByteValued; 11 | 12 | bitflags! { 13 | /// Flags for Setupmapping request. 14 | pub struct SetupmappingFlags: u64 { 15 | /// Mapping with write permission 16 | const WRITE = 0x1; 17 | /// Mapping with read permission 18 | const READ = 0x2; 19 | } 20 | } 21 | 22 | /// Setup file mapping request message for virtio-fs. 23 | #[repr(C)] 24 | #[derive(Debug, Default, Copy, Clone)] 25 | pub struct SetupmappingIn { 26 | /// File handler. 
27 | pub fh: u64, 28 | /// File offset. 29 | pub foffset: u64, 30 | /// Length to map. 31 | pub len: u64, 32 | /// Mapping flags 33 | pub flags: u64, 34 | /// Mapping offset in the DAX window. 35 | pub moffset: u64, 36 | } 37 | 38 | unsafe impl ByteValued for SetupmappingIn {} 39 | 40 | /// Remove file mapping request message header for virtio-fs. 41 | #[repr(C)] 42 | #[derive(Debug, Default, Copy, Clone)] 43 | pub struct RemovemappingIn { 44 | /// Number of `RemovemappingOne` entries in the message payload. 45 | pub count: u32, 46 | } 47 | 48 | unsafe impl ByteValued for RemovemappingIn {} 49 | 50 | /// Remove file mapping request payload entry for virtio-fs. 51 | #[repr(C)] 52 | #[derive(Debug, Default, Copy, Clone)] 53 | pub struct RemovemappingOne { 54 | /// Mapping offset in the DAX window. 55 | pub moffset: u64, 56 | /// Length to unmap. 57 | pub len: u64, 58 | } 59 | 60 | unsafe impl ByteValued for RemovemappingOne {} 61 | -------------------------------------------------------------------------------- /src/api/filesystem/overlay.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023 Ant Group. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE-BSD-3-Clause file. 4 | 5 | #![allow(missing_docs)] 6 | 7 | use std::ffi::{CStr, CString}; 8 | use std::io::{Error, ErrorKind, Result}; 9 | 10 | use super::{Context, Entry, FileSystem, GetxattrReply}; 11 | use crate::abi::fuse_abi::stat64; 12 | 13 | pub const OPAQUE_XATTR_LEN: u32 = 16; 14 | pub const OPAQUE_XATTR: &str = "user.fuseoverlayfs.opaque"; 15 | pub const UNPRIVILEGED_OPAQUE_XATTR: &str = "user.overlay.opaque"; 16 | pub const PRIVILEGED_OPAQUE_XATTR: &str = "trusted.overlay.opaque"; 17 | 18 | /// A filesystem must implement Layer trait, or it cannot be used as an OverlayFS layer. 
19 | pub trait Layer: FileSystem { 20 | /// Return the root inode number 21 | fn root_inode(&self) -> Self::Inode; 22 | 23 | /// Create whiteout file with name . 24 | /// 25 | /// If this call is successful then the lookup count of the `Inode` associated with the returned 26 | /// `Entry` must be increased by 1. 27 | fn create_whiteout(&self, ctx: &Context, parent: Self::Inode, name: &CStr) -> Result { 28 | // Use temp value to avoid moved 'parent'. 29 | let ino: u64 = parent.into(); 30 | match self.lookup(ctx, ino.into(), name) { 31 | Ok(v) => { 32 | // Find whiteout char dev. 33 | if is_whiteout(v.attr) { 34 | return Ok(v); 35 | } 36 | // Non-negative entry with inode larger than 0 indicates file exists. 37 | if v.inode != 0 { 38 | // Decrease the refcount. 39 | self.forget(ctx, v.inode.into(), 1); 40 | // File exists with same name, create whiteout file is not allowed. 41 | return Err(Error::from_raw_os_error(libc::EEXIST)); 42 | } 43 | } 44 | Err(e) => match e.raw_os_error() { 45 | Some(raw_error) => { 46 | // We expect ENOENT error. 47 | if raw_error != libc::ENOENT { 48 | return Err(e); 49 | } 50 | } 51 | None => return Err(e), 52 | }, 53 | } 54 | 55 | // Try to create whiteout char device with 0/0 device number. 56 | let dev = libc::makedev(0, 0); 57 | let mode = libc::S_IFCHR | 0o777; 58 | self.mknod(ctx, ino.into(), name, mode, dev as u32, 0) 59 | } 60 | 61 | /// Delete whiteout file with name . 62 | fn delete_whiteout(&self, ctx: &Context, parent: Self::Inode, name: &CStr) -> Result<()> { 63 | // Use temp value to avoid moved 'parent'. 64 | let ino: u64 = parent.into(); 65 | match self.lookup(ctx, ino.into(), name) { 66 | Ok(v) => { 67 | if v.inode != 0 { 68 | // Decrease the refcount since we make a lookup call. 69 | self.forget(ctx, v.inode.into(), 1); 70 | } 71 | 72 | // Find whiteout so we can safely delete it. 
73 | if is_whiteout(v.attr) { 74 | return self.unlink(ctx, ino.into(), name); 75 | } 76 | // Non-negative entry with inode larger than 0 indicates file exists. 77 | if v.inode != 0 { 78 | // File exists but not whiteout file. 79 | return Err(Error::from_raw_os_error(libc::EINVAL)); 80 | } 81 | } 82 | Err(e) => match e.raw_os_error() { 83 | Some(raw_error) => { 84 | // ENOENT is acceptable. 85 | if raw_error != libc::ENOENT { 86 | return Err(e); 87 | } 88 | } 89 | None => return Err(e), 90 | }, 91 | } 92 | Ok(()) 93 | } 94 | 95 | /// Check if the Inode is a whiteout file 96 | fn is_whiteout(&self, ctx: &Context, inode: Self::Inode) -> Result { 97 | let (st, _) = self.getattr(ctx, inode, None)?; 98 | 99 | // Check attributes of the inode to see if it's a whiteout char device. 100 | Ok(is_whiteout(st)) 101 | } 102 | 103 | /// Set the directory to opaque. 104 | fn set_opaque(&self, ctx: &Context, inode: Self::Inode) -> Result<()> { 105 | // Use temp value to avoid moved 'parent'. 106 | let ino: u64 = inode.into(); 107 | 108 | // Get attributes and check if it's directory. 109 | let (st, _d) = self.getattr(ctx, ino.into(), None)?; 110 | if !is_dir(st) { 111 | // Only directory can be set to opaque. 112 | return Err(Error::from_raw_os_error(libc::ENOTDIR)); 113 | } 114 | // A directory is made opaque by setting the xattr "trusted.overlay.opaque" to "y". 115 | // See ref: https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories 116 | self.setxattr( 117 | ctx, 118 | ino.into(), 119 | to_cstring(OPAQUE_XATTR)?.as_c_str(), 120 | b"y", 121 | 0, 122 | ) 123 | } 124 | 125 | /// Check if the directory is opaque. 126 | fn is_opaque(&self, ctx: &Context, inode: Self::Inode) -> Result { 127 | // Use temp value to avoid moved 'parent'. 128 | let ino: u64 = inode.into(); 129 | 130 | // Get attributes of the directory. 
131 | let (st, _d) = self.getattr(ctx, ino.into(), None)?; 132 | if !is_dir(st) { 133 | return Err(Error::from_raw_os_error(libc::ENOTDIR)); 134 | } 135 | 136 | // Return Result. 137 | let check_attr = |inode: Self::Inode, attr_name: &str, attr_size: u32| -> Result { 138 | let cname = CString::new(attr_name)?; 139 | match self.getxattr(ctx, inode, cname.as_c_str(), attr_size) { 140 | Ok(v) => { 141 | // xattr name exists and we get value. 142 | if let GetxattrReply::Value(buf) = v { 143 | if buf.len() == 1 && buf[0].eq_ignore_ascii_case(&b'y') { 144 | return Ok(true); 145 | } 146 | } 147 | // No value found, go on to next check. 148 | Ok(false) 149 | } 150 | Err(e) => { 151 | if let Some(raw_error) = e.raw_os_error() { 152 | if raw_error == libc::ENODATA { 153 | return Ok(false); 154 | } 155 | } 156 | 157 | Err(e) 158 | } 159 | } 160 | }; 161 | 162 | // A directory is made opaque by setting some specific xattr to "y". 163 | // See ref: https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories 164 | 165 | // Check our customized version of the xattr "user.fuseoverlayfs.opaque". 166 | let is_opaque = check_attr(ino.into(), OPAQUE_XATTR, OPAQUE_XATTR_LEN)?; 167 | if is_opaque { 168 | return Ok(true); 169 | } 170 | 171 | // Also check for the unprivileged version of the xattr "trusted.overlay.opaque". 172 | let is_opaque = check_attr(ino.into(), PRIVILEGED_OPAQUE_XATTR, OPAQUE_XATTR_LEN)?; 173 | if is_opaque { 174 | return Ok(true); 175 | } 176 | 177 | // Also check for the unprivileged version of the xattr "user.overlay.opaque". 
178 | let is_opaque = check_attr(ino.into(), UNPRIVILEGED_OPAQUE_XATTR, OPAQUE_XATTR_LEN)?; 179 | if is_opaque { 180 | return Ok(true); 181 | } 182 | 183 | Ok(false) 184 | } 185 | } 186 | 187 | pub(crate) fn is_dir(st: stat64) -> bool { 188 | st.st_mode & libc::S_IFMT == libc::S_IFDIR 189 | } 190 | 191 | pub(crate) fn is_chardev(st: stat64) -> bool { 192 | st.st_mode & libc::S_IFMT == libc::S_IFCHR 193 | } 194 | 195 | pub(crate) fn is_whiteout(st: stat64) -> bool { 196 | // A whiteout is created as a character device with 0/0 device number. 197 | // See ref: https://docs.kernel.org/filesystems/overlayfs.html#whiteouts-and-opaque-directories 198 | let major = libc::major(st.st_rdev); 199 | let minor = libc::minor(st.st_rdev); 200 | is_chardev(st) && major == 0 && minor == 0 201 | } 202 | 203 | pub(crate) fn to_cstring(name: &str) -> Result { 204 | CString::new(name).map_err(|e| Error::new(ErrorKind::InvalidData, e)) 205 | } 206 | -------------------------------------------------------------------------------- /src/api/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020 Alibaba Cloud. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | //! Fuse Application Programming Interfaces(API). 5 | //! 6 | //! The Fuse application programming interfaces(API) layer is an intermediate layer 7 | //! between the transport layer and the backend file system drivers. It provides: 8 | //! - [struct Server](server/struct.Server.html) to receive requests from/send reply to the 9 | //! transport layer. 10 | //! - [trait FileSystem](filesystem/trait.FileSystem.html) for backend file system drivers to 11 | //! implement fs operations. 12 | //! - [struct Vfs](vfs/struct.Vfs.html), a simple union file system to help organize multiple 13 | //! backend file systems. 
14 | 15 | mod pseudo_fs; 16 | 17 | pub mod vfs; 18 | pub use vfs::{ 19 | validate_path_component, BackFileSystem, BackendFileSystem, Vfs, VfsIndex, VfsOptions, 20 | CURRENT_DIR_CSTR, EMPTY_CSTR, PARENT_DIR_CSTR, PROC_SELF_FD_CSTR, SLASH_ASCII, VFS_MAX_INO, 21 | }; 22 | 23 | pub mod filesystem; 24 | pub mod server; 25 | -------------------------------------------------------------------------------- /src/api/server/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020-2022 Alibaba Cloud. All rights reserved. 2 | // Copyright 2019 The Chromium OS Authors. All rights reserved. 3 | // Use of this source code is governed by a BSD-style license that can be 4 | // found in the LICENSE-BSD-3-Clause file. 5 | 6 | //! Fuse API Server to interconnect transport layers with filesystem drivers. 7 | //! 8 | //! The Fuse API server is a adapter layer between transport layers and file system drivers. 9 | //! The main functionalities of the Fuse API server is: 10 | //! * Support different types of transport layers, fusedev, virtio-fs or vhost-user-fs. 11 | //! * Hide different transport layers details from file system drivers. 12 | //! * Parse transport messages according to the Fuse ABI to avoid duplicated message decoding 13 | //! in every file system driver. 14 | //! * Invoke file system driver handler to serve each request and send the reply. 15 | //! 16 | //! The Fuse API server is performance critical, so it's designed to support multi-threading by 17 | //! adopting interior-mutability. And the arcswap crate is used to implement interior-mutability. 
18 | 19 | use std::ffi::CStr; 20 | use std::io::{self, Read}; 21 | use std::marker::PhantomData; 22 | use std::mem::size_of; 23 | use std::sync::Arc; 24 | 25 | use arc_swap::ArcSwap; 26 | 27 | use crate::abi::fuse_abi::*; 28 | use crate::api::filesystem::{Context, FileSystem, ZeroCopyReader, ZeroCopyWriter}; 29 | use crate::file_traits::FileReadWriteVolatile; 30 | use crate::transport::{Reader, Writer}; 31 | use crate::{bytes_to_cstr, BitmapSlice, Error, Result}; 32 | 33 | #[cfg(feature = "async-io")] 34 | mod async_io; 35 | mod sync_io; 36 | 37 | /// Maximum buffer size of FUSE requests. 38 | #[cfg(target_os = "linux")] 39 | pub const MAX_BUFFER_SIZE: u32 = 1 << 20; 40 | /// Maximum buffer size of FUSE requests. 41 | #[cfg(target_os = "macos")] 42 | pub const MAX_BUFFER_SIZE: u32 = 1 << 25; 43 | const MIN_READ_BUFFER: u32 = 8192; 44 | const BUFFER_HEADER_SIZE: u32 = 0x1000; 45 | const DIRENT_PADDING: [u8; 8] = [0; 8]; 46 | 47 | /// Maximum number of pages required for FUSE requests. 48 | pub const MAX_REQ_PAGES: u16 = 256; // 1MB 49 | 50 | /// Fuse Server to handle requests from the Fuse client and vhost user master. 51 | pub struct Server { 52 | fs: F, 53 | vers: ArcSwap, 54 | } 55 | 56 | impl Server { 57 | /// Create a Server instance from a filesystem driver object. 
58 | pub fn new(fs: F) -> Server { 59 | Server { 60 | fs, 61 | vers: ArcSwap::new(Arc::new(ServerVersion { 62 | major: KERNEL_VERSION, 63 | minor: KERNEL_MINOR_VERSION, 64 | })), 65 | } 66 | } 67 | } 68 | 69 | struct ZcReader<'a, S: BitmapSlice = ()>(Reader<'a, S>); 70 | 71 | impl ZeroCopyReader for ZcReader<'_, S> { 72 | fn read_to( 73 | &mut self, 74 | f: &mut dyn FileReadWriteVolatile, 75 | count: usize, 76 | off: u64, 77 | ) -> io::Result { 78 | self.0.read_to_at(f, count, off) 79 | } 80 | } 81 | 82 | impl io::Read for ZcReader<'_, S> { 83 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 84 | self.0.read(buf) 85 | } 86 | } 87 | 88 | struct ZcWriter<'a, S: BitmapSlice = ()>(Writer<'a, S>); 89 | 90 | impl ZeroCopyWriter for ZcWriter<'_, S> { 91 | fn write_from( 92 | &mut self, 93 | f: &mut dyn FileReadWriteVolatile, 94 | count: usize, 95 | off: u64, 96 | ) -> io::Result { 97 | self.0.write_from_at(f, count, off) 98 | } 99 | 100 | fn available_bytes(&self) -> usize { 101 | self.0.available_bytes() 102 | } 103 | } 104 | 105 | impl io::Write for ZcWriter<'_, S> { 106 | fn write(&mut self, buf: &[u8]) -> io::Result { 107 | self.0.write(buf) 108 | } 109 | 110 | fn flush(&mut self) -> io::Result<()> { 111 | self.0.flush() 112 | } 113 | } 114 | 115 | #[allow(dead_code)] 116 | struct ServerVersion { 117 | major: u32, 118 | minor: u32, 119 | } 120 | 121 | struct ServerUtil(); 122 | 123 | impl ServerUtil { 124 | fn get_message_body( 125 | r: &mut Reader<'_, S>, 126 | in_header: &InHeader, 127 | sub_hdr_sz: usize, 128 | ) -> Result> { 129 | let len = (in_header.len as usize) 130 | .checked_sub(size_of::()) 131 | .and_then(|l| l.checked_sub(sub_hdr_sz)) 132 | .ok_or(Error::InvalidHeaderLength)?; 133 | 134 | // Allocate buffer without zeroing out the content for performance. 135 | let mut buf = Vec::::with_capacity(len); 136 | // It's safe because read_exact() is called to fill all the allocated buffer. 
137 | #[allow(clippy::uninit_vec)] 138 | unsafe { 139 | buf.set_len(len) 140 | }; 141 | r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; 142 | 143 | Ok(buf) 144 | } 145 | 146 | fn extract_two_cstrs(buf: &[u8]) -> Result<(&CStr, &CStr)> { 147 | if let Some(mut pos) = buf.iter().position(|x| *x == 0) { 148 | let first = CStr::from_bytes_with_nul(&buf[0..=pos]).map_err(Error::InvalidCString)?; 149 | pos += 1; 150 | if pos < buf.len() { 151 | return Ok((first, bytes_to_cstr(&buf[pos..])?)); 152 | } 153 | } 154 | 155 | Err(Error::DecodeMessage(std::io::Error::from_raw_os_error( 156 | libc::EINVAL, 157 | ))) 158 | } 159 | } 160 | 161 | /// Provide concrete backend filesystem a way to catch information/metrics from fuse. 162 | pub trait MetricsHook { 163 | /// `collect()` will be invoked before the real request is processed 164 | fn collect(&self, ih: &InHeader); 165 | /// `release()` will be invoked after the real request is processed 166 | fn release(&self, oh: Option<&OutHeader>); 167 | } 168 | 169 | struct SrvContext<'a, F, S: BitmapSlice = ()> { 170 | in_header: InHeader, 171 | context: Context, 172 | r: Reader<'a, S>, 173 | w: Writer<'a, S>, 174 | phantom: PhantomData, 175 | phantom2: PhantomData, 176 | } 177 | 178 | impl<'a, F: FileSystem, S: BitmapSlice> SrvContext<'a, F, S> { 179 | fn new(in_header: InHeader, r: Reader<'a, S>, w: Writer<'a, S>) -> Self { 180 | let context = Context::from(&in_header); 181 | 182 | SrvContext { 183 | in_header, 184 | context, 185 | r, 186 | w, 187 | phantom: PhantomData, 188 | phantom2: PhantomData, 189 | } 190 | } 191 | 192 | fn context(&self) -> &Context { 193 | &self.context 194 | } 195 | 196 | fn unique(&self) -> u64 { 197 | self.in_header.unique 198 | } 199 | 200 | fn nodeid(&self) -> F::Inode { 201 | self.in_header.nodeid.into() 202 | } 203 | 204 | fn take_reader(&mut self) -> Reader<'a, S> { 205 | let mut reader = Reader::default(); 206 | 207 | std::mem::swap(&mut self.r, &mut reader); 208 | 209 | reader 210 | } 211 | } 
212 | 213 | #[cfg(test)] 214 | mod tests { 215 | use super::*; 216 | #[cfg(feature = "fusedev")] 217 | use crate::transport::FuseBuf; 218 | 219 | #[test] 220 | fn test_extract_cstrs() { 221 | assert_eq!( 222 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0, 0x3, 0x0]).unwrap(), 223 | ( 224 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap(), 225 | CStr::from_bytes_with_nul(&[0x3u8, 0x0]).unwrap(), 226 | ) 227 | ); 228 | assert_eq!( 229 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0, 0x3, 0x0, 0x0]).unwrap(), 230 | ( 231 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap(), 232 | CStr::from_bytes_with_nul(&[0x3u8, 0x0]).unwrap(), 233 | ) 234 | ); 235 | assert_eq!( 236 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0, 0x3, 0x0, 0x4]).unwrap(), 237 | ( 238 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap(), 239 | CStr::from_bytes_with_nul(&[0x3u8, 0x0]).unwrap(), 240 | ) 241 | ); 242 | assert_eq!( 243 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0, 0x0, 0x4]).unwrap(), 244 | ( 245 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap(), 246 | CStr::from_bytes_with_nul(&[0x0]).unwrap(), 247 | ) 248 | ); 249 | 250 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0, 0x3]).unwrap_err(); 251 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8, 0x0]).unwrap_err(); 252 | ServerUtil::extract_two_cstrs(&[0x1u8, 0x2u8]).unwrap_err(); 253 | } 254 | 255 | #[cfg(feature = "fusedev")] 256 | #[test] 257 | fn test_get_message_body() { 258 | let mut read_buf = [0u8; 4096]; 259 | 260 | let mut r = Reader::<()>::from_fuse_buffer(FuseBuf::new(&mut read_buf)).unwrap(); 261 | let in_header = InHeader { 262 | len: 0x1000, 263 | ..Default::default() 264 | }; 265 | let buf = ServerUtil::get_message_body(&mut r, &in_header, 0).unwrap(); 266 | assert_eq!(buf.len(), 0x1000 - size_of::()); 267 | 268 | let mut r = Reader::<()>::from_fuse_buffer(FuseBuf::new(&mut read_buf)).unwrap(); 269 | let in_header = InHeader { 270 | len: 0x1000, 271 | ..Default::default() 
272 | }; 273 | let buf = ServerUtil::get_message_body(&mut r, &in_header, 0x100).unwrap(); 274 | assert_eq!(buf.len(), 0x1000 - size_of::() - 0x100); 275 | 276 | let mut r = Reader::<()>::from_fuse_buffer(FuseBuf::new(&mut read_buf)).unwrap(); 277 | let in_header = InHeader { 278 | len: 0x1000, 279 | ..Default::default() 280 | }; 281 | // shoutld fail because of invalid sub header size 282 | assert!(ServerUtil::get_message_body(&mut r, &in_header, 0x1000).is_err()); 283 | 284 | let mut r = Reader::<()>::from_fuse_buffer(FuseBuf::new(&mut read_buf)).unwrap(); 285 | let in_header = InHeader { 286 | len: 0x1000, 287 | ..Default::default() 288 | }; 289 | // shoutld fail because of invalid sub header size 290 | assert!(ServerUtil::get_message_body(&mut r, &in_header, 0x1001).is_err()); 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /src/api/vfs/async_io.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2021 Alibaba Cloud. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::io; 5 | 6 | use async_trait::async_trait; 7 | 8 | use super::*; 9 | 10 | #[async_trait] 11 | impl AsyncFileSystem for Vfs { 12 | async fn async_lookup( 13 | &self, 14 | ctx: &Context, 15 | parent: ::Inode, 16 | name: &CStr, 17 | ) -> Result { 18 | // Don't use is_safe_path_component(), allow "." and ".." for NFS export support 19 | if name.to_bytes_with_nul().contains(&SLASH_ASCII) { 20 | return Err(io::Error::from_raw_os_error(libc::EINVAL)); 21 | } 22 | 23 | match self.get_real_rootfs(parent)? 
{ 24 | (Left(fs), idata) => self.lookup_pseudo(fs, idata, ctx, name), 25 | (Right(fs), idata) => { 26 | // parent is in an underlying rootfs 27 | let mut entry = fs.async_lookup(ctx, idata.ino(), name).await?; 28 | // lookup success, hash it to a real fuse inode 29 | self.convert_entry(idata.fs_idx(), entry.inode, &mut entry) 30 | } 31 | } 32 | } 33 | 34 | async fn async_getattr( 35 | &self, 36 | ctx: &Context, 37 | inode: ::Inode, 38 | handle: Option<::Handle>, 39 | ) -> Result<(libc::stat64, Duration)> { 40 | match self.get_real_rootfs(inode)? { 41 | (Left(fs), idata) => fs.getattr(ctx, idata.ino(), handle), 42 | (Right(fs), idata) => fs.async_getattr(ctx, idata.ino(), handle).await, 43 | } 44 | } 45 | 46 | async fn async_setattr( 47 | &self, 48 | ctx: &Context, 49 | inode: ::Inode, 50 | attr: libc::stat64, 51 | handle: Option<::Handle>, 52 | valid: SetattrValid, 53 | ) -> Result<(libc::stat64, Duration)> { 54 | match self.get_real_rootfs(inode)? { 55 | (Left(fs), idata) => fs.setattr(ctx, idata.ino(), attr, handle, valid), 56 | (Right(fs), idata) => { 57 | fs.async_setattr(ctx, idata.ino(), attr, handle, valid) 58 | .await 59 | } 60 | } 61 | } 62 | 63 | async fn async_open( 64 | &self, 65 | ctx: &Context, 66 | inode: ::Inode, 67 | flags: u32, 68 | fuse_flags: u32, 69 | ) -> Result<(Option<::Handle>, OpenOptions)> { 70 | if self.opts.load().no_open { 71 | Err(Error::from_raw_os_error(libc::ENOSYS)) 72 | } else { 73 | match self.get_real_rootfs(inode)? 
{ 74 | (Left(fs), idata) => fs 75 | .open(ctx, idata.ino(), flags, fuse_flags) 76 | .map(|(a, b, _)| (a, b)), 77 | (Right(fs), idata) => fs 78 | .async_open(ctx, idata.ino(), flags, fuse_flags) 79 | .await 80 | .map(|(h, opt)| (h.map(Into::into), opt)), 81 | } 82 | } 83 | } 84 | 85 | async fn async_create( 86 | &self, 87 | ctx: &Context, 88 | parent: ::Inode, 89 | name: &CStr, 90 | args: CreateIn, 91 | ) -> Result<(Entry, Option<::Handle>, OpenOptions)> { 92 | validate_path_component(name)?; 93 | 94 | match self.get_real_rootfs(parent)? { 95 | (Left(fs), idata) => fs 96 | .create(ctx, idata.ino(), name, args) 97 | .map(|(a, b, c, _)| (a, b, c)), 98 | (Right(fs), idata) => { 99 | fs.async_create(ctx, idata.ino(), name, args) 100 | .await 101 | .map(|(mut a, b, c)| { 102 | self.convert_entry(idata.fs_idx(), a.inode, &mut a)?; 103 | Ok((a, b, c)) 104 | })? 105 | } 106 | } 107 | } 108 | 109 | #[allow(clippy::too_many_arguments)] 110 | async fn async_read( 111 | &self, 112 | ctx: &Context, 113 | inode: ::Inode, 114 | handle: ::Handle, 115 | w: &mut (dyn AsyncZeroCopyWriter + Send), 116 | size: u32, 117 | offset: u64, 118 | lock_owner: Option, 119 | flags: u32, 120 | ) -> Result { 121 | match self.get_real_rootfs(inode)? { 122 | (Left(_fs), _idata) => Err(io::Error::from_raw_os_error(libc::ENOSYS)), 123 | (Right(fs), idata) => { 124 | fs.async_read(ctx, idata.ino(), handle, w, size, offset, lock_owner, flags) 125 | .await 126 | } 127 | } 128 | } 129 | 130 | #[allow(clippy::too_many_arguments)] 131 | async fn async_write( 132 | &self, 133 | ctx: &Context, 134 | inode: ::Inode, 135 | handle: ::Handle, 136 | r: &mut (dyn AsyncZeroCopyReader + Send), 137 | size: u32, 138 | offset: u64, 139 | lock_owner: Option, 140 | delayed_write: bool, 141 | flags: u32, 142 | fuse_flags: u32, 143 | ) -> Result { 144 | match self.get_real_rootfs(inode)? 
{ 145 | (Left(_fs), _idata) => Err(io::Error::from_raw_os_error(libc::ENOSYS)), 146 | (Right(fs), idata) => { 147 | fs.async_write( 148 | ctx, 149 | idata.ino(), 150 | handle, 151 | r, 152 | size, 153 | offset, 154 | lock_owner, 155 | delayed_write, 156 | flags, 157 | fuse_flags, 158 | ) 159 | .await 160 | } 161 | } 162 | } 163 | 164 | async fn async_fsync( 165 | &self, 166 | ctx: &Context, 167 | inode: ::Inode, 168 | datasync: bool, 169 | handle: ::Handle, 170 | ) -> Result<()> { 171 | match self.get_real_rootfs(inode)? { 172 | (Left(fs), idata) => fs.fsync(ctx, idata.ino(), datasync, handle), 173 | (Right(fs), idata) => fs.async_fsync(ctx, idata.ino(), datasync, handle).await, 174 | } 175 | } 176 | 177 | async fn async_fallocate( 178 | &self, 179 | ctx: &Context, 180 | inode: ::Inode, 181 | handle: ::Handle, 182 | mode: u32, 183 | offset: u64, 184 | length: u64, 185 | ) -> Result<()> { 186 | match self.get_real_rootfs(inode)? { 187 | (Left(fs), idata) => fs.fallocate(ctx, idata.ino(), handle, mode, offset, length), 188 | (Right(fs), idata) => { 189 | fs.async_fallocate(ctx, idata.ino(), handle, mode, offset, length) 190 | .await 191 | } 192 | } 193 | } 194 | 195 | async fn async_fsyncdir( 196 | &self, 197 | ctx: &Context, 198 | inode: ::Inode, 199 | datasync: bool, 200 | handle: ::Handle, 201 | ) -> Result<()> { 202 | match self.get_real_rootfs(inode)? 
{ 203 | (Left(fs), idata) => fs.fsyncdir(ctx, idata.ino(), datasync, handle), 204 | (Right(fs), idata) => fs.async_fsyncdir(ctx, idata.ino(), datasync, handle).await, 205 | } 206 | } 207 | } 208 | 209 | #[cfg(test)] 210 | mod tests { 211 | use super::super::tests::FakeFileSystemOne; 212 | use super::*; 213 | use crate::api::Vfs; 214 | 215 | use std::ffi::CString; 216 | 217 | #[tokio::test] 218 | async fn test_vfs_async_lookup() { 219 | let vfs = Vfs::new(VfsOptions::default()); 220 | let fs = FakeFileSystemOne {}; 221 | let ctx = Context { 222 | uid: 0, 223 | gid: 0, 224 | pid: 0, 225 | }; 226 | 227 | assert!(vfs.mount(Box::new(fs), "/x/y").is_ok()); 228 | 229 | let handle = tokio::spawn(async move { 230 | // Lookup inode on pseudo file system. 231 | let name = CString::new("x").unwrap(); 232 | let future = vfs.async_lookup(&ctx, ROOT_ID.into(), name.as_c_str()); 233 | let entry1 = future.await.unwrap(); 234 | assert_eq!(entry1.inode, 0x2); 235 | 236 | // Lookup inode on mounted file system. 237 | let entry2 = vfs 238 | .async_lookup( 239 | &ctx, 240 | entry1.inode.into(), 241 | CString::new("y").unwrap().as_c_str(), 242 | ) 243 | .await 244 | .unwrap(); 245 | assert_eq!(entry2.inode, 0x100_0000_0000_0001); 246 | 247 | // lookup for negative result. 248 | let entry3 = vfs 249 | .async_lookup( 250 | &ctx, 251 | entry2.inode.into(), 252 | CString::new("z").unwrap().as_c_str(), 253 | ) 254 | .await 255 | .unwrap(); 256 | assert_eq!(entry3.inode, 0); 257 | }); 258 | handle.await.unwrap(); 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /src/common/async_file.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2022 Alibaba Cloud. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | //! `File` to wrap over `tokio::fs::File` and `tokio-uring::fs::File`. 
6 | 7 | use std::fmt::{Debug, Formatter}; 8 | use std::io::{ErrorKind, IoSlice, IoSliceMut}; 9 | use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; 10 | use std::path::Path; 11 | 12 | use crate::async_runtime::{RuntimeType, RUNTIME_TYPE}; 13 | use crate::file_buf::FileVolatileBuf; 14 | use crate::{off64_t, preadv64, pwritev64}; 15 | 16 | /// An adapter enum to support both tokio and tokio-uring asynchronous `File`. 17 | pub enum File { 18 | /// Tokio asynchronous `File`. 19 | Tokio(tokio::fs::File), 20 | #[cfg(target_os = "linux")] 21 | /// Tokio-uring asynchronous `File`. 22 | Uring(tokio_uring::fs::File), 23 | } 24 | 25 | impl File { 26 | /// Asynchronously open a file. 27 | pub async fn async_open>( 28 | path: P, 29 | write: bool, 30 | create: bool, 31 | ) -> std::io::Result { 32 | match *RUNTIME_TYPE { 33 | RuntimeType::Tokio => tokio::fs::OpenOptions::new() 34 | .read(true) 35 | .write(write) 36 | .create(create) 37 | .open(path) 38 | .await 39 | .map(File::Tokio), 40 | #[cfg(target_os = "linux")] 41 | RuntimeType::Uring => tokio_uring::fs::OpenOptions::new() 42 | .read(true) 43 | .write(write) 44 | .create(create) 45 | .open(path) 46 | .await 47 | .map(File::Uring), 48 | } 49 | } 50 | 51 | /// Asynchronously read data at `offset` into the buffer. 52 | pub async fn async_read_at( 53 | &self, 54 | buf: FileVolatileBuf, 55 | offset: u64, 56 | ) -> (std::io::Result, FileVolatileBuf) { 57 | match self { 58 | File::Tokio(f) => { 59 | // tokio::fs::File doesn't support read_at() yet. 60 | //f.read_at(buf, offset).await, 61 | let mut bufs = [buf]; 62 | let res = preadv(f.as_raw_fd(), &mut bufs, offset); 63 | (res, bufs[0]) 64 | } 65 | #[cfg(target_os = "linux")] 66 | File::Uring(f) => f.read_at(buf, offset).await, 67 | } 68 | } 69 | 70 | /// Asynchronously read data at `offset` into buffers.
71 | pub async fn async_readv_at( 72 | &self, 73 | mut bufs: Vec, 74 | offset: u64, 75 | ) -> (std::io::Result, Vec) { 76 | match self { 77 | File::Tokio(f) => { 78 | // tokio::fs::File doesn't support readv_at() yet. 79 | //f.readv_at(bufs, offset).await, 80 | let res = preadv(f.as_raw_fd(), &mut bufs, offset); 81 | (res, bufs) 82 | } 83 | #[cfg(target_os = "linux")] 84 | File::Uring(f) => f.readv_at(bufs, offset).await, 85 | } 86 | } 87 | 88 | /// Asynchronously write data at `offset` from the buffer. 89 | pub async fn async_write_at( 90 | &self, 91 | buf: FileVolatileBuf, 92 | offset: u64, 93 | ) -> (std::io::Result, FileVolatileBuf) { 94 | match self { 95 | File::Tokio(f) => { 96 | // tokio::fs::File doesn't support write_at() yet. 97 | //f.write_at(buf, offset).await, 98 | let bufs = [buf]; 99 | let res = pwritev(f.as_raw_fd(), &bufs, offset); 100 | (res, bufs[0]) 101 | } 102 | #[cfg(target_os = "linux")] 103 | File::Uring(f) => f.write_at(buf, offset).await, 104 | } 105 | } 106 | 107 | /// Asynchronously write data at `offset` from buffers. 108 | pub async fn async_writev_at( 109 | &self, 110 | bufs: Vec, 111 | offset: u64, 112 | ) -> (std::io::Result, Vec) { 113 | match self { 114 | File::Tokio(f) => { 115 | // tokio::fs::File doesn't support writev_at() yet. 116 | //f.writev_at(bufs, offset).await, 117 | let res = pwritev(f.as_raw_fd(), &bufs, offset); 118 | (res, bufs) 119 | } 120 | #[cfg(target_os = "linux")] 121 | File::Uring(f) => f.writev_at(bufs, offset).await, 122 | } 123 | } 124 | 125 | /// Get metadata about the file. 126 | pub fn metadata(&self) -> std::io::Result { 127 | // Safe because we have manually forget() the `file` object below. 128 | let file = unsafe { std::fs::File::from_raw_fd(self.as_raw_fd()) }; 129 | let res = file.metadata(); 130 | std::mem::forget(file); 131 | res 132 | } 133 | 134 | /// Try to clone the file object.
135 | pub async fn async_try_clone(&self) -> std::io::Result { 136 | match self { 137 | File::Tokio(f) => f.try_clone().await.map(File::Tokio), 138 | #[cfg(target_os = "linux")] 139 | File::Uring(f) => { 140 | // Safe because file.as_raw_fd() is valid RawFd and we have checked the result. 141 | let fd = unsafe { libc::dup(f.as_raw_fd()) }; 142 | if fd < 0 { 143 | Err(std::io::Error::last_os_error()) 144 | } else { 145 | // Safe because we dup a new raw fd. 146 | Ok(File::Uring(unsafe { 147 | tokio_uring::fs::File::from_raw_fd(fd) 148 | })) 149 | } 150 | } 151 | } 152 | } 153 | } 154 | 155 | impl AsRawFd for File { 156 | fn as_raw_fd(&self) -> RawFd { 157 | match self { 158 | File::Tokio(f) => f.as_raw_fd(), 159 | #[cfg(target_os = "linux")] 160 | File::Uring(f) => f.as_raw_fd(), 161 | } 162 | } 163 | } 164 | 165 | impl Debug for File { 166 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 167 | let fd = self.as_raw_fd(); 168 | write!(f, "Async File {}", fd) 169 | } 170 | } 171 | 172 | /// A simple wrapper over posix `preadv` to deal with `FileVolatileBuf`. 173 | pub fn preadv(fd: RawFd, bufs: &mut [FileVolatileBuf], offset: u64) -> std::io::Result { 174 | let iov: Vec = bufs.iter().map(|v| v.io_slice_mut()).collect(); 175 | 176 | loop { 177 | // SAFETY: it is ABI compatible, a pointer cast here is valid 178 | let res = unsafe { 179 | preadv64( 180 | fd, 181 | iov.as_ptr() as *const libc::iovec, 182 | iov.len() as libc::c_int, 183 | offset as off64_t, 184 | ) 185 | }; 186 | 187 | if res >= 0 { 188 | let mut count = res as usize; 189 | for buf in bufs.iter_mut() { 190 | let cnt = std::cmp::min(count, buf.cap() - buf.len()); 191 | unsafe { buf.set_size(buf.len() + cnt) }; 192 | count -= cnt; 193 | if count == 0 { 194 | break; 195 | } 196 | } 197 | assert_eq!(count, 0); 198 | return Ok(res as usize); 199 | } else { 200 | let e = std::io::Error::last_os_error(); 201 | // Retry if the IO is interrupted by signal. 
202 | if e.kind() != ErrorKind::Interrupted { 203 | return Err(e); 204 | } 205 | } 206 | } 207 | } 208 | 209 | /// A simple wrapper over posix `pwritev` to deal with `FileVolatileBuf`. 210 | pub fn pwritev(fd: RawFd, bufs: &[FileVolatileBuf], offset: u64) -> std::io::Result { 211 | let iov: Vec = bufs.iter().map(|v| v.io_slice()).collect(); 212 | 213 | loop { 214 | // SAFETY: it is ABI compatible, a pointer cast here is valid 215 | let res = unsafe { 216 | pwritev64( 217 | fd, 218 | iov.as_ptr() as *const libc::iovec, 219 | iov.len() as libc::c_int, 220 | offset as off64_t, 221 | ) 222 | }; 223 | 224 | if res >= 0 { 225 | return Ok(res as usize); 226 | } else { 227 | let e = std::io::Error::last_os_error(); 228 | // Retry if the IO is interrupted by signal. 229 | if e.kind() != ErrorKind::Interrupted { 230 | return Err(e); 231 | } 232 | } 233 | } 234 | } 235 | 236 | #[cfg(test)] 237 | mod tests { 238 | use super::*; 239 | use crate::async_runtime::block_on; 240 | use vmm_sys_util::tempdir::TempDir; 241 | 242 | #[test] 243 | fn test_new_async_file() { 244 | let dir = TempDir::new().unwrap(); 245 | let path = dir.as_path().to_path_buf().join("test.txt"); 246 | std::fs::write(&path, b"test").unwrap(); 247 | 248 | let file = block_on(async { File::async_open(&path, false, false).await.unwrap() }); 249 | assert!(file.as_raw_fd() >= 0); 250 | drop(file); 251 | } 252 | 253 | #[test] 254 | fn test_async_file_metadata() { 255 | let dir = TempDir::new().unwrap(); 256 | let path = dir.as_path().to_path_buf(); 257 | std::fs::write(path.join("test.txt"), b"test").unwrap(); 258 | let file = block_on(async { 259 | File::async_open(path.join("test.txt"), false, false) 260 | .await 261 | .unwrap() 262 | }); 263 | 264 | let md = file.metadata().unwrap(); 265 | assert!(md.is_file()); 266 | let md = file.metadata().unwrap(); 267 | assert!(md.is_file()); 268 | 269 | drop(file); 270 | } 271 | 272 | #[test] 273 | fn test_async_read_at() { 274 | let dir = TempDir::new().unwrap(); 275 | 
let path = dir.as_path().to_path_buf(); 276 | std::fs::write(path.join("test.txt"), b"test").unwrap(); 277 | 278 | block_on(async { 279 | let file = File::async_open(path.join("test.txt"), false, false) 280 | .await 281 | .unwrap(); 282 | 283 | let mut buffer = [0u8; 3]; 284 | let buf = unsafe { FileVolatileBuf::new(&mut buffer) }; 285 | let (res, buf) = file.async_read_at(buf, 0).await; 286 | assert_eq!(res.unwrap(), 3); 287 | assert_eq!(buf.len(), 3); 288 | let buf = unsafe { FileVolatileBuf::new(&mut buffer) }; 289 | let (res, buf) = file.async_read_at(buf, 2).await; 290 | assert_eq!(res.unwrap(), 2); 291 | assert_eq!(buf.len(), 2); 292 | }); 293 | } 294 | 295 | #[test] 296 | fn test_async_readv_at() { 297 | let dir = TempDir::new().unwrap(); 298 | let path = dir.as_path().to_path_buf(); 299 | std::fs::write(path.join("test.txt"), b"test").unwrap(); 300 | 301 | block_on(async { 302 | let file = File::async_open(path.join("test.txt"), false, false) 303 | .await 304 | .unwrap(); 305 | 306 | let mut buffer = [0u8; 3]; 307 | let buf = unsafe { FileVolatileBuf::new(&mut buffer) }; 308 | let mut buffer2 = [0u8; 3]; 309 | let buf2 = unsafe { FileVolatileBuf::new(&mut buffer2) }; 310 | let bufs = vec![buf, buf2]; 311 | let (res, bufs) = file.async_readv_at(bufs, 0).await; 312 | 313 | assert_eq!(res.unwrap(), 4); 314 | assert_eq!(bufs[0].len(), 3); 315 | assert_eq!(bufs[1].len(), 1); 316 | }); 317 | } 318 | 319 | #[test] 320 | fn test_async_write_at() { 321 | let dir = TempDir::new().unwrap(); 322 | let path = dir.as_path().to_path_buf(); 323 | 324 | block_on(async { 325 | let file = File::async_open(path.join("test.txt"), true, true) 326 | .await 327 | .unwrap(); 328 | 329 | let buffer = b"test"; 330 | let buf = unsafe { 331 | FileVolatileBuf::from_raw_ptr( 332 | buffer.as_ptr() as *mut u8, 333 | buffer.len(), 334 | buffer.len(), 335 | ) 336 | }; 337 | let (res, buf) = file.async_write_at(buf, 0).await; 338 | assert_eq!(res.unwrap(), 4); 339 | assert_eq!(buf.len(), 4); 
340 | 341 | let res = std::fs::read_to_string(path.join("test.txt")).unwrap(); 342 | assert_eq!(&res, "test"); 343 | }); 344 | } 345 | 346 | #[test] 347 | fn test_async_writev_at() { 348 | let dir = TempDir::new().unwrap(); 349 | let path = dir.as_path().to_path_buf(); 350 | 351 | block_on(async { 352 | let file = File::async_open(path.join("test.txt"), true, true) 353 | .await 354 | .unwrap(); 355 | 356 | let buffer = b"tes"; 357 | let buf = unsafe { 358 | FileVolatileBuf::from_raw_ptr( 359 | buffer.as_ptr() as *mut u8, 360 | buffer.len(), 361 | buffer.len(), 362 | ) 363 | }; 364 | let buffer2 = b"t"; 365 | let buf2 = unsafe { 366 | FileVolatileBuf::from_raw_ptr( 367 | buffer2.as_ptr() as *mut u8, 368 | buffer2.len(), 369 | buffer2.len(), 370 | ) 371 | }; 372 | let bufs = vec![buf, buf2]; 373 | let (res, bufs) = file.async_writev_at(bufs, 0).await; 374 | 375 | assert_eq!(res.unwrap(), 4); 376 | assert_eq!(bufs[0].len(), 3); 377 | assert_eq!(bufs[1].len(), 1); 378 | 379 | let res = std::fs::read_to_string(path.join("test.txt")).unwrap(); 380 | assert_eq!(&res, "test"); 381 | }); 382 | } 383 | 384 | #[test] 385 | fn test_async_try_clone() { 386 | let dir = TempDir::new().unwrap(); 387 | let path = dir.as_path().to_path_buf(); 388 | 389 | block_on(async { 390 | let file = File::async_open(path.join("test.txt"), true, true) 391 | .await 392 | .unwrap(); 393 | 394 | let file2 = file.async_try_clone().await.unwrap(); 395 | drop(file); 396 | 397 | let buffer = b"test"; 398 | let buf = unsafe { 399 | FileVolatileBuf::from_raw_ptr( 400 | buffer.as_ptr() as *mut u8, 401 | buffer.len(), 402 | buffer.len(), 403 | ) 404 | }; 405 | let (res, buf) = file2.async_write_at(buf, 0).await; 406 | assert_eq!(res.unwrap(), 4); 407 | assert_eq!(buf.len(), 4); 408 | }); 409 | } 410 | } 411 | -------------------------------------------------------------------------------- /src/common/async_runtime.rs: -------------------------------------------------------------------------------- 1 | // 
Copyright (C) 2022 Alibaba Cloud. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | //! `Runtime` to wrap over tokio current-thread `Runtime` and tokio-uring `Runtime`. 6 | 7 | use std::future::Future; 8 | 9 | use lazy_static::lazy_static; 10 | 11 | lazy_static! { 12 | pub(crate) static ref RUNTIME_TYPE: RuntimeType = RuntimeType::new(); 13 | } 14 | 15 | pub(crate) enum RuntimeType { 16 | Tokio, 17 | #[cfg(target_os = "linux")] 18 | Uring, 19 | } 20 | 21 | impl RuntimeType { 22 | fn new() -> Self { 23 | #[cfg(target_os = "linux")] 24 | { 25 | if Self::probe_io_uring() { 26 | return Self::Uring; 27 | } 28 | } 29 | Self::Tokio 30 | } 31 | 32 | #[cfg(target_os = "linux")] 33 | fn probe_io_uring() -> bool { 34 | use io_uring::{opcode, IoUring, Probe}; 35 | 36 | let io_uring = match IoUring::new(1) { 37 | Ok(io_uring) => io_uring, 38 | Err(_) => { 39 | return false; 40 | } 41 | }; 42 | let submitter = io_uring.submitter(); 43 | 44 | let mut probe = Probe::new(); 45 | 46 | // Check we can register a probe to validate supported operations. 47 | if let Err(_) = submitter.register_probe(&mut probe) { 48 | return false; 49 | } 50 | 51 | // Check IORING_OP_FSYNC is supported 52 | if !probe.is_supported(opcode::Fsync::CODE) { 53 | return false; 54 | } 55 | 56 | // Check IORING_OP_READ is supported 57 | if !probe.is_supported(opcode::Read::CODE) { 58 | return false; 59 | } 60 | 61 | // Check IORING_OP_WRITE is supported 62 | if !probe.is_supported(opcode::Write::CODE) { 63 | return false; 64 | } 65 | return true; 66 | } 67 | } 68 | 69 | /// An adapter enum to support both tokio current-thread Runtime and tokio-uring Runtime. 70 | pub enum Runtime { 71 | /// Tokio current thread Runtime. 72 | Tokio(tokio::runtime::Runtime), 73 | #[cfg(target_os = "linux")] 74 | /// Tokio-uring Runtime. 75 | Uring(std::sync::Mutex), 76 | } 77 | 78 | impl Runtime { 79 | /// Create a new instance of async Runtime. 
80 | /// 81 | /// A `tokio-uring::Runtime` is create if io-uring is available, otherwise a tokio current 82 | /// thread Runtime will be created. 83 | /// 84 | /// # Panic 85 | /// Panic if failed to create the Runtime object. 86 | pub fn new() -> Self { 87 | // Check whether io-uring is available. 88 | #[cfg(target_os = "linux")] 89 | if matches!(*RUNTIME_TYPE, RuntimeType::Uring) { 90 | if let Ok(rt) = tokio_uring::Runtime::new(&tokio_uring::builder()) { 91 | return Runtime::Uring(std::sync::Mutex::new(rt)); 92 | } 93 | } 94 | 95 | // Create tokio runtime if io-uring is not supported. 96 | let rt = tokio::runtime::Builder::new_current_thread() 97 | .enable_all() 98 | .build() 99 | .expect("utils: failed to create tokio runtime for current thread"); 100 | Runtime::Tokio(rt) 101 | } 102 | 103 | /// Run a future to completion. 104 | pub fn block_on(&self, f: F) -> F::Output { 105 | match self { 106 | Runtime::Tokio(rt) => rt.block_on(f), 107 | #[cfg(target_os = "linux")] 108 | Runtime::Uring(rt) => rt.lock().unwrap().block_on(f), 109 | } 110 | } 111 | 112 | /// Spawns a new asynchronous task, returning a [`JoinHandle`] for it. 113 | /// 114 | /// Spawning a task enables the task to execute concurrently to other tasks. 115 | /// There is no guarantee that a spawned task will execute to completion. When a 116 | /// runtime is shutdown, all outstanding tasks are dropped, regardless of the 117 | /// lifecycle of that task. 118 | /// 119 | /// This function must be called from the context of a `tokio-uring` runtime. 120 | /// 121 | /// [`JoinHandle`]: tokio::task::JoinHandle 122 | pub fn spawn( 123 | &self, 124 | task: T, 125 | ) -> tokio::task::JoinHandle { 126 | match self { 127 | Runtime::Tokio(_) => tokio::task::spawn_local(task), 128 | #[cfg(target_os = "linux")] 129 | Runtime::Uring(_) => tokio_uring::spawn(task), 130 | } 131 | } 132 | } 133 | 134 | /// Start an async runtime. 
135 | pub fn start(future: F) -> F::Output { 136 | Runtime::new().block_on(future) 137 | } 138 | 139 | impl Default for Runtime { 140 | fn default() -> Self { 141 | Runtime::new() 142 | } 143 | } 144 | 145 | /// Run a callback with the default `Runtime` object. 146 | pub fn with_runtime(f: F) -> R 147 | where 148 | F: FnOnce(&Runtime) -> R, 149 | { 150 | let rt = Runtime::new(); 151 | f(&rt) 152 | } 153 | 154 | /// Run a future to completion with the default `Runtime` object. 155 | pub fn block_on(f: F) -> F::Output { 156 | Runtime::new().block_on(f) 157 | } 158 | 159 | /// Spawns a new asynchronous task with the defualt `Runtime`, returning a [`JoinHandle`] for it. 160 | /// 161 | /// Spawning a task enables the task to execute concurrently to other tasks. 162 | /// There is no guarantee that a spawned task will execute to completion. When a 163 | /// runtime is shutdown, all outstanding tasks are dropped, regardless of the 164 | /// lifecycle of that task. 165 | /// 166 | /// This will create a new Runtime to run spawn. 167 | /// 168 | /// [`JoinHandle`]: tokio::task::JoinHandle 169 | pub fn spawn(task: T) -> tokio::task::JoinHandle { 170 | let rt = Runtime::new(); 171 | rt.spawn(task) 172 | } 173 | 174 | #[cfg(test)] 175 | mod tests { 176 | use super::*; 177 | 178 | #[test] 179 | fn test_with_runtime() { 180 | let res = with_runtime(|rt| rt.block_on(async { 1 })); 181 | assert_eq!(res, 1); 182 | 183 | let res = with_runtime(|rt| rt.block_on(async { 3 })); 184 | assert_eq!(res, 3); 185 | } 186 | 187 | #[test] 188 | fn test_block_on() { 189 | let res = block_on(async { 1 }); 190 | assert_eq!(res, 1); 191 | 192 | let res = block_on(async { 3 }); 193 | assert_eq!(res, 3); 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /src/common/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2022 Alibaba Cloud. All rights reserved. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | //! Some utilities to support fuse-backend-rs. 6 | //! 7 | //! ### Wrappers for Rust async io 8 | //! It's challenging to support Rust async io, and it's even more challenging to support Rust async io with Linux io-uring. 9 | //! 10 | //! This `common` module adds a wrapper layer over [tokio](https://github.com/tokio-rs/tokio) and [tokio-uring](https://github.com/tokio-rs/tokio-uring) to simplify the way to support Rust async io by providing: 11 | //! - [FileReadWriteVolatile](https://github.com/dragonflyoss/image-service): A trait similar to [std::io::Read] and [std::io::Write], but uses [FileVolatileSlice](https://github.com/dragonflyoss/image-service) objects as data buffers. 12 | //! - [FileVolatileSlice](crate::buf::FileVolatileSlice): An adapter structure to work around limitations of the [vm-memory](https://github.com/rust-vmm/vm-memory) crate. 13 | //! - [FileVolatileBuf](crate::buf::FileVolatileBuf): An adapter structure to support [io-uring](https://github.com/tokio-rs/io-uring) based asynchronous IO. 14 | //! - [File](crate::async_file::File): An adapter for for [tokio::fs::File] and [tokio-uring::fs::File]. 15 | //! - [Runtime](crate::async_runtime::Runtime): An adapter for for [tokio::runtime::Runtime] and [tokio-uring::Runtime]. 
16 | 17 | pub mod file_buf; 18 | pub mod file_traits; 19 | 20 | #[cfg(feature = "async-io")] 21 | pub mod async_file; 22 | #[cfg(feature = "async-io")] 23 | pub mod async_runtime; 24 | #[cfg(feature = "async-io")] 25 | pub mod mpmc; 26 | 27 | #[cfg(target_os = "linux")] 28 | #[doc(hidden)] 29 | pub use libc::{off64_t, pread64, preadv64, pwrite64, pwritev64}; 30 | #[cfg(target_os = "macos")] 31 | #[doc(hidden)] 32 | pub use libc::{ 33 | off_t as off64_t, pread as pread64, preadv as preadv64, pwrite as pwrite64, 34 | pwritev as pwritev64, 35 | }; 36 | -------------------------------------------------------------------------------- /src/common/mpmc.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2022 Alibaba Cloud. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | // Async implementation of Multi-Producer-Multi-Consumer channel. 5 | 6 | //! Asynchronous Multi-Producer Multi-Consumer channel. 7 | //! 8 | //! This module provides an asynchronous multi-producer multi-consumer channel based on [tokio::sync::Notify]. 9 | 10 | use std::collections::VecDeque; 11 | use std::io::{Error, ErrorKind, Result}; 12 | use std::sync::atomic::{AtomicBool, Ordering}; 13 | use std::sync::{Mutex, MutexGuard}; 14 | use tokio::sync::Notify; 15 | 16 | /// An asynchronous multi-producer multi-consumer channel based on [tokio::sync::Notify]. 17 | pub struct Channel { 18 | closed: AtomicBool, 19 | notifier: Notify, 20 | requests: Mutex>, 21 | } 22 | 23 | impl Default for Channel { 24 | fn default() -> Self { 25 | Self::new() 26 | } 27 | } 28 | 29 | impl Channel { 30 | /// Create a new instance of [`Channel`]. 31 | pub fn new() -> Self { 32 | Channel { 33 | closed: AtomicBool::new(false), 34 | notifier: Notify::new(), 35 | requests: Mutex::new(VecDeque::new()), 36 | } 37 | } 38 | 39 | /// Close the channel. 
40 | pub fn close(&self) { 41 | self.closed.store(true, Ordering::Release); 42 | self.notifier.notify_waiters(); 43 | } 44 | 45 | /// Send a message to the channel. 46 | /// 47 | /// The message object will be returned on error, to ease the lifecycle management. 48 | pub fn send(&self, msg: T) -> std::result::Result<(), T> { 49 | if self.closed.load(Ordering::Acquire) { 50 | Err(msg) 51 | } else { 52 | self.requests.lock().unwrap().push_back(msg); 53 | self.notifier.notify_one(); 54 | Ok(()) 55 | } 56 | } 57 | 58 | /// Try to receive a message from the channel. 59 | pub fn try_recv(&self) -> Option { 60 | self.requests.lock().unwrap().pop_front() 61 | } 62 | 63 | /// Receive message from the channel in asynchronous mode. 64 | pub async fn recv(&self) -> Result { 65 | let future = self.notifier.notified(); 66 | tokio::pin!(future); 67 | 68 | loop { 69 | // Make sure that no wakeup is lost if we get `None` from `try_recv`. 70 | future.as_mut().enable(); 71 | 72 | if let Some(msg) = self.try_recv() { 73 | return Ok(msg); 74 | } else if self.closed.load(Ordering::Acquire) { 75 | return Err(Error::new(ErrorKind::BrokenPipe, "channel has been closed")); 76 | } 77 | 78 | // Wait for a call to `notify_one`. 79 | // 80 | // This uses `.as_mut()` to avoid consuming the future, 81 | // which lets us call `Pin::set` below. 82 | future.as_mut().await; 83 | 84 | // Reset the future in case another call to `try_recv` got the message before us. 85 | future.set(self.notifier.notified()); 86 | } 87 | } 88 | 89 | /// Flush all pending requests specified by the predicator. 90 | /// 91 | pub fn flush_pending_prefetch_requests(&self, mut f: F) 92 | where 93 | F: FnMut(&T) -> bool, 94 | { 95 | self.requests.lock().unwrap().retain(|t| !f(t)); 96 | } 97 | 98 | /// Lock the channel to block all queue operations. 99 | pub fn lock_channel(&self) -> MutexGuard> { 100 | self.requests.lock().unwrap() 101 | } 102 | 103 | /// Notify all waiters. 
104 | pub fn notify_waiters(&self) { 105 | self.notifier.notify_waiters(); 106 | } 107 | } 108 | 109 | #[cfg(test)] 110 | mod tests { 111 | use super::*; 112 | use std::sync::Arc; 113 | 114 | #[test] 115 | fn test_new_channel() { 116 | let channel = Channel::new(); 117 | 118 | channel.send(1u32).unwrap(); 119 | channel.send(2u32).unwrap(); 120 | assert_eq!(channel.try_recv().unwrap(), 1); 121 | assert_eq!(channel.try_recv().unwrap(), 2); 122 | 123 | channel.close(); 124 | channel.send(2u32).unwrap_err(); 125 | } 126 | 127 | #[test] 128 | fn test_flush_channel() { 129 | let channel = Channel::new(); 130 | 131 | channel.send(1u32).unwrap(); 132 | channel.send(2u32).unwrap(); 133 | channel.flush_pending_prefetch_requests(|_| true); 134 | assert!(channel.try_recv().is_none()); 135 | 136 | channel.notify_waiters(); 137 | let _guard = channel.lock_channel(); 138 | } 139 | 140 | #[test] 141 | fn test_async_recv() { 142 | let channel = Arc::new(Channel::new()); 143 | let channel2 = channel.clone(); 144 | 145 | let t = std::thread::spawn(move || { 146 | channel2.send(1u32).unwrap(); 147 | }); 148 | 149 | let rt = tokio::runtime::Builder::new_current_thread() 150 | .enable_all() 151 | .build() 152 | .unwrap(); 153 | rt.block_on(async { 154 | let msg = channel.recv().await.unwrap(); 155 | assert_eq!(msg, 1); 156 | }); 157 | 158 | t.join().unwrap(); 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020 Alibaba Cloud. All rights reserved. 2 | // Copyright © 2019 Intel Corporation 3 | // 4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | #![deny(missing_docs)] 7 | #![allow(unexpected_cfgs)] 8 | 9 | //! A rust library for Fuse(filesystem in userspace) servers and virtio-fs devices. 10 | //! 11 | //! Filesystem in Userspace [`FUSE`](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) 12 | //! 
is a software interface for Unix and Unix-like computer operating systems that lets 13 | //! non-privileged users create their own file systems without editing kernel code. 14 | //! This is achieved by running file system code in user space while the FUSE module provides 15 | //! only a "bridge" to the actual kernel interfaces. 16 | //! 17 | //! On Linux, the FUSE device driver is a general purpose filesystem abstraction layer, which 18 | //! loads as a kernel module and presents a virtual device (/dev/fuse) to communicate with 19 | //! a user (non-kernel) program via a well defined API. The user code need not run with root 20 | //! priviledge if it does not need to access protected data or devices, and can implement 21 | //! a virtual filesystem much more simply than a traditional device driver. 22 | //! 23 | //! In addition to traditional Fuse filesystems, the 24 | //! [virtiofs](https://www.kernel.org/doc/html/latest/filesystems/virtiofs.html) 25 | //! file system for Linux implements a driver for the paravirtualized VIRTIO “virtio-fs” device 26 | //! for guest<->host file system sharing. It allows a guest to mount a directory that has 27 | //! been exported on the host. 28 | //! 29 | //! Virtio-fs uses FUSE as the foundation. Unlike traditional FUSE where the file system daemon 30 | //! runs in userspace, the virtio-fs daemon runs on the host. A VIRTIO device carries FUSE 31 | //! messages and provides extensions for advanced features not available in traditional FUSE. 32 | //! Since the virtio-fs device uses the FUSE protocol for file system requests, the virtiofs 33 | //! file system for Linux is integrated closely with the FUSE file system client. The guest acts 34 | //! as the FUSE client while the host acts as the FUSE server. The /dev/fuse interface between 35 | //! the kernel and userspace is replaced with the virtio-fs device interface. 36 | //! 37 | //! The fuse-backend-rs crate includes several subsystems: 38 | //! * [Fuse API](api/index.html). 
The Fuse API is the connection between transport layers and file 39 | //! system drivers. It receives Fuse requests from transport layers, parses the request 40 | //! according to Fuse ABI, invokes filesystem drivers to server the requests, and eventually 41 | //! send back the result to the transport layer. 42 | //! * [Fuse ABI](abi/index.html). Currently only Linux Fuse ABIs since v7.27 are supported. 43 | //! * [Transport Layer](transport/index.html). The transport layer receives Fuse requests from 44 | //! the clients and sends back replies. Currently there are two transport layers are supported: 45 | //! Linux Fuse device(/dev/fuse) and virtiofs. 46 | //! * Filesystem Drivers. Filesystem drivers implement the concrete Fuse filesystem logic, 47 | //! at what ever is suitable. A default ["passthrough"](passthrough/index.html) filesystem 48 | //! driver is implemented as a sample. 49 | 50 | extern crate bitflags; 51 | extern crate libc; 52 | #[macro_use] 53 | extern crate log; 54 | extern crate vm_memory; 55 | 56 | use std::ffi::{CStr, FromBytesWithNulError}; 57 | use std::io::ErrorKind; 58 | use std::{error, fmt, io}; 59 | 60 | use vm_memory::bitmap::BitmapSlice; 61 | 62 | /// Error codes for Fuse related operations. 63 | #[derive(Debug)] 64 | pub enum Error { 65 | /// Failed to decode protocol messages. 66 | DecodeMessage(io::Error), 67 | /// Failed to encode protocol messages. 68 | EncodeMessage(io::Error), 69 | /// One or more parameters are missing. 70 | MissingParameter, 71 | /// A C string parameter is invalid. 72 | InvalidCString(FromBytesWithNulError), 73 | /// The `len` field of the header is too small. 74 | InvalidHeaderLength, 75 | /// The `size` field of the `SetxattrIn` message does not match the length 76 | /// of the decoded value. 77 | InvalidXattrSize((u32, usize)), 78 | /// Invalid message that the server cannot handle properly. 79 | InvalidMessage(io::Error), 80 | /// Failed to write buffer to writer. 
81 | FailedToWrite(io::Error), 82 | /// Failed to split a writer. 83 | FailedToSplitWriter(transport::Error), 84 | /// Failed to remap uid/gid. 85 | FailedToRemapID((u32, u32)), 86 | } 87 | 88 | impl error::Error for Error {} 89 | 90 | impl fmt::Display for Error { 91 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 92 | use Error::*; 93 | match self { 94 | DecodeMessage(err) => write!(f, "failed to decode fuse message: {err}"), 95 | EncodeMessage(err) => write!(f, "failed to encode fuse message: {err}"), 96 | MissingParameter => write!(f, "one or more parameters are missing"), 97 | InvalidHeaderLength => write!(f, "the `len` field of the header is too small"), 98 | InvalidCString(err) => write!(f, "a c string parameter is invalid: {err}"), 99 | InvalidXattrSize((size, len)) => write!( 100 | f, 101 | "The `size` field of the `SetxattrIn` message does not match the length of the \ 102 | decoded value: size = {size}, value.len() = {len}" 103 | ), 104 | InvalidMessage(err) => write!(f, "cannot process fuse message: {err}"), 105 | FailedToWrite(err) => write!(f, "cannot write to buffer: {err}"), 106 | FailedToSplitWriter(err) => write!(f, "cannot split a writer: {err}"), 107 | FailedToRemapID((uid, gid)) => write!( 108 | f, 109 | "failed to remap the context of user (uid={uid}, gid={gid})." 110 | ), 111 | } 112 | } 113 | } 114 | 115 | /// Result for Fuse related operations. 116 | pub type Result = ::std::result::Result; 117 | 118 | pub mod abi; 119 | pub mod api; 120 | 121 | #[cfg(all(any(feature = "fusedev", feature = "virtiofs"), target_os = "linux"))] 122 | pub mod overlayfs; 123 | #[cfg(all(any(feature = "fusedev", feature = "virtiofs"), target_os = "linux"))] 124 | pub mod passthrough; 125 | pub mod transport; 126 | 127 | pub mod common; 128 | pub use self::common::*; 129 | 130 | /// Convert io::ErrorKind to OS error code. 131 | /// Reference to libstd/sys/unix/mod.rs => decode_error_kind. 
132 | pub fn encode_io_error_kind(kind: ErrorKind) -> i32 { 133 | match kind { 134 | //ErrorKind::ConnectionRefused => libc::ECONNREFUSED, 135 | //ErrorKind::ConnectionReset => libc::ECONNRESET, 136 | ErrorKind::PermissionDenied => libc::EPERM | libc::EACCES, 137 | //ErrorKind::BrokenPipe => libc::EPIPE, 138 | //ErrorKind::NotConnected => libc::ENOTCONN, 139 | //ErrorKind::ConnectionAborted => libc::ECONNABORTED, 140 | //ErrorKind::AddrNotAvailable => libc::EADDRNOTAVAIL, 141 | //ErrorKind::AddrInUse => libc::EADDRINUSE, 142 | ErrorKind::NotFound => libc::ENOENT, 143 | ErrorKind::Interrupted => libc::EINTR, 144 | //ErrorKind::InvalidInput => libc::EINVAL, 145 | //ErrorKind::TimedOut => libc::ETIMEDOUT, 146 | ErrorKind::AlreadyExists => libc::EEXIST, 147 | ErrorKind::WouldBlock => libc::EWOULDBLOCK, 148 | _ => libc::EIO, 149 | } 150 | } 151 | 152 | /// trim all trailing nul terminators. 153 | pub fn bytes_to_cstr(buf: &[u8]) -> Result<&CStr> { 154 | // There might be multiple 0s at the end of buf, find & use the first one and trim other zeros. 155 | match buf.iter().position(|x| *x == 0) { 156 | // Convert to a `CStr` so that we can drop the '\0' byte at the end and make sure 157 | // there are no interior '\0' bytes. 
158 | Some(pos) => CStr::from_bytes_with_nul(&buf[0..=pos]).map_err(Error::InvalidCString), 159 | None => { 160 | // Invalid input, just call CStr::from_bytes_with_nul() for suitable error code 161 | CStr::from_bytes_with_nul(buf).map_err(Error::InvalidCString) 162 | } 163 | } 164 | } 165 | 166 | #[cfg(test)] 167 | mod tests { 168 | use super::*; 169 | 170 | #[test] 171 | fn test_bytes_to_cstr() { 172 | assert_eq!( 173 | bytes_to_cstr(&[0x1u8, 0x2u8, 0x0]).unwrap(), 174 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap() 175 | ); 176 | assert_eq!( 177 | bytes_to_cstr(&[0x1u8, 0x2u8, 0x0, 0x0]).unwrap(), 178 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap() 179 | ); 180 | assert_eq!( 181 | bytes_to_cstr(&[0x1u8, 0x2u8, 0x0, 0x1]).unwrap(), 182 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap() 183 | ); 184 | assert_eq!( 185 | bytes_to_cstr(&[0x1u8, 0x2u8, 0x0, 0x0, 0x1]).unwrap(), 186 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap() 187 | ); 188 | assert_eq!( 189 | bytes_to_cstr(&[0x1u8, 0x2u8, 0x0, 0x1, 0x0]).unwrap(), 190 | CStr::from_bytes_with_nul(&[0x1u8, 0x2u8, 0x0]).unwrap() 191 | ); 192 | 193 | assert_eq!( 194 | bytes_to_cstr(&[0x0u8, 0x2u8, 0x0]).unwrap(), 195 | CStr::from_bytes_with_nul(&[0x0u8]).unwrap() 196 | ); 197 | assert_eq!( 198 | bytes_to_cstr(&[0x0u8, 0x0]).unwrap(), 199 | CStr::from_bytes_with_nul(&[0x0u8]).unwrap() 200 | ); 201 | assert_eq!( 202 | bytes_to_cstr(&[0x0u8]).unwrap(), 203 | CStr::from_bytes_with_nul(&[0x0u8]).unwrap() 204 | ); 205 | 206 | bytes_to_cstr(&[0x1u8]).unwrap_err(); 207 | bytes_to_cstr(&[0x1u8, 0x1]).unwrap_err(); 208 | } 209 | 210 | #[test] 211 | fn test_encode_io_error_kind() { 212 | assert_eq!(encode_io_error_kind(ErrorKind::NotFound), libc::ENOENT); 213 | assert_eq!(encode_io_error_kind(ErrorKind::Interrupted), libc::EINTR); 214 | assert_eq!(encode_io_error_kind(ErrorKind::AlreadyExists), libc::EEXIST); 215 | assert_eq!( 216 | encode_io_error_kind(ErrorKind::WouldBlock), 217 | 
libc::EWOULDBLOCK 218 | ); 219 | assert_eq!(encode_io_error_kind(ErrorKind::TimedOut), libc::EIO); 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /src/overlayfs/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023 Ant Group. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use self::super::CachePolicy; 5 | use std::fmt; 6 | use std::time::Duration; 7 | 8 | #[derive(Default, Clone, Debug)] 9 | pub struct Config { 10 | pub mountpoint: String, 11 | pub work: String, 12 | pub do_import: bool, 13 | // Filesystem options. 14 | pub writeback: bool, 15 | pub no_open: bool, 16 | pub no_opendir: bool, 17 | pub killpriv_v2: bool, 18 | pub no_readdir: bool, 19 | pub perfile_dax: bool, 20 | pub cache_policy: CachePolicy, 21 | pub attr_timeout: Duration, 22 | pub entry_timeout: Duration, 23 | } 24 | 25 | impl Clone for CachePolicy { 26 | fn clone(&self) -> Self { 27 | match *self { 28 | CachePolicy::Never => CachePolicy::Never, 29 | CachePolicy::Always => CachePolicy::Always, 30 | CachePolicy::Auto => CachePolicy::Auto, 31 | } 32 | } 33 | } 34 | 35 | impl fmt::Debug for CachePolicy { 36 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 37 | let policy = match *self { 38 | CachePolicy::Never => "Never", 39 | CachePolicy::Always => "Always", 40 | CachePolicy::Auto => "Auto", 41 | }; 42 | 43 | write!(f, "CachePolicy: {}", policy) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/overlayfs/inode_store.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023 Ant Group. All rights reserved. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::io::{Error, ErrorKind, Result}; 5 | use std::{ 6 | collections::HashMap, 7 | sync::{atomic::Ordering, Arc}, 8 | }; 9 | 10 | use super::{Inode, OverlayInode, VFS_MAX_INO}; 11 | 12 | use radix_trie::Trie; 13 | 14 | pub struct InodeStore { 15 | // Active inodes. 16 | inodes: HashMap>, 17 | // Deleted inodes which were unlinked but have non zero lookup count. 18 | deleted: HashMap>, 19 | // Path to inode mapping, used to reserve inode number for same path. 20 | path_mapping: Trie, 21 | next_inode: u64, 22 | } 23 | 24 | impl InodeStore { 25 | pub(crate) fn new() -> Self { 26 | Self { 27 | inodes: HashMap::new(), 28 | deleted: HashMap::new(), 29 | path_mapping: Trie::new(), 30 | next_inode: 1, 31 | } 32 | } 33 | 34 | pub(crate) fn alloc_unique_inode(&mut self) -> Result { 35 | // Iter VFS_MAX_INO times to find a free inode number. 36 | let mut ino = self.next_inode; 37 | for _ in 0..VFS_MAX_INO { 38 | if ino > VFS_MAX_INO { 39 | ino = 1; 40 | } 41 | if !self.inodes.contains_key(&ino) && !self.deleted.contains_key(&ino) { 42 | self.next_inode = ino + 1; 43 | return Ok(ino); 44 | } 45 | ino += 1; 46 | } 47 | error!("reached maximum inode number: {}", VFS_MAX_INO); 48 | Err(Error::new( 49 | ErrorKind::Other, 50 | format!("maximum inode number {} reached", VFS_MAX_INO), 51 | )) 52 | } 53 | 54 | pub(crate) fn alloc_inode(&mut self, path: &String) -> Result { 55 | match self.path_mapping.get(path) { 56 | // If the path is already in the mapping, return the reserved inode number. 57 | Some(v) => Ok(*v), 58 | // Or allocate a new inode number. 
59 | None => self.alloc_unique_inode(), 60 | } 61 | } 62 | 63 | pub(crate) fn insert_inode(&mut self, inode: Inode, node: Arc) { 64 | self.path_mapping.insert(node.path.clone(), inode); 65 | self.inodes.insert(inode, node); 66 | } 67 | 68 | pub(crate) fn get_inode(&self, inode: Inode) -> Option> { 69 | self.inodes.get(&inode).cloned() 70 | } 71 | 72 | pub(crate) fn get_deleted_inode(&self, inode: Inode) -> Option> { 73 | self.deleted.get(&inode).cloned() 74 | } 75 | 76 | // Return the inode only if it's permanently deleted from both self.inodes and self.deleted_inodes. 77 | pub(crate) fn remove_inode( 78 | &mut self, 79 | inode: Inode, 80 | path_removed: Option, 81 | ) -> Option> { 82 | let removed = match self.inodes.remove(&inode) { 83 | Some(v) => { 84 | // Refcount is not 0, we have to delay the removal. 85 | if v.lookups.load(Ordering::Relaxed) > 0 { 86 | self.deleted.insert(inode, v.clone()); 87 | return None; 88 | } 89 | Some(v) 90 | } 91 | None => { 92 | // If the inode is not in hash, it must be in deleted_inodes. 93 | match self.deleted.get(&inode) { 94 | Some(v) => { 95 | // Refcount is 0, the inode can be removed now. 96 | if v.lookups.load(Ordering::Relaxed) == 0 { 97 | self.deleted.remove(&inode) 98 | } else { 99 | // Refcount is not 0, the inode will be removed later. 100 | None 101 | } 102 | } 103 | None => None, 104 | } 105 | } 106 | }; 107 | 108 | if let Some(path) = path_removed { 109 | self.path_mapping.remove(&path); 110 | } 111 | removed 112 | } 113 | 114 | // As a debug function, print all inode numbers in hash table. 115 | // This function consumes quite lots of memory, so it's disabled by default. 
116 | #[allow(dead_code)] 117 | pub(crate) fn debug_print_all_inodes(&self) { 118 | // Convert the HashMap to Vector<(inode, pathname)> 119 | let mut all_inodes = self 120 | .inodes 121 | .iter() 122 | .map(|(inode, ovi)| (inode, ovi.path.clone(), ovi.lookups.load(Ordering::Relaxed))) 123 | .collect::>(); 124 | all_inodes.sort_by(|a, b| a.0.cmp(b.0)); 125 | trace!("all active inodes: {:?}", all_inodes); 126 | 127 | let mut to_delete = self 128 | .deleted 129 | .iter() 130 | .map(|(inode, ovi)| (inode, ovi.path.clone(), ovi.lookups.load(Ordering::Relaxed))) 131 | .collect::>(); 132 | to_delete.sort_by(|a, b| a.0.cmp(b.0)); 133 | trace!("all deleted inodes: {:?}", to_delete); 134 | } 135 | } 136 | 137 | #[cfg(test)] 138 | mod test { 139 | use super::*; 140 | 141 | #[test] 142 | fn test_alloc_unique() { 143 | let mut store = InodeStore::new(); 144 | let empty_node = Arc::new(OverlayInode::new()); 145 | store.insert_inode(1, empty_node.clone()); 146 | store.insert_inode(2, empty_node.clone()); 147 | store.insert_inode(VFS_MAX_INO - 1, empty_node.clone()); 148 | 149 | let inode = store.alloc_unique_inode().unwrap(); 150 | assert_eq!(inode, 3); 151 | assert_eq!(store.next_inode, 4); 152 | 153 | store.next_inode = VFS_MAX_INO - 1; 154 | let inode = store.alloc_unique_inode().unwrap(); 155 | assert_eq!(inode, VFS_MAX_INO); 156 | 157 | let inode = store.alloc_unique_inode().unwrap(); 158 | assert_eq!(inode, 3); 159 | } 160 | 161 | #[test] 162 | fn test_alloc_existing_path() { 163 | let mut store = InodeStore::new(); 164 | let mut node_a = OverlayInode::new(); 165 | node_a.path = "/a".to_string(); 166 | store.insert_inode(1, Arc::new(node_a)); 167 | let mut node_b = OverlayInode::new(); 168 | node_b.path = "/b".to_string(); 169 | store.insert_inode(2, Arc::new(node_b)); 170 | let mut node_c = OverlayInode::new(); 171 | node_c.path = "/c".to_string(); 172 | store.insert_inode(VFS_MAX_INO - 1, Arc::new(node_c)); 173 | 174 | let inode = 
store.alloc_inode(&"/a".to_string()).unwrap(); 175 | assert_eq!(inode, 1); 176 | 177 | let inode = store.alloc_inode(&"/b".to_string()).unwrap(); 178 | assert_eq!(inode, 2); 179 | 180 | let inode = store.alloc_inode(&"/c".to_string()).unwrap(); 181 | assert_eq!(inode, VFS_MAX_INO - 1); 182 | 183 | let inode = store.alloc_inode(&"/notexist".to_string()).unwrap(); 184 | assert_eq!(inode, 3); 185 | } 186 | 187 | #[test] 188 | fn test_remove_inode() { 189 | let mut store = InodeStore::new(); 190 | let mut node_a = OverlayInode::new(); 191 | node_a.lookups.fetch_add(1, Ordering::Relaxed); 192 | node_a.path = "/a".to_string(); 193 | store.insert_inode(1, Arc::new(node_a)); 194 | 195 | let mut node_b = OverlayInode::new(); 196 | node_b.path = "/b".to_string(); 197 | store.insert_inode(2, Arc::new(node_b)); 198 | 199 | let mut node_c = OverlayInode::new(); 200 | node_c.lookups.fetch_add(1, Ordering::Relaxed); 201 | node_c.path = "/c".to_string(); 202 | store.insert_inode(VFS_MAX_INO - 1, Arc::new(node_c)); 203 | 204 | let inode = store.alloc_inode(&"/new".to_string()).unwrap(); 205 | assert_eq!(inode, 3); 206 | 207 | // Not existing. 208 | let inode = store.remove_inode(4, None); 209 | assert!(inode.is_none()); 210 | 211 | // Existing but with non-zero refcount. 212 | let inode = store.remove_inode(1, None); 213 | assert!(inode.is_none()); 214 | assert!(store.get_deleted_inode(1).is_some()); 215 | assert!(store.path_mapping.get(&"/a".to_string()).is_some()); 216 | 217 | // Remove again with file path. 218 | let inode = store.remove_inode(1, Some("/a".to_string())); 219 | assert!(inode.is_none()); 220 | assert!(store.get_deleted_inode(1).is_some()); 221 | assert!(store.path_mapping.get(&"/a".to_string()).is_none()); 222 | 223 | // Node b has refcount 0, removing will be permanent. 
224 | let inode = store.remove_inode(2, Some("/b".to_string())); 225 | assert!(inode.is_some()); 226 | assert!(store.get_deleted_inode(2).is_none()); 227 | assert!(store.path_mapping.get(&"/b".to_string()).is_none()); 228 | 229 | // Allocate new inode, it should reuse inode 2 since inode 1 is still in deleted list. 230 | store.next_inode = 1; 231 | let inode = store.alloc_inode(&"/b".to_string()).unwrap(); 232 | assert_eq!(inode, 2); 233 | 234 | // Allocate inode with path "/c" will reuse its inode number. 235 | let inode = store.alloc_inode(&"/c".to_string()).unwrap(); 236 | assert_eq!(inode, VFS_MAX_INO - 1); 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /src/overlayfs/utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2023 Ant Group. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::abi::fuse_abi::stat64; 5 | use std::ffi::CString; 6 | use std::io::{self, Error, Result}; 7 | 8 | pub(super) fn is_dir(st: stat64) -> bool { 9 | st.st_mode & libc::S_IFMT == libc::S_IFDIR 10 | } 11 | 12 | pub(super) fn to_cstring(name: &str) -> Result { 13 | CString::new(name).map_err(|e| Error::new(io::ErrorKind::InvalidData, e)) 14 | } 15 | -------------------------------------------------------------------------------- /src/passthrough/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020-2022 Alibaba Cloud. All rights reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::str::FromStr; 5 | use std::time::Duration; 6 | 7 | /// The caching policy that the file system should report to the FUSE client. By default the FUSE 8 | /// protocol uses close-to-open consistency. This means that any cached contents of the file are 9 | /// invalidated the next time that file is opened. 
10 | #[derive(Debug, Default, Clone, Eq, PartialEq)] 11 | pub enum CachePolicy { 12 | /// The client should never cache file data and all I/O should be directly forwarded to the 13 | /// server. This policy must be selected when file contents may change without the knowledge of 14 | /// the FUSE client (i.e., the file system does not have exclusive access to the directory). 15 | Never, 16 | 17 | /// This is almost same as Never, but it allows page cache of directories, dentries and attr 18 | /// cache in guest. In other words, it acts like cache=never for normal files, and like 19 | /// cache=always for directories, besides, metadata like dentries and attrs are kept as well. 20 | /// This policy can be used if: 21 | /// 1. the client wants to use Never policy but it's performance in I/O is not good enough 22 | /// 2. the file system has exclusive access to the directory 23 | /// 3. cache directory content and other fs metadata can make a difference on performance. 24 | Metadata, 25 | 26 | /// The client is free to choose when and how to cache file data. This is the default policy and 27 | /// uses close-to-open consistency as described in the enum documentation. 28 | #[default] 29 | Auto, 30 | 31 | /// The client should always cache file data. This means that the FUSE client will not 32 | /// invalidate any cached data that was returned by the file system the last time the file was 33 | /// opened. This policy should only be selected when the file system has exclusive access to the 34 | /// directory. 
35 | Always, 36 | } 37 | 38 | impl FromStr for CachePolicy { 39 | type Err = &'static str; 40 | 41 | fn from_str(s: &str) -> Result { 42 | match s { 43 | "never" | "Never" | "NEVER" | "none" | "None" | "NONE" => Ok(CachePolicy::Never), 44 | "metadata" => Ok(CachePolicy::Metadata), 45 | "auto" | "Auto" | "AUTO" => Ok(CachePolicy::Auto), 46 | "always" | "Always" | "ALWAYS" => Ok(CachePolicy::Always), 47 | _ => Err("invalid cache policy"), 48 | } 49 | } 50 | } 51 | 52 | /// Options that configure the behavior of the passthrough fuse file system. 53 | #[derive(Debug, Clone, Eq, PartialEq)] 54 | pub struct Config { 55 | /// How long the FUSE client should consider file and directory attributes to be valid. If the 56 | /// attributes of a file or directory can only be modified by the FUSE client (i.e., the file 57 | /// system has exclusive access), then this should be set to a large value. 58 | /// 59 | /// The default value for this option is 5 seconds. 60 | pub attr_timeout: Duration, 61 | 62 | /// How long the FUSE client should consider directory entries to be valid. If the contents of a 63 | /// directory can only be modified by the FUSE client (i.e., the file system has exclusive 64 | /// access), then this should be a large value. 65 | /// 66 | /// The default value for this option is 5 seconds. 67 | pub entry_timeout: Duration, 68 | 69 | /// Same as `attr_timeout`, override `attr_timeout` config, but only take effect on directories 70 | /// when specified. This is useful to set different timeouts for directories and regular files. 71 | pub dir_attr_timeout: Option, 72 | 73 | /// Same as `entry_timeout`, override `entry_timeout` config, but only take effect on 74 | /// directories when specified. This is useful to set different timeouts for directories and 75 | /// regular files. 76 | pub dir_entry_timeout: Option, 77 | 78 | /// The caching policy the file system should use. See the documentation of `CachePolicy` for 79 | /// more details. 
80 | pub cache_policy: CachePolicy, 81 | 82 | /// Whether the file system should enable writeback caching. This can improve performance as it 83 | /// allows the FUSE client to cache and coalesce multiple writes before sending them to the file 84 | /// system. However, enabling this option can increase the risk of data corruption if the file 85 | /// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT** 86 | /// have exclusive access). Additionally, the file system should have read access to all files 87 | /// in the directory it is serving as the FUSE client may send read requests even for files 88 | /// opened with `O_WRONLY`. 89 | /// 90 | /// Therefore callers should only enable this option when they can guarantee that: 1) the file 91 | /// system has exclusive access to the directory and 2) the file system has read permissions for 92 | /// all files in that directory. 93 | /// 94 | /// The default value for this option is `false`. 95 | pub writeback: bool, 96 | 97 | /// The path of the root directory. 98 | /// 99 | /// The default is `/`. 100 | pub root_dir: String, 101 | 102 | /// Whether the file system should support Extended Attributes (xattr). Enabling this feature may 103 | /// have a significant impact on performance, especially on write parallelism. This is the result 104 | /// of FUSE attempting to remove the special file privileges after each write request. 105 | /// 106 | /// The default value for this options is `false`. 107 | pub xattr: bool, 108 | 109 | /// To be compatible with Vfs and PseudoFs, PassthroughFs needs to prepare 110 | /// root inode before accepting INIT request. 111 | /// 112 | /// The default value for this option is `true`. 113 | pub do_import: bool, 114 | 115 | /// Control whether no_open is allowed. 116 | /// 117 | /// The default value for this option is `false`. 118 | pub no_open: bool, 119 | 120 | /// Control whether no_opendir is allowed. 
121 | /// 122 | /// The default value for this option is `false`. 123 | pub no_opendir: bool, 124 | 125 | /// Control whether kill_priv_v2 is enabled. 126 | /// 127 | /// The default value for this option is `false`. 128 | pub killpriv_v2: bool, 129 | 130 | /// Whether to use file handles to reference inodes. We need to be able to open file 131 | /// descriptors for arbitrary inodes, and by default that is done by storing an `O_PATH` FD in 132 | /// `InodeData`. Not least because there is a maximum number of FDs a process can have open 133 | /// users may find it preferable to store a file handle instead, which we can use to open an FD 134 | /// when necessary. 135 | /// So this switch allows to choose between the alternatives: When set to `false`, `InodeData` 136 | /// will store `O_PATH` FDs. Otherwise, we will attempt to generate and store a file handle 137 | /// instead. 138 | /// 139 | /// The default is `false`. 140 | pub inode_file_handles: bool, 141 | 142 | /// Control whether readdir/readdirplus requests return zero dirent to client, as if the 143 | /// directory is empty even if it has children. 144 | pub no_readdir: bool, 145 | 146 | /// Control whether to refuse operations which modify the size of the file. For a share memory 147 | /// file mounted from host, seal_size can prohibit guest to increase the size of 148 | /// share memory file to attack the host. 149 | pub seal_size: bool, 150 | 151 | /// Whether count mount ID or not when comparing two inodes. By default we think two inodes 152 | /// are same if their inode number and st_dev are the same. When `enable_mntid` is set as 153 | /// 'true', inode's mount ID will be taken into account as well. For example, bindmount the 154 | /// same file into virtiofs' source dir, the two bindmounted files will be identified as two 155 | /// different inodes when this option is true, so the don't share pagecache. 156 | /// 157 | /// The default value for this option is `false`. 
158 | pub enable_mntid: bool, 159 | 160 | /// What size file supports dax 161 | /// * If dax_file_size == None, DAX will disable to all files. 162 | /// * If dax_file_size == 0, DAX will enable all files. 163 | /// * If dax_file_size == N, DAX will enable only when the file size is greater than or equal 164 | /// to N Bytes. 165 | pub dax_file_size: Option, 166 | 167 | /// Reduce memory consumption by directly use host inode when possible. 168 | /// 169 | /// When set to false, a virtual inode number will be allocated for each file managed by 170 | /// the passthroughfs driver. A map is used to maintain the relationship between virtual 171 | /// inode numbers and host file objects. 172 | /// When set to true, the host inode number will be directly used as virtual inode number 173 | /// if it's less than the threshold (1 << 47), so reduce memory consumed by the map. 174 | /// A virtual inode number will still be allocated and maintained if the host inode number 175 | /// is bigger than the threshold. 176 | /// The default value for this option is `false`. 177 | pub use_host_ino: bool, 178 | 179 | /// Whether the file system should honor the O_DIRECT flag. If this option is disabled, 180 | /// that flag will be filtered out at `open_inode`. 181 | /// 182 | /// The default is `true`. 
183 | pub allow_direct_io: bool, 184 | } 185 | 186 | impl Default for Config { 187 | fn default() -> Self { 188 | Config { 189 | entry_timeout: Duration::from_secs(5), 190 | attr_timeout: Duration::from_secs(5), 191 | cache_policy: Default::default(), 192 | writeback: false, 193 | root_dir: String::from("/"), 194 | xattr: false, 195 | do_import: true, 196 | no_open: false, 197 | no_opendir: false, 198 | killpriv_v2: false, 199 | inode_file_handles: false, 200 | no_readdir: false, 201 | seal_size: false, 202 | enable_mntid: false, 203 | dax_file_size: None, 204 | dir_entry_timeout: None, 205 | dir_attr_timeout: None, 206 | use_host_ino: false, 207 | allow_direct_io: true, 208 | } 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /src/passthrough/credentials.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: BSD-3-Clause 2 | 3 | use crate::oslib; 4 | use crate::passthrough::util::einval; 5 | use std::io; 6 | 7 | pub struct UnixCredentials { 8 | uid: libc::uid_t, 9 | gid: libc::gid_t, 10 | sup_gid: Option, 11 | keep_capability: bool, 12 | } 13 | 14 | impl UnixCredentials { 15 | pub fn new(uid: libc::uid_t, gid: libc::gid_t) -> Self { 16 | UnixCredentials { 17 | uid, 18 | gid, 19 | sup_gid: None, 20 | keep_capability: false, 21 | } 22 | } 23 | 24 | /// Set a supplementary group. Set `supported_extension` to `false` to signal that a 25 | /// supplementary group maybe required, but the guest was not able to tell us which, 26 | /// so we have to rely on keeping the DAC_OVERRIDE capability. 27 | pub fn supplementary_gid(self, supported_extension: bool, sup_gid: Option) -> Self { 28 | UnixCredentials { 29 | uid: self.uid, 30 | gid: self.gid, 31 | sup_gid, 32 | keep_capability: !supported_extension, 33 | } 34 | } 35 | 36 | /// Changes the effective uid/gid of the current thread to `val`. 
Changes 37 | /// the thread's credentials back to root when the returned struct is dropped. 38 | pub fn set(self) -> io::Result> { 39 | let change_uid = self.uid != 0; 40 | let change_gid = self.gid != 0; 41 | 42 | // We have to change the gid before we change the uid because if we 43 | // change the uid first then we lose the capability to change the gid. 44 | // However changing back can happen in any order. 45 | if let Some(sup_gid) = self.sup_gid { 46 | oslib::setsupgroup(sup_gid)?; 47 | } 48 | 49 | if change_gid { 50 | oslib::seteffgid(self.gid)?; 51 | } 52 | 53 | if change_uid { 54 | oslib::seteffuid(self.uid)?; 55 | } 56 | 57 | if change_uid && self.keep_capability { 58 | // Before kernel 6.3, we don't have access to process supplementary groups. 59 | // To work around this we can set the `DAC_OVERRIDE` in the effective set. 60 | // We are allowed to set the capability because we only change the effective 61 | // user ID, so we still have the 'DAC_OVERRIDE' in the permitted set. 62 | // After switching back to root the permitted set is copied to the effective set, 63 | // so no additional steps are required. 
64 | if let Err(e) = crate::util::add_cap_to_eff("DAC_OVERRIDE") { 65 | warn!("failed to add 'DAC_OVERRIDE' to the effective set of capabilities: {e}"); 66 | } 67 | } 68 | 69 | if !change_uid && !change_gid { 70 | return Ok(None); 71 | } 72 | 73 | Ok(Some(UnixCredentialsGuard { 74 | reset_uid: change_uid, 75 | reset_gid: change_gid, 76 | drop_sup_gid: self.sup_gid.is_some(), 77 | })) 78 | } 79 | } 80 | 81 | pub struct UnixCredentialsGuard { 82 | reset_uid: bool, 83 | reset_gid: bool, 84 | drop_sup_gid: bool, 85 | } 86 | 87 | impl Drop for UnixCredentialsGuard { 88 | fn drop(&mut self) { 89 | if self.reset_uid { 90 | oslib::seteffuid(0).unwrap_or_else(|e| { 91 | error!("failed to change uid back to root: {e}"); 92 | }); 93 | } 94 | 95 | if self.reset_gid { 96 | oslib::seteffgid(0).unwrap_or_else(|e| { 97 | error!("failed to change gid back to root: {e}"); 98 | }); 99 | } 100 | 101 | if self.drop_sup_gid { 102 | oslib::dropsupgroups().unwrap_or_else(|e| { 103 | error!("failed to drop supplementary groups: {e}"); 104 | }); 105 | } 106 | } 107 | } 108 | 109 | pub struct ScopedCaps { 110 | cap: capng::Capability, 111 | } 112 | 113 | impl ScopedCaps { 114 | fn new(cap_name: &str) -> io::Result> { 115 | use capng::{Action, CUpdate, Set, Type}; 116 | 117 | let cap = capng::name_to_capability(cap_name).map_err(|_| { 118 | let err = io::Error::last_os_error(); 119 | error!( 120 | "couldn't get the capability id for name {}: {:?}", 121 | cap_name, err 122 | ); 123 | err 124 | })?; 125 | 126 | if capng::have_capability(Type::EFFECTIVE, cap) { 127 | let req = vec![CUpdate { 128 | action: Action::DROP, 129 | cap_type: Type::EFFECTIVE, 130 | capability: cap, 131 | }]; 132 | capng::update(req).map_err(|e| { 133 | error!("couldn't drop {} capability: {:?}", cap, e); 134 | einval() 135 | })?; 136 | capng::apply(Set::CAPS).map_err(|e| { 137 | error!( 138 | "couldn't apply capabilities after dropping {}: {:?}", 139 | cap, e 140 | ); 141 | einval() 142 | })?; 143 | Ok(Some(Self { cap 
})) 144 | } else { 145 | Ok(None) 146 | } 147 | } 148 | } 149 | 150 | impl Drop for ScopedCaps { 151 | fn drop(&mut self) { 152 | use capng::{Action, CUpdate, Set, Type}; 153 | 154 | let req = vec![CUpdate { 155 | action: Action::ADD, 156 | cap_type: Type::EFFECTIVE, 157 | capability: self.cap, 158 | }]; 159 | 160 | if let Err(e) = capng::update(req) { 161 | panic!("couldn't restore {} capability: {:?}", self.cap, e); 162 | } 163 | if let Err(e) = capng::apply(Set::CAPS) { 164 | panic!( 165 | "couldn't apply capabilities after restoring {}: {:?}", 166 | self.cap, e 167 | ); 168 | } 169 | } 170 | } 171 | 172 | pub fn drop_effective_cap(cap_name: &str) -> io::Result> { 173 | ScopedCaps::new(cap_name) 174 | } 175 | -------------------------------------------------------------------------------- /src/passthrough/inode_store.rs: -------------------------------------------------------------------------------- 1 | // Use of this source code is governed by a BSD-style license that can be 2 | // found in the LICENSE-BSD-3-Clause file. 3 | 4 | use std::collections::BTreeMap; 5 | use std::sync::Arc; 6 | 7 | use super::file_handle::FileHandle; 8 | use super::statx::StatExt; 9 | use super::{Inode, InodeData, InodeHandle}; 10 | 11 | #[derive(Clone, Copy, Default, PartialOrd, Ord, PartialEq, Eq, Debug)] 12 | /// Identify an inode in `PassthroughFs` by `InodeId`. 
13 | pub struct InodeId { 14 | pub ino: libc::ino64_t, 15 | pub dev: libc::dev_t, 16 | pub mnt: u64, 17 | } 18 | 19 | impl InodeId { 20 | #[inline] 21 | pub(super) fn from_stat(st: &StatExt) -> Self { 22 | InodeId { 23 | ino: st.st.st_ino, 24 | dev: st.st.st_dev, 25 | mnt: st.mnt_id, 26 | } 27 | } 28 | } 29 | 30 | #[derive(Default)] 31 | pub struct InodeStore { 32 | data: BTreeMap>, 33 | by_id: BTreeMap, 34 | by_handle: BTreeMap, Inode>, 35 | } 36 | 37 | impl InodeStore { 38 | /// Insert an inode into the manager 39 | /// 40 | /// The caller needs to ensure that no inode with the same key exists, otherwise the old inode 41 | /// will get lost. 42 | pub fn insert(&mut self, data: Arc) { 43 | self.by_id.insert(data.id, data.inode); 44 | if let InodeHandle::Handle(handle) = &data.handle { 45 | self.by_handle 46 | .insert(handle.file_handle().clone(), data.inode); 47 | } 48 | self.data.insert(data.inode, data); 49 | } 50 | 51 | /// Remove an inode from the manager, keeping the (key, ino) mapping if `remove_data_only` is true. 
52 | pub fn remove(&mut self, inode: &Inode, remove_data_only: bool) -> Option> { 53 | let data = self.data.remove(inode); 54 | if remove_data_only { 55 | // Don't remove by_id and by_handle, we need use it to store inode 56 | // record the mapping of inodes using these two structures to ensure 57 | // that the same files always use the same inode 58 | return data; 59 | } 60 | 61 | if let Some(data) = data.as_ref() { 62 | if let InodeHandle::Handle(handle) = &data.handle { 63 | self.by_handle.remove(handle.file_handle()); 64 | } 65 | self.by_id.remove(&data.id); 66 | } 67 | data 68 | } 69 | 70 | pub fn clear(&mut self) { 71 | self.data.clear(); 72 | self.by_handle.clear(); 73 | self.by_id.clear(); 74 | } 75 | 76 | pub fn get(&self, inode: &Inode) -> Option<&Arc> { 77 | self.data.get(inode) 78 | } 79 | 80 | pub fn get_by_id(&self, id: &InodeId) -> Option<&Arc> { 81 | let inode = self.inode_by_id(id)?; 82 | self.get(inode) 83 | } 84 | 85 | pub fn get_by_handle(&self, handle: &FileHandle) -> Option<&Arc> { 86 | let inode = self.inode_by_handle(handle)?; 87 | self.get(inode) 88 | } 89 | 90 | pub fn inode_by_id(&self, id: &InodeId) -> Option<&Inode> { 91 | self.by_id.get(id) 92 | } 93 | 94 | pub fn inode_by_handle(&self, handle: &FileHandle) -> Option<&Inode> { 95 | self.by_handle.get(handle) 96 | } 97 | } 98 | 99 | #[cfg(test)] 100 | mod test { 101 | use super::super::*; 102 | use super::*; 103 | 104 | use std::ffi::CStr; 105 | use std::mem::MaybeUninit; 106 | use std::os::unix::io::AsRawFd; 107 | use std::sync::atomic::Ordering; 108 | use vmm_sys_util::tempfile::TempFile; 109 | 110 | impl PartialEq for InodeData { 111 | fn eq(&self, other: &Self) -> bool { 112 | if self.inode != other.inode 113 | || self.id != other.id 114 | || self.mode != other.mode 115 | || self.refcount.load(Ordering::Relaxed) != other.refcount.load(Ordering::Relaxed) 116 | { 117 | return false; 118 | } 119 | 120 | match (&self.handle, &other.handle) { 121 | (InodeHandle::File(f1), 
InodeHandle::File(f2)) => f1.as_raw_fd() == f2.as_raw_fd(), 122 | (InodeHandle::Handle(h1), InodeHandle::Handle(h2)) => { 123 | h1.file_handle() == h2.file_handle() 124 | } 125 | _ => false, 126 | } 127 | } 128 | } 129 | 130 | fn stat_fd(fd: &impl AsRawFd) -> io::Result { 131 | let mut st = MaybeUninit::::zeroed(); 132 | let null_path = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") }; 133 | 134 | // Safe because the kernel will only write data in `st` and we check the return value. 135 | let res = unsafe { 136 | libc::fstatat64( 137 | fd.as_raw_fd(), 138 | null_path.as_ptr(), 139 | st.as_mut_ptr(), 140 | libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW, 141 | ) 142 | }; 143 | if res >= 0 { 144 | // Safe because the kernel guarantees that the struct is now fully initialized. 145 | Ok(unsafe { st.assume_init() }) 146 | } else { 147 | Err(io::Error::last_os_error()) 148 | } 149 | } 150 | 151 | #[test] 152 | fn test_inode_store() { 153 | let mut m = InodeStore::default(); 154 | let tmpfile1 = TempFile::new().unwrap(); 155 | let tmpfile2 = TempFile::new().unwrap(); 156 | 157 | let inode1: Inode = 3; 158 | let inode2: Inode = 4; 159 | let inode_stat1 = StatExt { 160 | st: stat_fd(tmpfile1.as_file()).unwrap(), 161 | mnt_id: 0, 162 | }; 163 | let inode_stat2 = StatExt { 164 | st: stat_fd(tmpfile2.as_file()).unwrap(), 165 | mnt_id: 0, 166 | }; 167 | let id1 = InodeId::from_stat(&inode_stat1); 168 | let id2 = InodeId::from_stat(&inode_stat2); 169 | let file_or_handle1 = InodeHandle::File(tmpfile1.into_file()); 170 | let file_or_handle2 = InodeHandle::File(tmpfile2.into_file()); 171 | let data1 = InodeData::new(inode1, file_or_handle1, 2, id1, inode_stat1.st.st_mode); 172 | let data2 = InodeData::new(inode2, file_or_handle2, 2, id2, inode_stat2.st.st_mode); 173 | let data1 = Arc::new(data1); 174 | let data2 = Arc::new(data2); 175 | 176 | m.insert(data1.clone()); 177 | 178 | // get not present key, expect none 179 | assert!(m.get(&1).is_none()); 180 | 181 | // get just 
inserted value by key, by id, by handle 182 | assert!(m.get_by_id(&InodeId::default()).is_none()); 183 | assert!(m.get_by_handle(&FileHandle::default()).is_none()); 184 | assert_eq!(m.get(&inode1).unwrap(), &data1); 185 | assert_eq!(m.get_by_id(&id1).unwrap(), &data1); 186 | 187 | // insert another value, and check again 188 | m.insert(data2.clone()); 189 | assert!(m.get(&1).is_none()); 190 | assert!(m.get_by_id(&InodeId::default()).is_none()); 191 | assert!(m.get_by_handle(&FileHandle::default()).is_none()); 192 | assert_eq!(m.get(&inode1).unwrap(), &data1); 193 | assert_eq!(m.get_by_id(&id1).unwrap(), &data1); 194 | assert_eq!(m.get(&inode2).unwrap(), &data2); 195 | assert_eq!(m.get_by_id(&id2).unwrap(), &data2); 196 | 197 | // remove non-present key 198 | assert!(m.remove(&1, false).is_none()); 199 | 200 | // remove present key, return its value 201 | assert_eq!(m.remove(&inode1, false).unwrap(), data1.clone()); 202 | assert!(m.get(&inode1).is_none()); 203 | assert!(m.get_by_id(&id1).is_none()); 204 | assert_eq!(m.get(&inode2).unwrap(), &data2); 205 | assert_eq!(m.get_by_id(&id2).unwrap(), &data2); 206 | 207 | // clear the map 208 | m.clear(); 209 | assert!(m.get(&1).is_none()); 210 | assert!(m.get_by_id(&InodeId::default()).is_none()); 211 | assert!(m.get_by_handle(&FileHandle::default()).is_none()); 212 | assert!(m.get(&inode1).is_none()); 213 | assert!(m.get_by_id(&id1).is_none()); 214 | assert!(m.get(&inode2).is_none()); 215 | assert!(m.get_by_id(&id2).is_none()); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /src/passthrough/os_compat.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2020-2022 Alibaba Cloud. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE-BSD-3-Clause file. 
4 | // SPDX-License-Identifier: Apache-2.0 5 | 6 | use vm_memory::ByteValued; 7 | 8 | #[repr(C, packed)] 9 | #[derive(Clone, Copy, Debug, Default)] 10 | pub struct LinuxDirent64 { 11 | pub d_ino: libc::ino64_t, 12 | pub d_off: libc::off64_t, 13 | pub d_reclen: libc::c_ushort, 14 | pub d_ty: libc::c_uchar, 15 | } 16 | unsafe impl ByteValued for LinuxDirent64 {} 17 | 18 | #[cfg(target_env = "gnu")] 19 | pub use libc::statx as statx_st; 20 | 21 | #[cfg(target_env = "gnu")] 22 | pub use libc::{STATX_BASIC_STATS, STATX_MNT_ID}; 23 | 24 | // musl provides the 'struct statx', but without stx_mnt_id. 25 | // However, the libc crate does not provide libc::statx 26 | // if musl is used. So we add just the required struct and 27 | // constants to make it works. 28 | #[cfg(not(target_env = "gnu"))] 29 | #[repr(C)] 30 | pub struct statx_st_timestamp { 31 | pub tv_sec: i64, 32 | pub tv_nsec: u32, 33 | pub __statx_timestamp_pad1: [i32; 1], 34 | } 35 | 36 | #[cfg(not(target_env = "gnu"))] 37 | #[repr(C)] 38 | pub struct statx_st { 39 | pub stx_mask: u32, 40 | pub stx_blksize: u32, 41 | pub stx_attributes: u64, 42 | pub stx_nlink: u32, 43 | pub stx_uid: u32, 44 | pub stx_gid: u32, 45 | pub stx_mode: u16, 46 | __statx_pad1: [u16; 1], 47 | pub stx_ino: u64, 48 | pub stx_size: u64, 49 | pub stx_blocks: u64, 50 | pub stx_attributes_mask: u64, 51 | pub stx_atime: statx_st_timestamp, 52 | pub stx_btime: statx_st_timestamp, 53 | pub stx_ctime: statx_st_timestamp, 54 | pub stx_mtime: statx_st_timestamp, 55 | pub stx_rdev_major: u32, 56 | pub stx_rdev_minor: u32, 57 | pub stx_dev_major: u32, 58 | pub stx_dev_minor: u32, 59 | pub stx_mnt_id: u64, 60 | __statx_pad2: u64, 61 | __statx_pad3: [u64; 12], 62 | } 63 | 64 | #[cfg(not(target_env = "gnu"))] 65 | pub const STATX_BASIC_STATS: libc::c_uint = 0x07ff; 66 | 67 | #[cfg(not(target_env = "gnu"))] 68 | pub const STATX_MNT_ID: libc::c_uint = 0x1000; 69 | -------------------------------------------------------------------------------- 
/src/passthrough/overlay.rs: -------------------------------------------------------------------------------- 1 | // Use of this source code is governed by a BSD-style license that can be 2 | // found in the LICENSE-BSD-3-Clause file. 3 | 4 | use super::PassthroughFs; 5 | use crate::abi::fuse_abi; 6 | use crate::api::filesystem::Layer; 7 | 8 | // Implment Layer trait for PassthroughFs. 9 | impl Layer for PassthroughFs { 10 | // Return root inode of this layer. 11 | fn root_inode(&self) -> Self::Inode { 12 | fuse_abi::ROOT_ID 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/passthrough/statx.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Red Hat, Inc. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. 4 | 5 | use std::ffi::CStr; 6 | use std::io; 7 | use std::mem::MaybeUninit; 8 | use std::os::unix::io::AsRawFd; 9 | 10 | use super::os_compat::{statx_st, STATX_BASIC_STATS, STATX_MNT_ID}; 11 | use super::FileHandle; 12 | use crate::api::EMPTY_CSTR; 13 | 14 | pub type MountId = u64; 15 | 16 | pub struct StatExt { 17 | pub st: libc::stat64, 18 | pub mnt_id: MountId, 19 | } 20 | 21 | /* 22 | * Fields in libc::statx are only valid if their respective flag in 23 | * .stx_mask is set. This trait provides functions that allow safe 24 | * access to the libc::statx components we are interested in. 25 | * 26 | * (The implementations of these functions need to check whether the 27 | * associated flag is set, and then extract the respective information 28 | * to return it.) 
29 | */ 30 | trait SafeStatXAccess { 31 | fn stat64(&self) -> Option; 32 | fn mount_id(&self) -> Option; 33 | } 34 | 35 | impl SafeStatXAccess for statx_st { 36 | fn stat64(&self) -> Option { 37 | fn makedev(maj: libc::c_uint, min: libc::c_uint) -> libc::dev_t { 38 | libc::makedev(maj, min) 39 | } 40 | 41 | if self.stx_mask & STATX_BASIC_STATS != 0 { 42 | /* 43 | * Unfortunately, we cannot use an initializer to create the 44 | * stat64 object, because it may contain padding and reserved 45 | * fields (depending on the architecture), and it does not 46 | * implement the Default trait. 47 | * So we take a zeroed struct and set what we can. 48 | * (Zero in all fields is wrong, but safe.) 49 | */ 50 | let mut st = unsafe { MaybeUninit::::zeroed().assume_init() }; 51 | 52 | st.st_dev = makedev(self.stx_dev_major, self.stx_dev_minor); 53 | st.st_ino = self.stx_ino; 54 | st.st_mode = self.stx_mode as _; 55 | st.st_nlink = self.stx_nlink as _; 56 | st.st_uid = self.stx_uid; 57 | st.st_gid = self.stx_gid; 58 | st.st_rdev = makedev(self.stx_rdev_major, self.stx_rdev_minor); 59 | st.st_size = self.stx_size as _; 60 | st.st_blksize = self.stx_blksize as _; 61 | st.st_blocks = self.stx_blocks as _; 62 | st.st_atime = self.stx_atime.tv_sec; 63 | st.st_atime_nsec = self.stx_atime.tv_nsec as _; 64 | st.st_mtime = self.stx_mtime.tv_sec; 65 | st.st_mtime_nsec = self.stx_mtime.tv_nsec as _; 66 | st.st_ctime = self.stx_ctime.tv_sec; 67 | st.st_ctime_nsec = self.stx_ctime.tv_nsec as _; 68 | 69 | Some(st) 70 | } else { 71 | None 72 | } 73 | } 74 | 75 | fn mount_id(&self) -> Option { 76 | if self.stx_mask & STATX_MNT_ID != 0 { 77 | Some(self.stx_mnt_id) 78 | } else { 79 | None 80 | } 81 | } 82 | } 83 | 84 | fn get_mount_id(dir: &impl AsRawFd, path: &CStr) -> Option { 85 | match FileHandle::from_name_at(dir, path) { 86 | Ok(Some(v)) => Some(v.mnt_id), 87 | _ => None, 88 | } 89 | } 90 | 91 | // Only works on Linux, and libc::SYS_statx is only defined for these 92 | // environments 93 | /// 
Performs a statx() syscall. libc provides libc::statx() that does 94 | /// the same, however, the system's libc may not have a statx() wrapper 95 | /// (e.g. glibc before 2.28), so linking to it may fail. 96 | /// libc::syscall() and libc::SYS_statx are always present, though, so 97 | /// we can safely rely on them. 98 | unsafe fn do_statx( 99 | dirfd: libc::c_int, 100 | pathname: *const libc::c_char, 101 | flags: libc::c_int, 102 | mask: libc::c_uint, 103 | statxbuf: *mut statx_st, 104 | ) -> libc::c_int { 105 | libc::syscall(libc::SYS_statx, dirfd, pathname, flags, mask, statxbuf) as libc::c_int 106 | } 107 | 108 | /// Execute `statx()` to get extended status with mount id. 109 | pub fn statx(dir: &impl AsRawFd, path: Option<&CStr>) -> io::Result { 110 | let mut stx_ui = MaybeUninit::::zeroed(); 111 | 112 | // Safe because this is a constant value and a valid C string. 113 | let path = path.unwrap_or_else(|| unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) }); 114 | 115 | // Safe because the kernel will only write data in `stx_ui` and we 116 | // check the return value. 
117 | let res = unsafe { 118 | do_statx( 119 | dir.as_raw_fd(), 120 | path.as_ptr(), 121 | libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW, 122 | STATX_BASIC_STATS | STATX_MNT_ID, 123 | stx_ui.as_mut_ptr(), 124 | ) 125 | }; 126 | if res >= 0 { 127 | // Safe because we are only going to use the SafeStatXAccess 128 | // trait methods 129 | let stx = unsafe { stx_ui.assume_init() }; 130 | 131 | // if `statx()` doesn't provide the mount id (before kernel 5.8), 132 | // let's try `name_to_handle_at()`, if everything fails just use 0 133 | let mnt_id = stx 134 | .mount_id() 135 | .or_else(|| get_mount_id(dir, path)) 136 | .unwrap_or(0); 137 | let st = stx 138 | .stat64() 139 | .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOSYS))?; 140 | 141 | Ok(StatExt { st, mnt_id }) 142 | } else { 143 | Err(io::Error::last_os_error()) 144 | } 145 | } 146 | 147 | #[cfg(test)] 148 | mod tests { 149 | use super::*; 150 | use std::ffi::CString; 151 | use std::fs::File; 152 | 153 | #[test] 154 | fn test_statx() { 155 | let topdir = env!("CARGO_MANIFEST_DIR"); 156 | let dir = File::open(topdir).unwrap(); 157 | let filename = CString::new("build.rs").unwrap(); 158 | 159 | let st1 = statx(&dir, None).unwrap(); 160 | let st2 = statx(&dir, Some(&filename)).unwrap(); 161 | let mnt_id = get_mount_id(&dir, &filename).unwrap(); 162 | 163 | assert_eq!(st1.mnt_id, st2.mnt_id); 164 | assert_eq!(st1.mnt_id, mnt_id); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/transport/fs_cache_req_handler.rs: -------------------------------------------------------------------------------- 1 | // Copyright © 2019 Intel Corporation 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #[cfg(not(feature = "virtiofs"))] 6 | /// Fake trait to simplify implementation when vhost-user-fs is not used. 
7 | pub trait FsCacheReqHandler {} 8 | 9 | #[cfg(feature = "virtiofs")] 10 | pub use virtiofs::FsCacheReqHandler; 11 | 12 | #[cfg(feature = "virtiofs")] 13 | mod virtiofs { 14 | use std::io; 15 | use std::os::unix::io::RawFd; 16 | 17 | #[cfg(feature = "vhost-user-fs")] 18 | use vhost::vhost_user::message::{ 19 | VhostUserFSBackendMsg, VhostUserFSBackendMsgFlags, VHOST_USER_FS_BACKEND_ENTRIES, 20 | }; 21 | #[cfg(feature = "vhost-user-fs")] 22 | use vhost::vhost_user::{Backend, VhostUserFrontendReqHandler}; 23 | 24 | use crate::abi::virtio_fs::RemovemappingOne; 25 | #[cfg(feature = "vhost-user-fs")] 26 | use crate::abi::virtio_fs::SetupmappingFlags; 27 | 28 | /// Trait to support virtio-fs DAX Window operations. 29 | /// 30 | /// The virtio-fs DAX Window allows bypassing guest page cache and allows mapping host 31 | /// page cache directly in guest address space. 32 | /// 33 | /// When a page of file is needed, guest sends a request to map that page (in host page cache) 34 | /// in VMM address space. Inside guest this is a physical memory range controlled by virtiofs 35 | /// device. And guest directly maps this physical address range using DAX and hence getsi 36 | /// access to file data on host. 37 | /// 38 | /// This can speed up things considerably in many situations. Also this can result in 39 | /// substantial memory savings as file data does not have to be copied in guest and it is 40 | /// directly accessed from host page cache. 41 | pub trait FsCacheReqHandler: Send + Sync + 'static { 42 | /// Setup a dedicated mapping so that guest can access file data in DAX style. 43 | fn map( 44 | &mut self, 45 | foffset: u64, 46 | moffset: u64, 47 | len: u64, 48 | flags: u64, 49 | fd: RawFd, 50 | ) -> io::Result<()>; 51 | 52 | /// Remove those mappings that provide the access to file data. 
53 | fn unmap(&mut self, requests: Vec) -> io::Result<()>; 54 | } 55 | 56 | #[cfg(feature = "vhost-user-fs")] 57 | impl FsCacheReqHandler for Backend { 58 | fn map( 59 | &mut self, 60 | foffset: u64, 61 | moffset: u64, 62 | len: u64, 63 | flags: u64, 64 | fd: RawFd, 65 | ) -> io::Result<()> { 66 | let mut msg: VhostUserFSBackendMsg = Default::default(); 67 | msg.fd_offset[0] = foffset; 68 | msg.cache_offset[0] = moffset; 69 | msg.len[0] = len; 70 | msg.flags[0] = if (flags & SetupmappingFlags::WRITE.bits()) != 0 { 71 | VhostUserFSBackendMsgFlags::MAP_W | VhostUserFSBackendMsgFlags::MAP_R 72 | } else { 73 | VhostUserFSBackendMsgFlags::MAP_R 74 | }; 75 | 76 | self.fs_backend_map(&msg, &fd)?; 77 | 78 | Ok(()) 79 | } 80 | 81 | fn unmap(&mut self, requests: Vec) -> io::Result<()> { 82 | for chunk in requests.chunks(VHOST_USER_FS_BACKEND_ENTRIES) { 83 | let mut msg: VhostUserFSBackendMsg = Default::default(); 84 | 85 | for (ind, req) in chunk.iter().enumerate() { 86 | msg.len[ind] = req.len; 87 | msg.cache_offset[ind] = req.moffset; 88 | } 89 | 90 | self.fs_backend_unmap(&msg)?; 91 | } 92 | 93 | Ok(()) 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /tests/example/macfuse.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Ant Group. All rights reserved. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | use libc::time_t; 6 | use log::{error, info, warn}; 7 | use std::any::Any; 8 | use std::ffi::CStr; 9 | use std::io::Result; 10 | use std::path::Path; 11 | use std::sync::Arc; 12 | use std::thread; 13 | use std::time::{Duration, SystemTime}; 14 | 15 | use fuse_backend_rs::abi::fuse_abi::Attr; 16 | 17 | use fuse_backend_rs::api::filesystem::{Context, DirEntry, Entry, FileSystem, ZeroCopyWriter}; 18 | use fuse_backend_rs::api::{server::Server, BackendFileSystem, Vfs, VfsOptions}; 19 | use fuse_backend_rs::transport::{FuseChannel, FuseSession}; 20 | 21 | pub(crate) struct HelloFileSystem {} 22 | 23 | impl FileSystem for HelloFileSystem { 24 | type Inode = u64; 25 | type Handle = u64; 26 | #[allow(unused_variables)] 27 | fn lookup(&self, _: &Context, parent: Self::Inode, name: &CStr) -> Result { 28 | let content = "hello, fuse".as_bytes(); 29 | let now = SystemTime::now(); 30 | let time = now 31 | .duration_since(SystemTime::UNIX_EPOCH) 32 | .unwrap() 33 | .as_secs(); 34 | Ok(Entry { 35 | inode: 2, 36 | generation: 0, 37 | attr: Attr { 38 | ino: 2, 39 | size: content.len() as u64, 40 | blocks: 1, 41 | atime: time, 42 | mtime: time, 43 | ctime: time, 44 | crtime: time, 45 | atimensec: 0, 46 | mtimensec: 0, 47 | ctimensec: 0, 48 | crtimensec: 0, 49 | mode: (libc::S_IFREG 50 | | libc::S_IREAD 51 | | libc::S_IEXEC 52 | | libc::S_IRGRP 53 | | libc::S_IXGRP 54 | | libc::S_IROTH 55 | | libc::S_IXOTH) as u32, 56 | nlink: 1, 57 | uid: 0, 58 | gid: 0, 59 | rdev: 0, 60 | flags: 0, 61 | blksize: 4096, 62 | padding: 0, 63 | } 64 | .into(), 65 | attr_flags: 0, 66 | attr_timeout: Duration::new(0, 0), 67 | entry_timeout: Duration::new(0, 0), 68 | }) 69 | } 70 | 71 | #[allow(unused_variables)] 72 | fn readdir( 73 | &self, 74 | ctx: &Context, 75 | inode: Self::Inode, 76 | handle: Self::Handle, 77 | size: u32, 78 | offset: u64, 79 | add_entry: &mut dyn FnMut(DirEntry) -> Result, 80 | ) -> Result<()> { 81 | if offset != 0 { 82 | 
return Ok(()); 83 | } 84 | let mut offset: usize = offset as usize; 85 | let entry = DirEntry { 86 | ino: 1, 87 | offset: offset as u64, 88 | type_: libc::DT_DIR as u32, 89 | name: ".".as_bytes(), 90 | }; 91 | offset += add_entry(entry).unwrap(); 92 | 93 | let entry = DirEntry { 94 | ino: 1, 95 | offset: offset as u64, 96 | type_: libc::DT_DIR as u32, 97 | name: "..".as_bytes(), 98 | }; 99 | offset += add_entry(entry).unwrap(); 100 | 101 | let entry = DirEntry { 102 | ino: 2, 103 | offset: offset as u64, 104 | type_: libc::DT_REG as u32, 105 | name: "hello".as_bytes(), 106 | }; 107 | add_entry(entry).unwrap(); 108 | Ok(()) 109 | } 110 | 111 | #[allow(unused_variables)] 112 | fn read( 113 | &self, 114 | ctx: &Context, 115 | inode: Self::Inode, 116 | handle: Self::Handle, 117 | w: &mut dyn ZeroCopyWriter, 118 | size: u32, 119 | offset: u64, 120 | lock_owner: Option, 121 | flags: u32, 122 | ) -> Result { 123 | let offset = offset as usize; 124 | let content = "hello, fuse".as_bytes(); 125 | let mut buf = Vec::::with_capacity(size as usize); 126 | let can_read_size = content.len() - offset; 127 | let read_size = if can_read_size < size as usize { 128 | can_read_size 129 | } else { 130 | size as usize 131 | }; 132 | let read_end = (offset as usize) + read_size; 133 | buf.extend_from_slice(&content[(offset as usize)..(read_end as usize)]); 134 | w.write(buf.as_slice())?; 135 | Ok(read_size) 136 | } 137 | 138 | #[allow(unused_variables)] 139 | fn getattr( 140 | &self, 141 | ctx: &Context, 142 | inode: Self::Inode, 143 | handle: Option, 144 | ) -> Result<(libc::stat, Duration)> { 145 | if inode == 1 { 146 | let now = SystemTime::now(); 147 | let time = now 148 | .duration_since(SystemTime::UNIX_EPOCH) 149 | .unwrap() 150 | .as_secs() as time_t; 151 | return Ok(( 152 | libc::stat { 153 | st_dev: 0, 154 | st_mode: (libc::S_IFDIR 155 | | libc::S_IREAD 156 | | libc::S_IEXEC 157 | | libc::S_IRGRP 158 | | libc::S_IXGRP 159 | | libc::S_IROTH 160 | | libc::S_IXOTH), 161 | 
st_nlink: 1, 162 | st_ino: 1, 163 | st_uid: 0, 164 | st_gid: 0, 165 | st_rdev: 0, 166 | st_atime: time, 167 | st_atime_nsec: 0, 168 | st_mtime: time, 169 | st_mtime_nsec: 0, 170 | st_ctime: time, 171 | st_ctime_nsec: 0, 172 | st_birthtime: 0, 173 | st_birthtime_nsec: 0, 174 | st_size: 0, 175 | st_blocks: 0, 176 | st_blksize: 4096, 177 | st_flags: 0, 178 | st_gen: 0, 179 | st_lspare: 0, 180 | st_qspare: [0, 0], 181 | }, 182 | Duration::from_secs(1), 183 | )); 184 | } else { 185 | let content = "hello, fuse".as_bytes(); 186 | let now = SystemTime::now(); 187 | let time = now 188 | .duration_since(SystemTime::UNIX_EPOCH) 189 | .unwrap() 190 | .as_secs() as time_t; 191 | return Ok(( 192 | libc::stat { 193 | st_dev: 0, 194 | st_mode: (libc::S_IFREG 195 | | libc::S_IREAD 196 | | libc::S_IEXEC 197 | | libc::S_IRGRP 198 | | libc::S_IXGRP 199 | | libc::S_IROTH 200 | | libc::S_IXOTH), 201 | st_nlink: 1, 202 | st_ino: 1, 203 | st_uid: 0, 204 | st_gid: 0, 205 | st_rdev: 0, 206 | st_atime: time, 207 | st_atime_nsec: 0, 208 | st_mtime: time, 209 | st_mtime_nsec: 0, 210 | st_ctime: time, 211 | st_ctime_nsec: 0, 212 | st_birthtime: 0, 213 | st_birthtime_nsec: 0, 214 | st_size: content.len() as libc::off_t, 215 | st_blocks: 1, 216 | st_blksize: 4096, 217 | st_flags: 0, 218 | st_gen: 0, 219 | st_lspare: 0, 220 | st_qspare: [0, 0], 221 | }, 222 | Duration::from_secs(1), 223 | )); 224 | } 225 | } 226 | 227 | #[allow(unused_variables)] 228 | fn access(&self, ctx: &Context, inode: Self::Inode, mask: u32) -> Result<()> { 229 | return Ok(()); 230 | } 231 | } 232 | 233 | impl BackendFileSystem for HelloFileSystem { 234 | fn mount(&self) -> Result<(Entry, u64)> { 235 | Ok(( 236 | Entry { 237 | inode: 1, 238 | generation: 0, 239 | attr: Attr::default().into(), 240 | attr_flags: 0, 241 | attr_timeout: Duration::new(0, 0), 242 | entry_timeout: Duration::new(0, 0), 243 | }, 244 | 0, 245 | )) 246 | } 247 | 248 | fn as_any(&self) -> &dyn Any { 249 | self 250 | } 251 | } 252 | 253 | /// A fusedev 
daemon example 254 | #[allow(dead_code)] 255 | pub struct Daemon { 256 | mountpoint: String, 257 | server: Arc>>, 258 | thread_cnt: u32, 259 | session: Option, 260 | } 261 | 262 | #[allow(dead_code)] 263 | impl Daemon { 264 | /// Creates a fusedev daemon instance 265 | pub fn new(mountpoint: &str, thread_cnt: u32) -> Result { 266 | // create vfs 267 | let vfs = Vfs::new(VfsOptions { 268 | #[cfg(not(target_os = "macos"))] 269 | no_open: false, 270 | #[cfg(not(target_os = "macos"))] 271 | no_opendir: false, 272 | ..Default::default() 273 | }); 274 | 275 | let fs = HelloFileSystem {}; 276 | vfs.mount(Box::new(fs), "/").unwrap(); 277 | 278 | Ok(Daemon { 279 | mountpoint: mountpoint.to_string(), 280 | server: Arc::new(Server::new(Arc::new(vfs))), 281 | thread_cnt, 282 | session: None, 283 | }) 284 | } 285 | 286 | /// Mounts a fusedev daemon to the mountpoint, then start service threads to handle 287 | /// FUSE requests. 288 | pub fn mount(&mut self) -> Result<()> { 289 | let mut se = 290 | FuseSession::new(Path::new(&self.mountpoint), "passthru_example", "", true).unwrap(); 291 | se.mount().unwrap(); 292 | for _ in 0..self.thread_cnt { 293 | let mut server = FuseServer { 294 | server: self.server.clone(), 295 | ch: se.new_channel().unwrap(), 296 | }; 297 | let _thread = thread::Builder::new() 298 | .name("fuse_server".to_string()) 299 | .spawn(move || { 300 | info!("new fuse thread"); 301 | let _ = server.svc_loop(); 302 | warn!("fuse service thread exits"); 303 | }) 304 | .unwrap(); 305 | } 306 | #[cfg(feature = "fuse-t")] 307 | se.wait_mount().unwrap(); 308 | self.session = Some(se); 309 | Ok(()) 310 | } 311 | 312 | /// Umounts and destroies a fusedev daemon 313 | pub fn umount(&mut self) -> Result<()> { 314 | if let Some(mut se) = self.session.take() { 315 | se.umount().unwrap(); 316 | } 317 | Ok(()) 318 | } 319 | } 320 | 321 | impl Drop for Daemon { 322 | fn drop(&mut self) { 323 | let _ = self.umount(); 324 | } 325 | } 326 | 327 | struct FuseServer { 328 | server: 
Arc>>, 329 | ch: FuseChannel, 330 | } 331 | 332 | impl FuseServer { 333 | fn svc_loop(&mut self) -> Result<()> { 334 | // Given error EBADF, it means kernel has shut down this session. 335 | let _ebadf = std::io::Error::from_raw_os_error(libc::EBADF); 336 | loop { 337 | if let Some((reader, writer)) = self 338 | .ch 339 | .get_request() 340 | .map_err(|_| std::io::Error::from_raw_os_error(libc::EINVAL))? 341 | { 342 | if let Err(e) = self 343 | .server 344 | .handle_message(reader, writer.into(), None, None) 345 | { 346 | match e { 347 | fuse_backend_rs::Error::EncodeMessage(_ebadf) => { 348 | break; 349 | } 350 | _ => { 351 | error!("Handling fuse message failed"); 352 | continue; 353 | } 354 | } 355 | } 356 | } else { 357 | info!("fuse server exits"); 358 | break; 359 | } 360 | } 361 | Ok(()) 362 | } 363 | } 364 | -------------------------------------------------------------------------------- /tests/example/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Ant Group. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | #[cfg(all(feature = "fusedev", target_os = "linux"))] 6 | pub(crate) mod passthroughfs; 7 | 8 | #[cfg(all(feature = "fusedev", target_os = "macos"))] 9 | pub(crate) mod macfuse; 10 | -------------------------------------------------------------------------------- /tests/example/passthroughfs.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Ant Group. All rights reserved. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | 5 | use log::{error, info, warn}; 6 | use std::io::Result; 7 | use std::path::Path; 8 | use std::sync::Arc; 9 | use std::thread; 10 | 11 | use fuse_backend_rs::api::{server::Server, Vfs, VfsOptions}; 12 | use fuse_backend_rs::passthrough::{Config, PassthroughFs}; 13 | use fuse_backend_rs::transport::{FuseChannel, FuseSession}; 14 | 15 | /// A fusedev daemon example 16 | #[allow(dead_code)] 17 | pub struct Daemon { 18 | mountpoint: String, 19 | server: Arc>>, 20 | thread_cnt: u32, 21 | session: Option, 22 | } 23 | 24 | #[allow(dead_code)] 25 | impl Daemon { 26 | /// Creates a fusedev daemon instance 27 | pub fn new(src: &str, mountpoint: &str, thread_cnt: u32) -> Result { 28 | // create vfs 29 | let vfs = Vfs::new(VfsOptions { 30 | no_open: false, 31 | no_opendir: false, 32 | ..Default::default() 33 | }); 34 | 35 | // create passthrough fs 36 | let mut cfg = Config::default(); 37 | cfg.root_dir = src.to_string(); 38 | cfg.do_import = false; 39 | let fs = PassthroughFs::<()>::new(cfg).unwrap(); 40 | fs.import().unwrap(); 41 | 42 | // attach passthrough fs to vfs root 43 | vfs.mount(Box::new(fs), "/").unwrap(); 44 | 45 | Ok(Daemon { 46 | mountpoint: mountpoint.to_string(), 47 | server: Arc::new(Server::new(Arc::new(vfs))), 48 | thread_cnt, 49 | session: None, 50 | }) 51 | } 52 | 53 | /// Mounts a fusedev daemon to the mountpoint, then start service threads to handle 54 | /// FUSE requests. 
55 | pub fn mount(&mut self) -> Result<()> { 56 | let mut se = 57 | FuseSession::new(Path::new(&self.mountpoint), "passthru_example", "", false).unwrap(); 58 | se.mount().unwrap(); 59 | for _ in 0..self.thread_cnt { 60 | let mut server = FuseServer { 61 | server: self.server.clone(), 62 | ch: se.new_channel().unwrap(), 63 | }; 64 | let _thread = thread::Builder::new() 65 | .name("fuse_server".to_string()) 66 | .spawn(move || { 67 | info!("new fuse thread"); 68 | let _ = server.svc_loop(); 69 | warn!("fuse service thread exits"); 70 | }) 71 | .unwrap(); 72 | } 73 | self.session = Some(se); 74 | Ok(()) 75 | } 76 | 77 | /// Umounts and destroies a fusedev daemon 78 | pub fn umount(&mut self) -> Result<()> { 79 | if let Some(mut se) = self.session.take() { 80 | se.umount().unwrap(); 81 | se.wake().unwrap(); 82 | } 83 | Ok(()) 84 | } 85 | } 86 | 87 | impl Drop for Daemon { 88 | fn drop(&mut self) { 89 | let _ = self.umount(); 90 | } 91 | } 92 | 93 | struct FuseServer { 94 | server: Arc>>, 95 | ch: FuseChannel, 96 | } 97 | 98 | impl FuseServer { 99 | fn svc_loop(&mut self) -> Result<()> { 100 | // Given error EBADF, it means kernel has shut down this session. 101 | let _ebadf = std::io::Error::from_raw_os_error(libc::EBADF); 102 | loop { 103 | if let Some((reader, writer)) = self 104 | .ch 105 | .get_request() 106 | .map_err(|_| std::io::Error::from_raw_os_error(libc::EINVAL))? 
107 | { 108 | if let Err(e) = self 109 | .server 110 | .handle_message(reader, writer.into(), None, None) 111 | { 112 | match e { 113 | fuse_backend_rs::Error::EncodeMessage(_ebadf) => { 114 | break; 115 | } 116 | _ => { 117 | error!("Handling fuse message failed"); 118 | continue; 119 | } 120 | } 121 | } 122 | } else { 123 | info!("fuse server exits"); 124 | break; 125 | } 126 | } 127 | Ok(()) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /tests/macfuse_smoke.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2022 Ant Group. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | // 5 | 6 | #[cfg(all(feature = "fusedev", feature = "fuse-t", target_os = "macos"))] 7 | #[macro_use] 8 | extern crate log; 9 | 10 | mod example; 11 | 12 | #[cfg(all(feature = "fusedev", feature = "fuse-t", target_os = "macos"))] 13 | mod macfuse_tests { 14 | use std::io::Result; 15 | use std::process::Command; 16 | 17 | use vmm_sys_util::tempdir::TempDir; 18 | 19 | use crate::example::macfuse; 20 | 21 | fn validate_hello_file(dest: &str) -> bool { 22 | let files = exec(format!("cd {}; ls -la .;cd - > /dev/null", dest).as_str()).unwrap(); 23 | if files.find("hello").is_none() { 24 | error!("files {}:\n not include hello \n", files); 25 | return false; 26 | } 27 | println!("files: {}", files); 28 | 29 | let content = exec(format!("cat {}/hello;", dest).as_str()).unwrap(); 30 | if !content.eq("hello, fuse") { 31 | error!("content {}:\n is not right\n", content); 32 | return false; 33 | } 34 | 35 | return true; 36 | } 37 | 38 | fn exec(cmd: &str) -> Result { 39 | debug!("exec: {}", cmd); 40 | let output = Command::new("sh") 41 | .arg("-c") 42 | .arg(cmd) 43 | .env("RUST_BACKTRACE", "1") 44 | .output()?; 45 | 46 | if !output.status.success() || output.stderr.len() > 0 { 47 | let msg = std::str::from_utf8(&output.stderr).unwrap(); 48 | panic!("exec failed: {}: 
{}", cmd, msg); 49 | } 50 | let stdout = std::str::from_utf8(&output.stdout).unwrap(); 51 | 52 | return Ok(stdout.to_string()); 53 | } 54 | 55 | #[test] 56 | #[cfg(feature = "fuse-t")] 57 | fn integration_test_macfuse_hello() -> Result<()> { 58 | // test the fuse-rs repository 59 | let tmp_dir = TempDir::new().unwrap(); 60 | let mnt_dir = tmp_dir.as_path().to_str().unwrap(); 61 | info!("test macfuse mountpoint {}", mnt_dir); 62 | 63 | let mut daemon = macfuse::Daemon::new(mnt_dir, 2).unwrap(); 64 | daemon.mount().unwrap(); 65 | assert!(validate_hello_file(mnt_dir)); 66 | daemon.umount().unwrap(); 67 | Ok(()) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /tests/overlay/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "overlay" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | fuse-backend-rs = { path = "../../", features = ["fusedev"] } 10 | log = ">=0.4.6" 11 | vmm-sys-util = ">=0.4" 12 | libc = ">=0.2.68" 13 | simple_logger = ">=1.13.0" 14 | signal-hook = ">=0.3.10" 15 | lazy_static = ">=1.4.0" 16 | -------------------------------------------------------------------------------- /tests/overlay/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate fuse_backend_rs; 2 | extern crate lazy_static; 3 | extern crate libc; 4 | extern crate log; 5 | extern crate signal_hook; 6 | extern crate simple_logger; 7 | extern crate vmm_sys_util; 8 | 9 | use std::env; 10 | use std::io::{Error, Result}; 11 | use std::path::Path; 12 | use std::sync::Arc; 13 | use std::thread; 14 | 15 | use fuse_backend_rs::api::filesystem::Layer; 16 | use fuse_backend_rs::api::server::Server; 17 | use fuse_backend_rs::overlayfs::config::Config; 18 | use fuse_backend_rs::overlayfs::OverlayFs; 19 | use 
fuse_backend_rs::passthrough::{self, PassthroughFs}; 20 | use fuse_backend_rs::transport::{FuseChannel, FuseSession}; 21 | use log::LevelFilter; 22 | use signal_hook::{consts::TERM_SIGNALS, iterator::Signals}; 23 | use simple_logger::SimpleLogger; 24 | 25 | #[derive(Debug, Default)] 26 | pub struct Args { 27 | name: String, 28 | mountpoint: String, 29 | lowerdir: Vec, 30 | upperdir: String, 31 | workdir: String, 32 | log_level: String, 33 | } 34 | 35 | pub struct FuseServer { 36 | server: Arc>>, 37 | ch: FuseChannel, 38 | } 39 | 40 | type BoxedLayer = Box + Send + Sync>; 41 | 42 | fn new_passthroughfs_layer(rootdir: &str) -> Result { 43 | let mut config = passthrough::Config::default(); 44 | config.root_dir = String::from(rootdir); 45 | // enable xattr 46 | config.xattr = true; 47 | config.do_import = true; 48 | let fs = Box::new(PassthroughFs::<()>::new(config)?); 49 | fs.import()?; 50 | Ok(fs as BoxedLayer) 51 | } 52 | 53 | fn help() { 54 | println!( 55 | "Usage:\n overlay -o lowerdir=::,upperdir=,workdir= [-l log_level]\n" 56 | ); 57 | } 58 | 59 | fn parse_args() -> Result { 60 | let args = env::args().collect::>(); 61 | // We expect at least 5 arguments. 62 | if args.len() < 5 { 63 | help(); 64 | return Err(std::io::Error::from_raw_os_error(libc::EINVAL)); 65 | } 66 | 67 | let mut cmd_args = Args { 68 | name: "".to_string(), 69 | mountpoint: "".to_string(), 70 | ..Default::default() 71 | }; 72 | 73 | let mut i = 0; 74 | loop { 75 | i += 1; 76 | if i >= args.len() { 77 | break; 78 | } 79 | if args[i].as_str() == "-h" { 80 | help(); 81 | return Err(std::io::Error::from_raw_os_error(libc::EINVAL)); 82 | } 83 | 84 | if args[i].as_str() == "-o" { 85 | i += 1; 86 | // Parse options. 
87 | let option = args[i].clone(); 88 | option.split(",").try_for_each(|value| -> Result<()> { 89 | let kv = value.split("=").collect::>(); 90 | if kv.len() != 2 { 91 | println!("unknown option: {}", value); 92 | return Ok(()); 93 | } 94 | 95 | match kv[0] { 96 | "lowerdir" => { 97 | cmd_args.lowerdir = kv[1] 98 | .split(":") 99 | .map(|s| s.to_string()) 100 | .collect::>(); 101 | } 102 | "upperdir" => { 103 | cmd_args.upperdir = kv[1].to_string(); 104 | } 105 | "workdir" => { 106 | cmd_args.workdir = kv[1].to_string(); 107 | } 108 | _ => { 109 | // Ignore unknown options. 110 | println!("unknown option: {}", kv[0]); 111 | } 112 | } 113 | Ok(()) 114 | })?; 115 | continue; 116 | } 117 | 118 | if args[i].as_str() == "-l" { 119 | i += 1; 120 | cmd_args.log_level = args[i].clone(); 121 | } 122 | 123 | if cmd_args.name.is_empty() { 124 | cmd_args.name = args[i].clone(); 125 | continue; 126 | } else if cmd_args.mountpoint.is_empty() { 127 | cmd_args.mountpoint = args[i].clone(); 128 | continue; 129 | } 130 | } 131 | 132 | // All fields should be set. 
133 | if cmd_args.lowerdir.is_empty() || cmd_args.upperdir.is_empty() || cmd_args.workdir.is_empty() { 134 | println!("lowerdir, upperdir and workdir should be set"); 135 | help(); 136 | return Err(Error::from_raw_os_error(libc::EINVAL)); 137 | } 138 | 139 | Ok(cmd_args) 140 | } 141 | 142 | fn set_log(args: &Args) { 143 | let log_level = match args.log_level.as_str() { 144 | "trace" => LevelFilter::Trace, 145 | "debug" => LevelFilter::Debug, 146 | "info" => LevelFilter::Info, 147 | "warn" => LevelFilter::Warn, 148 | "error" => LevelFilter::Error, 149 | _ => LevelFilter::Info, 150 | }; 151 | 152 | SimpleLogger::new().with_level(log_level).init().unwrap(); 153 | } 154 | 155 | fn main() -> Result<()> { 156 | let args = parse_args()?; 157 | println!("args: {:?}", args); 158 | 159 | set_log(&args); 160 | 161 | // let basedir = "/home/zhangwei/program/test-overlay/test2/"; 162 | let upper_layer = Arc::new(new_passthroughfs_layer(&args.upperdir)?); 163 | let mut lower_layers = Vec::new(); 164 | for lower in args.lowerdir { 165 | lower_layers.push(Arc::new(new_passthroughfs_layer(&lower)?)); 166 | } 167 | 168 | let mut config = Config::default(); 169 | config.work = args.workdir.clone(); 170 | config.mountpoint = args.mountpoint.clone(); 171 | config.do_import = true; 172 | 173 | print!("new overlay fs\n"); 174 | let fs = OverlayFs::new(Some(upper_layer), lower_layers, config)?; 175 | print!("init root inode\n"); 176 | fs.import()?; 177 | 178 | print!("open fuse session\n"); 179 | let mut se = FuseSession::new(Path::new(&args.mountpoint), &args.name, "", false).unwrap(); 180 | print!("session opened\n"); 181 | se.mount().unwrap(); 182 | 183 | let mut server = FuseServer { 184 | server: Arc::new(Server::new(Arc::new(fs))), 185 | ch: se.new_channel().unwrap(), 186 | }; 187 | 188 | let handle = thread::spawn(move || { 189 | let _ = server.svc_loop(); 190 | }); 191 | 192 | // main thread 193 | let mut signals = Signals::new(TERM_SIGNALS).unwrap(); 194 | for _sig in 
signals.forever() { 195 | break; 196 | } 197 | 198 | se.umount().unwrap(); 199 | se.wake().unwrap(); 200 | 201 | let _ = handle.join(); 202 | 203 | Ok(()) 204 | } 205 | 206 | impl FuseServer { 207 | pub fn svc_loop(&mut self) -> Result<()> { 208 | let _ebadf = std::io::Error::from_raw_os_error(libc::EBADF); 209 | print!("entering server loop\n"); 210 | loop { 211 | if let Some((reader, writer)) = self 212 | .ch 213 | .get_request() 214 | .map_err(|_| std::io::Error::from_raw_os_error(libc::EINVAL))? 215 | { 216 | if let Err(e) = self 217 | .server 218 | .handle_message(reader, writer.into(), None, None) 219 | { 220 | match e { 221 | fuse_backend_rs::Error::EncodeMessage(_ebadf) => { 222 | break; 223 | } 224 | _ => { 225 | print!("Handling fuse message failed"); 226 | continue; 227 | } 228 | } 229 | } 230 | } else { 231 | print!("fuse server exits"); 232 | break; 233 | } 234 | } 235 | Ok(()) 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /tests/passthrough/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "passthrough" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | fuse-backend-rs = { path = "../../", features = ["fusedev"] } 10 | log = ">=0.4.6" 11 | libc = ">=0.2.68" 12 | simple_logger = ">=1.13.0" 13 | signal-hook = ">=0.3.10" 14 | -------------------------------------------------------------------------------- /tests/passthrough/src/main.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info, warn, LevelFilter}; 2 | use std::env; 3 | use std::fs; 4 | use std::io::{Error, Result}; 5 | use std::path::Path; 6 | use std::sync::Arc; 7 | use std::thread; 8 | 9 | use signal_hook::{consts::TERM_SIGNALS, iterator::Signals}; 10 | 11 | use 
fuse_backend_rs::api::{server::Server, Vfs, VfsOptions}; 12 | use fuse_backend_rs::passthrough::{Config, PassthroughFs}; 13 | use fuse_backend_rs::transport::{FuseChannel, FuseSession}; 14 | 15 | use simple_logger::SimpleLogger; 16 | 17 | /// A fusedev daemon example 18 | #[allow(dead_code)] 19 | pub struct Daemon { 20 | mountpoint: String, 21 | server: Arc>>, 22 | thread_cnt: u32, 23 | session: Option, 24 | } 25 | 26 | #[allow(dead_code)] 27 | impl Daemon { 28 | /// Creates a fusedev daemon instance 29 | pub fn new(src: &str, mountpoint: &str, thread_cnt: u32) -> Result { 30 | // create vfs 31 | let vfs = Vfs::new(VfsOptions { 32 | no_open: false, 33 | no_opendir: false, 34 | ..Default::default() 35 | }); 36 | 37 | // create passthrough fs 38 | let mut cfg = Config::default(); 39 | cfg.root_dir = src.to_string(); 40 | cfg.do_import = false; 41 | let fs = PassthroughFs::<()>::new(cfg).unwrap(); 42 | fs.import().unwrap(); 43 | 44 | // attach passthrough fs to vfs root 45 | vfs.mount(Box::new(fs), "/").unwrap(); 46 | 47 | Ok(Daemon { 48 | mountpoint: mountpoint.to_string(), 49 | server: Arc::new(Server::new(Arc::new(vfs))), 50 | thread_cnt, 51 | session: None, 52 | }) 53 | } 54 | 55 | /// Mounts a fusedev daemon to the mountpoint, then start service threads to handle 56 | /// FUSE requests. 
57 | pub fn mount(&mut self) -> Result<()> { 58 | let mut se = 59 | FuseSession::new(Path::new(&self.mountpoint), "testpassthrough", "", false).unwrap(); 60 | se.mount().unwrap(); 61 | 62 | se.with_writer(|writer| { 63 | self.server 64 | .notify_resend(writer) 65 | .unwrap_or_else(|e| println!("failed to send resend notification {}", e)); 66 | }); 67 | 68 | for _ in 0..self.thread_cnt { 69 | let mut server = FuseServer { 70 | server: self.server.clone(), 71 | ch: se.new_channel().unwrap(), 72 | }; 73 | let _thread = thread::Builder::new() 74 | .name("fuse_server".to_string()) 75 | .spawn(move || { 76 | info!("new fuse thread"); 77 | let _ = server.svc_loop(); 78 | warn!("fuse service thread exits"); 79 | }) 80 | .unwrap(); 81 | } 82 | self.session = Some(se); 83 | Ok(()) 84 | } 85 | 86 | /// Umounts and destroies a fusedev daemon 87 | pub fn umount(&mut self) -> Result<()> { 88 | if let Some(mut se) = self.session.take() { 89 | se.umount().unwrap(); 90 | se.wake().unwrap(); 91 | } 92 | Ok(()) 93 | } 94 | } 95 | 96 | impl Drop for Daemon { 97 | fn drop(&mut self) { 98 | let _ = self.umount(); 99 | } 100 | } 101 | 102 | struct FuseServer { 103 | server: Arc>>, 104 | ch: FuseChannel, 105 | } 106 | 107 | impl FuseServer { 108 | fn svc_loop(&mut self) -> Result<()> { 109 | // Given error EBADF, it means kernel has shut down this session. 110 | let _ebadf = std::io::Error::from_raw_os_error(libc::EBADF); 111 | loop { 112 | if let Some((reader, writer)) = self 113 | .ch 114 | .get_request() 115 | .map_err(|_| std::io::Error::from_raw_os_error(libc::EINVAL))? 
116 | { 117 | if let Err(e) = self 118 | .server 119 | .handle_message(reader, writer.into(), None, None) 120 | { 121 | match e { 122 | fuse_backend_rs::Error::EncodeMessage(_ebadf) => { 123 | break; 124 | } 125 | _ => { 126 | error!("Handling fuse message failed"); 127 | continue; 128 | } 129 | } 130 | } 131 | } else { 132 | info!("fuse server exits"); 133 | break; 134 | } 135 | } 136 | Ok(()) 137 | } 138 | } 139 | 140 | struct Args { 141 | src: String, 142 | dest: String, 143 | } 144 | 145 | fn help() { 146 | println!("Usage:\n passthrough \n"); 147 | } 148 | 149 | fn parse_args() -> Result { 150 | let args = env::args().collect::>(); 151 | let cmd_args = Args { 152 | src: args[1].clone(), 153 | dest: args[2].clone(), 154 | }; 155 | if cmd_args.src.len() == 0 || cmd_args.dest.len() == 0 { 156 | help(); 157 | return Err(Error::from_raw_os_error(libc::EINVAL)); 158 | } 159 | Ok(cmd_args) 160 | } 161 | 162 | fn main() -> Result<()> { 163 | SimpleLogger::new() 164 | .with_level(LevelFilter::Info) 165 | .init() 166 | .unwrap(); 167 | let args = parse_args().unwrap(); 168 | 169 | // Check if src exists, create dir if not. 
170 | let src = Path::new(args.src.as_str()); 171 | let src_dir = src.to_str().unwrap(); 172 | if src.exists() { 173 | if !src.is_dir() { 174 | error!("src {} is not a directory", src_dir); 175 | return Err(Error::from_raw_os_error(libc::EINVAL)); 176 | } 177 | } else { 178 | fs::create_dir_all(src_dir).unwrap(); 179 | } 180 | 181 | let dest = Path::new(args.dest.as_str()); 182 | let dest_dir = dest.to_str().unwrap(); 183 | if dest.exists() { 184 | if !dest.is_dir() { 185 | error!("dest {} is not a directory", dest_dir); 186 | return Err(Error::from_raw_os_error(libc::EINVAL)); 187 | } 188 | } else { 189 | fs::create_dir_all(dest_dir).unwrap(); 190 | } 191 | info!( 192 | "test passthroughfs src {:?} mountpoint {}", 193 | src_dir, dest_dir 194 | ); 195 | 196 | let mut daemon = Daemon::new(src_dir, dest_dir, 2).unwrap(); 197 | daemon.mount().unwrap(); 198 | 199 | // main thread 200 | let mut signals = Signals::new(TERM_SIGNALS).unwrap(); 201 | for _sig in signals.forever() { 202 | break; 203 | } 204 | 205 | daemon.umount().unwrap(); 206 | 207 | Ok(()) 208 | } 209 | -------------------------------------------------------------------------------- /tests/scripts/unionmount_test_overlay.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # create fuse overlay mount script. 4 | # /tmp/testoverlay must exists. 5 | sudo cat >/usr/sbin/mount.fuse.testoverlay <>/tmp/testoverlay.log 2>&1 & 10 | sleep 1 11 | EOF 12 | sudo chmod +x /usr/sbin/mount.fuse.testoverlay 13 | 14 | 15 | # clone xfstests and install. 16 | cd /tmp/ 17 | git clone -b ci-fuse-backend-rs https://github.com/WeiZhang555/unionmount-testsuite.git 18 | cd unionmount-testsuite 19 | 20 | echo "====> Start to run unionmount-testsuite." 
21 | 22 | mkdir -p /tmp/unionmount/ 23 | touch /tmp/summary 24 | success=0 25 | fail=0 26 | 27 | for testcase in dir-open-dir \ 28 | dir-open \ 29 | dir-sym1-open \ 30 | dir-sym1-weird-open \ 31 | dir-sym2-open \ 32 | dir-sym2-weird-open \ 33 | dir-weird-open \ 34 | hard-link-dir \ 35 | hard-link \ 36 | hard-link-sym \ 37 | impermissible \ 38 | mkdir \ 39 | noent-creat-excl \ 40 | noent-creat-excl-trunc \ 41 | noent-creat \ 42 | noent-creat-trunc \ 43 | noent-plain \ 44 | noent-trunc \ 45 | open-creat-excl \ 46 | open-creat-excl-trunc \ 47 | open-creat \ 48 | open-creat-trunc \ 49 | open-plain \ 50 | open-trunc \ 51 | readlink \ 52 | rename-exdev \ 53 | rmdir \ 54 | rmtree-new \ 55 | rmtree \ 56 | sym1-creat-excl \ 57 | sym1-creat \ 58 | sym1-plain \ 59 | sym1-trunc \ 60 | sym2-creat-excl \ 61 | sym2-creat \ 62 | sym2-plain \ 63 | sym2-trunc \ 64 | symx-creat-excl \ 65 | symx-creat \ 66 | symx-creat-trunc \ 67 | symx-plain \ 68 | symx-trunc \ 69 | truncate \ 70 | unlink 71 | # === Some test cases are not supported by unionmount currently === 72 | # dir-weird-open-dir 73 | # rename-dir 74 | # rename-empty-dir 75 | # rename-file 76 | # rename-hard-link 77 | # rename-mass-2 78 | # rename-mass-3 79 | # rename-mass-4 80 | # rename-mass-5 81 | # rename-mass-dir 82 | # rename-mass 83 | # rename-mass-sym 84 | # rename-move-dir 85 | # rename-new-dir 86 | # rename-new-pop-dir 87 | # rename-pop-dir 88 | do 89 | UNIONMOUNT_BASEDIR=/tmp/unionmount sudo -E ./run --ov --fuse=testoverlay --xdev $testcase 90 | if [ $? 
-eq 0 ] 91 | then 92 | echo "===== SUCCESS: " $testcase >> /tmp/summary 93 | let success+=1 94 | else 95 | echo ">>>>>>>> FAIL: " $testcase >> /tmp/summary 96 | let fail+=1 97 | fi 98 | done; 99 | 100 | cat /tmp/summary && rm /tmp/summary 101 | echo "Total: success: $success, fail: $fail" 102 | 103 | if [ $fail -gt 0 ] 104 | then 105 | exit 1 106 | fi 107 | -------------------------------------------------------------------------------- /tests/scripts/xfstests_overlay.exclude: -------------------------------------------------------------------------------- 1 | # Exclude list for tests that we know are broken in smb3 2 | # 3 | generic/011 # Broken: dirstress 5 processes. 4 | generic/020 # ENOSPC, suppose to be FUSE compatibility issue. 5 | generic/023 # Rename is not supported currently. 6 | generic/024 # Rename is not supported currently. 7 | generic/025 # Rename is not supported currently. 8 | generic/035 # Rename is not supported currently. 9 | generic/078 # Rename is not supported currently. 10 | generic/089 # Rename is not supported currently. 11 | generic/099 # Suppose to be FUSE compatibility issue. 12 | generic/184 # Special device isn't supported due to 'nodev' mount option. 13 | generic/241 # Rename is not supported currently. 14 | generic/245 # Rename is not supported currently. 15 | generic/375 # Suppose to be FUSE compatibility issue, about posix acl support 16 | generic/426 # Suppose to be FUSE compatibility issue: 'open_by_handle' 17 | generic/434 # Special device isn't supported due to 'nodev' mount option. 18 | generic/444 # Suppose to be FUSE compatibility issue, about posix acl support 19 | generic/467 # Suppose to be FUSE compatibility issue: 'open_by_handle' 20 | generic/477 # Suppose to be FUSE compatibility issue: 'open_by_handle' 21 | generic/591 # Broken. 22 | generic/633 # Suppose to be FUSE compatibility issue. 23 | generic/697 # Suppose to be FUSE compatibility issue. 
24 | generic/736 -------------------------------------------------------------------------------- /tests/scripts/xfstests_overlay.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | current_dir=$(dirname $(realpath $0)) 4 | 5 | sudo apt-get update 6 | sudo apt-get install acl attr automake bc dbench dump e2fsprogs fio gawk \ 7 | gcc git indent libacl1-dev libaio-dev libcap-dev libgdbm-dev libtool \ 8 | libtool-bin liburing-dev libuuid1 lvm2 make psmisc python3 quota sed \ 9 | uuid-dev uuid-runtime xfsprogs linux-headers-$(uname -r) sqlite3 10 | sudo apt-get install exfatprogs f2fs-tools ocfs2-tools udftools xfsdump \ 11 | xfslibs-dev 12 | 13 | # clone xfstests and install. 14 | cd /tmp/ 15 | git clone -b v2023.12.10 git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git 16 | cd xfstests-dev 17 | make 18 | sudo make install 19 | # overwrite local config. 20 | cat >local.config </usr/sbin/mount.fuse.testoverlay <>/tmp/testoverlay.log 2>&1 & 39 | sleep 1 40 | EOF 41 | sudo chmod +x /usr/sbin/mount.fuse.testoverlay 42 | 43 | # create related directories. 44 | mkdir -p /tmp/testoverlay/{upper,work,merged,lower2,lower1} 45 | 46 | echo "====> Start to run xfstests." 47 | # run tests. 48 | cd /tmp/xfstests-dev 49 | # Some tests are not supported by fuse or cannot pass currently. 
50 | sudo ./check -fuse -E $current_dir/xfstests_overlay.exclude 51 | 52 | 53 | -------------------------------------------------------------------------------- /tests/scripts/xfstests_pathr.exclude: -------------------------------------------------------------------------------- 1 | # Exclude list for tests that we know are broken in passthrough fs 2 | # 3 | generic/002 4 | generic/184 5 | generic/426 6 | generic/434 7 | generic/467 8 | generic/471 9 | generic/477 10 | generic/591 11 | generic/633 12 | generic/736 -------------------------------------------------------------------------------- /tests/scripts/xfstests_pathr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | current_dir=$(dirname $(realpath $0)) 4 | 5 | sudo apt-get update 6 | sudo apt-get install acl attr automake bc dbench dump e2fsprogs fio gawk \ 7 | gcc git indent libacl1-dev libaio-dev libcap-dev libgdbm-dev libtool \ 8 | libtool-bin liburing-dev libuuid1 lvm2 make psmisc python3 quota sed \ 9 | uuid-dev uuid-runtime xfsprogs linux-headers-$(uname -r) sqlite3 \ 10 | exfatprogs f2fs-tools ocfs2-tools udftools xfsdump \ 11 | xfslibs-dev 12 | 13 | # clone xfstests and install. 14 | cd /tmp/ 15 | git clone -b v2023.12.10 git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git 16 | cd xfstests-dev 17 | make 18 | sudo make install 19 | # overwrite local config. 20 | cat >local.config </usr/sbin/mount.fuse.testpassthrough <>/tmp/testpassthrough.log 2>&1 & 35 | sleep 1 36 | EOF 37 | sudo chmod +x /usr/sbin/mount.fuse.testpassthrough 38 | 39 | # create related dirs. 40 | mkdir -p /tmp/pathr_src /tmp/pathr_dst 41 | 42 | echo "====> Start to run xfstests." 43 | # run tests. 44 | cd /tmp/xfstests-dev 45 | # Some tests are not supported by fuse or cannot pass currently. 
46 | sudo ./check -fuse -E $current_dir/xfstests_pathr.exclude 47 | 48 | 49 | -------------------------------------------------------------------------------- /tests/smoke.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2022 Ant Group. All rights reserved. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 4 | // 5 | 6 | #[cfg(all(feature = "fusedev", target_os = "linux"))] 7 | #[macro_use] 8 | extern crate log; 9 | 10 | mod example; 11 | 12 | #[cfg(all(feature = "fusedev", target_os = "linux"))] 13 | mod fusedev_tests { 14 | use std::io::Result; 15 | use std::path::Path; 16 | use std::process::Command; 17 | 18 | use vmm_sys_util::tempdir::TempDir; 19 | 20 | use crate::example::passthroughfs; 21 | 22 | fn validate_two_git_directory(src: &str, dest: &str) -> bool { 23 | let str = format!( 24 | "cd {}; git config --global --add safe.directory {}; git ls-files; cd - > /dev/null", 25 | src, src 26 | ); 27 | let src_files = exec(str.as_str()).unwrap(); 28 | let str = format!( 29 | "cd {}; git config --global --add safe.directory {}; git ls-files; cd - > /dev/null", 30 | dest, dest 31 | ); 32 | let dest_files = exec(str.as_str()).unwrap(); 33 | if src_files != dest_files { 34 | error!( 35 | "src {}:\n{}\ndest {}:\n{}", 36 | src, src_files, dest, dest_files 37 | ); 38 | return false; 39 | } 40 | 41 | let src_md5 = exec( 42 | format!( 43 | "cd {}; git ls-files --recurse-submodules | grep --invert-match rust-vmm-ci | xargs md5sum; cd - > /dev/null", 44 | src 45 | ) 46 | .as_str(), 47 | ) 48 | .unwrap(); 49 | let dest_md5 = exec( 50 | format!( 51 | "cd {}; git ls-files --recurse-submodules | grep --invert-match rust-vmm-ci | xargs md5sum; cd - > /dev/null", 52 | dest 53 | ) 54 | .as_str(), 55 | ) 56 | .unwrap(); 57 | if src_md5 != dest_md5 { 58 | error!("src {}:\n{}\ndest {}:\n{}", src, src_md5, dest, dest_md5,); 59 | return false; 60 | } 61 | 62 | return true; 63 | } 64 | 65 | fn exec(cmd: &str) -> Result { 66 | 
debug!("exec: {}", cmd); 67 | let output = Command::new("sh") 68 | .arg("-c") 69 | .arg(cmd) 70 | .env("RUST_BACKTRACE", "1") 71 | .output()?; 72 | 73 | if !output.status.success() || output.stderr.len() > 0 { 74 | let msg = std::str::from_utf8(&output.stderr).unwrap(); 75 | panic!("exec failed: {}: {}", cmd, msg); 76 | } 77 | let stdout = std::str::from_utf8(&output.stdout).unwrap(); 78 | 79 | return Ok(stdout.to_string()); 80 | } 81 | 82 | #[test] 83 | #[ignore] // it depends on privileged mode to pass through /dev/fuse 84 | fn integration_test_tree_gitrepo() -> Result<()> { 85 | // test the fuse-rs repository 86 | let src = Path::new(".").canonicalize().unwrap(); 87 | let src_dir = src.to_str().unwrap(); 88 | let tmp_dir = TempDir::new().unwrap(); 89 | let mnt_dir = tmp_dir.as_path().to_str().unwrap(); 90 | info!( 91 | "test passthroughfs src {:?} mountpoint {}", 92 | src_dir, mnt_dir 93 | ); 94 | 95 | let mut daemon = passthroughfs::Daemon::new(src_dir, mnt_dir, 2).unwrap(); 96 | daemon.mount().unwrap(); 97 | std::thread::sleep(std::time::Duration::from_secs(1)); 98 | assert!(validate_two_git_directory(src_dir, mnt_dir)); 99 | daemon.umount().unwrap(); 100 | Ok(()) 101 | } 102 | } 103 | --------------------------------------------------------------------------------