├── .github └── workflows │ ├── ci.yaml │ └── future_proof.yaml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE-GPL-3.0 ├── README.md ├── build.rs ├── clippy.toml ├── contrib ├── 19-ublk-unprivileged.example.rules ├── config-memory.example.toml ├── config-onedrive.example.toml ├── cryptsetup-format-zoned.sh ├── orb.nix └── orb@.example.service ├── default.nix ├── flake.lock ├── flake.nix ├── orb-ublk ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── build.rs ├── examples │ ├── loop.rs │ ├── management.rs │ └── zoned.rs ├── src │ ├── lib.rs │ ├── runtime.rs │ ├── sys.rs │ └── ublk.rs └── tests │ ├── basic.rs │ └── interrupt.rs ├── src ├── cli.rs ├── lib.rs ├── main.rs ├── memory_backend.rs ├── onedrive_backend.rs ├── onedrive_backend │ └── login.rs ├── service.rs └── tests.rs └── ublk-chown-unprivileged ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT └── src └── main.rs /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | push: 5 | 6 | permissions: 7 | contents: read 8 | 9 | env: 10 | RUST_BACKTRACE: full 11 | RUSTDOCFLAGS: -Dwarnings 12 | RUSTFLAGS: -Dwarnings 13 | 14 | jobs: 15 | style: 16 | name: Code style 17 | runs-on: ubuntu-latest 18 | timeout-minutes: 15 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | 23 | - name: Install Rust stable 24 | run: | 25 | rustup update --no-self-update stable 26 | rustup default stable 27 | 28 | - name: Cache Dependencies 29 | uses: Swatinem/rust-cache@v2 30 | 31 | - name: Rustfmt 32 | run: cargo fmt -- --check 33 | 34 | - name: Clippy 35 | run: cargo clippy --workspace --all-targets -- -D clippy::dbg_macro -D clippy::todo 36 | 37 | - name: Rustdoc 38 | run: cargo doc --workspace 39 | 40 | test: 41 | strategy: 42 | matrix: 43 | rust: [stable, '1.76'] # NB. Sync with Cargo.toml. 
44 | name: Test ${{ matrix.rust }} 45 | runs-on: ubuntu-latest 46 | timeout-minutes: 15 47 | steps: 48 | - name: Load kernel module ublk_drv 49 | run: | 50 | sudo apt-get update 51 | sudo apt-get install --no-install-recommends --yes "linux-modules-extra-$(uname -r)" 52 | sudo modprobe ublk_drv 53 | 54 | - name: Checkout 55 | uses: actions/checkout@v4 56 | 57 | - name: Install Rust ${{ matrix.rust }} 58 | run: | 59 | rustup update --no-self-update ${{ matrix.rust }} 60 | rustup default ${{ matrix.rust }} 61 | 62 | - name: Cache Dependencies 63 | uses: Swatinem/rust-cache@v2 64 | 65 | - name: Build 66 | run: cargo build --workspace --all-targets 67 | 68 | - name: Test 69 | run: cargo test --workspace --all-targets -- --include-ignored 70 | env: 71 | CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: sudo 72 | 73 | nix-flake: 74 | name: Flake package 75 | runs-on: ubuntu-latest 76 | timeout-minutes: 15 77 | steps: 78 | - name: Checkout 79 | uses: actions/checkout@v4 80 | 81 | - name: Install Nix 82 | uses: cachix/install-nix-action@v26 83 | with: 84 | github_access_token: ${{ secrets.GITHUB_TOKEN }} 85 | 86 | - name: Flake check 87 | run: nix flake check --no-update-lock-file --show-trace 88 | 89 | - name: Flake build 90 | run: nix build --no-update-lock-file --show-trace --print-build-logs 91 | -------------------------------------------------------------------------------- /.github/workflows/future_proof.yaml: -------------------------------------------------------------------------------- 1 | name: Future proof tests 2 | on: 3 | schedule: 4 | - cron: '6 1 * * 0' # Sun *-*-* 01:06:00 UTC 5 | 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | env: 12 | RUST_BACKTRACE: full 13 | RUSTFLAGS: -Dwarnings 14 | 15 | jobs: 16 | outdated: 17 | name: Outdated 18 | runs-on: ubuntu-latest 19 | timeout-minutes: 15 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | 24 | - name: Install cargo-outdated 25 | uses: dtolnay/install@cargo-outdated 26 | 27 | - name: cargo-outdated 28 | run: | 29 | rm Cargo.lock # Ignore trivially updatable compatible versions. 
30 | cargo outdated --workspace --exit-code 1 31 | 32 | test: 33 | strategy: 34 | matrix: 35 | rust: [beta, nightly] 36 | name: Test ${{ matrix.rust }} 37 | runs-on: ubuntu-latest 38 | timeout-minutes: 15 39 | steps: 40 | - name: Load kernel module ublk_drv 41 | run: | 42 | sudo apt-get update 43 | sudo apt-get install --no-install-recommends --yes "linux-modules-extra-$(uname -r)" 44 | sudo modprobe ublk_drv 45 | 46 | - name: Checkout 47 | uses: actions/checkout@v4 48 | 49 | - name: Install Rust ${{ matrix.rust }} 50 | run: | 51 | rustup update --no-self-update ${{ matrix.rust }} 52 | rustup default ${{ matrix.rust }} 53 | 54 | - name: Cache Dependencies 55 | uses: Swatinem/rust-cache@v2 56 | 57 | - name: Build 58 | run: cargo build --workspace --all-targets 59 | 60 | - name: Test 61 | run: cargo test --workspace --all-targets -- --include-ignored 62 | env: 63 | CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: sudo 64 | 65 | 66 | nix-flake-latest: 67 | name: Flake package following latest 68 | runs-on: ubuntu-latest 69 | timeout-minutes: 15 70 | steps: 71 | - name: Checkout 72 | uses: actions/checkout@v4 73 | 74 | - name: Install Nix 75 | uses: cachix/install-nix-action@v26 76 | with: 77 | github_access_token: ${{ secrets.GITHUB_TOKEN }} 78 | 79 | - name: Flake update 80 | # https://github.com/actions/checkout/tree/v3.3.0#push-a-commit-using-the-built-in-token 81 | run: | 82 | git config user.name github-actions 83 | git config user.email github-actions@github.com 84 | nix flake update --commit-lock-file 85 | 86 | - name: Flake check 87 | run: nix flake check --no-update-lock-file --show-trace 88 | 89 | - name: Flake build 90 | run: nix build --no-update-lock-file --show-trace --print-build-logs 91 | 92 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /completions 3 | 4 | result 5 | result-* 6 | config*.toml 7 | !config*.example.toml 8 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "orb" 3 | version = "0.1.0" 4 | edition = "2021" 5 | description = "OneDrive as a block device" 6 | license = "GPL-3.0-or-later" 7 | # NB. Sync with CI and README. 
8 | rust-version = "1.76" # orb-ublk 9 | 10 | [features] 11 | default = [] 12 | completion = ["dep:clap", "dep:clap_complete"] 13 | 14 | [dependencies] 15 | anyhow = "1" 16 | bytes = "1" 17 | bytesize = { version = "2", features = ["serde"] } 18 | clap = { version = "4", features = ["derive"] } 19 | dirs = "6" 20 | futures-util = { version = "0.3", features = ["io"] } 21 | hostname = "0.4" 22 | humantime = "2" 23 | hyper = { version = "1", features = ["http1", "server"] } 24 | hyper-util = "0.1" 25 | itertools = "0.14" 26 | lru = "0.14" 27 | onedrive-api = "0.10" 28 | open = "5.1" 29 | orb-ublk = { path = "./orb-ublk", features = ["tokio"] } 30 | parking_lot = "0.12" 31 | rand = "0.9" 32 | reqwest = { version = "0.12", features = ["stream"] } 33 | rustix = { version = "1", features = ["fs", "time", "stdio"] } 34 | scoped-tls = "1" 35 | scopeguard = "1" 36 | sd-notify = "0.4" 37 | serde = { version = "1", features = ["derive"] } 38 | serde-inline-default = "0.2" 39 | serde_json = "1" 40 | tokio = { version = "1", features = ["macros", "net", "rt", "signal", "sync", "time"] } 41 | toml = "0.8" 42 | tracing = { version = "0.1", features = ["log"] } 43 | tracing-futures = { version = "0.2", features = ["futures-03"] } 44 | tracing-subscriber = { version = "0.3", features = ["env-filter", "tracing-log"] } 45 | 46 | [dev-dependencies] 47 | rustix = { version = "1", features = ["fs"] } 48 | 49 | [build-dependencies] 50 | clap = { version = "4", optional = true, features = ["derive"] } 51 | clap_complete = { version = "4", optional = true } 52 | 53 | [workspace] 54 | resolver = "2" 55 | members = ["orb-ublk", "ublk-chown-unprivileged"] 56 | 57 | [profile.bench] 58 | debug = "full" 59 | 60 | [lints.clippy] 61 | pedantic = { level = "warn", priority = -1 } 62 | 63 | # Of course everything involving networks may fail. 64 | missing-errors-doc = "allow" 65 | # False positive: `rest` vs. `ret`, `off` vs. `coff`, etc. 66 | similar-names = "allow" 67 | # False positive on `unwrap` and `expect` for fail-means-bug semantics. 68 | missing-panics-doc = "allow" 69 | # Long sequential tasks (`login::interactive`, `onedrive_backend::init`) where 70 | # splitting fns can only increase the complexity. 71 | too-many-lines = "allow" 72 | # Workaround: https://github.com/rust-lang/rust-clippy/issues/13184 73 | explicit-iter-loop = "allow" 74 | 75 | # TODO: Caused by zid and coff are used as u32 and usize interchangably. 76 | cast-lossless = "allow" 77 | cast-possible-truncation = "allow" 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OneDrive as a block device 2 | 3 | :warning: This project is in beta stage. 4 | 5 | ## Audience 6 | 7 | If you are not sure whether this project fits your need, then it does not. You 8 | are probably looking for 9 | [OneDrive Online](https://onedrive.live.com/) or sync and FUSE implementations 10 | like [rclone](https://github.com/rclone/rclone). 11 | 12 | This project may be helpful for :penguin: *real nerds* :penguin: who enjoy 13 | wacky block device stacking, intend to leverage block level encryption or their 14 | existing BTRFS backup infrastructure, or explore fresh new bugs in BTRFS zoned 15 | mode, with the cost of *everything*. 16 | 17 | ## Installation 18 | 19 | System requirements: 20 | 21 | - Linux >= 5.19 is required for io-uring with `IORING_SETUP_SQE128` support. 
22 | 23 | - Kernel driver `ublk_drv` and zoned block device support should be enabled. 24 | Most distributions like Arch Linux and NixOS unstable meet these requirements 25 | by default. You can check your system by: 26 | 27 | ```console 28 | $ zgrep -E 'CONFIG_BLK_DEV_UBLK|CONFIG_BLK_DEV_ZONED' /proc/config.gz 29 | CONFIG_BLK_DEV_ZONED=y 30 | CONFIG_BLK_DEV_UBLK=m 31 | ``` 32 | If you see the same result, your kernel is probably supported. 33 | 34 | - You may need to run `sudo modprobe ublk_drv` manually to load the driver 35 | first. This is not required for running orb in the shipped systemd service or 36 | via NixOS module, which does this automatically. 37 | 38 | ### Nix/NixOS (flake) 39 | 40 | This project is packaged in Nix flake. Here's the simplified output graph: 41 | ``` 42 | ├───nixosModules 43 | │ ├───default: Alias to `orb`. 44 | │ └───orb: The NixOS module. 45 | └───packages 46 | ├───x86_64-linux 47 | │ ├───default: Alias to `orb`. 48 | │ ├───orb: The main program with systemd units. 49 | │ ├───cryptsetup-format-zoned: workaround script for cryptsetup-luksFormat on zoned devices. 50 | │ └───ublk-chown-unprivileged: The optional utility for unprivileged ublk. 51 | [..more Linux platforms are supported..] 52 | ``` 53 | 54 |
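If you just want to try a package without wiring up the NixOS module, the flake outputs listed above can be built directly. This is only a sketch: it assumes Nix flakes are enabled on your machine, and `--help` is used purely as a quick smoke test:

```console
$ nix build github:oxalica/orb#orb
$ ./result/bin/orb --help
```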
55 | 56 | Example configurations 57 | 58 | To use the orb service, add the flake input `github:oxalica/orb`, and import 59 | its NixOS modules. 60 | ```nix 61 | # Example flake.nix for demonstration. Apply these changes to your own flake. 62 | { 63 | inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 64 | inputs.orb.url = "github:oxalica/orb"; 65 | 66 | outputs = { nixpkgs, orb, ... }: { 67 | nixosConfigurations.your-system = nixpkgs.lib.nixosSystem { 68 | system = "x86_64-linux"; 69 | modules = [ 70 | orb.nixosModules.orb 71 | ./path/to/your/configuration.nix 72 | ]; 73 | }; 74 | }; 75 | } 76 | ``` 77 | 78 | Now you can use the module in your `configuration.nix`: 79 | ```nix 80 | { ... }: 81 | { 82 | services.orb.instances = { 83 | # The instance name. It corresponds to the systemd service 84 | # `orb@my-device.service`. By default it will not be automatically started. 85 | "my-device".settings = { 86 | # Required device id. It's recommended to start at 80. 87 | # This creates block device `/dev/ublkb80`. 88 | ublk.id = 80; 89 | # Other settings and their defaults can be seen in 90 | # ./contrib/config-onedrive.example.toml 91 | device = { 92 | dev_size = "1TiB"; 93 | zone_size = "256MiB"; 94 | min_chunk_size = "1MiB"; 95 | max_chunk_size = "256MiB"; 96 | }; 97 | backend.onedrive.remote_dir = "/orb"; 98 | }; 99 | }; 100 | 101 | # If you want to mount the block device, you can create systemd mounts. 102 | # This is an example. 103 | systemd.mounts = [ 104 | { 105 | type = "btrfs"; 106 | # Fill in your filesystem UUID after mkfs. 107 | what = "/dev/disk/by-uuid/11111111-2222-3333-4444-555555555555"; 108 | where = "/mnt/my-mount-point"; 109 | # Do not forget dependencies. 110 | requires = [ "orb@my-device.service" ]; 111 | after = [ "orb@my-device.service" ]; 112 | # It's recommended to set `noatime` and `compress` to reduce write 113 | # frequency and amplification. 114 | options = "noatime,compress=zstd:7"; 115 | } 116 | ]; 117 | } 118 | ``` 119 | 120 | Note that the service only works after the first-time login and setup. See the 121 | following sections for details. 122 | 123 |
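After rebuilding your system, the instance from the example above can be started and checked manually. A minimal sketch: it assumes the first-time login described in the sections below has already been done, and that `ublk.id = 80` as configured above (so the device is `/dev/ublkb80`):

```console
# systemctl start orb@my-device.service
# lsblk /dev/ublkb80
```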
124 | 125 | ### Other Linux distributions 126 | 127 | You need the following dependencies installed with your package manager: 128 | - Rust >= 1.76 129 | - pkg-config 130 | - openssl 131 | 132 | Build command: `cargo build --release` 133 | 134 | [`contrib/orb@.example.service`](./contrib/orb@.example.service) 135 | is the example template systemd service to install. 136 | The instance configurations are located at `/etc/orb/<instance>.toml`, whose format is 137 | documented in 138 | [`./contrib/config-onedrive.example.toml`](./contrib/config-onedrive.example.toml). 139 | Once configured and logged in (see the next section), run 140 | `systemctl start orb@<instance>.service` to start the service. 141 | 142 | ## First time login 143 | 144 | The service configuration does not contain the login credential. It must be 145 | set up interactively for the first time; the service will then rotate the 146 | credentials automatically unless the user revokes the permission, or after a 147 | long offline time (seems to be >1 month, but is determined by Microsoft). 148 | 149 | 1. First, you need to know that this project (orb) is a third-party program which 150 | accesses your files on Microsoft OneDrive on your behalf, to provide a block 151 | device interface as a service. Your files and/or data on your Microsoft 152 | OneDrive may be lost due to program bugs or other reasons. We provide no 153 | warranties. By following the login steps below, you acknowledge this and 154 | use orb at your own risk. 155 | 156 | 2. We cannot provide an "official App/Client ID" without risking impersonation 157 | because this project is open source and free to distribute. So you need to 158 | [register your own App on Microsoft 159 | Azure](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationsListBlade). 160 | 161 | On the registration page, 162 | - In the "Supported account types" section, select "Personal Microsoft accounts 163 | only". Other accounts are currently unsupported. 164 | - In the "Redirect URI (optional)" section, select "Public client/native 165 | (mobile & desktop)", and enter the following URI: 166 | ```text 167 | http://localhost 168 | ``` 169 | It must be exactly this (it's `http`, not `https`), or you may fail the 170 | next step. 171 | 172 | Then click "Register"; it will jump to the registered App information page 173 | on success. In the "Essential" section, copy the UUID in the "Application 174 | (client) ID" field. This is the Client ID to be used in the next step. 175 | Note that one App can be used with multiple accounts, multiple times. You 176 | almost never need to register more than one App. 177 | 178 | 3. Log in with this command, run as root, with the arguments filled in: 179 | ```console 180 | # orb login --systemd --client-id <client-id> <instance> 181 | ``` 182 | `<instance>` is the instance name of your systemd service (for example, if you 183 | set up `/etc/orb/foo.toml`, then `foo` is the instance name) or the name in the NixOS 184 | module setting `services.orb.instances.<instance>`. 185 | 186 | It will print a URL; you need to open it in your browser and follow 187 | the interactive login steps to log into your Microsoft account with 188 | OneDrive. 189 | 190 | The credential will be saved under `/var/lib/orb/<instance>`, owned by 191 | root, and cannot be accessed by non-root users. It will be rotated by the 192 | service; please never copy or save it outside the local machine. If you 193 | need to log in to the same account on two machines, log in twice.
194 | 195 | :warning: 196 | You must not serve the same remote directory simultaneously in multiple 197 | instances (or machines), or it will cause a data race and your data will be 198 | corrupted. orb will try its best to detect and prevent such racing serving. 199 | 200 | 4. On success, the web page will redirect to a mostly empty page with only one line: 201 | ```text 202 | Successfully logined. This page can be closed. 203 | ``` 204 | 205 | The command should exit normally with the credential saved. Now you are ready 206 | to start the orb service. 207 | 208 | ## Use the emulated block device 209 | 210 | Once you have logged in and started the service successfully, you are ready to use it. 211 | Usually you need to create a filesystem on the emulated block device, and this 212 | is almost the same as the setup for a fresh hard disk, with a few 213 | exceptions: 214 | 215 | - The emulated device is under `/dev/ublkb<ID>` where `<ID>` is specified in 216 | your configuration `ublk.id`. 217 | 218 | - The device is a 219 | [zoned device](https://zonedstorage.io/docs/introduction/zoned-storage) 220 | (aka. ZBC/ZBD/ZNS, host-managed SMR disks) due to API restrictions and 221 | performance reasons. Only a few filesystems and/or device mappers support it, 222 | eg. dm-crypt, F2FS and BTRFS. 223 | 224 | - It has high latency and low throughput depending on your network. Doing 225 | active work on it should be avoided. It can be used, for example, for 226 | backup purposes. 227 | 228 | - :warning: Since the block device is emulated, you must `umount` the 229 | filesystem on it before shutting down the backing device service 230 | (`orb@<instance>.service`), or you will lose your last written data. This 231 | can be enforced by systemd mounts with a `BindsTo=` dependency. 232 | 233 | ### Caveats on deletion and space usage 234 | 235 | Due to limitations of the OneDrive API, permanent deletion cannot be done via 236 | the API. You may need to regularly "Empty recycle bin" on [OneDrive 237 | online](https://onedrive.live.com) to free the occupied capacity. 238 | 239 | :warning: You MUST not "Restore" any files under the directory managed by the 240 | orb service (`backend.onedrive.remote_dir`). Otherwise, it may break filesystem 241 | consistency and your data may be lost. 242 | 243 | ### Example: set up encryption via LUKS/dm-crypt 244 | 245 |
246 | 247 | Details 248 | 249 | 250 | :warning: cryptsetup does not and probably will not support zoned devices 251 | natively, because of the non-trivial handling logic; see 252 | [this issue](https://gitlab.com/cryptsetup/cryptsetup/-/issues/877) and 253 | [this merge request](https://gitlab.com/cryptsetup/cryptsetup/-/merge_requests/638). 254 | Generally you should avoid this unsupported usage, unless there is no other way 255 | around it. 256 | 257 | :warning: Of course, this will destroy all of your data on the emulated device, 258 | aka. the remote directory in OneDrive holding the data. 259 | 260 | cryptsetup does not support formatting zoned devices, but dm-crypt supports them. 261 | We need to format and place the LUKS2 header manually; then it can be 262 | opened and/or closed in the normal way. For convenience, there is a script 263 | under 264 | [`./contrib/cryptsetup-format-zoned.sh`](./contrib/cryptsetup-format-zoned.sh) 265 | to mimic `cryptsetup luksFormat` as a workaround. Run: 266 | 267 | ```console 268 | # ./contrib/cryptsetup-format-zoned.sh /dev/ublkb<ID> # Use a password. 269 | OR 270 | # ./contrib/cryptsetup-format-zoned.sh /dev/ublkb<ID> /path/to/key/file # Use a key file. 271 | ``` 272 | 273 | Alternatively, you can run the script via the flake package: 274 | ```console 275 | $ nix shell github:oxalica/orb#cryptsetup-format-zoned -c sudo cryptsetup-format-zoned /dev/ublkb<ID> 276 | ``` 277 | 278 | Note that editing the header, ie. adding or removing keys, also requires careful 279 | manual operations. You need to do it yourself when needed. 280 | 281 | After formatting the block device, you can open and/or close it in the normal 282 | way: 283 | ```console 284 | # cryptsetup luksOpen /dev/ublkb<ID> my-device-unencrypted 285 | # cryptsetup close my-device-unencrypted 286 | ``` 287 | 288 | If you are using key files, you can also use systemd-cryptsetup services to 289 | manage dm-crypt. This is useful when you want to specify dependencies on 290 | `orb@<instance>.service` and downstream services, eg. backup services. 291 | ```nix 292 | { ... }: 293 | { 294 | environment.etc."crypttab".text = '' 295 | mydecrypteddev /dev/ublkb<ID> /path/to/key/file noauto 296 | ''; 297 | systemd.services."systemd-cryptsetup@mydecrypteddev" = { 298 | # Inform Nix that this is an overriding unit for an auto-generated one. 299 | overrideStrategy = "asDropin"; 300 | # Specify dependencies on the orb service. 301 | bindsTo = [ "orb@my-instance.service" ]; 302 | after = [ "orb@my-instance.service" ]; 303 | }; 304 | } 305 | ``` 306 | 307 |
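With the crypttab entry and the unit override above in place, the mapping can then be opened and closed through the generated systemd unit instead of calling cryptsetup by hand. A sketch, reusing the placeholder names `mydecrypteddev` and `my-instance` from the example above:

```console
# systemctl start systemd-cryptsetup@mydecrypteddev.service
# ls -l /dev/mapper/mydecrypteddev
# systemctl stop systemd-cryptsetup@mydecrypteddev.service
```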
308 | 309 | ### Example: format it as BTRFS 310 | 311 |
312 | 313 | Details 314 | 315 | 316 | :warning: Of course, this will destroy all of your data on the emulated device, 317 | aka. the remote directory in OneDrive holding the data. 318 | 319 | It is recommended to format BTRFS with the `block-group-tree` feature enabled, to 320 | dramatically reduce mounting time (~50s to ~2s). You need btrfs-progs >= 6.8.1, 321 | which has [a relevant bug](https://github.com/kdave/btrfs-progs/issues/765) fixed. 322 | 323 | ```console 324 | # mkfs.btrfs /dev/ublkb<ID> -O block-group-tree 325 | ``` 326 | 327 | The `zoned` feature will be automatically detected and enabled without manual 328 | specification. 329 | 330 | Now you can mount it and do read/write operations. These are the recommended mount 331 | options (atime disabled, high-level zstd compression enabled): 332 | ```console 333 | sudo mount -t btrfs -o noatime,compress=zstd:7 /dev/ublkb<ID> /mnt/my-mount-point 334 | ``` 335 | 336 |
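To double-check that the kernel exposes the device as zoned, and to inspect the zone layout and space usage before or after `mkfs.btrfs`, something like the following can be used. This is a sketch; `<ID>` and the mount point are the placeholders used above, and the exact output depends on your kernel and util-linux/btrfs-progs versions:

```console
$ cat /sys/block/ublkb<ID>/queue/zoned
$ sudo blkzone report --count 4 /dev/ublkb<ID>
$ sudo btrfs filesystem usage /mnt/my-mount-point
```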
337 | 338 | ## License 339 | 340 | The sub-package `orb-ublk` and `ublk-chown-unprivileged` (directory 341 | `/orb-ublk`, `/ublk-chown-unprivileged` and the whole sub-tree of them) 342 | are licensed under either of [Apache License, Version 343 | 2.0](./orb-ublk/LICENSE-APACHE) or [MIT license](./orb-ublk/LICENSE-MIT) at 344 | your option. 345 | 346 | The main package (all other files in the repository except content of 347 | `/orb-ublk` and/or `/ublk-chown-unprivileged` directory) is licensed under 348 | [GNU General Public License v3.0](./LICENSE-GPL-3.0) or (at your option) later 349 | versions. 350 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "completion")] 2 | #[allow(dead_code)] 3 | #[path = "src/cli.rs"] 4 | mod cli; 5 | 6 | fn main() { 7 | // Do NOT rerun on src changes. 8 | println!("cargo:rerun-if-changed=build.rs"); 9 | 10 | println!("cargo:rerun-if-env-changed=CFG_RELEASE"); 11 | if std::env::var("CFG_RELEASE").is_err() { 12 | let version = std::env::var("CARGO_PKG_VERSION").unwrap(); 13 | println!("cargo:rustc-env=CFG_RELEASE={version}"); 14 | } 15 | 16 | #[cfg(feature = "completion")] 17 | { 18 | use clap::ValueEnum; 19 | use clap_complete::{generate_to, shells::Shell}; 20 | 21 | let out_dir = std::path::Path::new("completions"); 22 | let pkg_name = std::env::var("CARGO_PKG_NAME").expect("have CARGO_PKG_NAME"); 23 | let mut cmd = ::command(); 24 | for &shell in Shell::value_variants() { 25 | let out_dir = out_dir.join(shell.to_string()); 26 | std::fs::create_dir_all(&out_dir).expect("create_dir_all"); 27 | if let Err(err) = generate_to(shell, &mut cmd, &pkg_name, &out_dir) { 28 | panic!("failed to generate completion for {shell}: {err}"); 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | doc-valid-idents = [ 2 | "OneDrive", 3 | "READ", "WRITE", "ZONE_RESET", "ZONE_RESET_ALL", "ZONE_APPEND", 4 | "..", 5 | ] 6 | -------------------------------------------------------------------------------- /contrib/19-ublk-unprivileged.example.rules: -------------------------------------------------------------------------------- 1 | KERNEL=="ublk-control", MODE="0666", OPTIONS+="static_node=ublk-control" 2 | ACTION=="add",KERNEL=="ublk[bc]*",RUN+="/usr/libexec/ublk-chown-unprivileged /dev/%k" 3 | -------------------------------------------------------------------------------- /contrib/config-memory.example.toml: -------------------------------------------------------------------------------- 1 | # This is an example configuration for a virtual block device in memory, 2 | # which is mainly for testing and benchmarking the chunking implementation 3 | # (frontend). 4 | 5 | # `[device]` and `[ublk]` section is the same for all backends. 6 | # See `config-onedrive.example.toml` for details. 7 | [device] 8 | dev_size = "1GiB" 9 | zone_size = "8MiB" 10 | min_chunk_size = "1MiB" 11 | max_chunk_size = "8MiB" 12 | 13 | [ublk] 14 | unprivileged = true 15 | 16 | # Use 'memory' backend. 17 | # Exact one backend must be chosen. 18 | [backend.memory] 19 | # This backend has no sub-configurations currently, but the section header must 20 | # not be omitted. 
21 | -------------------------------------------------------------------------------- /contrib/config-onedrive.example.toml: -------------------------------------------------------------------------------- 1 | # This is an example configuration serving OneDrive storage as a block device. 2 | # Note that login credentials are not passed here, see README.md for details. 3 | # 4 | # Commented options are not required and have default values as in the comment. 5 | # Uncommented options are required and values are given as examples. 6 | 7 | # Device parameters and geometry. 8 | # Sizes below can be written as integers for byte unit, or a string with usual 9 | # SI-units. They must be multiples of logical sectors (512B). 10 | [device] 11 | # Total device size, must be a multiple of `zone_size`. 12 | dev_size = "4GiB" 13 | # The size of a zone, the minimal reset (delete) unit. It cannot be changed 14 | # without losing all the data. Some filesystems have requirement on it, eg. 15 | # BTRFS requires it to be `4MiB..=4GiB`. 16 | zone_size = "256MiB" 17 | # The minimal size for a standalone chunk to minimize fragmentation, must be 18 | # less than `max_chunk_size`. Chunks smaller than it will be fully rewritten on 19 | # committing until they grow larger than this limit. 20 | min_chunk_size = "1MiB" 21 | # The maximum size a chunk can be, also the maximum buffer size for each zone, 22 | # must be less than `zone_size`. When a trailing chunk in a zone is grown 23 | # exceeding this size, following write requests will wait the chunk to be 24 | # committed to backend before continue. 25 | max_chunk_size = "128MiB" 26 | 27 | # The maximum number of concurrenct download streams. 28 | #max_concurrent_streams = 16 29 | 30 | # The maximum number of concurrent upload streams. The maximum buffer memory 31 | # consumption can be calculated by `max_concurrent_commits * max_chunk_size`. 32 | # Further WRITE/APPEND/FINISH will block until some buffers being committed. 33 | #max_concurrent_commits = 8 34 | 35 | # ublk device and queue configurations. 36 | [ublk] 37 | # The device id, ie. the integer part in `/dev/ublk{b,c}X`, to use. 38 | # A negative id indicates auto-allocation. 39 | #id = -1 40 | 41 | # Create an unprivileged block device, this requires a custom udev rules to 42 | # change permission automatically. An unprivileged device also have a hard 43 | # limit 10 seconds to complete any requests, or the service process will be 44 | # killed by the ublk_drv driver. Using unprivileged block device also disables 45 | # IO_FLUSHER state setting (see prctl(2)), which can potentially cause kernel 46 | # deadlock under memory pressure. 47 | # 48 | # See: 49 | # https://github.com/ublk-org/ublksrv?tab=readme-ov-file#use-unprivileged-ublk-in-docker 50 | # https://man7.org/linux/man-pages/man2/prctl.2.html 51 | #unprivileged = false 52 | 53 | # The max concurrency of the request queue. 54 | #queue_depth = 64 55 | 56 | # Use 'onedrive' backend. 57 | # Exact one backend must be chosen. 58 | [backend.onedrive] 59 | 60 | # The remote directory path for storing data. It must have no trailing slashes. 61 | # It must not be root, to keep this application scoped. 62 | remote_dir = "/orb" 63 | 64 | # The directory to store states, including credentials. It is taken verbatimly 65 | # if it is non-empty. Otherwise, following values are checked with environment 66 | # substitution: 67 | # 1. `$STATE_DIRECTORY`, if it is set. 68 | # 2. `$XDG_STATE_HOME/orb`, if `$XDG_STATE_HOME` is set. 69 | # 3. 
`$HOME/.local/state/orb`, if `$HOME` is set or can be inferred. 70 | # 4. Fail. 71 | # 72 | # The directory will be created recursively if not exists, and it should be 73 | # writable. 74 | #state_dir = "" 75 | 76 | # Connection timeout in seconds. 77 | #connect_timeout_sec = 15 78 | 79 | # The size of each part request for large uploads. 80 | # It will be clamped to [4MB, 60MiB] and aligned to 320KiB. 81 | #upload_part_max_size = "60MiB" 82 | -------------------------------------------------------------------------------- /contrib/cryptsetup-format-zoned.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script is the workaround for cryptsetup-luksFormat on zoned device 3 | # See: https://gitlab.com/cryptsetup/cryptsetup/-/issues/877 4 | set -euo pipefail 5 | 6 | if [[ $# < 1 || ! -b "$1" ]]; then 7 | echo "Usage: $0 [CRYPTSETUP_OPTS...]" >&2 8 | exit 1 9 | fi 10 | 11 | if [[ $UID -ne 0 ]]; then 12 | echo "WARNING: The script is not running as root. Operations may fail." >&2 13 | fi 14 | 15 | bdev="$1" 16 | shift 17 | zone_size="$(lsblk --noheadings --nodeps --bytes -o ZONE-SZ "$bdev")" 18 | if [[ ! "$zone_size" =~ [0-9]+ ]]; then 19 | echo "Invalid zone size for $bdev: $zone_size" >&2 20 | exit 1 21 | fi 22 | 23 | header_size=$(( 16 << 20 )) 24 | format_args=(--luks2-keyslots-size 15M) 25 | if (( zone_size < header_size )); then 26 | header_size=$zone_size 27 | format_args=() 28 | fi 29 | 30 | echo -n "Reset the first zone of $bdev and format it as LUKS? This will kill all data on the device [y/N]: " >&2 31 | read -r line 32 | if [[ "$line" != [yY] ]]; then 33 | echo "Cancelled" >&2 34 | exit 1 35 | fi 36 | 37 | header="$(mktemp /dev/shm/header.XXX)" 38 | trap 'rm -vf "$header"' EXIT 39 | truncate -s "$header_size" "$header" 40 | 41 | set -x 42 | blkzone reset --offset 0 --count 1 "$bdev" 43 | cryptsetup luksFormat --header "$header" --offset "$(( zone_size >> 9 ))" "${format_args[@]}" "$bdev" "$@" 44 | dd if="$header" of="$bdev" bs=4k count=$(( header_size >> 12 )) oseek=0 conv=notrunc,sync oflag=direct 45 | -------------------------------------------------------------------------------- /contrib/orb.nix: -------------------------------------------------------------------------------- 1 | { self }: 2 | { lib, config, pkgs, ... }: 3 | let 4 | inherit (lib) 5 | literalExpression 6 | literalMD 7 | mdDoc 8 | mkIf 9 | mkOption 10 | types 11 | ; 12 | 13 | cfg = config.services.orb; 14 | 15 | sizeType = types.either types.ints.unsigned types.str; 16 | 17 | lowIdThreshould = 50; 18 | 19 | toml = pkgs.formats.toml {}; 20 | mkConfigFile = name: config: (toml.generate name config).overrideAttrs (old: { 21 | buildCommand = old.buildCommand + '' 22 | ${lib.getExe cfg.package} verify -c $out 23 | ''; 24 | }); 25 | 26 | settingsType = types.submodule { 27 | freeformType = toml.type; 28 | 29 | options = { 30 | ublk = { 31 | id = mkOption { 32 | type = types.ints.unsigned; 33 | example = "80"; 34 | description = mdDoc '' 35 | The device id, ie. the integer part in `/dev/ublk{b,c}X`, to use. 36 | 37 | Low ids (<${toString lowIdThreshould}) are not recommended and will generate 38 | warnings, to avoid colliding with auto-generated ids. 39 | ''; 40 | }; 41 | unprivileged = mkOption { 42 | type = types.enum [ false ]; 43 | default = false; 44 | description = mdDoc '' 45 | Whether to create an unprivileged block device. This must be 46 | `false` since this module generates privileged systemd services. 
47 | ''; 48 | }; 49 | }; 50 | device = { 51 | dev_size = mkOption { 52 | type = sizeType; 53 | description = mdDoc '' 54 | Total device size, must be a multiple of `zone_size`. 55 | ''; 56 | }; 57 | zone_size = mkOption { 58 | type = sizeType; 59 | description = mdDoc '' 60 | The size of a zone, the minimal reset (delete) unit. It cannot be changed 61 | without losing all the data. Some filesystems have requirement on it, eg. 62 | BTRFS requires it to be `4MiB..=4GiB`. 63 | ''; 64 | }; 65 | min_chunk_size = mkOption { 66 | type = sizeType; 67 | description = mdDoc '' 68 | The minimal size for a standalone chunk to minimize fragmentation, must be 69 | less than `max_chunk_size`. Chunks smaller than it will be fully rewritten on 70 | committing until they grow larger than this limit. 71 | ''; 72 | }; 73 | max_chunk_size = mkOption { 74 | type = sizeType; 75 | description = mdDoc '' 76 | The maximum size a chunk can be, also the maximum buffer size for each zone, 77 | must be less than `zone_size`. When a trailing chunk in a zone is grown 78 | exceeding this size, following write requests will wait the chunk to be 79 | committed to backend before continue. 80 | ''; 81 | }; 82 | }; 83 | # `onedrive.state_dir` must be `null` but toml generators will fail 84 | # instead of skipping. 85 | }; 86 | }; 87 | 88 | in { 89 | options.services.orb = { 90 | enable = mkOption { 91 | type = lib.types.bool; 92 | description = "Whether to enable orb network block device service."; 93 | default = cfg.instances != {}; 94 | defaultText = literalExpression "config.services.orb.instances != {}"; 95 | example = true; 96 | }; 97 | 98 | package = mkOption { 99 | description = mdDoc "The orb package to install and for systemd services"; 100 | type = types.package; 101 | default = self.packages.${pkgs.system}.orb; 102 | defaultText = literalMD "orb package from its flake output"; 103 | }; 104 | 105 | instances = mkOption { 106 | description = mdDoc "Set of orb instances."; 107 | default = {}; 108 | type = with types; 109 | attrsOf ( 110 | submodule { 111 | options = { 112 | settings = mkOption { 113 | description = "orb configurations."; 114 | type = settingsType; 115 | example = { 116 | ublk.id = 50; 117 | device = { 118 | dev_size = "1TiB"; 119 | zone_size = "256MiB"; 120 | min_chunk_size = "1MiB"; 121 | max_chunk_size = "256MiB"; 122 | max_concurrent_streams = 16; 123 | max_concurrent_commits = 4; 124 | }; 125 | backend.onedrive.remote_dir = "/orb"; 126 | }; 127 | }; 128 | }; 129 | } 130 | ); 131 | }; 132 | }; 133 | 134 | config = mkIf cfg.enable { 135 | assertions = let 136 | groups = lib.groupBy 137 | (name: toString (cfg.instances.${name}.settings.ublk.id or null)) 138 | (lib.attrNames cfg.instances); 139 | in lib.mapAttrsToList (id: names: { 140 | assertion = lib.length names == 1; 141 | message = "orb instances ublk.id collision on ${id}: ${lib.concatStringsSep ", " names}"; 142 | }) groups; 143 | 144 | warnings = 145 | lib.filter (msg: msg != null) 146 | (lib.mapAttrsToList (name: instance: 147 | let id = instance.settings.ublk.id; in 148 | if id < lowIdThreshould then 149 | "orb instance '${name}' uses a low id ${toString id} < ${toString lowIdThreshould} risking collision" 150 | else 151 | null 152 | ) cfg.instances); 153 | 154 | systemd.packages = [ cfg.package ]; 155 | environment.systemPackages = [ cfg.package ]; 156 | 157 | # Do not accidentally stop active filesystems. 
158 | systemd.services."orb@" = { 159 | overrideStrategy = "asDropin"; 160 | restartIfChanged = false; 161 | stopIfChanged = false; 162 | }; 163 | 164 | environment.etc = lib.mapAttrs' (name: instance: { 165 | name = "orb/${name}.toml"; 166 | value.source = mkConfigFile "${name}.toml" instance.settings; 167 | }) cfg.instances; 168 | }; 169 | } 170 | -------------------------------------------------------------------------------- /contrib/orb@.example.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=OneDrive Block Device Daemon (%i) 3 | Requires=modprobe@ublk_drv.service 4 | After=network-online.service modprobe@ublk_drv.service 5 | 6 | [Service] 7 | Type=notify-reload 8 | ExecStart=/usr/bin/orb serve --config-file "${CONFIGURATION_DIRECTORY}/%i.toml" 9 | StateDirectory="orb/%i" 10 | StateDirectoryMode=0700 11 | ConfigurationDirectory=orb 12 | # Save debug dumps in the cache directory, unified, because they have timestamp 13 | # suffixes. TMPDIR is otherwise readonly because of PrivateTmp. 14 | CacheDirectory=orb 15 | CacheDirectoryMode=0700 16 | Environment="RUST_BACKTRACE=1" "TMPDIR=%C/orb" 17 | 18 | CapabilityBoundingSet=CAP_SYS_ADMIN CAP_SYS_RESOURCE 19 | DeviceAllow=/dev/ublk-control rw 20 | DeviceAllow=char-ublk-char rw 21 | LockPersonality=yes 22 | MemoryDenyWriteExecute=yes 23 | NoNewPrivileges=yes 24 | PrivateTmp=yes 25 | ProtectClock=yes 26 | ProtectControlGroups=yes 27 | ProtectHome=yes 28 | ProtectHostname=yes 29 | ProtectKernelLogs=yes 30 | ProtectKernelModules=yes 31 | ProtectKernelTunables=yes 32 | ProtectProc=invisible 33 | ProtectSystem=strict 34 | RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 35 | RestrictNamespaces=yes 36 | RestrictRealtime=yes 37 | RestrictSUIDSGID=yes 38 | SystemCallArchitectures=native 39 | SystemCallErrorNumber=EPERM 40 | SystemCallFilter=@system-service 41 | SystemCallFilter=~@privileged 42 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | (import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { 2 | src = builtins.fetchGit ./.; 3 | }).defaultNix 4 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "nixpkgs": { 4 | "locked": { 5 | "lastModified": 1732014248, 6 | "narHash": "sha256-y/MEyuJ5oBWrWAic/14LaIr/u5E0wRVzyYsouYY3W6w=", 7 | "owner": "NixOS", 8 | "repo": "nixpkgs", 9 | "rev": "23e89b7da85c3640bbc2173fe04f4bd114342367", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "owner": "NixOS", 14 | "ref": "nixos-unstable", 15 | "repo": "nixpkgs", 16 | "type": "github" 17 | } 18 | }, 19 | "root": { 20 | "inputs": { 21 | "nixpkgs": "nixpkgs" 22 | } 23 | } 24 | }, 25 | "root": "root", 26 | "version": 7 27 | } 28 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | rec { 2 | description = "OneDrive as a block device"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 6 | }; 7 | 8 | outputs = { self, nixpkgs }: let 9 | inherit (nixpkgs) lib; 10 | eachSystem = 11 | lib.genAttrs ( 12 | lib.filter 13 | (lib.hasSuffix "-linux") 14 | lib.systems.flakeExposed); 15 | in { 16 | packages = eachSystem (system: let 17 | pkgs = 
nixpkgs.legacyPackages.${system}; 18 | rev = self.rev or (lib.warn "Git changes are not committed" (self.dirtyRev or "dirty")); 19 | in rec { 20 | default = orb; 21 | orb = with pkgs; rustPlatform.buildRustPackage rec { 22 | pname = "orb"; 23 | version = "git-${rev}"; 24 | src = self; 25 | 26 | cargoLock.lockFile = ./Cargo.lock; 27 | 28 | nativeBuildInputs = [ pkg-config installShellFiles ]; 29 | buildInputs = [ openssl ]; 30 | 31 | buildFeatures = [ "completion" ]; 32 | 33 | env.CFG_RELEASE = version; 34 | 35 | postInstall = '' 36 | mkdir -p $out/etc/systemd/system 37 | substitute ./contrib/orb@.example.service $out/etc/systemd/system/orb@.service \ 38 | --replace-fail '/usr/bin/orb' "$out/bin/orb" 39 | 40 | installShellCompletion \ 41 | --bash completions/bash/${pname}.bash \ 42 | --fish completions/fish/${pname}.fish \ 43 | --zsh completions/zsh/_${pname} 44 | ''; 45 | 46 | meta = { 47 | inherit description; 48 | homepage = "https://github.com/oxalica/orb"; 49 | mainProgram = "orb"; 50 | license = [ lib.licenses.gpl3Plus ]; 51 | platforms = lib.platforms.linux; 52 | }; 53 | }; 54 | 55 | ublk-chown-unprivileged = with pkgs; rustPlatform.buildRustPackage { 56 | pname = "ublk-chown-unprivileged"; 57 | version = "git-${rev}"; 58 | src = self; 59 | 60 | cargoLock.lockFile = ./Cargo.lock; 61 | 62 | buildAndTestSubdir = "ublk-chown-unprivileged"; 63 | 64 | postInstall = '' 65 | mv $out/bin $out/libexec 66 | mkdir -p $out/etc/udev/rules.d 67 | substitute ./contrib/19-ublk-unprivileged.example.rules $out/etc/udev/rules.d/19-ublk-unprivileged.rules \ 68 | --replace-fail '/usr/libexec/' "$out/libexec/" 69 | ''; 70 | 71 | meta = { 72 | description = "udev rules to enable unprivileged ublk usage"; 73 | homepage = "https://github.com/oxalica/orb"; 74 | license = with lib.licenses; [ mit asl20 ]; 75 | platforms = lib.platforms.linux; 76 | }; 77 | }; 78 | 79 | cryptsetup-format-zoned = with pkgs; writeShellApplication rec { 80 | name = "cryptsetup-format-zoned"; 81 | runtimeInputs = [ coreutils util-linux cryptsetup ]; 82 | text = builtins.readFile ./contrib/cryptsetup-format-zoned.sh; 83 | meta = { 84 | description = "Workaround script for cryptsetup-luksFormat on zoned devices"; 85 | mainProgram = name; 86 | license = with lib.licenses; [ gpl3Plus ]; 87 | }; 88 | }; 89 | }); 90 | 91 | devShells = eachSystem (system: { 92 | without-rust = 93 | with nixpkgs.legacyPackages.${system}; 94 | mkShell { 95 | nativeBuildInputs = [ pkg-config rustPlatform.bindgenHook ]; 96 | buildInputs = [ linuxHeaders openssl ]; 97 | env = { 98 | RUST_BACKTRACE = "1"; 99 | ORB_LOG = "orb=debug"; 100 | RUST_LOG = "orb_ublk=trace"; 101 | }; 102 | }; 103 | }); 104 | 105 | nixosModules = rec { 106 | default = orb; 107 | orb = import ./contrib/orb.nix { 108 | inherit self; 109 | }; 110 | }; 111 | }; 112 | } 113 | -------------------------------------------------------------------------------- /orb-ublk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "orb-ublk" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT or Apache-2.0" 6 | rust-version = "1.76" # result_option_inspect 7 | 8 | [features] 9 | default = [] 10 | generate-sys = ["dep:bindgen"] 11 | tokio = ["dep:tokio"] 12 | 13 | [[test]] 14 | name = "basic" 15 | required-features = ["tokio"] 16 | 17 | [[test]] 18 | name = "interrupt" 19 | harness = false 20 | 21 | [[example]] 22 | name = "loop" 23 | required-features = ["tokio"] 24 | 25 | [[example]] 26 | name = "zoned" 27 | required-features = 
["tokio"] 28 | 29 | [dependencies] 30 | bitflags = "2" 31 | io-uring = { version = "0.7", features = ["io_safety"] } 32 | rustix = { version = "1", features = ["event", "mm", "process"] } 33 | scopeguard = "1" 34 | tokio = { version = "1", features = ["net", "rt"], optional = true } 35 | tracing = "0.1" 36 | 37 | [build-dependencies] 38 | bindgen = { version = "0.71", optional = true } 39 | 40 | [dev-dependencies] 41 | anyhow = "1" 42 | bytesize = "2" 43 | clap = { version = "4", features = ["derive"] } 44 | ctrlc = "3" 45 | libtest-mimic = "0.8" 46 | rand = "0.9" 47 | rstest = "0.25" 48 | rustix = { version = "1", features = ["fs"] } 49 | serde = { version = "1", features = ["derive"] } 50 | serde_json = "1" 51 | tokio = { version = "1", features = ["time"] } 52 | tracing = "0.1" 53 | tracing-subscriber = { version = "0.3", features = ["env-filter", "tracing-log"] } 54 | xshell = "0.2" 55 | 56 | [lints.rust] 57 | missing_debug_implementations = "warn" 58 | 59 | [lints.clippy] 60 | pedantic = { level = "warn", priority = -1 } 61 | # Of course system calls can fail. 62 | missing-errors-doc = "allow" 63 | # Interop with generated constants 64 | cast-possible-truncation = "allow" 65 | cast-sign-loss = "allow" 66 | # Convenient for C structs. 67 | default-trait-access = "allow" 68 | # For semantics. 69 | items-after-statements = "allow" 70 | # It makes things more unreadable. 71 | transmute-ptr-to-ptr = "allow" 72 | 73 | # TODO 74 | missing-panics-doc = "allow" 75 | cast-lossless = "allow" 76 | wildcard-imports = "allow" 77 | -------------------------------------------------------------------------------- /orb-ublk/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /orb-ublk/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 2 | 3 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 4 | 5 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 6 | -------------------------------------------------------------------------------- /orb-ublk/README.md: -------------------------------------------------------------------------------- 1 | # Ergonomic interface library for block device in user space (ublk) 2 | 3 | ### License 4 | 5 | This package (the whole sub-tree of the directory containing this file) is 6 | licensed under either of [Apache License, Version 7 | 2.0](./orb-ublk/LICENSE-APACHE) or [MIT license](./orb-ublk/LICENSE-MIT) at 8 | your option. 9 | -------------------------------------------------------------------------------- /orb-ublk/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("cargo:rerun-if-changed=build.rs"); 3 | 4 | #[cfg(feature = "generate-sys")] 5 | generate("src/sys.rs"); 6 | } 7 | 8 | #[cfg(feature = "generate-sys")] 9 | fn generate(out_path: &str) { 10 | const HEADER_CONTENT: &str = " 11 | #include 12 | #include 13 | 14 | /* Workaround: https://github.com/rust-lang/rust-bindgen/issues/753#issuecomment-459851952 */ 15 | #define MARK_FIX_753(req_name) const __u32 Fix753_##req_name = req_name; 16 | MARK_FIX_753(UBLK_U_CMD_GET_QUEUE_AFFINITY) 17 | MARK_FIX_753(UBLK_U_CMD_GET_DEV_INFO) 18 | MARK_FIX_753(UBLK_U_CMD_ADD_DEV) 19 | MARK_FIX_753(UBLK_U_CMD_DEL_DEV) 20 | MARK_FIX_753(UBLK_U_CMD_START_DEV) 21 | MARK_FIX_753(UBLK_U_CMD_STOP_DEV) 22 | MARK_FIX_753(UBLK_U_CMD_SET_PARAMS) 23 | MARK_FIX_753(UBLK_U_CMD_GET_PARAMS) 24 | MARK_FIX_753(UBLK_U_CMD_START_USER_RECOVERY) 25 | MARK_FIX_753(UBLK_U_CMD_END_USER_RECOVERY) 26 | MARK_FIX_753(UBLK_U_CMD_GET_DEV_INFO2) 27 | MARK_FIX_753(UBLK_U_CMD_GET_FEATURES) 28 | 29 | MARK_FIX_753(UBLK_U_IO_FETCH_REQ) 30 | MARK_FIX_753(UBLK_U_IO_COMMIT_AND_FETCH_REQ) 31 | MARK_FIX_753(UBLK_U_IO_NEED_GET_DATA) 32 | "; 33 | 34 | #[derive(Debug)] 35 | struct CustomCallback; 36 | 37 | impl bindgen::callbacks::ParseCallbacks for CustomCallback { 38 | fn item_name(&self, original_item_name: &str) -> Option { 39 | Some(original_item_name.trim_start_matches("Fix753_").to_owned()) 40 | } 41 | 42 | fn add_derives(&self, info: &bindgen::callbacks::DeriveInfo<'_>) -> Vec { 43 | if info.name == "blk_zone" { 44 | return vec!["PartialEq".into(), "Eq".into()]; 45 | } 46 | Vec::new() 47 | } 48 | } 49 | 50 | 
bindgen::Builder::default() 51 | .header_contents("wrapper.h", HEADER_CONTENT) 52 | .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) 53 | .parse_callbacks(Box::new(CustomCallback)) 54 | .use_core() 55 | .allowlist_var("UBLK(?:SRV)?_.*|Fix753_.*") 56 | .allowlist_type("ublk(?:srv)?_.*|blk_zone(?:_type|_cond|)") 57 | // `blk_zone_{type,cond}` need no extra prefixes. 58 | .prepend_enum_name(false) 59 | .derive_default(true) 60 | .layout_tests(false) 61 | .generate() 62 | .expect("failed to bindgen") 63 | .write_to_file(out_path) 64 | .expect("failed to write bindgen output"); 65 | } 66 | -------------------------------------------------------------------------------- /orb-ublk/examples/loop.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io; 3 | use std::os::unix::fs::FileExt; 4 | use std::path::PathBuf; 5 | 6 | use anyhow::Context; 7 | use clap::Parser; 8 | use orb_ublk::runtime::TokioRuntimeBuilder; 9 | use orb_ublk::{ 10 | BlockDevice, ControlDevice, DeviceAttrs, DeviceBuilder, DeviceInfo, DeviceParams, 11 | DiscardParams, IoFlags, ReadBuf, Sector, Stopper, WriteBuf, 12 | }; 13 | use rustix::fs::{fallocate, FallocateFlags}; 14 | use rustix::io::Errno; 15 | 16 | /// Example loop device. 17 | #[derive(Debug, Parser)] 18 | struct Cli { 19 | backing_file: PathBuf, 20 | 21 | #[clap(long)] 22 | dev_id: Option, 23 | #[clap(long, default_value = "512")] 24 | logical_block_size: u64, 25 | #[clap(long, default_value = "4096")] 26 | physical_block_size: u64, 27 | 28 | #[clap(long)] 29 | discard: bool, 30 | #[clap(long)] 31 | user_copy: bool, 32 | #[clap(long)] 33 | privileged: bool, 34 | } 35 | 36 | fn main() -> anyhow::Result<()> { 37 | tracing_subscriber::fmt::init(); 38 | let cli = Cli::parse(); 39 | 40 | let file = File::options() 41 | .read(true) 42 | .write(true) 43 | .open(cli.backing_file) 44 | .context("failed to open backing file")?; 45 | let size = file 46 | .metadata() 47 | .context("failed to query backing file")? 
48 | .len(); 49 | let size_sectors = 50 | Sector::try_from_bytes(size).context("backing file size must be multiples of sectors")?; 51 | 52 | let ctl = ControlDevice::open() 53 | .context("failed to open control device, kernel module 'ublk_drv' not loaded?")?; 54 | let mut builder = DeviceBuilder::new(); 55 | builder.name("ublk-loop"); 56 | if !cli.privileged { 57 | builder.unprivileged(); 58 | } 59 | if cli.user_copy { 60 | builder.user_copy(); 61 | } 62 | let mut srv = builder 63 | .name("ublk-loop") 64 | .dev_id(cli.dev_id) 65 | .create_service(&ctl) 66 | .context("failed to create ublk device")?; 67 | let mut params = *DeviceParams::new() 68 | .dev_sectors(size_sectors) 69 | .logical_block_size(cli.logical_block_size) 70 | .physical_block_size(cli.physical_block_size) 71 | .io_min_size(cli.physical_block_size) 72 | .io_opt_size(cli.physical_block_size) 73 | .attrs(DeviceAttrs::VolatileCache) 74 | .set_io_flusher(cli.privileged); 75 | if cli.discard { 76 | params.discard(DiscardParams { 77 | alignment: Sector::SIZE as _, 78 | granularity: Sector::SIZE as _, 79 | max_size: Sector(1 << 30), 80 | max_write_zeroes_size: Sector(1 << 30), 81 | max_segments: 1, 82 | }); 83 | } 84 | let handler = LoopDev { file }; 85 | let ret = srv 86 | .serve(&TokioRuntimeBuilder, ¶ms, &handler) 87 | .context("service error"); 88 | handler.file.sync_all().context("failed to sync file")?; 89 | ret 90 | } 91 | 92 | struct LoopDev { 93 | file: File, 94 | } 95 | 96 | impl BlockDevice for LoopDev { 97 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 98 | tracing::info!(dev_id = dev_info.dev_id(), "device ready"); 99 | ctrlc::set_handler(move || stop.stop()).expect("failed to set Ctrl-C hook"); 100 | Ok(()) 101 | } 102 | 103 | async fn read(&self, off: Sector, buf: &mut ReadBuf<'_>, _flags: IoFlags) -> Result<(), Errno> { 104 | let mut buf2 = vec![0u8; buf.remaining()]; 105 | self.file 106 | .read_exact_at(&mut buf2, off.bytes()) 107 | .map_err(convert_err)?; 108 | buf.put_slice(&buf2)?; 109 | Ok(()) 110 | } 111 | 112 | async fn write(&self, off: Sector, buf: WriteBuf<'_>, _flags: IoFlags) -> Result { 113 | self.file 114 | .write_all_at(buf.as_slice().unwrap(), off.bytes()) 115 | .map_err(convert_err)?; 116 | Ok(buf.len()) 117 | } 118 | 119 | async fn flush(&self, _flags: IoFlags) -> Result<(), Errno> { 120 | self.file.sync_data().map_err(convert_err) 121 | } 122 | 123 | async fn discard(&self, off: Sector, len: usize, _flags: IoFlags) -> Result<(), Errno> { 124 | fallocate( 125 | &self.file, 126 | FallocateFlags::PUNCH_HOLE, 127 | off.bytes(), 128 | len as _, 129 | ) 130 | } 131 | 132 | async fn write_zeroes(&self, off: Sector, len: usize, _flags: IoFlags) -> Result<(), Errno> { 133 | fallocate( 134 | &self.file, 135 | FallocateFlags::PUNCH_HOLE, 136 | off.bytes(), 137 | len as _, 138 | ) 139 | } 140 | } 141 | 142 | #[allow(clippy::needless_pass_by_value)] 143 | fn convert_err(err: io::Error) -> Errno { 144 | Errno::from_io_error(&err).unwrap_or(Errno::IO) 145 | } 146 | -------------------------------------------------------------------------------- /orb-ublk/examples/management.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{ensure, Context}; 2 | use clap::Parser; 3 | use orb_ublk::ControlDevice; 4 | 5 | /// Ublk device management. 6 | #[derive(Debug, Parser)] 7 | enum Cli { 8 | /// Print all features supported by the current kernel driver. 9 | GetFeatures, 10 | /// Print the ublk device informantion at `dev_id`. 
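    /// (With clap's derive this surfaces as the `get-info <DEV_ID>` subcommand,
    /// e.g. `cargo run --example management -- get-info 0`; illustrative usage,
    /// not part of the original docs.)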
11 | GetInfo { dev_id: u32 }, 12 | /// Delete the ublk device at `dev_id`. 13 | Delete { dev_id: u32 }, 14 | /// Delete all ublk devices. 15 | DeleteAll, 16 | } 17 | 18 | fn main() -> anyhow::Result<()> { 19 | let cli = Cli::parse(); 20 | let ctl = ControlDevice::open() 21 | .context("failed to open control device, kernel module 'ublk_drv' not loaded?")?; 22 | match cli { 23 | Cli::GetFeatures => { 24 | let feat = ctl.get_features().context("failed to get features")?; 25 | println!("{feat:?}"); 26 | } 27 | Cli::GetInfo { dev_id } => { 28 | let info = ctl 29 | .get_device_info(dev_id) 30 | .context("failed to get device info")?; 31 | println!("{info:?}"); 32 | } 33 | Cli::Delete { dev_id } => { 34 | ctl.delete_device(dev_id) 35 | .context("failed to delete device")?; 36 | } 37 | Cli::DeleteAll => { 38 | let mut success = true; 39 | for ent in std::fs::read_dir("/dev").context("failed to read /dev")? { 40 | if let Some(dev_id) = (|| { 41 | ent.ok()? 42 | .file_name() 43 | .to_str()? 44 | .strip_prefix("ublkc")? 45 | .parse::() 46 | .ok() 47 | })() { 48 | eprintln!("deleting device {dev_id}"); 49 | if let Err(err) = ctl.delete_device(dev_id) { 50 | eprintln!("{err}"); 51 | success = false; 52 | } 53 | } 54 | } 55 | ensure!(success, "some operations failed"); 56 | } 57 | } 58 | Ok(()) 59 | } 60 | -------------------------------------------------------------------------------- /orb-ublk/examples/zoned.rs: -------------------------------------------------------------------------------- 1 | use std::fs::{self, File}; 2 | use std::io; 3 | use std::os::unix::fs::FileExt; 4 | use std::path::PathBuf; 5 | use std::sync::Mutex; 6 | 7 | use anyhow::{ensure, Context}; 8 | use bytesize::ByteSize; 9 | use clap::Parser; 10 | use orb_ublk::runtime::TokioRuntimeBuilder; 11 | use orb_ublk::{ 12 | BlockDevice, ControlDevice, DeviceAttrs, DeviceBuilder, DeviceInfo, DeviceParams, IoFlags, 13 | ReadBuf, Sector, Stopper, WriteBuf, Zone, ZoneBuf, ZoneCond, ZoneType, ZonedParams, 14 | }; 15 | use rustix::fs::{fallocate, FallocateFlags}; 16 | use rustix::io::Errno; 17 | use serde::{Deserialize, Serialize}; 18 | 19 | /// Example loop device. 
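///
/// Illustrative invocation (not from the original source), assuming a 64 MiB
/// backing image so the device size is a multiple of the 4 MiB zone size:
/// `cargo run --example zoned -- zoned.img zoned.json --zone-size 4MiB
/// --max-open-zones 4 --max-active-zones 4`.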
20 | #[derive(Debug, Parser)] 21 | struct Cli { 22 | backing_file: PathBuf, 23 | metadata_file: PathBuf, 24 | 25 | #[clap(long)] 26 | dev_id: Option, 27 | #[clap(long, default_value = "512")] 28 | logical_block_size: ByteSize, 29 | #[clap(long, default_value = "4KiB")] 30 | physical_block_size: ByteSize, 31 | #[clap(long, default_value = "512KiB")] 32 | io_buf_size: ByteSize, 33 | 34 | #[clap(long)] 35 | zone_size: ByteSize, 36 | #[clap(long)] 37 | max_open_zones: u32, 38 | #[clap(long)] 39 | max_active_zones: u32, 40 | #[clap(long, default_value = "1GiB")] 41 | max_zone_append_size: ByteSize, 42 | 43 | #[clap(long)] 44 | privileged: bool, 45 | } 46 | 47 | #[derive(Debug, Serialize, Deserialize)] 48 | struct ZonesMetadata { 49 | zone_size: u64, 50 | zones: Vec, 51 | } 52 | 53 | #[derive(Debug, Clone, Copy)] 54 | struct ZoneState { 55 | rel_wptr: u64, 56 | cond: ZoneCond, 57 | } 58 | 59 | impl Default for ZoneState { 60 | fn default() -> Self { 61 | Self { 62 | rel_wptr: 0, 63 | cond: ZoneCond::Empty, 64 | } 65 | } 66 | } 67 | 68 | impl Serialize for ZoneState { 69 | fn serialize(&self, serializer: S) -> Result 70 | where 71 | S: serde::Serializer, 72 | { 73 | self.rel_wptr.serialize(serializer) 74 | } 75 | } 76 | 77 | impl<'de> Deserialize<'de> for ZoneState { 78 | fn deserialize(deserializer: D) -> Result 79 | where 80 | D: serde::Deserializer<'de>, 81 | { 82 | let wptr = u64::deserialize(deserializer)?; 83 | Ok(Self { 84 | rel_wptr: wptr, 85 | // Full is processed in main. 86 | cond: if wptr == 0 { 87 | ZoneCond::Empty 88 | } else { 89 | ZoneCond::Closed 90 | }, 91 | }) 92 | } 93 | } 94 | 95 | fn main() -> anyhow::Result<()> { 96 | tracing_subscriber::fmt::init(); 97 | let cli = Cli::parse(); 98 | 99 | let backing_file = File::options() 100 | .read(true) 101 | .write(true) 102 | .open(cli.backing_file) 103 | .context("failed to open backing file")?; 104 | let size = backing_file 105 | .metadata() 106 | .context("failed to query backing file")? 107 | .len(); 108 | let zone_size = cli.zone_size.0; 109 | let zone_sectors = 110 | Sector::try_from_bytes(zone_size).context("zone size mut be multiple of sectors")?; 111 | ensure!( 112 | size % zone_sectors.bytes() == 0, 113 | "device size must be multiples of zone size" 114 | ); 115 | let size_sectors = Sector::try_from_bytes(size).unwrap(); 116 | let zones_cnt = size / zone_sectors.bytes(); 117 | 118 | let zones = cli 119 | .metadata_file 120 | .exists() 121 | .then(|| { 122 | let src = fs::read_to_string(&cli.metadata_file)?; 123 | let mut meta = serde_json::from_str::(&src)?; 124 | ensure!(meta.zone_size == zone_size, "zone size mismatch"); 125 | ensure!(meta.zones.len() as u64 == zones_cnt, "zone number mismatch"); 126 | for (idx, z) in meta.zones.iter_mut().enumerate() { 127 | ensure!(z.rel_wptr <= zone_size, "invalid wptr for zone {idx}"); 128 | z.cond = if z.rel_wptr == 0 { 129 | ZoneCond::Empty 130 | } else if z.rel_wptr == zone_size { 131 | ZoneCond::Full 132 | } else { 133 | ZoneCond::Closed 134 | }; 135 | } 136 | Ok(meta) 137 | }) 138 | .transpose() 139 | .context("failed to read metadata")? 
140 | .unwrap_or_else(|| ZonesMetadata { 141 | zone_size: cli.zone_size.0, 142 | zones: vec![ZoneState::default(); zones_cnt.try_into().unwrap()], 143 | }); 144 | 145 | let ctl = ControlDevice::open() 146 | .context("failed to open control device, kernel module 'ublk_drv' not loaded?")?; 147 | let mut builder = DeviceBuilder::new(); 148 | if !cli.privileged { 149 | builder.unprivileged(); 150 | } 151 | let mut srv = builder 152 | .name("ublk-zoned") 153 | .zoned() 154 | .io_buf_size(u32::try_from(cli.io_buf_size.0).context("buffer size too large")?) 155 | .dev_id(cli.dev_id) 156 | .create_service(&ctl) 157 | .context("failed to create ublk device")?; 158 | let zones_cnt_u32 = u32::try_from(zones_cnt).unwrap_or(u32::MAX); 159 | let params = *DeviceParams::new() 160 | .dev_sectors(size_sectors) 161 | .chunk_sectors(zone_sectors) 162 | .attrs(DeviceAttrs::VolatileCache) 163 | .logical_block_size(cli.logical_block_size.0) 164 | .physical_block_size(cli.physical_block_size.0) 165 | .io_min_size(cli.physical_block_size.0) 166 | .io_opt_size(cli.physical_block_size.0) 167 | .io_max_sectors(Sector::from_bytes(cli.io_buf_size.0)) 168 | .set_io_flusher(cli.privileged) 169 | .zoned(ZonedParams { 170 | max_open_zones: cli.max_open_zones.min(zones_cnt_u32), 171 | max_active_zones: cli.max_active_zones.min(zones_cnt_u32), 172 | max_zone_append_size: Sector::try_from_bytes(cli.max_zone_append_size.0) 173 | .unwrap() 174 | .min(size_sectors), 175 | }); 176 | let handler = ZonedDev { 177 | file: backing_file, 178 | size, 179 | zone_size, 180 | zones: Mutex::new(zones), 181 | metadata_path: cli.metadata_file, 182 | }; 183 | let ret = srv 184 | .serve(&TokioRuntimeBuilder, ¶ms, &handler) 185 | .context("service error"); 186 | handler.flush_sync().context("failed to sync")?; 187 | ret 188 | } 189 | 190 | struct ZonedDev { 191 | file: File, 192 | size: u64, 193 | zone_size: u64, 194 | zones: Mutex, 195 | metadata_path: PathBuf, 196 | } 197 | 198 | impl ZonedDev { 199 | fn flush_sync(&self) -> Result<(), Errno> { 200 | self.file.sync_data().map_err(convert_err)?; 201 | let content = serde_json::to_vec(&*self.zones.lock().unwrap()).unwrap(); 202 | let tmp_path = self.metadata_path.with_extension("tmp"); 203 | fs::write(&tmp_path, content).map_err(convert_err)?; 204 | fs::rename(&tmp_path, &self.metadata_path).map_err(convert_err)?; 205 | Ok(()) 206 | } 207 | } 208 | 209 | impl BlockDevice for ZonedDev { 210 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 211 | tracing::info!(dev_id = dev_info.dev_id(), ?dev_info, "device ready"); 212 | ctrlc::set_handler(move || stop.stop()).expect("failed to set Ctrl-C hook"); 213 | Ok(()) 214 | } 215 | 216 | async fn read(&self, off: Sector, buf: &mut ReadBuf<'_>, _flags: IoFlags) -> Result<(), Errno> { 217 | let mut buf2 = vec![0u8; buf.remaining()]; 218 | self.file 219 | .read_exact_at(&mut buf2, off.bytes()) 220 | .map_err(convert_err)?; 221 | buf.put_slice(&buf2)?; 222 | Ok(()) 223 | } 224 | 225 | async fn write(&self, off: Sector, buf: WriteBuf<'_>, _flags: IoFlags) -> Result { 226 | let off = off.bytes(); 227 | let zid = off / self.zone_size; 228 | let mut zones = self.zones.lock().unwrap(); 229 | let z = &mut zones.zones[zid as usize]; 230 | if (zid * self.zone_size + z.rel_wptr) != off { 231 | return Err(Errno::IO); 232 | } 233 | let new_rel_wptr = z 234 | .rel_wptr 235 | .checked_add(buf.len() as u64) 236 | .filter(|&p| p <= self.zone_size) 237 | .ok_or(Errno::IO)?; 238 | let mut buf2 = vec![0u8; buf.len()]; 239 | buf.copy_to_slice(&mut 
buf2)?; 240 | self.file.write_all_at(&buf2, off).map_err(convert_err)?; 241 | z.rel_wptr = new_rel_wptr; 242 | if new_rel_wptr == self.zone_size { 243 | z.cond = ZoneCond::Full; 244 | } else if matches!(z.cond, ZoneCond::Closed | ZoneCond::Empty) { 245 | z.cond = ZoneCond::ImpOpen; 246 | } 247 | Ok(buf.len()) 248 | } 249 | 250 | async fn flush(&self, _flags: IoFlags) -> Result<(), Errno> { 251 | self.flush_sync() 252 | } 253 | 254 | async fn zone_open(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 255 | let zid = off.bytes() / self.zone_size; 256 | let mut zones = self.zones.lock().unwrap(); 257 | let z = &mut zones.zones[zid as usize]; 258 | z.cond = match z.cond { 259 | ZoneCond::Empty | ZoneCond::ImpOpen | ZoneCond::ExpOpen | ZoneCond::Closed => { 260 | ZoneCond::ExpOpen 261 | } 262 | ZoneCond::Full => return Err(Errno::IO), 263 | _ => unreachable!(), 264 | }; 265 | Ok(()) 266 | } 267 | 268 | async fn zone_close(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 269 | let zid = off.bytes() / self.zone_size; 270 | let mut zones = self.zones.lock().unwrap(); 271 | let z = &mut zones.zones[zid as usize]; 272 | z.cond = match z.cond { 273 | ZoneCond::ExpOpen | ZoneCond::ImpOpen => { 274 | if z.rel_wptr == 0 { 275 | ZoneCond::Empty 276 | } else { 277 | ZoneCond::Closed 278 | } 279 | } 280 | ZoneCond::Empty | ZoneCond::Closed => z.cond, 281 | ZoneCond::Full => return Err(Errno::IO), 282 | _ => unreachable!(), 283 | }; 284 | Ok(()) 285 | } 286 | 287 | async fn zone_finish(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 288 | let zid = off.bytes() / self.zone_size; 289 | let mut zones = self.zones.lock().unwrap(); 290 | let z = &mut zones.zones[zid as usize]; 291 | z.rel_wptr = self.zone_size; 292 | z.cond = ZoneCond::Full; 293 | Ok(()) 294 | } 295 | 296 | async fn zone_reset(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 297 | let zid = off.bytes() / self.zone_size; 298 | let mut zones = self.zones.lock().unwrap(); 299 | let z = &mut zones.zones[zid as usize]; 300 | fallocate( 301 | &self.file, 302 | FallocateFlags::PUNCH_HOLE | FallocateFlags::KEEP_SIZE, 303 | off.bytes(), 304 | self.zone_size, 305 | )?; 306 | z.rel_wptr = 0; 307 | z.cond = ZoneCond::Empty; 308 | Ok(()) 309 | } 310 | 311 | async fn zone_reset_all(&self, _flags: IoFlags) -> Result<(), Errno> { 312 | let mut zones = self.zones.lock().unwrap(); 313 | fallocate( 314 | &self.file, 315 | FallocateFlags::PUNCH_HOLE | FallocateFlags::KEEP_SIZE, 316 | 0, 317 | self.size, 318 | )?; 319 | zones.zones.fill_with(ZoneState::default); 320 | Ok(()) 321 | } 322 | 323 | async fn report_zones( 324 | &self, 325 | off: Sector, 326 | buf: &mut ZoneBuf<'_>, 327 | _flags: IoFlags, 328 | ) -> Result<(), Errno> { 329 | let zid = off.bytes() / self.zone_size; 330 | let zones = self.zones.lock().unwrap(); 331 | let info = zones.zones[zid as usize..][..buf.remaining()] 332 | .iter() 333 | .zip(zid..) 
334 | .map(|(z, zid)| { 335 | Zone::new( 336 | Sector::from_bytes(zid * self.zone_size), 337 | Sector::from_bytes(self.zone_size), 338 | Sector::from_bytes(z.rel_wptr), 339 | ZoneType::SeqWriteReq, 340 | z.cond, 341 | ) 342 | }) 343 | .collect::>(); 344 | buf.report(&info)?; 345 | Ok(()) 346 | } 347 | 348 | async fn zone_append( 349 | &self, 350 | off: Sector, 351 | buf: WriteBuf<'_>, 352 | _flags: IoFlags, 353 | ) -> Result { 354 | let zid = off.bytes() / self.zone_size; 355 | let mut zones = self.zones.lock().unwrap(); 356 | let z = &mut zones.zones[zid as usize]; 357 | let new_rel_wptr = z 358 | .rel_wptr 359 | .checked_add(buf.len() as u64) 360 | .filter(|&p| p <= self.zone_size) 361 | .ok_or(Errno::IO)?; 362 | let mut buf2 = vec![0u8; buf.len()]; 363 | buf.copy_to_slice(&mut buf2)?; 364 | let old_wptr = zid * self.zone_size + z.rel_wptr; 365 | self.file 366 | .write_all_at(&buf2, old_wptr) 367 | .map_err(convert_err)?; 368 | z.rel_wptr = new_rel_wptr; 369 | if new_rel_wptr == self.zone_size { 370 | z.cond = ZoneCond::Full; 371 | } else if matches!(z.cond, ZoneCond::Closed | ZoneCond::Empty) { 372 | z.cond = ZoneCond::ImpOpen; 373 | } 374 | Ok(Sector::from_bytes(old_wptr)) 375 | } 376 | } 377 | 378 | #[allow(clippy::needless_pass_by_value)] 379 | fn convert_err(err: io::Error) -> Errno { 380 | tracing::error!(%err); 381 | Errno::from_io_error(&err).unwrap_or(Errno::IO) 382 | } 383 | -------------------------------------------------------------------------------- /orb-ublk/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod runtime; 2 | mod ublk; 3 | 4 | #[allow(warnings)] 5 | #[rustfmt::skip] 6 | mod sys; 7 | 8 | use std::{fmt, ops}; 9 | 10 | pub use ublk::*; 11 | 12 | /// Size or offset in unit of sectors (512bytes). 
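///
/// A short illustrative sketch of the conversions defined below (not from the
/// original documentation); with `SIZE` being 512, 4096 bytes is 8 sectors:
///
/// ```
/// use orb_ublk::Sector;
///
/// assert_eq!(Sector::from_bytes(4096), Sector(8));
/// assert_eq!(Sector(8).bytes(), 4096);
/// assert_eq!(Sector::try_from_bytes(100), None); // not a multiple of 512
/// assert_eq!((Sector(4) + Sector(4)).to_string(), "8s");
/// ```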
13 | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 14 | pub struct Sector(pub u64); 15 | 16 | impl fmt::Display for Sector { 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | self.0.fmt(f)?; 19 | "s".fmt(f) 20 | } 21 | } 22 | 23 | impl Sector { 24 | pub const SHIFT: u32 = 9; 25 | pub const SIZE: u32 = 1 << Self::SHIFT; 26 | 27 | #[must_use] 28 | pub const fn from_bytes(bytes: u64) -> Self { 29 | match Self::try_from_bytes(bytes) { 30 | Some(sec) => sec, 31 | None => panic!("bytes is not multiples of sectors"), 32 | } 33 | } 34 | 35 | #[must_use] 36 | pub const fn try_from_bytes(bytes: u64) -> Option { 37 | if bytes % Self::SIZE as u64 == 0 { 38 | Some(Self(bytes >> Self::SHIFT)) 39 | } else { 40 | None 41 | } 42 | } 43 | 44 | #[must_use] 45 | pub const fn bytes(self) -> u64 { 46 | match self.0.checked_mul(Self::SIZE as u64) { 47 | Some(bytes) => bytes, 48 | None => panic!("overflow"), 49 | } 50 | } 51 | 52 | #[must_use] 53 | pub const fn wrapping_bytes(self) -> u64 { 54 | self.0.wrapping_mul(Self::SIZE as u64) 55 | } 56 | } 57 | 58 | impl ops::Add for Sector { 59 | type Output = Sector; 60 | 61 | fn add(self, rhs: Sector) -> Self::Output { 62 | Self(self.0 + rhs.0) 63 | } 64 | } 65 | 66 | impl ops::AddAssign for Sector { 67 | fn add_assign(&mut self, rhs: Self) { 68 | self.0 += rhs.0; 69 | } 70 | } 71 | 72 | impl ops::Sub for Sector { 73 | type Output = Sector; 74 | 75 | fn sub(self, rhs: Sector) -> Self::Output { 76 | Self(self.0 - rhs.0) 77 | } 78 | } 79 | 80 | impl ops::SubAssign for Sector { 81 | fn sub_assign(&mut self, rhs: Self) { 82 | self.0 -= rhs.0; 83 | } 84 | } 85 | 86 | impl ops::Mul for Sector { 87 | type Output = Self; 88 | 89 | fn mul(self, rhs: u64) -> Self::Output { 90 | Self(self.0 * rhs) 91 | } 92 | } 93 | 94 | impl ops::Div for Sector { 95 | type Output = u64; 96 | 97 | fn div(self, rhs: Self) -> Self::Output { 98 | self.0 / rhs.0 99 | } 100 | } 101 | 102 | impl ops::Rem for Sector { 103 | type Output = Sector; 104 | 105 | fn rem(self, rhs: Sector) -> Self::Output { 106 | Sector(self.0 % rhs.0) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /orb-ublk/src/runtime.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::module_name_repetitions)] 2 | use std::future::Future; 3 | use std::io; 4 | use std::ops::ControlFlow; 5 | use std::pin::Pin; 6 | 7 | use io_uring::IoUring; 8 | 9 | pub trait AsyncRuntimeBuilder { 10 | type Runtime: AsyncRuntime; 11 | 12 | fn build(&self) -> io::Result; 13 | } 14 | 15 | pub trait AsyncRuntime { 16 | type Spawner<'env>: AsyncScopeSpawner<'env>; 17 | 18 | fn drive_uring<'env, T, F>(&mut self, uring: &IoUring, on_cqe: F) -> io::Result 19 | where 20 | F: for<'scope> FnMut(&'scope Self::Spawner<'env>) -> io::Result>; 21 | } 22 | 23 | pub trait AsyncScopeSpawner<'env> { 24 | fn spawn(&self, fut: Fut) 25 | where 26 | Fut: Future + 'env; 27 | } 28 | 29 | pub use sync::{Builder as SyncRuntimeBuilder, Runtime as SyncRuntime}; 30 | 31 | mod sync { 32 | use std::cell::RefCell; 33 | use std::collections::VecDeque; 34 | use std::fmt; 35 | use std::sync::Arc; 36 | use std::task::{Context, Poll, Wake}; 37 | 38 | use rustix::io::Errno; 39 | 40 | use super::*; 41 | 42 | #[derive(Debug)] 43 | pub struct Builder; 44 | 45 | impl AsyncRuntimeBuilder for Builder { 46 | type Runtime = Runtime; 47 | 48 | fn build(&self) -> io::Result { 49 | Ok(Runtime::default()) 50 | } 51 | } 52 | 53 | #[derive(Debug, 
Default)] 54 | pub struct Runtime(()); 55 | 56 | type TaskQueue<'env> = VecDeque + 'env>>>; 57 | 58 | impl AsyncRuntime for Runtime { 59 | type Spawner<'env> = Spawner<'env>; 60 | 61 | fn drive_uring<'env, T, F>(&mut self, uring: &IoUring, mut on_cqe: F) -> io::Result 62 | where 63 | F: for<'scope> FnMut(&'scope Self::Spawner<'env>) -> io::Result>, 64 | { 65 | struct NoopWaker; 66 | impl Wake for NoopWaker { 67 | fn wake(self: Arc) {} 68 | } 69 | let waker = Arc::new(NoopWaker).into(); 70 | let mut cx = Context::from_waker(&waker); 71 | 72 | let spawner = Spawner(RefCell::new(VecDeque::new())); 73 | loop { 74 | // Treat EINTR as a success. 75 | if let Err(err) = uring.submit_and_wait(1) { 76 | if err.raw_os_error() != Some(Errno::INTR.raw_os_error()) { 77 | return Err(err); 78 | } 79 | } 80 | 81 | if let ControlFlow::Break(v) = on_cqe(&spawner)? { 82 | break Ok(v); 83 | } 84 | 85 | while let Some(fut) = spawner.0.borrow_mut().pop_front() { 86 | match std::pin::pin!(fut).poll(&mut cx) { 87 | Poll::Ready(()) => {} 88 | Poll::Pending => panic!("sync runtime does not support yielding"), 89 | } 90 | } 91 | } 92 | } 93 | } 94 | 95 | pub struct Spawner<'env>(RefCell>); 96 | 97 | impl fmt::Debug for Spawner<'_> { 98 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 99 | f.debug_struct("Spawner").finish_non_exhaustive() 100 | } 101 | } 102 | 103 | impl<'env> AsyncScopeSpawner<'env> for Spawner<'env> { 104 | fn spawn(&self, fut: Fut) 105 | where 106 | Fut: Future + 'env, 107 | { 108 | self.0.borrow_mut().push_back(Box::pin(fut) as _); 109 | } 110 | } 111 | } 112 | 113 | #[cfg(feature = "tokio")] 114 | pub use tokio_support::Builder as TokioRuntimeBuilder; 115 | 116 | #[cfg(feature = "tokio")] 117 | mod tokio_support { 118 | use std::marker::PhantomData; 119 | use std::ptr::NonNull; 120 | 121 | use rustix::fd::AsRawFd; 122 | use tokio::io::unix::AsyncFd; 123 | use tokio::io::Interest; 124 | use tokio::task::LocalSet; 125 | 126 | use super::*; 127 | 128 | #[derive(Debug)] 129 | pub struct Builder; 130 | 131 | impl AsyncRuntimeBuilder for Builder { 132 | type Runtime = tokio::runtime::Runtime; 133 | 134 | fn build(&self) -> io::Result { 135 | let runtime = tokio::runtime::Builder::new_current_thread() 136 | .enable_all() 137 | .build()?; 138 | Ok(runtime) 139 | } 140 | } 141 | 142 | impl AsyncRuntime for tokio::runtime::Runtime { 143 | type Spawner<'env> = Spawner<'env>; 144 | 145 | fn drive_uring<'env, T, F>(&mut self, uring: &IoUring, mut on_cqe: F) -> io::Result 146 | where 147 | F: for<'scope> FnMut(&'scope Self::Spawner<'env>) -> io::Result>, 148 | { 149 | let _guard = self.enter(); 150 | let uring_fd = AsyncFd::with_interest(uring.as_raw_fd(), Interest::READABLE)?; 151 | // NB. This must be dropped before return. See more in `Spawner::spawn`. 152 | let local_set = LocalSet::new(); 153 | let spawner = Spawner { 154 | local_set: NonNull::from(&local_set), 155 | _marker: PhantomData, 156 | }; 157 | local_set.block_on(self, async { 158 | loop { 159 | uring_fd.readable().await?.clear_ready(); 160 | if let ControlFlow::Break(ret) = on_cqe(&spawner)? { 161 | break Ok(ret); 162 | } 163 | } 164 | }) 165 | } 166 | } 167 | 168 | #[derive(Debug)] 169 | pub struct Spawner<'env> { 170 | // `&'scope LocalSet` 171 | local_set: NonNull, 172 | _marker: PhantomData<&'env mut &'env ()>, 173 | } 174 | 175 | impl<'env> AsyncScopeSpawner<'env> for Spawner<'env> { 176 | fn spawn(&self, fut: Fut) 177 | where 178 | Fut: Future + 'env, 179 | { 180 | // SAFETY: Valid when `Spawner` is alive. 
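            // (The `LocalSet` lives on `drive_uring`'s stack for the whole polling
            // loop and the `Spawner` is only lent to the `on_cqe` callback inside
            // that loop, so the pointer cannot dangle at this point.)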
181 | let local_set = unsafe { self.local_set.as_ref() }; 182 | // SAFETY: All futures are spawned here are collected by `drive_uring` above and will 183 | // be either completed or dropped before its return. 184 | local_set.spawn_local(unsafe { 185 | std::mem::transmute::< 186 | Pin + 'env>>, 187 | Pin + 'static>>, 188 | >(Box::pin(fut)) 189 | }); 190 | } 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /orb-ublk/src/sys.rs: -------------------------------------------------------------------------------- 1 | /* automatically generated by rust-bindgen 0.71.0 */ 2 | 3 | pub const UBLK_CMD_GET_QUEUE_AFFINITY: u32 = 1; 4 | pub const UBLK_CMD_GET_DEV_INFO: u32 = 2; 5 | pub const UBLK_CMD_ADD_DEV: u32 = 4; 6 | pub const UBLK_CMD_DEL_DEV: u32 = 5; 7 | pub const UBLK_CMD_START_DEV: u32 = 6; 8 | pub const UBLK_CMD_STOP_DEV: u32 = 7; 9 | pub const UBLK_CMD_SET_PARAMS: u32 = 8; 10 | pub const UBLK_CMD_GET_PARAMS: u32 = 9; 11 | pub const UBLK_CMD_START_USER_RECOVERY: u32 = 16; 12 | pub const UBLK_CMD_END_USER_RECOVERY: u32 = 17; 13 | pub const UBLK_CMD_GET_DEV_INFO2: u32 = 18; 14 | pub const UBLK_FEATURES_LEN: u32 = 8; 15 | pub const UBLK_IO_FETCH_REQ: u32 = 32; 16 | pub const UBLK_IO_COMMIT_AND_FETCH_REQ: u32 = 33; 17 | pub const UBLK_IO_NEED_GET_DATA: u32 = 34; 18 | pub const UBLK_IO_RES_OK: u32 = 0; 19 | pub const UBLK_IO_RES_NEED_GET_DATA: u32 = 1; 20 | pub const UBLKSRV_CMD_BUF_OFFSET: u32 = 0; 21 | pub const UBLKSRV_IO_BUF_OFFSET: u32 = 2147483648; 22 | pub const UBLK_MAX_QUEUE_DEPTH: u32 = 4096; 23 | pub const UBLK_IO_BUF_OFF: u32 = 0; 24 | pub const UBLK_IO_BUF_BITS: u32 = 25; 25 | pub const UBLK_IO_BUF_BITS_MASK: u32 = 33554431; 26 | pub const UBLK_TAG_OFF: u32 = 25; 27 | pub const UBLK_TAG_BITS: u32 = 16; 28 | pub const UBLK_TAG_BITS_MASK: u32 = 65535; 29 | pub const UBLK_QID_OFF: u32 = 41; 30 | pub const UBLK_QID_BITS: u32 = 12; 31 | pub const UBLK_QID_BITS_MASK: u32 = 4095; 32 | pub const UBLK_MAX_NR_QUEUES: u32 = 4096; 33 | pub const UBLKSRV_IO_BUF_TOTAL_BITS: u32 = 53; 34 | pub const UBLKSRV_IO_BUF_TOTAL_SIZE: u64 = 9007199254740992; 35 | pub const UBLK_F_SUPPORT_ZERO_COPY: u32 = 1; 36 | pub const UBLK_F_URING_CMD_COMP_IN_TASK: u32 = 2; 37 | pub const UBLK_F_NEED_GET_DATA: u32 = 4; 38 | pub const UBLK_F_USER_RECOVERY: u32 = 8; 39 | pub const UBLK_F_USER_RECOVERY_REISSUE: u32 = 16; 40 | pub const UBLK_F_UNPRIVILEGED_DEV: u32 = 32; 41 | pub const UBLK_F_CMD_IOCTL_ENCODE: u32 = 64; 42 | pub const UBLK_F_USER_COPY: u32 = 128; 43 | pub const UBLK_F_ZONED: u32 = 256; 44 | pub const UBLK_S_DEV_DEAD: u32 = 0; 45 | pub const UBLK_S_DEV_LIVE: u32 = 1; 46 | pub const UBLK_S_DEV_QUIESCED: u32 = 2; 47 | pub const UBLK_IO_OP_READ: u32 = 0; 48 | pub const UBLK_IO_OP_WRITE: u32 = 1; 49 | pub const UBLK_IO_OP_FLUSH: u32 = 2; 50 | pub const UBLK_IO_OP_DISCARD: u32 = 3; 51 | pub const UBLK_IO_OP_WRITE_SAME: u32 = 4; 52 | pub const UBLK_IO_OP_WRITE_ZEROES: u32 = 5; 53 | pub const UBLK_IO_OP_ZONE_OPEN: u32 = 10; 54 | pub const UBLK_IO_OP_ZONE_CLOSE: u32 = 11; 55 | pub const UBLK_IO_OP_ZONE_FINISH: u32 = 12; 56 | pub const UBLK_IO_OP_ZONE_APPEND: u32 = 13; 57 | pub const UBLK_IO_OP_ZONE_RESET_ALL: u32 = 14; 58 | pub const UBLK_IO_OP_ZONE_RESET: u32 = 15; 59 | pub const UBLK_IO_OP_REPORT_ZONES: u32 = 18; 60 | pub const UBLK_IO_F_FAILFAST_DEV: u32 = 256; 61 | pub const UBLK_IO_F_FAILFAST_TRANSPORT: u32 = 512; 62 | pub const UBLK_IO_F_FAILFAST_DRIVER: u32 = 1024; 63 | pub const UBLK_IO_F_META: u32 = 2048; 64 | pub const UBLK_IO_F_FUA: u32 = 8192; 65 | pub 
const UBLK_IO_F_NOUNMAP: u32 = 32768; 66 | pub const UBLK_IO_F_SWAP: u32 = 65536; 67 | pub const UBLK_ATTR_READ_ONLY: u32 = 1; 68 | pub const UBLK_ATTR_ROTATIONAL: u32 = 2; 69 | pub const UBLK_ATTR_VOLATILE_CACHE: u32 = 4; 70 | pub const UBLK_ATTR_FUA: u32 = 8; 71 | pub const UBLK_PARAM_TYPE_BASIC: u32 = 1; 72 | pub const UBLK_PARAM_TYPE_DISCARD: u32 = 2; 73 | pub const UBLK_PARAM_TYPE_DEVT: u32 = 4; 74 | pub const UBLK_PARAM_TYPE_ZONED: u32 = 8; 75 | pub type __u8 = ::core::ffi::c_uchar; 76 | pub type __u16 = ::core::ffi::c_ushort; 77 | pub type __s32 = ::core::ffi::c_int; 78 | pub type __u32 = ::core::ffi::c_uint; 79 | pub type __u64 = ::core::ffi::c_ulonglong; 80 | pub const BLK_ZONE_TYPE_CONVENTIONAL: blk_zone_type = 1; 81 | pub const BLK_ZONE_TYPE_SEQWRITE_REQ: blk_zone_type = 2; 82 | pub const BLK_ZONE_TYPE_SEQWRITE_PREF: blk_zone_type = 3; 83 | pub type blk_zone_type = ::core::ffi::c_uint; 84 | pub const BLK_ZONE_COND_NOT_WP: blk_zone_cond = 0; 85 | pub const BLK_ZONE_COND_EMPTY: blk_zone_cond = 1; 86 | pub const BLK_ZONE_COND_IMP_OPEN: blk_zone_cond = 2; 87 | pub const BLK_ZONE_COND_EXP_OPEN: blk_zone_cond = 3; 88 | pub const BLK_ZONE_COND_CLOSED: blk_zone_cond = 4; 89 | pub const BLK_ZONE_COND_READONLY: blk_zone_cond = 13; 90 | pub const BLK_ZONE_COND_FULL: blk_zone_cond = 14; 91 | pub const BLK_ZONE_COND_OFFLINE: blk_zone_cond = 15; 92 | pub type blk_zone_cond = ::core::ffi::c_uint; 93 | #[repr(C)] 94 | #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] 95 | pub struct blk_zone { 96 | pub start: __u64, 97 | pub len: __u64, 98 | pub wp: __u64, 99 | pub type_: __u8, 100 | pub cond: __u8, 101 | pub non_seq: __u8, 102 | pub reset: __u8, 103 | pub resv: [__u8; 4usize], 104 | pub capacity: __u64, 105 | pub reserved: [__u8; 24usize], 106 | } 107 | #[repr(C)] 108 | #[derive(Debug, Default, Copy, Clone)] 109 | pub struct ublksrv_ctrl_cmd { 110 | pub dev_id: __u32, 111 | pub queue_id: __u16, 112 | pub len: __u16, 113 | pub addr: __u64, 114 | pub data: [__u64; 1usize], 115 | pub dev_path_len: __u16, 116 | pub pad: __u16, 117 | pub reserved: __u32, 118 | } 119 | #[repr(C)] 120 | #[derive(Debug, Default, Copy, Clone)] 121 | pub struct ublksrv_ctrl_dev_info { 122 | pub nr_hw_queues: __u16, 123 | pub queue_depth: __u16, 124 | pub state: __u16, 125 | pub pad0: __u16, 126 | pub max_io_buf_bytes: __u32, 127 | pub dev_id: __u32, 128 | pub ublksrv_pid: __s32, 129 | pub pad1: __u32, 130 | pub flags: __u64, 131 | pub ublksrv_flags: __u64, 132 | pub owner_uid: __u32, 133 | pub owner_gid: __u32, 134 | pub reserved1: __u64, 135 | pub reserved2: __u64, 136 | } 137 | #[repr(C)] 138 | #[derive(Copy, Clone)] 139 | pub struct ublksrv_io_desc { 140 | pub op_flags: __u32, 141 | pub __bindgen_anon_1: ublksrv_io_desc__bindgen_ty_1, 142 | pub start_sector: __u64, 143 | pub addr: __u64, 144 | } 145 | #[repr(C)] 146 | #[derive(Copy, Clone)] 147 | pub union ublksrv_io_desc__bindgen_ty_1 { 148 | pub nr_sectors: __u32, 149 | pub nr_zones: __u32, 150 | } 151 | impl Default for ublksrv_io_desc__bindgen_ty_1 { 152 | fn default() -> Self { 153 | let mut s = ::core::mem::MaybeUninit::::uninit(); 154 | unsafe { 155 | ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); 156 | s.assume_init() 157 | } 158 | } 159 | } 160 | impl Default for ublksrv_io_desc { 161 | fn default() -> Self { 162 | let mut s = ::core::mem::MaybeUninit::::uninit(); 163 | unsafe { 164 | ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); 165 | s.assume_init() 166 | } 167 | } 168 | } 169 | #[repr(C)] 170 | #[derive(Copy, Clone)] 171 | pub struct ublksrv_io_cmd 
{ 172 | pub q_id: __u16, 173 | pub tag: __u16, 174 | pub result: __s32, 175 | pub __bindgen_anon_1: ublksrv_io_cmd__bindgen_ty_1, 176 | } 177 | #[repr(C)] 178 | #[derive(Copy, Clone)] 179 | pub union ublksrv_io_cmd__bindgen_ty_1 { 180 | pub addr: __u64, 181 | pub zone_append_lba: __u64, 182 | } 183 | impl Default for ublksrv_io_cmd__bindgen_ty_1 { 184 | fn default() -> Self { 185 | let mut s = ::core::mem::MaybeUninit::::uninit(); 186 | unsafe { 187 | ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); 188 | s.assume_init() 189 | } 190 | } 191 | } 192 | impl Default for ublksrv_io_cmd { 193 | fn default() -> Self { 194 | let mut s = ::core::mem::MaybeUninit::::uninit(); 195 | unsafe { 196 | ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); 197 | s.assume_init() 198 | } 199 | } 200 | } 201 | #[repr(C)] 202 | #[derive(Debug, Default, Copy, Clone)] 203 | pub struct ublk_param_basic { 204 | pub attrs: __u32, 205 | pub logical_bs_shift: __u8, 206 | pub physical_bs_shift: __u8, 207 | pub io_opt_shift: __u8, 208 | pub io_min_shift: __u8, 209 | pub max_sectors: __u32, 210 | pub chunk_sectors: __u32, 211 | pub dev_sectors: __u64, 212 | pub virt_boundary_mask: __u64, 213 | } 214 | #[repr(C)] 215 | #[derive(Debug, Default, Copy, Clone)] 216 | pub struct ublk_param_discard { 217 | pub discard_alignment: __u32, 218 | pub discard_granularity: __u32, 219 | pub max_discard_sectors: __u32, 220 | pub max_write_zeroes_sectors: __u32, 221 | pub max_discard_segments: __u16, 222 | pub reserved0: __u16, 223 | } 224 | #[repr(C)] 225 | #[derive(Debug, Default, Copy, Clone)] 226 | pub struct ublk_param_devt { 227 | pub char_major: __u32, 228 | pub char_minor: __u32, 229 | pub disk_major: __u32, 230 | pub disk_minor: __u32, 231 | } 232 | #[repr(C)] 233 | #[derive(Debug, Default, Copy, Clone)] 234 | pub struct ublk_param_zoned { 235 | pub max_open_zones: __u32, 236 | pub max_active_zones: __u32, 237 | pub max_zone_append_sectors: __u32, 238 | pub reserved: [__u8; 20usize], 239 | } 240 | #[repr(C)] 241 | #[derive(Debug, Default, Copy, Clone)] 242 | pub struct ublk_params { 243 | pub len: __u32, 244 | pub types: __u32, 245 | pub basic: ublk_param_basic, 246 | pub discard: ublk_param_discard, 247 | pub devt: ublk_param_devt, 248 | pub zoned: ublk_param_zoned, 249 | } 250 | pub const UBLK_U_CMD_GET_QUEUE_AFFINITY: __u32 = 2149610753; 251 | pub const UBLK_U_CMD_GET_DEV_INFO: __u32 = 2149610754; 252 | pub const UBLK_U_CMD_ADD_DEV: __u32 = 3223352580; 253 | pub const UBLK_U_CMD_DEL_DEV: __u32 = 3223352581; 254 | pub const UBLK_U_CMD_START_DEV: __u32 = 3223352582; 255 | pub const UBLK_U_CMD_STOP_DEV: __u32 = 3223352583; 256 | pub const UBLK_U_CMD_SET_PARAMS: __u32 = 3223352584; 257 | pub const UBLK_U_CMD_GET_PARAMS: __u32 = 2149610761; 258 | pub const UBLK_U_CMD_START_USER_RECOVERY: __u32 = 3223352592; 259 | pub const UBLK_U_CMD_END_USER_RECOVERY: __u32 = 3223352593; 260 | pub const UBLK_U_CMD_GET_DEV_INFO2: __u32 = 2149610770; 261 | pub const UBLK_U_CMD_GET_FEATURES: __u32 = 2149610771; 262 | pub const UBLK_U_IO_FETCH_REQ: __u32 = 3222304032; 263 | pub const UBLK_U_IO_COMMIT_AND_FETCH_REQ: __u32 = 3222304033; 264 | pub const UBLK_U_IO_NEED_GET_DATA: __u32 = 3222304034; 265 | -------------------------------------------------------------------------------- /orb-ublk/tests/basic.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::io::{self, ErrorKind}; 3 | use std::path::PathBuf; 4 | use std::sync::atomic::{AtomicBool, Ordering}; 5 | use std::sync::{Arc, Mutex}; 6 | use 
std::time::{Duration, Instant}; 7 | 8 | use orb_ublk::runtime::{AsyncRuntimeBuilder, SyncRuntimeBuilder, TokioRuntimeBuilder}; 9 | use orb_ublk::{ 10 | BlockDevice, ControlDevice, DevState, DeviceAttrs, DeviceBuilder, DeviceInfo, DeviceParams, 11 | DiscardParams, FeatureFlags, IoFlags, ReadBuf, Sector, Stopper, WriteBuf, Zone, ZoneBuf, 12 | ZoneCond, ZoneType, ZonedParams, BDEV_PREFIX, 13 | }; 14 | use rand::rngs::StdRng; 15 | use rand::{Rng, RngCore, SeedableRng}; 16 | use rstest::{fixture, rstest}; 17 | use rustix::io::Errno; 18 | use xshell::{cmd, Shell}; 19 | 20 | const QUEUE_DEPTH: u16 = 2; 21 | const MAX_READ_LEN: usize = 1 << 20; // Is there really a limit in Linux? 22 | 23 | static ZEROES: [u8; MAX_READ_LEN] = [0; MAX_READ_LEN]; 24 | 25 | #[fixture] 26 | fn ctl() -> ControlDevice { 27 | ControlDevice::open() 28 | .expect("failed to open control device, kernel module 'ublk_drv' not loaded?") 29 | } 30 | 31 | fn retry_on_perm(mut f: impl FnMut() -> io::Result) -> io::Result { 32 | const RETRY_DELAY: Duration = Duration::from_millis(100); 33 | let mut retries_left = 10; 34 | loop { 35 | match f() { 36 | Err(err) if err.kind() == ErrorKind::PermissionDenied && retries_left > 0 => { 37 | eprintln!("permission denied, retries left: {retries_left}"); 38 | retries_left -= 1; 39 | std::thread::sleep(RETRY_DELAY); 40 | } 41 | ret => return ret, 42 | } 43 | } 44 | } 45 | 46 | #[allow(clippy::needless_pass_by_value)] 47 | #[track_caller] 48 | fn test_service( 49 | ctl: &ControlDevice, 50 | mut flags: FeatureFlags, 51 | queues: u16, 52 | params: &DeviceParams, 53 | rt_builder: impl AsyncRuntimeBuilder + Sync, 54 | handler: impl FnOnce(Arc) -> B, 55 | ) { 56 | if !flags.contains(FeatureFlags::UserCopy) { 57 | flags.insert(FeatureFlags::UnprivilegedDev); 58 | } 59 | let mut srv = DeviceBuilder::new() 60 | .name("ublk-test") 61 | .add_flags(flags) 62 | .queues(queues) 63 | .queue_depth(QUEUE_DEPTH) 64 | .create_service(ctl) 65 | .unwrap(); 66 | let tested = Arc::new(AtomicBool::new(false)); 67 | if queues == 1 { 68 | let mut rt = rt_builder.build().unwrap(); 69 | srv.serve_local(&mut rt, params, &handler(tested.clone())) 70 | .unwrap(); 71 | } else { 72 | srv.serve(&rt_builder, params, &handler(tested.clone())) 73 | .unwrap(); 74 | } 75 | assert!(tested.load(Ordering::Relaxed)); 76 | } 77 | 78 | fn wait_blockdev_ready(info: &DeviceInfo) -> io::Result { 79 | let path = format!("{}{}", BDEV_PREFIX, info.dev_id()); 80 | retry_on_perm(|| rustix::fs::access(&path, rustix::fs::Access::WRITE_OK).map_err(Into::into))?; 81 | Ok(path) 82 | } 83 | 84 | #[rstest] 85 | fn get_features(ctl: ControlDevice) { 86 | let feat = ctl.get_features().unwrap(); 87 | println!("{feat:?}"); 88 | assert!(feat.contains(FeatureFlags::UnprivilegedDev)); 89 | // Zero-copy is not supported by upstream yet. 90 | assert!(!feat.contains(FeatureFlags::SupportZeroCopy)); 91 | } 92 | 93 | #[rstest] 94 | fn create_info_delete(ctl: ControlDevice) { 95 | const USER_DATA: u64 = 0xDEAD_BEEF_1234_5678; 96 | 97 | let mut builder = DeviceBuilder::new(); 98 | builder 99 | .name("ublk-test") 100 | .queues(3) 101 | .queue_depth(6) 102 | .io_buf_size(12 << 10) 103 | .user_data(USER_DATA) 104 | .unprivileged(); 105 | let info = ctl.create_device(&builder).unwrap(); 106 | scopeguard::defer_on_unwind! 
{ 107 | if let Err(err) = retry_on_perm(|| ctl.delete_device(info.dev_id())) { 108 | if std::thread::panicking() { 109 | eprintln!("failed to delete device: {err}"); 110 | } else { 111 | panic!("failed to delete device: {err}"); 112 | } 113 | } 114 | } 115 | 116 | assert!(info.dev_id() < i32::MAX as u32); 117 | assert_eq!(info.nr_queues(), 3); 118 | assert_eq!(info.queue_depth(), 6); 119 | assert_eq!(info.io_buf_size(), 12 << 10); 120 | assert_eq!(info.state(), DevState::Dead); 121 | assert_eq!(info.user_data(), USER_DATA); 122 | 123 | let info2 = retry_on_perm(|| ctl.get_device_info(info.dev_id())).unwrap(); 124 | assert_eq!(info.dev_id(), info2.dev_id()); 125 | assert_eq!(info.nr_queues(), info2.nr_queues()); 126 | assert_eq!(info.queue_depth(), info2.queue_depth()); 127 | assert_eq!(info.io_buf_size(), info2.io_buf_size()); 128 | assert_eq!(info.state(), info2.state()); 129 | assert_eq!(info.user_data(), info2.user_data()); 130 | 131 | ctl.delete_device(info.dev_id()).unwrap(); 132 | } 133 | 134 | #[rstest] 135 | #[case::local(1)] 136 | #[case::threaded(2)] 137 | fn device_attrs(ctl: ControlDevice, #[case] queues: u16) { 138 | const DEV_SECTORS: Sector = Sector::from_bytes(42 << 10); 139 | let params = *DeviceParams::new() 140 | .dev_sectors(DEV_SECTORS) 141 | .attrs(DeviceAttrs::Rotational); 142 | 143 | test_service( 144 | &ctl, 145 | FeatureFlags::empty(), 146 | queues, 147 | ¶ms, 148 | SyncRuntimeBuilder, 149 | |tested| Handler { tested }, 150 | ); 151 | 152 | struct Handler { 153 | tested: Arc, 154 | } 155 | impl BlockDevice for Handler { 156 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 157 | scopeguard::defer!(stop.stop()); 158 | 159 | assert_eq!( 160 | ControlDevice::open() 161 | .unwrap() 162 | .get_device_info(dev_info.dev_id()) 163 | .unwrap() 164 | .state(), 165 | DevState::Live, 166 | ); 167 | 168 | let dev_sys_path = PathBuf::from(format!("/sys/block/ublkb{}", dev_info.dev_id())); 169 | let size_sec = fs::read_to_string(dev_sys_path.join("size")) 170 | .unwrap() 171 | .trim() 172 | .parse::() 173 | .unwrap(); 174 | assert_eq!(Sector(size_sec), DEV_SECTORS); 175 | let rotational = fs::read_to_string(dev_sys_path.join("queue/rotational")).unwrap(); 176 | assert_eq!(rotational.trim(), "1"); 177 | let ro = fs::read_to_string(dev_sys_path.join("ro")).unwrap(); 178 | assert_eq!(ro.trim(), "0"); 179 | 180 | self.tested.store(true, Ordering::Relaxed); 181 | Ok(()) 182 | } 183 | 184 | async fn read( 185 | &self, 186 | _off: Sector, 187 | _buf: &mut ReadBuf<'_>, 188 | _flags: IoFlags, 189 | ) -> Result<(), Errno> { 190 | Err(Errno::IO) 191 | } 192 | 193 | async fn write( 194 | &self, 195 | _off: Sector, 196 | _buf: WriteBuf<'_>, 197 | _flags: IoFlags, 198 | ) -> Result { 199 | Err(Errno::IO) 200 | } 201 | } 202 | } 203 | 204 | #[derive(Debug, Clone, Copy)] 205 | enum StopMethod { 206 | InternalStop, 207 | ExternalStop, 208 | ExternalDelete, 209 | } 210 | 211 | #[rstest] 212 | fn stop( 213 | ctl: ControlDevice, 214 | #[values(1, 2)] queues: u16, 215 | #[values( 216 | StopMethod::InternalStop, 217 | StopMethod::ExternalStop, 218 | StopMethod::ExternalDelete 219 | )] 220 | stop_method: StopMethod, 221 | ) { 222 | const DELAY: Duration = Duration::from_millis(100); 223 | 224 | let params = *DeviceParams::new() 225 | .dev_sectors(Sector(1)) 226 | .attrs(DeviceAttrs::Rotational); 227 | 228 | let inst = Instant::now(); 229 | test_service( 230 | &ctl, 231 | FeatureFlags::empty(), 232 | queues, 233 | ¶ms, 234 | SyncRuntimeBuilder, 235 | |tested| Handler { 236 | 
tested, 237 | stop_method, 238 | }, 239 | ); 240 | let elapsed = inst.elapsed(); 241 | assert!(elapsed > DELAY); 242 | 243 | #[derive(Clone)] 244 | struct Handler { 245 | tested: Arc, 246 | stop_method: StopMethod, 247 | } 248 | impl BlockDevice for Handler { 249 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 250 | let Self { 251 | tested, 252 | stop_method, 253 | } = self.clone(); 254 | let id = dev_info.dev_id(); 255 | std::thread::spawn(move || { 256 | std::thread::sleep(DELAY); 257 | tested.store(true, Ordering::Relaxed); 258 | match stop_method { 259 | StopMethod::InternalStop => { 260 | stop.stop(); 261 | } 262 | StopMethod::ExternalStop => { 263 | ControlDevice::open().unwrap().stop_device(id).unwrap(); 264 | } 265 | StopMethod::ExternalDelete => { 266 | ControlDevice::open().unwrap().delete_device(id).unwrap(); 267 | } 268 | } 269 | }); 270 | Ok(()) 271 | } 272 | 273 | async fn read( 274 | &self, 275 | _off: Sector, 276 | _buf: &mut ReadBuf<'_>, 277 | _flags: IoFlags, 278 | ) -> Result<(), Errno> { 279 | Err(Errno::IO) 280 | } 281 | 282 | async fn write( 283 | &self, 284 | _off: Sector, 285 | _buf: WriteBuf<'_>, 286 | _flags: IoFlags, 287 | ) -> Result { 288 | Err(Errno::IO) 289 | } 290 | } 291 | } 292 | 293 | #[rstest] 294 | #[case::default_local(FeatureFlags::empty(), 1)] 295 | #[case::default_threaded(FeatureFlags::empty(), 2)] 296 | #[ignore = "user copy requires privileges"] 297 | #[case::user_copy_local(FeatureFlags::UserCopy, 1)] 298 | #[ignore = "user copy requires privileges"] 299 | #[case::user_copy_threaded(FeatureFlags::UserCopy, 2)] 300 | fn read_write(ctl: ControlDevice, #[case] flags: FeatureFlags, #[case] queues: u16) { 301 | const SIZE_SECTORS: Sector = Sector::from_bytes(32 << 10); 302 | const TEST_WRITE_ROUNDS: usize = 32; 303 | const SEED: u64 = 0xDEAD_BEEF_DEAD_BEEF; 304 | 305 | let mut rng = StdRng::seed_from_u64(SEED); 306 | let mut data = vec![0u8; SIZE_SECTORS.bytes() as usize]; 307 | rng.fill_bytes(&mut data); 308 | let data = Arc::new(Mutex::new(data)); 309 | 310 | test_service( 311 | &ctl, 312 | flags, 313 | queues, 314 | DeviceParams::new().dev_sectors(SIZE_SECTORS), 315 | SyncRuntimeBuilder, 316 | |tested| Handler { tested, data, rng }, 317 | ); 318 | 319 | #[derive(Clone)] 320 | struct Handler { 321 | tested: Arc, 322 | data: Arc>>, 323 | rng: StdRng, 324 | } 325 | impl BlockDevice for Handler { 326 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 327 | let dev_info = *dev_info; 328 | let Handler { 329 | tested, 330 | data, 331 | mut rng, 332 | } = self.clone(); 333 | std::thread::spawn(move || { 334 | scopeguard::defer!(stop.stop()); 335 | 336 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 337 | 338 | // NB. Perform I/O in another process to avoid deadlocks. 339 | let sh = Shell::new().unwrap(); 340 | let mut state = cmd!(sh, "cat {dev_path}").output().unwrap().stdout; 341 | // The initial data should match. 342 | assert_eq!(state.len() as u64, SIZE_SECTORS.bytes()); 343 | assert_eq!(state, *data.lock().unwrap()); 344 | 345 | let mut buf = [0u8; Sector::SIZE as usize]; 346 | for _ in 0..TEST_WRITE_ROUNDS { 347 | // Write a random block at random sector. 
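                    // (`seek=` counts output blocks of `bs` bytes, so with bs=512
                    // this lands the block at byte offset `sector_offset * 512`,
                    // mirroring the `state` update a few lines below.)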
348 | let sector_offset = rng.random_range(0..SIZE_SECTORS.0); 349 | rng.fill_bytes(&mut buf); 350 | let sector_offset_s = sector_offset.to_string(); 351 | cmd!( 352 | sh, 353 | "dd if=/dev/stdin of={dev_path} bs=512 count=1 seek={sector_offset_s}" 354 | ) 355 | .ignore_stderr() 356 | .stdin(buf) 357 | .run() 358 | .unwrap(); 359 | let offset = sector_offset * Sector::SIZE as u64; 360 | state[offset as usize..][..Sector::SIZE as usize].copy_from_slice(&buf); 361 | 362 | // Retrieve all data, and they should match. 363 | let got = cmd!(sh, "cat {dev_path}").output().unwrap().stdout; 364 | assert_eq!(got, state); 365 | } 366 | 367 | assert_eq!(*data.lock().unwrap(), state); 368 | 369 | tested.store(true, Ordering::Relaxed); 370 | }); 371 | Ok(()) 372 | } 373 | 374 | async fn read( 375 | &self, 376 | off: Sector, 377 | buf: &mut ReadBuf<'_>, 378 | _flags: IoFlags, 379 | ) -> Result<(), Errno> { 380 | buf.put_slice(&self.data.lock().unwrap()[off.bytes() as usize..][..buf.remaining()])?; 381 | Ok(()) 382 | } 383 | 384 | async fn write( 385 | &self, 386 | off: Sector, 387 | buf: WriteBuf<'_>, 388 | _flags: IoFlags, 389 | ) -> Result { 390 | let len = buf.len(); 391 | buf.copy_to_slice(&mut self.data.lock().unwrap()[off.bytes() as usize..][..len])?; 392 | Ok(len) 393 | } 394 | } 395 | } 396 | 397 | #[rstest] 398 | #[ignore = "spam dmesg"] 399 | fn error(ctl: ControlDevice) { 400 | const SIZE_SECTORS: Sector = Sector::from_bytes(4 << 10); 401 | 402 | test_service( 403 | &ctl, 404 | FeatureFlags::empty(), 405 | 1, 406 | DeviceParams::new().dev_sectors(SIZE_SECTORS), 407 | SyncRuntimeBuilder, 408 | |tested| Handler { tested }, 409 | ); 410 | 411 | struct Handler { 412 | tested: Arc, 413 | } 414 | impl BlockDevice for Handler { 415 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 416 | let dev_info = *dev_info; 417 | let tested = self.tested.clone(); 418 | 419 | std::thread::spawn(move || { 420 | scopeguard::defer!(stop.stop()); 421 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 422 | 423 | // NB. Perform I/O in another process to avoid deadlocks. 
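                // (Presumably because buffered I/O issued from inside the serving
                // process can end up waiting, e.g. under write-back or memory
                // reclaim, on requests that only this very process can complete.)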
424 | let sh = Shell::new().unwrap(); 425 | 426 | let stderr = cmd!(sh, "dd if={dev_path} of=/dev/null bs=512 count=1") 427 | .ignore_status() 428 | .read_stderr() 429 | .unwrap(); 430 | assert!(stderr.contains("Input/output error")); 431 | 432 | let stderr = cmd!(sh, "dd if=/dev/zero of={dev_path} bs=512 count=1") 433 | .ignore_status() 434 | .read_stderr() 435 | .unwrap(); 436 | assert!(stderr.contains("Input/output error")); 437 | 438 | tested.store(true, Ordering::Relaxed); 439 | }); 440 | Ok(()) 441 | } 442 | 443 | async fn read( 444 | &self, 445 | _off: Sector, 446 | _buf: &mut ReadBuf<'_>, 447 | _flags: IoFlags, 448 | ) -> Result<(), Errno> { 449 | Err(Errno::IO) 450 | } 451 | 452 | async fn write( 453 | &self, 454 | _off: Sector, 455 | _buf: WriteBuf<'_>, 456 | _flags: IoFlags, 457 | ) -> Result { 458 | Err(Errno::IO) 459 | } 460 | } 461 | } 462 | 463 | #[rstest] 464 | #[ignore = "spam dmesg"] 465 | #[case::local(1)] 466 | #[ignore = "spam dmesg"] 467 | #[case::threaded(2)] 468 | fn handler_panic(ctl: ControlDevice, #[case] queues: u16) { 469 | const SIZE_SECTORS: Sector = Sector(1); 470 | const TEST_ROUNDS: u16 = QUEUE_DEPTH * 2; 471 | 472 | test_service( 473 | &ctl, 474 | FeatureFlags::empty(), 475 | queues, 476 | DeviceParams::new().dev_sectors(SIZE_SECTORS), 477 | TokioRuntimeBuilder, 478 | |tested| Handler { 479 | should_ok: Default::default(), 480 | tested, 481 | }, 482 | ); 483 | 484 | #[derive(Clone)] 485 | struct Handler { 486 | should_ok: Arc, 487 | tested: Arc, 488 | } 489 | impl BlockDevice for Handler { 490 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 491 | let dev_info = *dev_info; 492 | let Handler { should_ok, tested } = self.clone(); 493 | std::thread::spawn(move || { 494 | scopeguard::defer!(stop.stop()); 495 | 496 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 497 | let sh = Shell::new().unwrap(); 498 | 499 | for _ in 0..TEST_ROUNDS { 500 | cmd!(sh, "cat {dev_path}") 501 | .ignore_stderr() 502 | .run() 503 | .unwrap_err(); 504 | } 505 | 506 | // Should still work after recovered. 
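                // (The panicking reads above still complete with an error, so `cat`
                // fails rather than hangs; once `should_ok` flips, the same queue
                // serves zeroed data again, i.e. a handler panic must not wedge the
                // device.)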
507 | should_ok.store(true, Ordering::Relaxed); 508 | for _ in 0..TEST_ROUNDS { 509 | let ret = cmd!(sh, "cat {dev_path}").ignore_stderr().read().unwrap(); 510 | assert_eq!(ret.as_bytes(), [0u8; Sector::SIZE as _]); 511 | } 512 | 513 | tested.store(true, Ordering::Relaxed); 514 | }); 515 | Ok(()) 516 | } 517 | 518 | async fn read( 519 | &self, 520 | _off: Sector, 521 | buf: &mut ReadBuf<'_>, 522 | _flags: IoFlags, 523 | ) -> Result<(), Errno> { 524 | if self.should_ok.load(Ordering::Relaxed) { 525 | buf.put_slice(&ZEROES[..buf.remaining()])?; 526 | Ok(()) 527 | } else { 528 | panic!("nooo"); 529 | } 530 | } 531 | 532 | async fn write( 533 | &self, 534 | _off: Sector, 535 | _buf: WriteBuf<'_>, 536 | _flags: IoFlags, 537 | ) -> Result { 538 | Err(Errno::IO) 539 | } 540 | } 541 | } 542 | 543 | #[rstest] 544 | #[case::default_local(FeatureFlags::empty(), 1)] 545 | #[case::default_threaded(FeatureFlags::empty(), 2)] 546 | #[ignore = "user copy requires privileges"] 547 | #[case::user_copy_local(FeatureFlags::UserCopy, 1)] 548 | #[ignore = "user copy requires privileges"] 549 | #[case::user_copy_threaded(FeatureFlags::UserCopy, 2)] 550 | fn tokio_null(ctl: ControlDevice, #[case] flags: FeatureFlags, #[case] queues: u16) { 551 | const SIZE_SECTORS: Sector = Sector::from_bytes(4 << 10); 552 | const DELAY: Duration = Duration::from_millis(500); 553 | const TOLERANCE: Duration = Duration::from_millis(50); 554 | 555 | test_service( 556 | &ctl, 557 | flags, 558 | queues, 559 | DeviceParams::new().dev_sectors(SIZE_SECTORS), 560 | TokioRuntimeBuilder, 561 | |tested| Handler { tested }, 562 | ); 563 | 564 | struct Handler { 565 | tested: Arc, 566 | } 567 | impl BlockDevice for Handler { 568 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 569 | let dev_info = *dev_info; 570 | let tested = self.tested.clone(); 571 | std::thread::spawn(move || { 572 | scopeguard::defer!(stop.stop()); 573 | 574 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 575 | 576 | // NB. Perform I/O in another process to avoid deadlocks. 
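                // (The read handler sleeps `DELAY` on the tokio timer, so the
                // elapsed-time assertion below checks that the request really went
                // through the async runtime instead of completing immediately.)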
577 | let sh = Shell::new().unwrap(); 578 | let inst = Instant::now(); 579 | let out = cmd!(sh, "dd if={dev_path} of=/dev/stdout bs=512 count=1") 580 | .ignore_stderr() 581 | .output() 582 | .unwrap() 583 | .stdout; 584 | let elapsed = inst.elapsed(); 585 | assert_eq!(out, [0u8; Sector::SIZE as _]); 586 | assert!( 587 | DELAY - TOLERANCE <= elapsed && elapsed <= DELAY + TOLERANCE, 588 | "unexpected delay: {elapsed:?}", 589 | ); 590 | 591 | tested.store(true, Ordering::Relaxed); 592 | }); 593 | Ok(()) 594 | } 595 | 596 | async fn read( 597 | &self, 598 | _off: Sector, 599 | buf: &mut ReadBuf<'_>, 600 | _flags: IoFlags, 601 | ) -> Result<(), Errno> { 602 | tokio::time::sleep(DELAY).await; 603 | buf.put_slice(&ZEROES[..buf.remaining()])?; 604 | Ok(()) 605 | } 606 | 607 | async fn write( 608 | &self, 609 | _off: Sector, 610 | _buf: WriteBuf<'_>, 611 | _flags: IoFlags, 612 | ) -> Result { 613 | Err(Errno::IO) 614 | } 615 | } 616 | } 617 | 618 | #[rstest] 619 | fn discard(ctl: ControlDevice) { 620 | const SIZE_SECTORS: Sector = Sector::from_bytes(4 << 10); 621 | const GRANULARITY: u32 = 1 << 10; 622 | 623 | test_service( 624 | &ctl, 625 | FeatureFlags::empty(), 626 | 1, 627 | DeviceParams::new() 628 | .dev_sectors(SIZE_SECTORS) 629 | .discard(DiscardParams { 630 | alignment: GRANULARITY, 631 | granularity: GRANULARITY, 632 | max_size: SIZE_SECTORS as _, 633 | max_write_zeroes_size: SIZE_SECTORS as _, 634 | max_segments: 1, 635 | }), 636 | SyncRuntimeBuilder, 637 | |tested| Handler { 638 | tested, 639 | discarded: Default::default(), 640 | }, 641 | ); 642 | 643 | #[derive(Clone)] 644 | struct Handler { 645 | tested: Arc, 646 | discarded: Arc>>, 647 | } 648 | impl BlockDevice for Handler { 649 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 650 | let dev_info = *dev_info; 651 | let Self { tested, discarded } = self.clone(); 652 | std::thread::spawn(move || { 653 | scopeguard::defer!(stop.stop()); 654 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 655 | 656 | let sh = Shell::new().unwrap(); 657 | let take_discarded = || std::mem::take(&mut *discarded.lock().unwrap()); 658 | 659 | cmd!(sh, "blkdiscard {dev_path}").run().unwrap(); 660 | assert_eq!(take_discarded(), [(false, 0, SIZE_SECTORS.bytes() as _)]); 661 | cmd!(sh, "blkdiscard --zeroout {dev_path}").run().unwrap(); 662 | assert_eq!(take_discarded(), [(true, 0, SIZE_SECTORS.bytes() as _)]); 663 | 664 | cmd!(sh, "blkdiscard -o 1024 -l 2048 {dev_path}") 665 | .run() 666 | .unwrap(); 667 | assert_eq!(take_discarded(), [(false, 1024, 2048)]); 668 | cmd!(sh, "blkdiscard --zeroout -o 1024 -l 2048 {dev_path}") 669 | .run() 670 | .unwrap(); 671 | assert_eq!(take_discarded(), [(true, 1024, 2048)]); 672 | 673 | tested.store(true, Ordering::Relaxed); 674 | }); 675 | Ok(()) 676 | } 677 | 678 | async fn read( 679 | &self, 680 | _off: Sector, 681 | buf: &mut ReadBuf<'_>, 682 | _flags: IoFlags, 683 | ) -> Result<(), Errno> { 684 | buf.put_slice(&ZEROES[..buf.remaining()])?; 685 | Ok(()) 686 | } 687 | 688 | async fn write( 689 | &self, 690 | _off: Sector, 691 | buf: WriteBuf<'_>, 692 | _flags: IoFlags, 693 | ) -> Result { 694 | Ok(buf.len()) 695 | } 696 | 697 | async fn discard(&self, off: Sector, len: usize, _flags: IoFlags) -> Result<(), Errno> { 698 | self.discarded 699 | .lock() 700 | .unwrap() 701 | .push((false, off.bytes(), len)); 702 | Ok(()) 703 | } 704 | 705 | async fn write_zeroes( 706 | &self, 707 | off: Sector, 708 | len: usize, 709 | _flags: IoFlags, 710 | ) -> Result<(), Errno> { 711 | self.discarded 712 | 
.lock() 713 | .unwrap() 714 | .push((true, off.bytes(), len)); 715 | Ok(()) 716 | } 717 | } 718 | } 719 | 720 | #[rstest] 721 | #[ignore = "user copy requires privileges"] 722 | fn zoned(ctl: ControlDevice) { 723 | const SIZE_SECTORS: Sector = Sector::from_bytes(4 << 10); 724 | const ZONE_SECTORS: Sector = Sector::from_bytes(1 << 10); 725 | const ZONES: u64 = SIZE_SECTORS.0 / ZONE_SECTORS.0; 726 | const MAX_OPEN_ZONES: u32 = 1; 727 | const MAX_ACTIVE_ZONES: u32 = 1; 728 | const MAX_ZONE_APPEND_SECTORS: Sector = Sector::from_bytes(1 << 10); 729 | 730 | if !ctl.get_features().unwrap().contains(FeatureFlags::Zoned) { 731 | eprintln!("skipped zoned tests because this kernel does not support it"); 732 | return; 733 | } 734 | 735 | let zones = (0..ZONES) 736 | .map(|i| { 737 | if i < 2 { 738 | Zone::new( 739 | ZONE_SECTORS * i, 740 | ZONE_SECTORS, 741 | Sector(0), 742 | ZoneType::Conventional, 743 | ZoneCond::NotWp, 744 | ) 745 | } else { 746 | Zone::new( 747 | ZONE_SECTORS * i, 748 | ZONE_SECTORS, 749 | Sector(i), 750 | ZoneType::SeqWriteReq, 751 | ZoneCond::Empty, 752 | ) 753 | } 754 | }) 755 | .collect::>(); 756 | 757 | test_service( 758 | &ctl, 759 | FeatureFlags::Zoned | FeatureFlags::UserCopy, 760 | 1, 761 | DeviceParams::new() 762 | .dev_sectors(SIZE_SECTORS) 763 | .chunk_sectors(ZONE_SECTORS) 764 | .zoned(ZonedParams { 765 | max_open_zones: MAX_OPEN_ZONES, 766 | max_active_zones: MAX_ACTIVE_ZONES, 767 | max_zone_append_size: MAX_ZONE_APPEND_SECTORS, 768 | }), 769 | SyncRuntimeBuilder, 770 | |tested| Handler { 771 | tested, 772 | zones: zones.into(), 773 | ops: Default::default(), 774 | }, 775 | ); 776 | 777 | #[derive(Clone)] 778 | struct Handler { 779 | tested: Arc, 780 | zones: Arc<[Zone]>, 781 | ops: Arc>, 782 | } 783 | impl BlockDevice for Handler { 784 | fn ready(&self, dev_info: &DeviceInfo, stop: Stopper) -> io::Result<()> { 785 | let dev_info = *dev_info; 786 | let Self { tested, ops, .. 
} = self.clone(); 787 | std::thread::spawn(move || { 788 | scopeguard::defer!(stop.stop()); 789 | let dev_path = wait_blockdev_ready(&dev_info).unwrap(); 790 | let sys_queue_path = 791 | PathBuf::from(format!("/sys/block/ublkb{}/queue", dev_info.dev_id())); 792 | 793 | let opt_str = |subpath: &str| { 794 | fs::read_to_string(sys_queue_path.join(subpath)) 795 | .unwrap() 796 | .trim() 797 | .to_owned() 798 | }; 799 | let opt_u64 = |subpath: &str| opt_str(subpath).parse::().unwrap(); 800 | 801 | assert_eq!(opt_str("zoned"), "host-managed"); 802 | assert_eq!(opt_u64("chunk_sectors"), ZONE_SECTORS.0); 803 | assert_eq!(opt_u64("nr_zones"), SIZE_SECTORS / ZONE_SECTORS); 804 | assert_eq!( 805 | opt_u64("zone_append_max_bytes"), 806 | MAX_ZONE_APPEND_SECTORS.bytes() 807 | ); 808 | assert_eq!(opt_u64("max_open_zones"), MAX_OPEN_ZONES as _); 809 | assert_eq!(opt_u64("max_active_zones"), MAX_ACTIVE_ZONES as _); 810 | 811 | let sh = Shell::new().unwrap(); 812 | let report = cmd!(sh, "blkzone report {dev_path}").read().unwrap(); 813 | println!("{report}"); 814 | let expect = " 815 | start: 0x000000000, len 0x000002, cap 0x000002, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] 816 | start: 0x000000002, len 0x000002, cap 0x000002, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] 817 | start: 0x000000004, len 0x000002, cap 0x000002, wptr 0x000002 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)] 818 | start: 0x000000006, len 0x000002, cap 0x000002, wptr 0x000003 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)] 819 | "; 820 | assert_eq!(report.trim(), expect.trim()); 821 | 822 | // The zone with id 2. 823 | cmd!(sh, "blkzone open {dev_path} --offset 4 --length 2") 824 | .run() 825 | .unwrap(); 826 | cmd!(sh, "blkzone close {dev_path} --offset 4 --length 2") 827 | .run() 828 | .unwrap(); 829 | cmd!(sh, "blkzone finish {dev_path} --offset 4 --length 2") 830 | .run() 831 | .unwrap(); 832 | cmd!(sh, "blkzone reset {dev_path} --offset 4 --length 2") 833 | .run() 834 | .unwrap(); 835 | cmd!(sh, "blkzone reset {dev_path}").run().unwrap(); 836 | assert_eq!(*ops.lock().unwrap(), "open;close;finish;reset;reset_all;"); 837 | 838 | tested.store(true, Ordering::Relaxed); 839 | }); 840 | Ok(()) 841 | } 842 | 843 | async fn read( 844 | &self, 845 | _off: Sector, 846 | buf: &mut ReadBuf<'_>, 847 | _flags: IoFlags, 848 | ) -> Result<(), Errno> { 849 | buf.put_slice(&ZEROES[..buf.remaining()])?; 850 | Ok(()) 851 | } 852 | 853 | async fn write( 854 | &self, 855 | _off: Sector, 856 | _buf: WriteBuf<'_>, 857 | _flags: IoFlags, 858 | ) -> Result { 859 | Err(Errno::IO) 860 | } 861 | 862 | async fn report_zones( 863 | &self, 864 | off: Sector, 865 | buf: &mut ZoneBuf<'_>, 866 | _flags: IoFlags, 867 | ) -> Result<(), Errno> { 868 | let zid = off / ZONE_SECTORS; 869 | buf.report(&self.zones[zid as usize..][..buf.remaining()])?; 870 | Ok(()) 871 | } 872 | 873 | async fn zone_open(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 874 | assert_eq!(off.bytes(), 2 << 10); 875 | self.ops.lock().unwrap().push_str("open;"); 876 | Ok(()) 877 | } 878 | 879 | async fn zone_close(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 880 | assert_eq!(off.bytes(), 2 << 10); 881 | self.ops.lock().unwrap().push_str("close;"); 882 | Ok(()) 883 | } 884 | 885 | async fn zone_finish(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> { 886 | assert_eq!(off.bytes(), 2 << 10); 887 | self.ops.lock().unwrap().push_str("finish;"); 888 | Ok(()) 889 | } 890 | 891 | 
async fn zone_reset(&self, off: Sector, _flags: IoFlags) -> Result<(), Errno> {
892 | assert_eq!(off.bytes(), 2 << 10);
893 | self.ops.lock().unwrap().push_str("reset;");
894 | Ok(())
895 | }
896 |
897 | async fn zone_reset_all(&self, _flags: IoFlags) -> Result<(), Errno> {
898 | self.ops.lock().unwrap().push_str("reset_all;");
899 | Ok(())
900 | }
901 | }
902 | }
903 |
--------------------------------------------------------------------------------
/orb-ublk/tests/interrupt.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Mutex;
2 | use std::thread;
3 | use std::time::{Duration, Instant};
4 |
5 | use libtest_mimic::{Arguments, Failed, Trial};
6 | use orb_ublk::runtime::{AsyncRuntimeBuilder, SyncRuntimeBuilder};
7 | use orb_ublk::{
8 | BlockDevice, ControlDevice, DeviceBuilder, DeviceInfo, DeviceParams, IoFlags, ReadBuf, Sector,
9 | Stopper, WriteBuf,
10 | };
11 | use rustix::io::Errno;
12 | use rustix::process::{kill_process, Pid, Signal};
13 |
14 | const DELAY: Duration = Duration::from_millis(200);
15 |
16 | static STOPPER: Mutex<Option<Stopper>> = Mutex::new(None);
17 |
18 | fn main() -> std::process::ExitCode {
19 | let mut args = Arguments::from_args();
20 | // Force run tests in main thread because we do signal handling.
21 | args.test_threads = Some(1);
22 | let tests = vec![
23 | Trial::test("interrupt_local", || interrupt(1)),
24 | Trial::test("interrupt_threaded", || interrupt(2)),
25 | ];
26 |
27 | ctrlc::set_handler(move || {
28 | if let Some(s) = STOPPER.lock().unwrap().take() {
29 | s.stop();
30 | }
31 | })
32 | .unwrap();
33 |
34 | libtest_mimic::run(&args, tests).exit_code()
35 | }
36 |
37 | #[allow(clippy::unnecessary_wraps)]
38 | fn interrupt(queues: u16) -> Result<(), Failed> {
39 | let ctl = ControlDevice::open().unwrap();
40 |
41 | let mut srv = DeviceBuilder::new()
42 | .name("ublk-test")
43 | .queues(queues)
44 | .unprivileged()
45 | .create_service(&ctl)
46 | .unwrap();
47 |
48 | let params = *DeviceParams::new().dev_sectors(Sector(1));
49 | let inst = Instant::now();
50 | let h = Handler {
51 | pid: rustix::process::getpid(),
52 | thread: Mutex::new(None),
53 | };
54 | if queues == 1 {
55 | srv.serve_local(&mut SyncRuntimeBuilder.build().unwrap(), &params, &h)
56 | .unwrap();
57 | } else {
58 | srv.serve(&SyncRuntimeBuilder, &params, &h).unwrap();
59 | }
60 | let elapsed = inst.elapsed();
61 | assert!(elapsed >= DELAY, "unexpected elapsed time: {elapsed:?}");
62 |
63 | h.thread.lock().unwrap().take().unwrap().join().unwrap();
64 |
65 | Ok(())
66 | }
67 |
68 | struct Handler {
69 | pid: Pid,
70 | thread: Mutex<Option<thread::JoinHandle<()>>>,
71 | }
72 |
73 | impl BlockDevice for Handler {
74 | fn ready(&self, _dev_info: &DeviceInfo, stop: Stopper) -> std::io::Result<()> {
75 | *STOPPER.lock().unwrap() = Some(stop);
76 | let pid = self.pid;
77 | let j = thread::spawn(move || {
78 | thread::sleep(DELAY);
79 | kill_process(pid, Signal::INT).unwrap();
80 | });
81 | *self.thread.lock().unwrap() = Some(j);
82 | Ok(())
83 | }
84 |
85 | async fn read(
86 | &self,
87 | _off: Sector,
88 | _buf: &mut ReadBuf<'_>,
89 | _flags: IoFlags,
90 | ) -> Result<(), Errno> {
91 | Err(Errno::IO)
92 | }
93 |
94 | async fn write(
95 | &self,
96 | _off: Sector,
97 | _buf: WriteBuf<'_>,
98 | _flags: IoFlags,
99 | ) -> Result<usize, Errno> {
100 | Err(Errno::IO)
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/src/cli.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | use clap::builder::TypedValueParser;
4 |
5 | /// OneDrive as a block device.
6 | ///
7 | /// Disclaimer: Microsoft OneDrive is a file hosting service operated by Microsoft. This program
8 | /// orb has nothing to do with Microsoft, other than using their public API interface on behalf of
9 | /// users, once the user explicitly logs in via `orb login`.
10 | ///
11 | /// Copyright (C) 2024 Oxalica
12 | ///
13 | /// This program is free software: you can redistribute it and/or modify it under the terms of the
14 | /// GNU General Public License as published by the Free Software Foundation, either version 3 of
15 | /// the License, or (at your option) any later version. This program is distributed in the hope
16 | /// that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
17 | /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 | /// more details. You should have received a copy of the GNU General Public License along with this
19 | /// program. If not, see <https://www.gnu.org/licenses/>.
20 | #[derive(Debug, clap::Parser)]
21 | #[clap(about, version = env!("CFG_RELEASE"))]
22 | pub enum Cli {
23 | Verify(VerifyCmd),
24 | Serve(ServeCmd),
25 | Stop(StopCmd),
26 | Login(LoginCmd),
27 | }
28 |
29 | /// Verify the validity of a given configuration file for `orb serve`.
30 | ///
31 | /// This can catch static errors, including syntax errors and invalid option types or values. It
32 | /// cannot catch runtime errors like invalid or non-existing credentials or a non-existing remote
33 | /// directory.
34 | #[derive(Debug, clap::Args)]
35 | pub struct VerifyCmd {
36 | /// The configuration file path.
37 | #[clap(long, short)]
38 | pub config_file: PathBuf,
39 | }
40 |
41 | /// Start and run the service in the foreground.
42 | ///
43 | /// The block device will be ready at `/dev/ublkbX`, where X is the next unused integer starting at
44 | /// 0. Service configurations are passed via the config file. The service will run until it is
45 | /// signaled to exit via SIGINT (Ctrl-C) or SIGTERM, or until the device gets deleted by a manual `orb
46 | /// stop`. The block device and the control device are cleaned up when the process exits.
47 | /// If it somehow fails to clean up correctly, `orb stop` can also be used to release stale
48 | /// control devices.
49 | #[derive(Debug, clap::Args)]
50 | pub struct ServeCmd {
51 | /// The configuration file path.
52 | #[clap(long, short)]
53 | pub config_file: PathBuf,
54 | }
55 |
56 | /// Stop and clean up ublk control and block devices `/dev/ublk{c,b}*`.
57 | ///
58 | /// This can be used either to stop a running service, or to release resources when the service
59 | /// aborted unexpectedly without cleaning up correctly. Foreign ublk devices not created by orb
60 | /// will be skipped from deletion.
61 | ///
62 | /// If the corresponding devices were created by a privileged process, this command also requires
63 | /// root privilege to clean them up.
64 | #[derive(Debug, clap::Args)]
65 | pub struct StopCmd {
66 | /// Clean all existing `ublk` devices.
67 | #[clap(long, exclusive = true)]
68 | pub all: bool,
69 | /// The integer device ids to clean up, i.e. the trailing number in `/dev/ublk{b,c}*`.
70 | #[clap(required = true)]
71 | pub dev_ids: Vec<u32>,
72 |
73 | /// Disable magic checks and force deletion of devices.
74 | #[clap(short, long)]
75 | pub force: bool,
76 | }
77 |
78 | /// Interactively log in to a Microsoft account and save the credential for service use.
79 | ///
80 | /// Login can be done while the service is running. You can use `systemctl reload` (SIGHUP) to trigger
81 | /// a reload of updated credentials, so that buffered data will not be lost when tokens somehow
82 | /// fail to be refreshed automatically.
83 | /// A successful login always clears existing states (under `state.json`) and enforces a
84 | /// re-synchronization on the next service start.
85 | ///
86 | /// WARNING: When the service is running, a credential update must guarantee that the new credential
87 | /// refers to the same account as the old one; otherwise the state will be inconsistent, and all
88 | /// buffered data will be lost!
89 | #[derive(Debug, clap::Args)]
90 | pub struct LoginCmd {
91 | #[command(flatten)]
92 | pub state_dir: StateDir,
93 |
94 | /// The client ID for the registered application.
95 | #[clap(
96 | long,
97 | name = "UUID",
98 | value_parser = clap::builder::StringValueParser::new().try_map(uuid_checker),
99 | )]
100 | pub client_id: String,
101 | }
102 |
103 | #[derive(Debug, clap::Args)]
104 | #[group(required = true, multiple = false)]
105 | pub struct StateDir {
106 | /// Save credentials for the systemd service `orb@<INSTANCE>.service`. This is a shortcut for
107 | /// `--state-dir /var/lib/orb/<INSTANCE>`.
108 | ///
109 | /// INSTANCE should be in systemd-escaped form.
110 | #[clap(
111 | long,
112 | name = "INSTANCE",
113 | value_parser = clap::builder::StringValueParser::new().try_map(systemd_name_checker),
114 | )]
115 | pub systemd: Option<String>,
116 |
117 | /// The state directory to store credentials.
118 | #[clap(long)]
119 | pub state_dir: Option<PathBuf>,
120 | }
121 |
122 | fn systemd_name_checker(s: String) -> Result<String, &'static str> {
123 | if !s.is_empty()
124 | && !s.starts_with('.')
125 | && s.bytes()
126 | .all(|b| b.is_ascii_alphanumeric() || b == b'-' || b == b'\\')
127 | {
128 | Ok(s)
129 | } else {
130 | Err("invalid escaped systemd instance name")
131 | }
132 | }
133 |
134 | fn uuid_checker(s: String) -> Result<String, &'static str> {
135 | const SAMPLE: &[u8] = b"00000000-1111-2222-3333-444444444444";
136 | if s.len() == SAMPLE.len()
137 | && s.bytes().zip(SAMPLE).all(|(lhs, &rhs)| {
138 | if rhs == b'-' {
139 | lhs == rhs
140 | } else {
141 | lhs.is_ascii_hexdigit()
142 | }
143 | })
144 | {
145 | Ok(s)
146 | } else {
147 | Err("invalid UUID")
148 | }
149 | }
150 |
151 | impl StateDir {
152 | pub fn to_path(&self) -> PathBuf {
153 | match (&self.state_dir, &self.systemd) {
154 | (Some(path), _) => path.clone(),
155 | (None, Some(inst)) => format!("/var/lib/orb/{inst}").into(),
156 | // Verified by clap.
157 | _ => unreachable!(),
158 | }
159 | }
160 | }
161 |
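// Added illustration (not part of the original source): typical invocations of the
// subcommands declared above, derived from the clap attributes in this file. The
// paths and the client id below are placeholders.
//
//     orb verify --config-file config.toml
//     orb serve --config-file config.toml
//     orb stop 0 1        # refuses devices not created by orb unless --force is given
//     orb stop --all
//     orb login --state-dir /var/lib/orb/example \
//         --client-id 00000000-1111-2222-3333-444444444444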
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod memory_backend;
2 | pub mod onedrive_backend;
3 | pub mod service;
4 |
5 | #[cfg(test)]
6 | mod tests;
7 |
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | use std::num::NonZeroU16;
3 | use std::path::Path;
4 | use std::sync::Arc;
5 | use std::{fs, io};
6 |
7 | use anyhow::{bail, ensure, Context, Result};
8 | use clap::Parser;
9 | use cli::{Cli, LoginCmd, ServeCmd, StopCmd, VerifyCmd};
10 | use orb_ublk::{ControlDevice, DeviceBuilder, DeviceInfo};
11 | use serde::{de, Deserialize};
12 | use serde_inline_default::serde_inline_default;
13 | use tokio::runtime::Runtime;
14 | use tokio::signal::unix as signal;
15 |
16 | #[cfg(not(target_os = "linux"))]
17 | compile_error!("Only Linux is supported because of ublk driver");
18 |
19 | mod cli;
20 |
21 | /// The environment variable for log level and/or filter.
22 | const LOG_ENV_VAR: &str = "ORB_LOG";
23 |
24 | /// The magic user data for ublk to recognize in `stop` subcommand.
25 | const ORB_MAGIC: u64 = u64::from_le_bytes(*b"orb\0\0\0\0\0");
26 |
27 | const LOGICAL_SECTOR_SIZE: u32 = 4 << 10; // Typical page size.
28 |
29 | type Frontend = orb::service::Frontend;
30 |
31 | fn main() -> Result<()> {
32 | init_log();
33 |
34 | match Cli::parse() {
35 | Cli::Verify(cmd) => verify_main(&cmd),
36 | Cli::Serve(cmd) => serve_main(&cmd),
37 | Cli::Stop(cmd) => stop_cmd(cmd),
38 | Cli::Login(cmd) => login_cmd(cmd),
39 | }
40 | }
41 |
42 | fn init_log() {
43 | use tracing::level_filters::LevelFilter;
44 | use tracing_subscriber::layer::SubscriberExt;
45 | use tracing_subscriber::util::SubscriberInitExt;
46 |
47 | let filter = tracing_subscriber::EnvFilter::builder()
48 | .with_env_var(LOG_ENV_VAR)
49 | .with_default_directive(LevelFilter::INFO.into())
50 | .from_env_lossy();
51 |
52 | // TODO: It's better to integrate with systemd-journald via `tracing_journald`, but that would
53 | // make all custom fields invisible by plain `journalctl`, which hurts readability a lot.
54 | tracing_subscriber::registry() 55 | .with(tracing_subscriber::fmt::layer()) 56 | .with(filter) 57 | .init(); 58 | } 59 | 60 | #[derive(Debug, Deserialize)] 61 | #[serde(deny_unknown_fields)] 62 | struct Config { 63 | ublk: UblkConfig, 64 | device: orb::service::Config, 65 | backend: BackendConfig, 66 | } 67 | 68 | #[derive(Debug, Deserialize)] 69 | #[serde(deny_unknown_fields, rename_all = "snake_case")] 70 | enum BackendConfig { 71 | Memory(orb::memory_backend::Config), 72 | Onedrive(orb::onedrive_backend::Config), 73 | } 74 | 75 | #[serde_inline_default] 76 | #[derive(Debug, Deserialize)] 77 | #[serde(deny_unknown_fields)] 78 | struct UblkConfig { 79 | #[serde_inline_default(-1)] 80 | id: i32, 81 | #[serde(default)] 82 | unprivileged: bool, 83 | #[serde_inline_default(NonZeroU16::new(64).unwrap())] 84 | #[serde(deserialize_with = "de_queue_depth")] 85 | queue_depth: NonZeroU16, 86 | } 87 | 88 | fn de_queue_depth<'de, D: de::Deserializer<'de>>(de: D) -> Result { 89 | let v = NonZeroU16::deserialize(de)?; 90 | if v.get() <= DeviceBuilder::MAX_QUEUE_DEPTH { 91 | Ok(v) 92 | } else { 93 | Err(de::Error::custom(format_args!( 94 | "invalid queue depth exceeding {}", 95 | DeviceBuilder::MAX_QUEUE_DEPTH, 96 | ))) 97 | } 98 | } 99 | 100 | fn load_and_verify_config(path: &Path) -> Result { 101 | let buf = fs::read_to_string(path).context("failed to read config file")?; 102 | let config = toml::from_str::(&buf).context("failed to parse config file")?; 103 | config.device.validate().context("invalid device config")?; 104 | Ok(config) 105 | } 106 | 107 | fn verify_main(cmd: &VerifyCmd) -> Result<()> { 108 | load_and_verify_config(&cmd.config_file)?; 109 | Ok(()) 110 | } 111 | 112 | fn serve_main(cmd: &ServeCmd) -> Result<()> { 113 | // Fail fast. 114 | let config = load_and_verify_config(&cmd.config_file)?; 115 | let ctl = open_ctl_dev()?; 116 | 117 | let mut rt = tokio::runtime::Builder::new_current_thread() 118 | .enable_all() 119 | .build() 120 | .context("failed to build tokio runtime")?; 121 | 122 | match &config.backend { 123 | BackendConfig::Memory(_) => { 124 | let memory = orb::memory_backend::Memory::new(&config.device); 125 | // SAFETY: `DEBUG_PTR` is set correctly in `serve`. 126 | let frontend = 127 | Frontend::new(config.device, memory, on_ready).expect("config is validated"); 128 | serve(&ctl, &mut rt, &config, &frontend)?; 129 | } 130 | BackendConfig::Onedrive(backend_config) => { 131 | let (remote, chunks) = 132 | orb::onedrive_backend::init(backend_config, &config.device, &rt)?; 133 | let drive = remote.get_drive(); 134 | { 135 | let _guard = rt.enter(); 136 | register_reload_signal(drive)?; 137 | } 138 | 139 | // SAFETY: `DEBUG_PTR` is set correctly in `serve`. 140 | let frontend = 141 | Frontend::new(config.device, remote, on_ready).expect("config is validated"); 142 | let (frontend, rt) = &mut *scopeguard::guard((frontend, rt), |(frontend, rt)| { 143 | tracing::info!("releasing remote lock"); 144 | if let Err(err) = rt.block_on(frontend.into_backend().unlock()) { 145 | tracing::error!(%err, "failed to release remote lock"); 146 | } 147 | }); 148 | 149 | rt.block_on(frontend.init_chunks(&chunks)) 150 | .context("failed to initialize chunks")?; 151 | // Free memory. 
152 | drop(chunks); 153 | 154 | serve(&ctl, rt, &config, frontend)?; 155 | 156 | tracing::info!("flushing buffers before exit"); 157 | rt.block_on(orb_ublk::BlockDevice::flush( 158 | &*frontend, 159 | orb_ublk::IoFlags::empty(), 160 | )) 161 | // Error reasons should be reported inside `flush`, the returned error here is 162 | // always EIO and carrying no information. 163 | .inspect_err(|_| tracing::error!("final flush failed, data may be lost!"))?; 164 | } 165 | } 166 | Ok(()) 167 | } 168 | 169 | fn register_reload_signal( 170 | drive: Arc, 171 | ) -> io::Result<()> { 172 | let mut sighup = signal::signal(signal::SignalKind::hangup())?; 173 | tokio::spawn(async move { 174 | loop { 175 | sighup.recv().await.unwrap(); 176 | let ts = rustix::time::clock_gettime(rustix::time::ClockId::Monotonic); 177 | let ts_usec = ts.tv_sec * 1_000_000 + ts.tv_nsec / 1_000; 178 | let _ = sd_notify::notify( 179 | false, 180 | &[ 181 | sd_notify::NotifyState::Reloading, 182 | sd_notify::NotifyState::Custom(&format!("MONOTONIC_USEC={ts_usec}")), 183 | ], 184 | ); 185 | tracing::info!("signaled to reload"); 186 | match drive.reload().await { 187 | Ok(()) => tracing::info!("reloaded successfully"), 188 | Err(err) => tracing::error!(%err, "failed to reload credentials"), 189 | } 190 | let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Ready]); 191 | } 192 | }); 193 | Ok(()) 194 | } 195 | 196 | // Workaround: This is very ugly since scoped_tls support neither fat pointers nor !Sized 197 | // types. There is a PR but the crate is inactive. 198 | // See: https://github.com/alexcrichton/scoped-tls/pull/27 199 | // 200 | // Invariant: pointer stored here must be valid for the lifetime of itself. 201 | scoped_tls::scoped_thread_local!(static DEBUG_PTR: *const dyn Debug); 202 | 203 | fn serve( 204 | ctl: &ControlDevice, 205 | rt: &mut Runtime, 206 | config: &Config, 207 | frontend: &Frontend, 208 | ) -> Result<()> { 209 | let mut builder = DeviceBuilder::new(); 210 | builder.dev_id(u32::try_from(config.ublk.id).ok()); 211 | let mut dev_params = frontend.dev_params(); 212 | if config.ublk.unprivileged { 213 | builder.unprivileged(); 214 | } else { 215 | dev_params.set_io_flusher(true); 216 | } 217 | 218 | // Invariant of `DEBUG_PTR`: `frontend` is valid during this `set` call. 219 | DEBUG_PTR.set(&std::ptr::from_ref(frontend as &dyn Debug), || { 220 | builder 221 | .name("orb") 222 | .user_data(ORB_MAGIC) 223 | .queues(1) 224 | .queue_depth(config.ublk.queue_depth.get()) 225 | .io_buf_size(orb::service::MAX_READ_SECTORS.bytes().try_into().unwrap()) 226 | .zoned() 227 | .create_service(ctl) 228 | .context("failed to create ublk device")? 229 | .serve_local(rt, &dev_params, frontend) 230 | .context("service failed") 231 | }) 232 | } 233 | 234 | fn on_ready(dev_info: &DeviceInfo, stopper: orb_ublk::Stopper) -> io::Result<()> { 235 | let mut sigint = signal::signal(signal::SignalKind::interrupt())?; 236 | let mut sigterm = signal::signal(signal::SignalKind::terminate())?; 237 | tokio::task::spawn(async move { 238 | tokio::select! 
{ 239 | v = sigint.recv() => v, 240 | v = sigterm.recv() => v, 241 | } 242 | .unwrap(); 243 | tracing::info!("signaled to stop"); 244 | let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]); 245 | stopper.stop(); 246 | }); 247 | 248 | if DEBUG_PTR.is_set() { 249 | let mut sigusr1 = signal::signal(signal::SignalKind::user_defined1())?; 250 | tokio::task::spawn(async move { 251 | use std::io::Write; 252 | use std::os::unix::fs::OpenOptionsExt; 253 | use std::time::SystemTime; 254 | 255 | while let Some(()) = sigusr1.recv().await { 256 | tracing::warn!("debug dumping states"); 257 | let ts = SystemTime::now() 258 | .duration_since(SystemTime::UNIX_EPOCH) 259 | .unwrap_or_default(); 260 | // SAFETY: By the invariant of `DEBUG_PTR`. 261 | let debug_out = DEBUG_PTR.with(|&ptr| format!("{:#?}", unsafe { &*ptr })); 262 | 263 | // Spawn detached. No need to join. 264 | tokio::task::spawn_blocking(move || { 265 | let path = std::env::temp_dir().join(format!( 266 | "orb-state-dump.{}.{:09}", 267 | ts.as_secs(), 268 | ts.subsec_nanos(), 269 | )); 270 | let ret = std::fs::OpenOptions::new() 271 | // Avoid blocking pipe traps. 272 | .create_new(true) 273 | .write(true) 274 | .mode(0o600) // rw------- 275 | .open(&path) 276 | .and_then(|mut f| f.write_all(debug_out.as_bytes())); 277 | match ret { 278 | Ok(()) => tracing::warn!(path = %path.display(), "debug dump saved"), 279 | Err(err) => { 280 | tracing::error!( 281 | path = %path.display(), 282 | %err, 283 | "failed to save debug dump", 284 | ); 285 | } 286 | } 287 | }); 288 | } 289 | }); 290 | } 291 | 292 | tracing::info!("block device ready at /dev/ublkb{}", dev_info.dev_id()); 293 | let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Ready]); 294 | Ok(()) 295 | } 296 | 297 | fn stop_cmd(cmd: StopCmd) -> Result<()> { 298 | use orb_ublk::CDEV_PREFIX; 299 | 300 | let ctl = open_ctl_dev()?; 301 | if cmd.all { 302 | for ent in fs::read_dir("/dev").context("failed to read /dev")? { 303 | if let Some(dev_id) = (|| { 304 | ent.ok()? 305 | .file_name() 306 | .to_str()? 307 | .strip_prefix("ublkc")? 308 | .parse::() 309 | .ok() 310 | })() { 311 | if !cmd.force { 312 | let info = match ctl.get_device_info(dev_id) { 313 | Ok(info) => info, 314 | Err(err) => { 315 | eprintln!("skipped {CDEV_PREFIX}{dev_id}: failed to get info: {err}"); 316 | continue; 317 | } 318 | }; 319 | if info.user_data() != ORB_MAGIC { 320 | eprintln!("skipped {CDEV_PREFIX}{dev_id}: not created by orb"); 321 | continue; 322 | } 323 | } 324 | ctl.delete_device(dev_id) 325 | .with_context(|| format!("failed to delete {CDEV_PREFIX}{dev_id}"))?; 326 | } 327 | } 328 | } else { 329 | for dev_id in cmd.dev_ids { 330 | if !cmd.force { 331 | let info = ctl 332 | .get_device_info(dev_id) 333 | .with_context(|| format!("failed to get info of {CDEV_PREFIX}{dev_id}"))?; 334 | ensure!( 335 | info.user_data() == ORB_MAGIC, 336 | "refused to delete {CDEV_PREFIX}{dev_id}: not created by orb", 337 | ); 338 | } 339 | ctl.delete_device(dev_id) 340 | .with_context(|| format!("failed to delete {CDEV_PREFIX}{dev_id}"))?; 341 | } 342 | } 343 | Ok(()) 344 | } 345 | 346 | fn open_ctl_dev() -> Result { 347 | match ControlDevice::open() { 348 | Ok(ctl) => Ok(ctl), 349 | Err(err) => { 350 | let help = if err.kind() == io::ErrorKind::NotFound { 351 | ", try loading kernel module via 'modprobe ublk_drv'?" 
352 | } else {
353 | ""
354 | };
355 | bail!("failed to open {}{}", ControlDevice::PATH, help);
356 | }
357 | }
358 | }
359 |
360 | pub fn login_cmd(cmd: LoginCmd) -> Result<()> {
361 | orb::onedrive_backend::login::interactive(&cmd.state_dir.to_path(), cmd.client_id)
362 | }
363 |
--------------------------------------------------------------------------------
/src/memory_backend.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fmt;
3 | use std::future::{ready, Future};
4 |
5 | use anyhow::{anyhow, Result};
6 | use bytes::Bytes;
7 | use futures_util::{stream, Stream};
8 | use parking_lot::RwLock;
9 | use serde::Deserialize;
10 |
11 | use crate::service::Backend;
12 |
13 | #[derive(Debug, Deserialize)]
14 | #[serde(deny_unknown_fields)]
15 | pub struct Config {}
16 |
17 | pub struct Memory {
18 | pub(crate) zones: Box<[RwLock<HashMap<u32, Bytes>>]>,
19 | }
20 |
21 | impl fmt::Debug for Memory {
22 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
23 | struct ZonesDebug<'a>(&'a [RwLock<HashMap<u32, Bytes>>]);
24 |
25 | impl fmt::Debug for ZonesDebug<'_> {
26 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
27 | let mut map = f.debug_map();
28 | map.entries(self.0.iter().enumerate().map(|(zid, chunks)| {
29 | let mut ranges = chunks
30 | .read()
31 | .iter()
32 | .map(|(&coff, data)| coff..(coff + data.len() as u32))
33 | .collect::<Vec<_>>();
34 | ranges.sort_unstable_by_key(|range| range.start);
35 | (zid, ranges)
36 | }));
37 | map.finish()
38 | }
39 | }
40 |
41 | f.debug_struct("Memory")
42 | .field("zones", &ZonesDebug(&self.zones))
43 | .finish()
44 | }
45 | }
46 |
47 | impl Memory {
48 | #[must_use]
49 | pub fn new(dev_config: &crate::service::Config) -> Self {
50 | let zone_cnt = dev_config.dev_secs / dev_config.zone_secs;
51 | let zones = (0..zone_cnt).map(|_| RwLock::new(HashMap::new())).collect();
52 | Self { zones }
53 | }
54 | }
55 |
56 | impl Backend for Memory {
57 | fn download_chunk(
58 | &self,
59 | zid: u32,
60 | coff: u32,
61 | read_offset: u64,
62 | ) -> impl Stream<Item = Result<Bytes>> + Send + 'static {
63 | let ret = match self.zones[zid as usize].read().get(&coff) {
64 | Some(data) => Ok(data.slice(read_offset as usize..)),
65 | None => Err(anyhow!("chunk not found: zid={zid} coff={coff}")),
66 | };
67 | stream::iter(Some(ret))
68 | }
69 |
70 | fn upload_chunk(
71 | &self,
72 | zid: u32,
73 | coff: u32,
74 | data: Bytes,
75 | ) -> impl Future<Output = Result<()>> + Send + '_ {
76 | self.zones[zid as usize].write().insert(coff, data);
77 | ready(Ok(()))
78 | }
79 |
80 | fn delete_zone(&self, zid: u32) -> impl Future<Output = Result<()>> + Send + '_ {
81 | self.zones[zid as usize].write().clear();
82 | ready(Ok(()))
83 | }
84 |
85 | fn delete_all_zones(&self) -> impl Future<Output = Result<()>> + Send + '_ {
86 | for zone in self.zones.iter() {
87 | zone.write().clear();
88 | }
89 | ready(Ok(()))
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/src/onedrive_backend/login.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | use std::net::ToSocketAddrs;
3 | use std::path::Path;
4 | use std::sync::Arc;
5 | use std::time::SystemTime;
6 |
7 | use anyhow::{ensure, Context, Result};
8 | use futures_util::FutureExt;
9 | use hyper::service::service_fn;
10 | use hyper::{header, Method, Request, Response, StatusCode};
11 | use hyper_util::rt::TokioIo;
12 | use onedrive_api::{Auth, ClientCredential, Permission, Tenant, TokenResponse};
13 | use reqwest::Url;
14 | use rustix::fs::Access;
15 | use
tokio::sync::mpsc; 16 | use tokio::sync::mpsc::error::TrySendError; 17 | 18 | use super::{safe_write, Credential, STATE_FILE_NAME, USER_AGENT, USER_CREDENTIAL_FILE_NAME}; 19 | 20 | const LOCALHOST: &str = "localhost"; 21 | const LOCALHOST_ADDR: &str = "localhost:0"; 22 | 23 | const DISCLAIMER: &str = "\ 24 | Disclaimer: Microsoft OneDrive is a file hosting service operated by Microsoft. This program \ 25 | orb has nothing to do with Microsoft, other than using their public API interface on behalf of \ 26 | users, once the user explicitly logins here. \ 27 | This program is licensed under GNU General Public License 3 or (at your option) any later versions. \ 28 | This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY. \ 29 | You can run `orb --help` for license details. \ 30 | "; 31 | 32 | pub fn interactive(state_dir: &Path, client_id: String) -> Result<()> { 33 | // Fail fast on insufficient permission. 34 | std::fs::create_dir_all(state_dir).context("failed to create directory")?; 35 | rustix::fs::access( 36 | state_dir, 37 | Access::READ_OK | Access::WRITE_OK | Access::EXEC_OK | Access::EXISTS, 38 | )?; 39 | 40 | let rt = tokio::runtime::Builder::new_current_thread() 41 | .enable_all() 42 | .build() 43 | .context("failed to build tokio runtime")?; 44 | 45 | let client = reqwest::Client::builder() 46 | .user_agent(USER_AGENT) 47 | .https_only(true) 48 | .build() 49 | .context("failed to build reqwest client")?; 50 | let perm = Permission::new_read().write(true).offline_access(true); 51 | 52 | let localhost_addrs = LOCALHOST_ADDR 53 | .to_socket_addrs() 54 | .context("failed to resolve localhost")? 55 | .collect::>(); 56 | ensure!(!localhost_addrs.is_empty(), "no address for localhost"); 57 | tracing::debug!(?localhost_addrs); 58 | 59 | // Ensure the request is really from redirection from our login page, not by some other 60 | // website, which is possible on most browsers though the website cannot get the response. 61 | let cors_token = { 62 | use rand::{Rng, SeedableRng}; 63 | let token = rand::rngs::StdRng::from_os_rng().random::(); 64 | >::from(format!("{token:016x}")) 65 | }; 66 | 67 | let (auth, tokens) = rt.block_on(async { 68 | // Create one listener for each address, but with the same port. 69 | // This is necessary, or it happens that we are listening in `[::1]` but the browser 70 | // send requests to `127.0.0.1`. 
71 | let listener = tokio::net::TcpListener::bind(localhost_addrs[0]).await?; 72 | let port = listener.local_addr()?.port(); 73 | 74 | let redirect_uri = format!("http://{LOCALHOST}:{port}"); 75 | let auth = Arc::new(Auth::new_with_client( 76 | client, 77 | client_id, 78 | perm, 79 | redirect_uri, 80 | Tenant::Consumers, 81 | )); 82 | 83 | let (tx, mut rx) = mpsc::channel(1); 84 | let auth2 = auth.clone(); 85 | let cors_token2 = cors_token.clone(); 86 | let handler_fn = move |req| { 87 | request_handler(req, auth2.clone(), cors_token2.clone(), tx.clone()).map( 88 | |(status, msg)| { 89 | Ok::<_, std::convert::Infallible>( 90 | Response::builder() 91 | .status(status) 92 | .header(header::CONTENT_TYPE, "text/plain") 93 | .body(msg) 94 | .expect("no invalid headers"), 95 | ) 96 | }, 97 | ) 98 | }; 99 | 100 | let spawn_server = |listener: tokio::net::TcpListener| { 101 | let handler_fn = handler_fn.clone(); 102 | tokio::spawn(async move { 103 | loop { 104 | let (stream, _) = listener.accept().await.unwrap(); 105 | let handler_fn = handler_fn.clone(); 106 | tokio::spawn(async move { 107 | if let Err(err) = hyper::server::conn::http1::Builder::new() 108 | .serve_connection(TokioIo::new(stream), service_fn(handler_fn)) 109 | .await 110 | { 111 | tracing::error!(%err, "failed to serve connection"); 112 | } 113 | }); 114 | } 115 | }); 116 | }; 117 | spawn_server(listener); 118 | for addr in &localhost_addrs[1..] { 119 | spawn_server(tokio::net::TcpListener::bind((addr.ip(), port)).await?); 120 | } 121 | 122 | // See: https://learn.microsoft.com/en-us/graph/auth-v2-user?view=graph-rest-1.0&tabs=http#parameters 123 | let mut auth_url = auth.code_auth_url(); 124 | auth_url 125 | .query_pairs_mut() 126 | .append_pair("response_mode", "query") 127 | .append_pair("state", &cors_token) 128 | .finish(); 129 | 130 | if let Err(err) = open::that_detached(auth_url.as_str()) { 131 | tracing::error!(%err, "failed to open URL in browser"); 132 | } 133 | println!( 134 | "\ 135 | {DISCLAIMER}\n\n\ 136 | A login page should be opened in your default browser. \ 137 | Please continue login in that page, or press Ctrl-C here to stop login and exit. \ 138 | If it is not opened automatically, please manually open this link:\n\ 139 | {auth_url}\ 140 | " 141 | ); 142 | 143 | // Drop TX before waiting RX. 144 | drop(handler_fn); 145 | let resp = rx 146 | .recv() 147 | .await 148 | .context("local HTTP server exited unexpectedly")?; 149 | anyhow::Ok((auth, resp)) 150 | })?; 151 | 152 | let cred = Credential { 153 | init_time: SystemTime::now(), 154 | read_write: true, 155 | refresh_token: tokens.refresh_token.unwrap(), // Checked in handler. 156 | redirect_uri: auth.redirect_uri().to_owned(), 157 | client_id: auth.client_id().to_owned(), 158 | }; 159 | 160 | let state_path = state_dir.join(STATE_FILE_NAME); 161 | if let Err(err) = std::fs::remove_file(state_path) { 162 | if err.kind() != io::ErrorKind::NotFound { 163 | // Not fatal. 
164 | tracing::error!(%err, "failed to clear states");
165 | }
166 | }
167 |
168 | let cred_path = state_dir.join(USER_CREDENTIAL_FILE_NAME);
169 | safe_write(&cred_path, &cred).context("failed to save credentials")?;
170 |
171 | println!("credential saved");
172 | Ok(())
173 | }
174 |
175 | async fn request_handler(
176 | req: Request<hyper::body::Incoming>,
177 | auth: Arc<Auth>,
178 | cors_token: Arc<str>,
179 | tx: mpsc::Sender<TokenResponse>,
180 | ) -> (StatusCode, String) {
181 | if req.method() != Method::GET {
182 | return (
183 | StatusCode::METHOD_NOT_ALLOWED,
184 | "Only GET is allowed.".into(),
185 | );
186 | }
187 | let Some(pseudo_uri) = req
188 | .uri()
189 | .query()
190 | .and_then(|query| Url::parse(&format!("pseudo:?{query}")).ok())
191 | else {
192 | return (StatusCode::BAD_REQUEST, "Invalid URL.".into());
193 | };
194 | let get = |key: &str| {
195 | pseudo_uri
196 | .query_pairs()
197 | .find_map(|(k, v)| (k == key).then_some(v))
198 | };
199 |
200 | if get("state").as_deref() != Some(&*cors_token) {
201 | return (StatusCode::BAD_REQUEST, "Invalid CORS token".into());
202 | }
203 |
204 | let Some(code) = get("code") else {
205 | return if let Some(err) = get("error") {
206 | let err_msg = get("error_description").unwrap_or_default();
207 | (
208 | StatusCode::UNAUTHORIZED,
209 | format!("Login failed ({err}): {err_msg}"),
210 | )
211 | } else {
212 | (
213 | StatusCode::BAD_REQUEST,
214 | "Missing query parameter 'code' or 'error'.".into(),
215 | )
216 | };
217 | };
218 |
219 | match auth.login_with_code(&code, &ClientCredential::None).await {
220 | Ok(tokens) if tokens.refresh_token.is_some() => match tx.try_send(tokens) {
221 | Ok(()) | Err(TrySendError::Full(_)) => (
222 | StatusCode::OK,
223 | "Successfully logged in. This page can be closed.".into(),
224 | ),
225 | Err(TrySendError::Closed(_)) => unreachable!(),
226 | },
227 | Ok(_) => (
228 | StatusCode::UNAUTHORIZED,
229 | "Missing refresh token in response.".into(),
230 | ),
231 | Err(err) => (
232 | StatusCode::UNAUTHORIZED,
233 | format!("Login with code failed: {err}"),
234 | ),
235 | }
236 | }
237 |
--------------------------------------------------------------------------------
/src/tests.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Write;
2 | use std::fs::File;
3 | use std::future::Future;
4 | use std::io::Read;
5 | use std::num::{NonZeroU32, NonZeroUsize};
6 | use std::time::Duration;
7 | use std::{mem, ptr, slice};
8 |
9 | use anyhow::Result;
10 | use bytes::Bytes;
11 | use futures_util::{FutureExt, Stream};
12 | use orb_ublk::{
13 | BlockDevice, IoFlags, ReadBuf, Sector, WriteBuf, Zone, ZoneBuf, ZoneCond, ZoneType,
14 | };
15 | use parking_lot::Mutex;
16 | use rustix::fd::AsFd;
17 | use rustix::io::Errno;
18 |
19 | use crate::memory_backend::Memory;
20 | use crate::service::{Backend, Config, Frontend};
21 |
22 | #[derive(Debug)]
23 | pub struct TestBackend {
24 | inner: Memory,
25 | log: Mutex<String>,
26 | delay: Mutex<Duration>,
27 | }
28 |
29 | impl TestBackend {
30 | pub fn new_empty(config: &Config) -> Self {
31 | Self {
32 | inner: Memory::new(config),
33 | log: Mutex::default(),
34 | delay: Mutex::new(Duration::ZERO),
35 | }
36 | }
37 |
38 | #[track_caller]
39 | pub fn new_with_chunks(
40 | config: &Config,
41 | // (zid, cid, data) where cid := coff / SECTOR_SIZE.
42 | chunks: impl IntoIterator<Item = (u32, u32, Vec<u8>)>,
43 | ) -> Self {
44 | let mut this = Self::new_empty(config);
45 | for (zid, cid, data) in chunks {
46 | let coff = cid * Sector::SIZE;
47 | let prev = this.inner.zones[zid as usize]
48 | .get_mut()
49 | .insert(coff, data.into());
50 | assert!(prev.is_none());
51 | }
52 | this
53 | }
54 |
55 | pub fn drain_log(&self) -> String {
56 | mem::take(&mut *self.log.lock())
57 | }
58 |
59 | fn delay(&self) -> impl Future<Output = ()> + 'static {
60 | let delay = *self.delay.lock();
61 | async move {
62 | if delay != Duration::ZERO {
63 | tokio::time::sleep(delay).await;
64 | }
65 | }
66 | }
67 | }
68 |
69 | macro_rules! act {
70 | ($this:expr, $($tt:tt)*) => {
71 | write!(*$this.log.lock(), "{};", format_args!($($tt)*)).unwrap()
72 | };
73 | }
74 |
75 | impl Backend for TestBackend {
76 | fn download_chunk(
77 | &self,
78 | zid: u32,
79 | coff: u32,
80 | read_offset: u64,
81 | ) -> impl Stream<Item = Result<Bytes>> + Send + 'static {
82 | assert_eq!(coff % Sector::SIZE, 0);
83 | assert_eq!(read_offset % Sector::SIZE as u64, 0);
84 | let cid = coff / Sector::SIZE;
85 | let read_offset_sec = read_offset / Sector::SIZE as u64;
86 | act!(self, "download({zid}, {cid}s, {read_offset_sec}s)");
87 | let delay = self.delay();
88 | let stream = self.inner.download_chunk(zid, coff, read_offset);
89 | async move {
90 | delay.await;
91 | stream
92 | }
93 | .flatten_stream()
94 | }
95 |
96 | fn upload_chunk(
97 | &self,
98 | zid: u32,
99 | coff: u32,
100 | data: Bytes,
101 | ) -> impl Future<Output = Result<()>> + Send + '_ {
102 | assert_eq!(coff % Sector::SIZE, 0);
103 | assert!([0, 1].contains(&(data.len() % Sector::SIZE as usize)));
104 | let cid = coff / Sector::SIZE;
105 | let len_sec = data.len() as u32 / Sector::SIZE;
106 | let finish_suffix = if data.len() & 1 != 0 { "+" } else { "" };
107 | act!(self, "upload({zid}, {cid}s, {len_sec}s{finish_suffix})");
108 | async move {
109 | self.delay().await;
110 | self.inner.upload_chunk(zid, coff, data).await
111 | }
112 | }
113 |
114 | fn delete_zone(&self, zid: u32) -> impl Future<Output = Result<()>> + Send + '_ {
115 | act!(self, "delete_zone({zid})");
116 | async move {
117 | self.delay().await;
118 | self.inner.delete_zone(zid).await
119 | }
120 | }
121 |
122 | fn delete_all_zones(&self) -> impl Future<Output = Result<()>> + Send + '_ {
123 | act!(self, "delete_all_zones()");
124 | async move {
125 | self.delay().await;
126 | self.inner.delete_all_zones().await
127 | }
128 | }
129 | }
130 |
131 | trait TestFrontend: BlockDevice {
132 | async fn test_read(&self, off: Sector, len: Sector) -> Result<Vec<u8>, Errno> {
133 | assert_ne!(len, Sector(0));
134 | let len = len.bytes() as usize;
135 | let mut buf = vec![0u8; len];
136 | let read_len = {
137 | let mut read_buf = ReadBuf::from_raw(&mut buf);
138 | self.read(off, &mut read_buf, IoFlags::empty()).await?;
139 | len - read_buf.remaining()
140 | };
141 | buf.truncate(read_len);
142 | Ok(buf)
143 | }
144 |
145 | async fn test_write_all(&self, off: Sector, buf: &mut [u8]) -> Result<(), Errno> {
146 | self.test_write_all_flags(off, buf, IoFlags::empty()).await
147 | }
148 |
149 | async fn test_write_all_flags(
150 | &self,
151 | off: Sector,
152 | buf: &mut [u8],
153 | flags: IoFlags,
154 | ) -> Result<(), Errno> {
155 | assert!(!buf.is_empty());
156 | assert_eq!(buf.len() % sec(1), 0);
157 | let len = buf.len();
158 | let buf = WriteBuf::from_raw(buf);
159 | let written = self.write(off, buf, flags).await?;
160 | assert_eq!(written, len);
161 | Ok(())
162 | }
163 |
164 | async fn test_zone_append_all(&self, off: Sector, buf: &mut [u8]) -> Result<Sector, Errno> {
165 | assert!(!buf.is_empty());
166 | assert_eq!(buf.len() % sec(1), 0);
167 | let buf = WriteBuf::from_raw(buf);
168 | let pos = self.zone_append(off, buf, IoFlags::empty()).await?;
169 | Ok(pos)
170 | }
171 |
172 | async fn test_report_zones(&self, off: Sector, zone_cnt: usize) -> Result<Vec<Zone>> {
173 | assert_ne!(zone_cnt, 0);
174 | let memfd =
175 | rustix::fs::memfd_create("zone-report-buf", rustix::fs::MemfdFlags::CLOEXEC).unwrap();
176 | let mut memfd = File::from(memfd);
177 | memfd
178 | .set_len((zone_cnt * mem::size_of::<Zone>()) as u64)
179 | .unwrap();
180 | let mut buf = ZoneBuf::from_raw(memfd.as_fd(), 0, zone_cnt as u32);
181 | self.report_zones(off, &mut buf, IoFlags::empty()).await?;
182 | let read_cnt = zone_cnt - buf.remaining();
183 |
184 | let mut ret = <Vec<Zone>>::with_capacity(read_cnt);
185 | // SAFETY: Have enough capacity.
186 | unsafe { ptr::write_bytes(ret.as_mut_ptr(), 0, read_cnt) };
187 | // SAFETY: Have enough capacity.
188 | let spare_buf_u8 = unsafe {
189 | slice::from_raw_parts_mut(
190 | ret.as_mut_ptr().cast::<u8>(),
191 | read_cnt * mem::size_of::<Zone>(),
192 | )
193 | };
194 | memfd.read_exact(spare_buf_u8).unwrap();
195 | // SAFETY: Initialized `read_cnt` elements, each of which is a valid `Zone`.
196 | unsafe { ret.set_len(read_cnt) };
197 | Ok(ret)
198 | }
199 | }
200 |
201 | impl<T: BlockDevice> TestFrontend for T {}
202 |
203 | const NR_ZONES: usize = 4;
204 | // 4 x 4KiB zones, min chunk 1KiB, max chunk 2KiB.
205 | const CONFIG: Config = Config {
206 | dev_secs: Sector(8 * NR_ZONES as u64),
207 | zone_secs: Sector(8),
208 | min_chunk_size: 1 << 10,
209 | max_chunk_size: 2 << 10,
210 | // Workaround: `Option::unwrap` is not const stable yet.
211 | max_concurrent_streams: match NonZeroUsize::new(8) {
212 | Some(n) => n,
213 | None => unreachable!(),
214 | },
215 | max_concurrent_commits: match NonZeroU32::new(8) {
216 | Some(n) => n,
217 | None => unreachable!(),
218 | },
219 | };
220 |
221 | /// Accept `[(zid, cid, data)]`, where `cid := coff / SECTOR_SIZE`.
222 | async fn new_dev(chunks: &[(u32, u32, Vec<u8>)]) -> Frontend<TestBackend> {
223 | let backend = TestBackend::new_with_chunks(&CONFIG, chunks.iter().cloned());
224 | let chunk_meta = chunks
225 | .iter()
226 | .map(|(zid, cid, data)| {
227 | let global_off = *zid as u64 * CONFIG.zone_secs.bytes() + Sector(*cid as u64).bytes();
228 | (global_off, data.len() as u64)
229 | })
230 | .collect::<Vec<_>>();
231 | let mut dev = Frontend::new(CONFIG, backend, |_, _| Ok(())).unwrap();
232 | dev.init_chunks(&chunk_meta).await.unwrap();
233 | dev
234 | }
235 |
236 | pub const fn sec(n: usize) -> usize {
237 | Sector(n as u64).bytes() as _
238 | }
239 |
240 | fn zone(i: u64, rel_wp: Sector, cond: ZoneCond) -> Zone {
241 | Zone::new(
242 | CONFIG.zone_secs * i,
243 | CONFIG.zone_secs,
244 | rel_wp,
245 | ZoneType::SeqWriteReq,
246 | cond,
247 | )
248 | }
249 |
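// Added note (not part of the original source): a worked example of the geometry fixed by
// `CONFIG` above, to make the helpers easier to follow. With 512-byte sectors,
// `zone_secs = Sector(8)` is 8 * 512 B = 4 KiB per zone and `dev_secs = Sector(8 * 4)` gives
// 4 zones (16 KiB total); `sec(n)` is n * 512 bytes, and a chunk id `cid` corresponds to the
// in-zone byte offset `coff = cid * Sector::SIZE`, matching `new_with_chunks` and the `act!`
// log entries such as `download(zid, cid s, read_offset s)`.
//
//     assert_eq!(sec(1), 512);
//     assert_eq!(CONFIG.zone_secs.bytes(), 4 << 10);
//     assert_eq!(CONFIG.dev_secs.bytes(), 16 << 10);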
250 | #[tokio::test]
251 | async fn report_zones() {
252 | let dev = new_dev(&[]).await;
253 | let got_zones = dev.test_report_zones(Sector(0), 8).await.unwrap();
254 | let expect_zones = (0..NR_ZONES as u64)
255 | .map(|i| zone(i, Sector(0), ZoneCond::Empty))
256 | .collect::<Vec<_>>();
257 | assert_eq!(got_zones, expect_zones);
258 | assert_eq!(dev.backend().drain_log(), "");
259 | }
260 |
261 | #[tokio::test]
262 | async fn init_chunks() {
263 | let dev = new_dev(&[
264 | (0, 0, vec![1u8; sec(2)]),
265 | // Partial tail. Need download.
266 | (0, 2, vec![2u8; sec(1)]),
267 | // Manually finished.
268 | (1, 0, vec![3u8; sec(1) + 1]),
269 | // Large enough chunk.
270 | (2, 0, vec![4u8; sec(2)]),
271 | // Full of data.
272 | (3, 0, vec![5u8; CONFIG.zone_secs.bytes() as _]),
273 | ])
274 | .await;
275 | assert_eq!(dev.backend().drain_log(), "download(0, 2s, 0s);");
276 |
277 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap();
278 | let expect = vec![
279 | zone(0, Sector(3), ZoneCond::Closed),
280 | zone(1, CONFIG.zone_secs, ZoneCond::Full),
281 | zone(2, Sector(2), ZoneCond::Closed),
282 | zone(3, CONFIG.zone_secs, ZoneCond::Full),
283 | ];
284 | assert_eq!(got, expect);
285 |
286 | let got = dev.test_read(Sector(0), CONFIG.zone_secs).await.unwrap();
287 | let mut expect = [0u8; CONFIG.zone_secs.bytes() as _];
288 | expect[..sec(2)].fill(1u8);
289 | expect[sec(2)..sec(3)].fill(2u8);
290 | assert_eq!(got, expect);
291 | // Only the first chunk is downloaded. The second chunk is prefetched before.
292 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);");
293 |
294 | let got = dev
295 | .test_read(CONFIG.zone_secs, CONFIG.zone_secs)
296 | .await
297 | .unwrap();
298 | // NB. The finish-marker byte will not be read, thus only the first sector is non-zero.
299 | let mut expect = [0u8; CONFIG.zone_secs.bytes() as _];
300 | expect[..sec(1)].fill(3u8);
301 | assert_eq!(got, expect);
302 | assert_eq!(dev.backend().drain_log(), "download(1, 0s, 0s);");
303 | }
304 |
305 | #[tokio::test]
306 | async fn read_stream_reuse() {
307 | let dev = new_dev(&[
308 | // [0, 4s)
309 | (0, 0, vec![1u8; sec(4)]),
310 | // [4, 8s)
311 | (0, 4, vec![1u8; sec(4)]),
312 | ])
313 | .await;
314 | // The first zone is full, thus no initial download.
315 | assert_eq!(dev.backend().drain_log(), "");
316 |
317 | // Read [0s, 2s), stream pos at 2s.
318 | let got = dev.test_read(Sector(0), Sector(2)).await.unwrap();
319 | let expect = [1u8; sec(2)];
320 | assert_eq!(got, expect);
321 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);");
322 |
323 | // Read [2s, 6s), drain the first stream, and start another one.
324 | let got = dev.test_read(Sector(2), Sector(4)).await.unwrap();
325 | assert_eq!(got, [1u8; sec(4)]);
326 | assert_eq!(dev.backend().drain_log(), "download(0, 4s, 0s);");
327 |
328 | // Read [6s, 8s), drain the second one.
329 | let got = dev.test_read(Sector(6), Sector(2)).await.unwrap();
330 | assert_eq!(got, expect);
331 | assert_eq!(dev.backend().drain_log(), "");
332 |
333 | // Read [1s, 2s), start at middle.
334 | let got = dev.test_read(Sector(1), Sector(1)).await.unwrap();
335 | assert_eq!(got, [1u8; sec(1)]);
336 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 1s);");
337 |
338 | // Read [2s, 3s), reuse.
339 | let got = dev.test_read(Sector(2), Sector(1)).await.unwrap();
340 | assert_eq!(got, [1u8; sec(1)]);
341 | assert_eq!(dev.backend().drain_log(), "");
342 | }
343 |
344 | /// When reading takes some time, multiple consecutive reads are serialized and
345 | /// reuse one stream.
346 | #[tokio::test]
347 | async fn read_stream_wait_reuse() {
348 | // The exact delay time does not matter. Just to ensure all read futures are polled into
349 | // Pending state before responding.
350 | const DELAY: Duration = Duration::from_millis(100);
351 |
352 | let expect = [&[1u8; sec(2)][..], &[2u8; sec(2)]].concat();
353 | let dev = new_dev(&[
354 | // [0, 4s)
355 | (0, 0, expect.clone()),
356 | // [4, 8s)
357 | (0, 4, vec![1u8; sec(4)]),
358 | ])
359 | .await;
360 | // The first zone is full, thus no initial download.
361 | assert_eq!(dev.backend().drain_log(), ""); 362 | 363 | *dev.backend().delay.lock() = DELAY; 364 | 365 | // Read [0s, 2s) and [2s, 4s) concurrently, while polling them in order. 366 | let (got1, got2) = tokio::join!( 367 | dev.test_read(Sector(0), Sector(2)), 368 | dev.test_read(Sector(2), Sector(2)), 369 | ); 370 | let (expect1, expect2) = expect.split_at(expect.len() / 2); 371 | assert_eq!(got1.unwrap(), expect1); 372 | assert_eq!(got2.unwrap(), expect2); 373 | 374 | // Only downloads once. 375 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 376 | } 377 | 378 | #[tokio::test] 379 | async fn zone_open_close() { 380 | let dev = new_dev(&[ 381 | (0, 0, vec![1u8; sec(1)]), // With tail. 382 | (2, 0, vec![2u8; sec(2)]), // Without tail. 383 | ]) 384 | .await; 385 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 386 | 387 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 388 | let expect_init = vec![ 389 | zone(0, Sector(1), ZoneCond::Closed), 390 | zone(1, Sector(0), ZoneCond::Empty), 391 | zone(2, Sector(2), ZoneCond::Closed), 392 | zone(3, Sector(0), ZoneCond::Empty), 393 | ]; 394 | assert_eq!(got, expect_init); 395 | 396 | // ZONE_OPEN and ZONE_CLOSE are idempotent. 397 | let open_offsets = [Sector(0), CONFIG.zone_secs].repeat(2); 398 | 399 | for &off in &open_offsets { 400 | dev.zone_open(off, IoFlags::empty()).await.unwrap(); 401 | } 402 | 403 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 404 | let expect_opened = vec![ 405 | zone(0, Sector(1), ZoneCond::ExpOpen), 406 | zone(1, Sector(0), ZoneCond::ExpOpen), 407 | zone(2, Sector(2), ZoneCond::Closed), 408 | zone(3, Sector(0), ZoneCond::Empty), 409 | ]; 410 | assert_eq!(got, expect_opened); 411 | 412 | for &off in &open_offsets { 413 | dev.zone_close(off, IoFlags::empty()).await.unwrap(); 414 | } 415 | 416 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 417 | assert_eq!(got, expect_init); 418 | 419 | assert_eq!(dev.backend().drain_log(), ""); 420 | } 421 | 422 | #[tokio::test] 423 | async fn reset_zone() { 424 | let dev = new_dev(&[ 425 | (0, 0, vec![1u8; sec(1)]), // With tail. 426 | (2, 0, vec![2u8; sec(2)]), // Without tail. 427 | ]) 428 | .await; 429 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 430 | 431 | for off in [Sector(0), CONFIG.zone_secs] { 432 | dev.zone_open(off, IoFlags::empty()).await.unwrap(); 433 | } 434 | 435 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 436 | let expect = vec![ 437 | zone(0, Sector(1), ZoneCond::ExpOpen), 438 | zone(1, Sector(0), ZoneCond::ExpOpen), 439 | zone(2, Sector(2), ZoneCond::Closed), 440 | zone(3, Sector(0), ZoneCond::Empty), 441 | ]; 442 | assert_eq!(got, expect); 443 | 444 | for i in 0..4 { 445 | dev.zone_reset(CONFIG.zone_secs * i, IoFlags::empty()) 446 | .await 447 | .unwrap(); 448 | } 449 | 450 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 451 | let expect = vec![ 452 | zone(0, Sector(0), ZoneCond::Empty), 453 | zone(1, Sector(0), ZoneCond::Empty), 454 | zone(2, Sector(0), ZoneCond::Empty), 455 | zone(3, Sector(0), ZoneCond::Empty), 456 | ]; 457 | assert_eq!(got, expect); 458 | 459 | // Only non-empty zones are committed to backend. 460 | assert_eq!(dev.backend().drain_log(), "delete_zone(0);delete_zone(2);"); 461 | } 462 | 463 | #[tokio::test] 464 | async fn reset_all_zone() { 465 | let dev = new_dev(&[ 466 | (0, 0, vec![1u8; sec(1)]), // With tail. 467 | (2, 0, vec![2u8; sec(1) + 1]), // Full. 
468 | ]) 469 | .await; 470 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 471 | 472 | dev.zone_reset_all(IoFlags::empty()).await.unwrap(); 473 | let got = dev.test_report_zones(Sector(0), 4).await.unwrap(); 474 | let expect = vec![ 475 | zone(0, Sector(0), ZoneCond::Empty), 476 | zone(1, Sector(0), ZoneCond::Empty), 477 | zone(2, Sector(0), ZoneCond::Empty), 478 | zone(3, Sector(0), ZoneCond::Empty), 479 | ]; 480 | assert_eq!(got, expect); 481 | assert_eq!(dev.backend().drain_log(), "delete_all_zones();"); 482 | } 483 | 484 | #[tokio::test] 485 | async fn stream_invalidate_on_reset() { 486 | let dev = new_dev(&[ 487 | // [0, 8s) 488 | (0, 0, vec![1u8; sec(8)]), 489 | ]) 490 | .await; 491 | // The first zone is full, thus no initial download. 492 | assert_eq!(dev.backend().drain_log(), ""); 493 | 494 | // Read [0s, 2s), stream pos at 2s. 495 | let got = dev.test_read(Sector(0), Sector(2)).await.unwrap(); 496 | let expect = [1u8; sec(2)]; 497 | assert_eq!(got, expect); 498 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 499 | 500 | // Reset the zone. 501 | dev.zone_reset(Sector(0), IoFlags::empty()).await.unwrap(); 502 | assert_eq!(dev.backend().drain_log(), "delete_zone(0);"); 503 | 504 | // Read [2s, 3s), no reuse. 505 | let got = dev.test_read(Sector(2), Sector(2)).await.unwrap(); 506 | assert_eq!(got, [0u8; sec(2)]); 507 | assert_eq!(dev.backend().drain_log(), ""); 508 | } 509 | 510 | #[tokio::test] 511 | async fn stream_invalidate_on_reupload() { 512 | let dev = new_dev(&[ 513 | // [0, 8s) 514 | (0, 0, vec![1u8; sec(8)]), 515 | ]) 516 | .await; 517 | // The first zone is full, thus no initial download. 518 | assert_eq!(dev.backend().drain_log(), ""); 519 | 520 | // Read [0s, 2s), stream pos at 2s. 521 | let got = dev.test_read(Sector(0), Sector(2)).await.unwrap(); 522 | let expect = [1u8; sec(2)]; 523 | assert_eq!(got, expect); 524 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 525 | 526 | // Reset the zone and replace content. 527 | dev.zone_reset(Sector(0), IoFlags::empty()).await.unwrap(); 528 | dev.test_write_all(Sector(0), &mut [2u8; sec(8)]) 529 | .await 530 | .unwrap(); 531 | assert_eq!( 532 | dev.backend().drain_log(), 533 | "delete_zone(0);upload(0, 0s, 8s);" 534 | ); 535 | 536 | // Read [2s, 3s), no reuse, and got new data. 537 | let got = dev.test_read(Sector(2), Sector(2)).await.unwrap(); 538 | assert_eq!(got, [2u8; sec(2)]); 539 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 2s);"); 540 | } 541 | 542 | #[tokio::test] 543 | async fn bufferred_read_write() { 544 | let dev = new_dev(&[]).await; 545 | 546 | let got = dev.test_read(Sector(0), CONFIG.zone_secs).await.unwrap(); 547 | let mut expect = [0u8; CONFIG.zone_secs.bytes() as usize]; 548 | assert_eq!(got, expect); 549 | 550 | dev.test_write_all(Sector(0), &mut [42u8; sec(1)]) 551 | .await 552 | .unwrap(); 553 | let got = dev.test_read(Sector(0), CONFIG.zone_secs).await.unwrap(); 554 | expect[..sec(1)].fill(42u8); 555 | assert_eq!(got, expect); 556 | assert_eq!( 557 | dev.test_report_zones(Sector(0), 1).await.unwrap()[0], 558 | zone(0, Sector(1), ZoneCond::ImpOpen), 559 | ); 560 | 561 | // No commit before `FLUSH`. 562 | assert_eq!(dev.backend().drain_log(), ""); 563 | 564 | dev.flush(IoFlags::empty()).await.unwrap(); 565 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 1s);"); 566 | 567 | // `FLUSH` is idempotent and does no redundant work. 
568 | dev.flush(IoFlags::empty()).await.unwrap(); 569 | assert_eq!(dev.backend().drain_log(), ""); 570 | } 571 | 572 | #[tokio::test] 573 | async fn write_fua() { 574 | let dev = new_dev(&[]).await; 575 | 576 | let mut expect = [1u8; sec(2)]; 577 | expect[sec(1)..].fill(2u8); 578 | 579 | dev.test_write_all_flags(Sector(0), &mut [1u8; sec(1)], IoFlags::Fua) 580 | .await 581 | .unwrap(); 582 | // Should commit it inline. 583 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 1s);"); 584 | let got = dev.test_read(Sector(0), Sector(1)).await.unwrap(); 585 | assert_eq!(got, expect[..expect.len() / 2]); 586 | 587 | // No action on FLUSH. 588 | assert_eq!(dev.backend().drain_log(), ""); 589 | 590 | dev.test_write_all_flags(Sector(1), &mut [2u8; sec(1)], IoFlags::Fua) 591 | .await 592 | .unwrap(); 593 | // Also commit inline and replace the chunk. 594 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 2s);"); 595 | let got = dev.test_read(Sector(0), Sector(2)).await.unwrap(); 596 | assert_eq!(got, expect); 597 | 598 | // No action on FLUSH. 599 | dev.flush(IoFlags::empty()).await.unwrap(); 600 | assert_eq!(dev.backend().drain_log(), ""); 601 | } 602 | 603 | #[tokio::test] 604 | async fn read_tail() { 605 | let dev = new_dev(&[]).await; 606 | 607 | let mut buf = [1u8; sec(2)]; 608 | buf[sec(1)..].fill(2u8); 609 | 610 | // Delayed tail. 611 | dev.test_write_all(Sector(0), &mut buf).await.unwrap(); 612 | assert_eq!(dev.backend().drain_log(), ""); 613 | 614 | assert_eq!( 615 | dev.test_read(Sector(0), Sector(1)).await.unwrap(), 616 | [1u8; sec(1)], 617 | ); 618 | assert_eq!( 619 | dev.test_read(Sector(1), Sector(1)).await.unwrap(), 620 | [2u8; sec(1)], 621 | ); 622 | assert_eq!( 623 | dev.test_read(Sector(2), Sector(1)).await.unwrap(), 624 | [0u8; sec(1)], 625 | ); 626 | 627 | assert_eq!( 628 | dev.test_read(Sector(0), Sector(3)).await.unwrap(), 629 | [&buf[..], &[0u8; sec(1)]].concat(), 630 | ); 631 | 632 | // Read from cache. 633 | assert_eq!(dev.backend().drain_log(), ""); 634 | } 635 | 636 | #[tokio::test] 637 | async fn reset_discard_buffer() { 638 | let dev = new_dev(&[]).await; 639 | 640 | dev.test_write_all(Sector(0), &mut [42u8; sec(1)]) 641 | .await 642 | .unwrap(); 643 | dev.zone_reset(Sector(0), IoFlags::empty()).await.unwrap(); 644 | dev.flush(IoFlags::empty()).await.unwrap(); 645 | assert_eq!(dev.backend().drain_log(), "delete_zone(0);"); 646 | 647 | let got = dev.test_read(Sector(0), CONFIG.zone_secs).await.unwrap(); 648 | assert_eq!(got, [0u8; CONFIG.zone_secs.bytes() as _]); 649 | 650 | dev.test_write_all(Sector(0), &mut [42u8; sec(1)]) 651 | .await 652 | .unwrap(); 653 | dev.zone_reset_all(IoFlags::empty()).await.unwrap(); 654 | dev.flush(IoFlags::empty()).await.unwrap(); 655 | assert_eq!(dev.backend().drain_log(), "delete_all_zones();"); 656 | } 657 | 658 | #[tokio::test] 659 | async fn inline_commit() { 660 | let dev = new_dev(&[]).await; 661 | let mut off = Sector(0); 662 | 663 | // Immediate inline commit. 664 | let mut data1 = [3u8; CONFIG.max_chunk_size]; 665 | dev.test_write_all(off, &mut data1).await.unwrap(); 666 | off += Sector::from_bytes(data1.len() as _); 667 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 4s);"); 668 | assert_eq!( 669 | dev.test_report_zones(Sector(0), 1).await.unwrap()[0], 670 | zone(0, Sector(4), ZoneCond::ImpOpen), 671 | ); 672 | 673 | // No effect. Chunks are already committed, 674 | dev.flush(IoFlags::empty()).await.unwrap(); 675 | assert_eq!(dev.backend().drain_log(), ""); 676 | 677 | // Buffered. 
678 | let mut data2 = [1u8; sec(1)]; 679 | dev.test_write_all(off, &mut data2).await.unwrap(); 680 | off += Sector::from_bytes(data2.len() as _); 681 | assert_eq!(dev.backend().drain_log(), ""); 682 | 683 | // Append until full, thus trigger another inline commit. 684 | let mut data3 = vec![2u8; (CONFIG.zone_secs - off).bytes() as _]; 685 | dev.test_write_all(off, &mut data3).await.unwrap(); 686 | off += Sector::from_bytes(data3.len() as _); 687 | assert_eq!(dev.backend().drain_log(), "upload(0, 4s, 4s);"); 688 | assert_eq!( 689 | dev.test_report_zones(Sector(0), 1).await.unwrap()[0], 690 | zone(0, Sector(8), ZoneCond::Full), 691 | ); 692 | 693 | // Validate written data. 694 | let got = dev.test_read(Sector(0), CONFIG.zone_secs).await.unwrap(); 695 | let expect = [&data1[..], &data2, &data3].concat(); 696 | assert_eq!(got, expect); 697 | } 698 | 699 | #[tokio::test] 700 | async fn zone_append() { 701 | let dev = new_dev(&[]).await; 702 | 703 | let off = Sector(0); 704 | let mut data1 = [1u8; sec(2)]; 705 | let pos1 = dev.test_zone_append_all(off, &mut data1).await.unwrap(); 706 | assert_eq!(pos1, Sector(0)); 707 | 708 | let mut data2 = [2u8; sec(1)]; 709 | let pos2 = dev.test_zone_append_all(off, &mut data2).await.unwrap(); 710 | assert_eq!(pos2, Sector(2)); 711 | 712 | assert_eq!( 713 | dev.test_report_zones(Sector(0), 1).await.unwrap()[0], 714 | zone(0, Sector(3), ZoneCond::ImpOpen), 715 | ); 716 | } 717 | 718 | #[tokio::test] 719 | async fn zone_finish() { 720 | let dev = new_dev(&[]).await; 721 | 722 | dev.zone_finish(Sector(0), IoFlags::empty()).await.unwrap(); 723 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 0s+);"); 724 | assert_eq!( 725 | dev.test_report_zones(Sector(0), 1).await.unwrap()[0], 726 | zone(0, CONFIG.zone_secs, ZoneCond::Full), 727 | ); 728 | 729 | let off = CONFIG.zone_secs; 730 | dev.test_write_all(off, &mut [0u8; sec(1)]).await.unwrap(); 731 | dev.zone_finish(off, IoFlags::empty()).await.unwrap(); 732 | assert_eq!(dev.backend().drain_log(), "upload(1, 0s, 1s+);"); 733 | assert_eq!( 734 | dev.test_report_zones(off, 1).await.unwrap()[0], 735 | zone(1, CONFIG.zone_secs, ZoneCond::Full), 736 | ); 737 | } 738 | 739 | #[tokio::test] 740 | async fn replace_tail() { 741 | let dev = new_dev(&[]).await; 742 | 743 | let mut data = [1u8; sec(2)]; 744 | let (lhs, rhs) = data.split_at_mut(sec(1)); 745 | rhs.fill(2u8); 746 | 747 | dev.test_zone_append_all(Sector(0), lhs).await.unwrap(); 748 | dev.flush(IoFlags::empty()).await.unwrap(); 749 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 1s);"); 750 | 751 | // Should replace the first chunk. 752 | dev.test_zone_append_all(Sector(0), rhs).await.unwrap(); 753 | dev.flush(IoFlags::empty()).await.unwrap(); 754 | assert_eq!(dev.backend().drain_log(), "upload(0, 0s, 2s);"); 755 | 756 | let got = dev.test_read(Sector(0), Sector(2)).await.unwrap(); 757 | assert_eq!(got, data); 758 | assert_eq!(dev.backend().drain_log(), "download(0, 0s, 0s);"); 759 | 760 | // Idempotent. 761 | dev.flush(IoFlags::empty()).await.unwrap(); 762 | assert_eq!(dev.backend().drain_log(), ""); 763 | 764 | // Previous chunk size is over min_chunk_size (1KiB), so this creates a new one. 
765 | dev.test_zone_append_all(Sector(0), &mut [3u8; sec(1)]) 766 | .await 767 | .unwrap(); 768 | dev.flush(IoFlags::empty()).await.unwrap(); 769 | assert_eq!(dev.backend().drain_log(), "upload(0, 2s, 1s);"); 770 | } 771 | -------------------------------------------------------------------------------- /ublk-chown-unprivileged/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ublk-chown-unprivileged" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT or Apache-2.0" 6 | # NB. Sync with CI and README. 7 | rust-version = "1.76" # orb-ublk 8 | 9 | [dependencies] 10 | orb-ublk = { path = "../orb-ublk" } 11 | rustix = "1" 12 | -------------------------------------------------------------------------------- /ublk-chown-unprivileged/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /ublk-chown-unprivileged/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 2 | 3 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 4 | 5 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 6 | -------------------------------------------------------------------------------- /ublk-chown-unprivileged/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use orb_ublk::{ControlDevice, FeatureFlags, BDEV_PREFIX, CDEV_PREFIX}; 4 | use rustix::io::Errno; 5 | 6 | fn main() -> io::Result<()> { 7 | let path = std::env::args() 8 | .nth(1) 9 | .ok_or_else(|| io::Error::other("missing argument"))?; 10 | let id = path 11 | .strip_prefix(CDEV_PREFIX) 12 | .or(path.strip_prefix(BDEV_PREFIX)) 13 | .and_then(|n| n.parse::<u32>().ok()) 14 | .ok_or_else(|| io::Error::other("argument should be /dev/ublk{b,c}NUM"))?; 15 | 16 | // Open the file to prevent racing deletion. 17 | let f = match std::fs::File::open(&path) { 18 | Ok(f) => f, 19 | // `EBUSY` is reported on the control device when it is opened, likely by the program 20 | // itself with root permission. In this case, we do not need to participate. 21 | Err(err) if err.raw_os_error() == Some(Errno::BUSY.raw_os_error()) => return Ok(()), 22 | Err(err) => return Err(err), 23 | }; 24 | let ctl = ControlDevice::open()?; 25 | let info = ctl.get_device_info(id)?; 26 | assert_eq!(info.dev_id(), id); 27 | if info.flags().contains(FeatureFlags::UnprivilegedDev) { 28 | std::os::unix::fs::fchown(&f, Some(info.owner_uid()), Some(info.owner_gid()))?; 29 | } 30 | Ok(()) 31 | } 32 | --------------------------------------------------------------------------------