├── .gitignore ├── .gitlab-ci.yml ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── fuzz ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── fuzz_targets │ └── fuse_fuzz_target.rs ├── src ├── allocator.rs ├── archive.rs ├── bin │ ├── ar.rs │ ├── clone.rs │ ├── mkfs.rs │ ├── mount.rs │ └── resize.rs ├── block.rs ├── clone.rs ├── dir.rs ├── disk │ ├── cache.rs │ ├── file.rs │ ├── io.rs │ ├── memory.rs │ ├── mod.rs │ └── sparse.rs ├── filesystem.rs ├── header.rs ├── htree.rs ├── key.rs ├── lib.rs ├── mount │ ├── fuse.rs │ ├── mod.rs │ └── redox │ │ ├── mod.rs │ │ ├── resource.rs │ │ └── scheme.rs ├── node.rs ├── record.rs ├── tests.rs ├── transaction.rs ├── tree.rs └── unmount.rs ├── test.sh └── tests └── tests.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | image.bin 3 | image 4 | image* 5 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: "redoxos/redoxer" 2 | 3 | stages: 4 | - build 5 | - test 6 | 7 | cache: 8 | paths: 9 | - target/ 10 | 11 | build:linux: 12 | stage: build 13 | script: cargo +nightly build --verbose 14 | 15 | build:redox: 16 | stage: build 17 | script: redoxer build --verbose 18 | 19 | test:linux: 20 | stage: test 21 | dependencies: 22 | - build:linux 23 | script: cargo +nightly test --verbose 24 | 25 | test:redox: 26 | stage: test 27 | dependencies: 28 | - build:redox 29 | # only run integration test as without KVM unit tests is super slow 30 | script: redoxer test --verbose -- --test '*' -- --nocapture 31 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: rust 3 | rust: 4 | - nightly 5 | os: 6 | - linux 7 | - osx 8 | dist: trusty 9 | before_install: 10 
| - if [ "$TRAVIS_OS_NAME" = "linux" ]; then 11 | sudo apt-get install -qq pkg-config fuse libfuse-dev; 12 | sudo modprobe fuse; 13 | sudo chmod 666 /dev/fuse; 14 | sudo chown root:$USER /etc/fuse.conf; 15 | fi 16 | - if [ "$TRAVIS_OS_NAME" = "osx" ]; then 17 | brew update; 18 | brew install Caskroom/cask/osxfuse; 19 | fi 20 | notifications: 21 | email: false 22 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "redoxfs" 3 | description = "The Redox Filesystem" 4 | repository = "https://gitlab.redox-os.org/redox-os/redoxfs" 5 | version = "0.8.1" 6 | license-file = "LICENSE" 7 | readme = "README.md" 8 | authors = ["Jeremy Soller "] 9 | edition = "2021" 10 | 11 | [lib] 12 | name = "redoxfs" 13 | path = "src/lib.rs" 14 | 15 | [[bin]] 16 | name = "redoxfs" 17 | path = "src/bin/mount.rs" 18 | doc = false 19 | required-features = ["std"] 20 | 21 | [[bin]] 22 | name = "redoxfs-ar" 23 | path = "src/bin/ar.rs" 24 | doc = false 25 | required-features = ["std"] 26 | 27 | [[bin]] 28 | name = "redoxfs-clone" 29 | path = "src/bin/clone.rs" 30 | doc = false 31 | required-features = ["std"] 32 | 33 | [[bin]] 34 | name = "redoxfs-mkfs" 35 | path = "src/bin/mkfs.rs" 36 | doc = false 37 | required-features = ["std"] 38 | 39 | [[bin]] 40 | name = "redoxfs-resize" 41 | path = "src/bin/resize.rs" 42 | doc = false 43 | required-features = ["std"] 44 | 45 | [dependencies] 46 | aes = { version = "0.8", default-features = false } 47 | argon2 = { version = "0.4", default-features = false, features = ["alloc"] } 48 | base64ct = { version = "1", default-features = false } 49 | bitflags = "2" 50 | endian-num = "0.1" 51 | env_logger = { version = "0.11", optional = true } 52 | getrandom = { version = "0.2.5", optional = true } 53 | humansize = { version = "2", optional = true } 54 | libc = "0.2" 55 | libredox = { version = "0.1.6", features = 
["call"], optional = true } 56 | log = { version = "0.4.14", default-features = false, optional = true } 57 | lz4_flex = { version = "0.11", default-features = false, features = ["checked-decode"] } 58 | parse-size = { version = "1", optional = true } 59 | range-tree = { version = "0.1", optional = true } 60 | redox-path = "0.3.0" 61 | redox-scheme = { version = "0.7.0", optional = true } 62 | redox_syscall = "0.5.15" 63 | seahash = { version = "4.1.0", default-features = false } 64 | termion = { version = "4", optional = true } 65 | uuid = { version = "1.4", default-features = false } 66 | xts-mode = { version = "0.5", default-features = false } 67 | 68 | [features] 69 | default = ["std", "log"] 70 | std = [ 71 | "env_logger", 72 | "fuser", 73 | "getrandom", 74 | "humansize", 75 | "libc", 76 | "libredox", 77 | "parse-size", 78 | "range-tree", 79 | "termion", 80 | "time", 81 | "uuid/v4", 82 | "redox_syscall/std", 83 | "redox-scheme", 84 | ] 85 | 86 | [target.'cfg(not(target_os = "redox"))'.dependencies] 87 | fuser = { version = "0.14", optional = true } 88 | libc = { version = "0.2", optional = true } 89 | time = { version = "0.3", optional = true } 90 | 91 | [lints.rust] 92 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } 93 | 94 | [dev-dependencies] 95 | assert_cmd = "2.0.17" 96 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Jeremy Soller 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to 
the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | UNAME := $(shell uname) 2 | 3 | ifeq ($(UNAME),Darwin) 4 | FUMOUNT=umount 5 | else ifeq ($(UNAME),FreeBSD) 6 | FUMOUNT=sudo umount 7 | else 8 | # Detect which version of the fusermount binary is available. 
9 | ifneq (, $(shell which fusermount3)) 10 | FUMOUNT=fusermount3 -u 11 | else 12 | FUMOUNT=fusermount -u 13 | endif 14 | endif 15 | 16 | image.bin: 17 | cargo build --release --bin redoxfs-mkfs 18 | dd if=/dev/zero of=image.bin bs=1048576 count=1024 19 | target/release/redoxfs-mkfs image.bin 20 | 21 | mount: image.bin FORCE 22 | mkdir -p image 23 | cargo build --release --bin redoxfs 24 | target/release/redoxfs image.bin image 25 | 26 | unmount: FORCE 27 | sync 28 | -${FUMOUNT} image 29 | rm -rf image 30 | 31 | clean: FORCE 32 | sync 33 | -${FUMOUNT} image 34 | rm -rf image image.bin 35 | cargo clean 36 | 37 | FORCE: 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RedoxFS 2 | 3 | This is the default filesystem of Redox OS inspired by [ZFS](https://docs.freebsd.org/en/books/handbook/zfs/) and adapted to a microkernel architecture. 4 | 5 | (It's a replacement for [TFS](https://gitlab.redox-os.org/redox-os/tfs)) 6 | 7 | Current features: 8 | 9 | - Compatible with Redox and Linux (FUSE) 10 | - Copy-on-write 11 | - Data/metadata checksums 12 | - Transparent encryption 13 | - Standard Unix file attributes 14 | - File/directory size limit up to 193TiB (212TB) 15 | - File/directory quantity limit up to 4 billion per 193TiB (2^32 - 1 = 4294967295) 16 | - MIT licensed 17 | - Disk encryption fully supported by the Redox bootloader, letting it load the kernel off an encrypted partition. 18 | 19 | Being MIT licensed, RedoxFS can be bundled on GPL-licensed operating systems (Linux, for example). 20 | 21 | ### How to mount a partition 22 | 23 | - Install RedoxFS 24 | 25 | ```sh 26 | cargo install redoxfs 27 | ``` 28 | 29 | You can also build RedoxFS from this repository. 
30 | 31 | - Configure your storage device to allow rootless usage 32 | 33 | If you are on Linux you need root permission to acess block devices (storage), but it's recommended to run RedoxFS as rootless. 34 | 35 | To do that you need to configure your storage device permission to your user with the following command: 36 | 37 | ```sh 38 | sudo setfacl -m u:your-username:rw /path/to/disk 39 | ``` 40 | 41 | - Mount your RedoxFS partition 42 | 43 | ```sh 44 | redoxfs /path/to/disk /path/to/mount 45 | ``` 46 | 47 | [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) 48 | [![crates.io](http://meritbadge.herokuapp.com/redoxfs)](https://crates.io/crates/redoxfs) 49 | [![docs.rs](https://docs.rs/redoxfs/badge.svg)](https://docs.rs/redoxfs) 50 | -------------------------------------------------------------------------------- /fuzz/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | corpus 3 | artifacts 4 | coverage 5 | -------------------------------------------------------------------------------- /fuzz/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "redoxfs-fuzz" 3 | version = "0.0.0" 4 | publish = false 5 | edition = "2021" 6 | 7 | [features] 8 | default = [] 9 | log = [] 10 | 11 | [package.metadata] 12 | cargo-fuzz = true 13 | 14 | [dependencies] 15 | anyhow = "1.0.86" 16 | arbitrary = { version = "1.3.2", features = ["derive"] } 17 | fuser = { version = "0.14" } 18 | libfuzzer-sys = "0.4" 19 | nix = { version = "0.29.0", features = ["fs"] } 20 | tempfile = "3.10.1" 21 | 22 | [dependencies.redoxfs] 23 | path = ".." 
24 | 25 | [[bin]] 26 | name = "fuse_fuzz_target" 27 | path = "fuzz_targets/fuse_fuzz_target.rs" 28 | test = false 29 | doc = false 30 | bench = false 31 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/fuse_fuzz_target.rs: -------------------------------------------------------------------------------- 1 | //! Fuzzer that exercises random file system operations against a FUSE-mounted redoxfs. 2 | 3 | #![no_main] 4 | 5 | use anyhow::{ensure, Result}; 6 | use fuser; 7 | use libfuzzer_sys::{arbitrary::Arbitrary, fuzz_target, Corpus}; 8 | use nix::sys::statvfs::statvfs; 9 | use std::{ 10 | fs::{self, File, FileTimes, OpenOptions}, 11 | io::{Read, Seek, SeekFrom, Write}, 12 | os::unix::fs::{self as unix_fs, PermissionsExt}, 13 | path::{Path, PathBuf}, 14 | thread, 15 | time::{Duration, SystemTime, UNIX_EPOCH}, 16 | }; 17 | use tempfile; 18 | 19 | use redoxfs::{mount::fuse::Fuse, DiskSparse, FileSystem}; 20 | 21 | /// Maximum size for files and buffers. Chosen arbitrarily with fuzzing performance in mind. 22 | const MAX_SIZE: u64 = 10_000_000; 23 | /// Limit on the number of remounts in a single test case. Chosen arbitrarily with fuzzing 24 | /// performance in mind: remounts are costly. 25 | const MAX_MOUNT_SEQUENCES: usize = 3; 26 | 27 | /// An operation to be performed by the fuzzer. 
28 | #[derive(Arbitrary, Clone, Debug)] 29 | enum Operation { 30 | Chown { 31 | path: PathBuf, 32 | uid: Option, 33 | gid: Option, 34 | }, 35 | CreateDir { 36 | path: PathBuf, 37 | }, 38 | HardLink { 39 | original: PathBuf, 40 | link: PathBuf, 41 | }, 42 | Metadata { 43 | path: PathBuf, 44 | }, 45 | Read { 46 | path: PathBuf, 47 | }, 48 | ReadDir { 49 | path: PathBuf, 50 | }, 51 | ReadLink { 52 | path: PathBuf, 53 | }, 54 | RemoveDir { 55 | path: PathBuf, 56 | }, 57 | RemoveFile { 58 | path: PathBuf, 59 | }, 60 | Rename { 61 | from: PathBuf, 62 | to: PathBuf, 63 | }, 64 | SeekRead { 65 | path: PathBuf, 66 | seek_pos: u64, 67 | buf_size: usize, 68 | }, 69 | SeekWrite { 70 | path: PathBuf, 71 | seek_pos: u64, 72 | buf_size: usize, 73 | }, 74 | SetLen { 75 | path: PathBuf, 76 | size: u64, 77 | }, 78 | SetPermissions { 79 | path: PathBuf, 80 | readonly: Option, 81 | mode: Option, 82 | }, 83 | SetTimes { 84 | path: PathBuf, 85 | accessed_since_epoch: Option, 86 | modified_since_epoch: Option, 87 | }, 88 | Statvfs {}, 89 | SymLink { 90 | original: PathBuf, 91 | link: PathBuf, 92 | }, 93 | Write { 94 | path: PathBuf, 95 | buf_size: usize, 96 | }, 97 | } 98 | 99 | /// Parameters for mounting the file system and operations to be performed afterwards. 100 | #[derive(Arbitrary, Clone, Debug)] 101 | struct MountSequence { 102 | squash: bool, 103 | operations: Vec, 104 | } 105 | 106 | /// The whole input to a single fuzzer invocation. 107 | #[derive(Arbitrary, Clone, Debug)] 108 | struct TestCase { 109 | disk_size: u64, 110 | reserved_size: u64, 111 | mount_sequences: Vec, 112 | } 113 | 114 | /// Creates the disk for backing the Redoxfs. 115 | fn create_disk(temp_path: &Path, disk_size: u64) -> DiskSparse { 116 | let disk_path = temp_path.join("disk.img"); 117 | DiskSparse::create(disk_path, disk_size).unwrap() 118 | } 119 | 120 | /// Creates an empty Redoxfs. 
121 | fn create_redoxfs(disk: DiskSparse, reserved_size: u64) -> bool { 122 | let password = None; 123 | let reserved = vec![0; reserved_size as usize]; 124 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 125 | FileSystem::create_reserved( 126 | disk, 127 | password, 128 | &reserved, 129 | ctime.as_secs(), 130 | ctime.subsec_nanos(), 131 | ) 132 | .is_ok() 133 | } 134 | 135 | /// Mounts an existing Redoxfs, runs the callback and performs the unmount. 136 | fn with_redoxfs_mount(temp_path: &Path, disk: DiskSparse, squash: bool, callback: F) 137 | where 138 | F: FnOnce(&Path) + Send + 'static, 139 | { 140 | let password = None; 141 | let block = None; 142 | let mut fs = FileSystem::open(disk, password, block, squash).unwrap(); 143 | 144 | let mount_path = temp_path.join("mount"); 145 | fs::create_dir_all(&mount_path).unwrap(); 146 | let mut session = fuser::Session::new(Fuse { fs: &mut fs }, &mount_path, &[]).unwrap(); 147 | let mut unmounter = session.unmount_callable(); 148 | 149 | let join_handle = thread::spawn(move || { 150 | callback(&mount_path); 151 | unmounter.unmount().unwrap(); 152 | }); 153 | 154 | session.run().unwrap(); 155 | join_handle.join().unwrap(); 156 | } 157 | 158 | fn get_path_within_fs(fs_path: &Path, path_to_add: &Path) -> Result { 159 | ensure!(path_to_add.is_relative()); 160 | ensure!(path_to_add 161 | .components() 162 | .all(|c| c != std::path::Component::ParentDir)); 163 | Ok(fs_path.join(path_to_add)) 164 | } 165 | 166 | fn do_operation(fs_path: &Path, op: &Operation) -> Result<()> { 167 | match op { 168 | Operation::Chown { path, uid, gid } => { 169 | let path = get_path_within_fs(fs_path, path)?; 170 | unix_fs::chown(path, *uid, *gid)?; 171 | } 172 | Operation::CreateDir { path } => { 173 | let path = get_path_within_fs(fs_path, path)?; 174 | fs::create_dir(path)?; 175 | } 176 | Operation::HardLink { original, link } => { 177 | let original = get_path_within_fs(fs_path, original)?; 178 | let link = 
get_path_within_fs(fs_path, link)?; 179 | fs::hard_link(original, link)?; 180 | } 181 | Operation::Metadata { path } => { 182 | let path = get_path_within_fs(fs_path, path)?; 183 | fs::metadata(path)?; 184 | } 185 | Operation::Read { path } => { 186 | let path = get_path_within_fs(fs_path, path)?; 187 | fs::read(path)?; 188 | } 189 | Operation::ReadDir { path } => { 190 | let path = get_path_within_fs(fs_path, path)?; 191 | let _ = fs::read_dir(path)?.count(); 192 | } 193 | Operation::ReadLink { path } => { 194 | let path = get_path_within_fs(fs_path, path)?; 195 | fs::read_link(path)?; 196 | } 197 | Operation::RemoveDir { path } => { 198 | let path = get_path_within_fs(fs_path, path)?; 199 | fs::remove_dir(path)?; 200 | } 201 | Operation::RemoveFile { path } => { 202 | let path = get_path_within_fs(fs_path, path)?; 203 | fs::remove_file(path)?; 204 | } 205 | Operation::Rename { from, to } => { 206 | let from = get_path_within_fs(fs_path, from)?; 207 | let to = get_path_within_fs(fs_path, to)?; 208 | fs::rename(from, to)?; 209 | } 210 | Operation::SeekRead { 211 | path, 212 | seek_pos, 213 | buf_size, 214 | } => { 215 | ensure!(*buf_size as u64 <= MAX_SIZE); 216 | let path = get_path_within_fs(fs_path, path)?; 217 | let mut file = File::open(path)?; 218 | file.seek(SeekFrom::Start(*seek_pos))?; 219 | let mut buf = vec![0; *buf_size]; 220 | file.read(&mut buf)?; 221 | } 222 | Operation::SeekWrite { 223 | path, 224 | seek_pos, 225 | buf_size, 226 | } => { 227 | ensure!(*seek_pos <= MAX_SIZE); 228 | ensure!(*buf_size as u64 <= MAX_SIZE); 229 | let path = get_path_within_fs(fs_path, path)?; 230 | let mut file = OpenOptions::new().write(true).open(path)?; 231 | file.seek(SeekFrom::Start(*seek_pos))?; 232 | let buf = vec![0; *buf_size]; 233 | file.write(&buf)?; 234 | } 235 | Operation::SetLen { path, size } => { 236 | let path = get_path_within_fs(fs_path, path)?; 237 | let file = OpenOptions::new().write(true).open(path)?; 238 | file.set_len(*size)?; 239 | } 240 | 
Operation::SetPermissions { 241 | path, 242 | readonly, 243 | mode, 244 | } => { 245 | let path = get_path_within_fs(fs_path, path)?; 246 | let metadata = fs::metadata(&path)?; 247 | let mut perms = metadata.permissions(); 248 | if let Some(readonly) = readonly { 249 | perms.set_readonly(*readonly); 250 | } 251 | if let Some(mode) = mode { 252 | perms.set_mode(*mode); 253 | } 254 | fs::set_permissions(path, perms)?; 255 | } 256 | Operation::SetTimes { 257 | path, 258 | accessed_since_epoch, 259 | modified_since_epoch, 260 | } => { 261 | let path = get_path_within_fs(fs_path, path)?; 262 | let file = File::options().write(true).open(path)?; 263 | let mut times = FileTimes::new(); 264 | if let Some(accessed_since_epoch) = accessed_since_epoch { 265 | if let Some(accessed) = UNIX_EPOCH.checked_add(*accessed_since_epoch) { 266 | times = times.set_accessed(accessed); 267 | } 268 | } 269 | if let Some(modified_since_epoch) = modified_since_epoch { 270 | if let Some(modified) = UNIX_EPOCH.checked_add(*modified_since_epoch) { 271 | times = times.set_modified(modified); 272 | } 273 | } 274 | file.set_times(times)?; 275 | } 276 | Operation::Statvfs {} => { 277 | statvfs(fs_path)?; 278 | } 279 | Operation::SymLink { original, link } => { 280 | let original = get_path_within_fs(fs_path, original)?; 281 | let link = get_path_within_fs(fs_path, link)?; 282 | unix_fs::symlink(original, link)?; 283 | } 284 | Operation::Write { path, buf_size } => { 285 | ensure!(*buf_size as u64 <= MAX_SIZE); 286 | let path = get_path_within_fs(fs_path, path)?; 287 | let buf = vec![0; *buf_size]; 288 | fs::write(path, &buf)?; 289 | } 290 | } 291 | Ok(()) 292 | } 293 | 294 | fuzz_target!(|test_case: TestCase| -> Corpus { 295 | if test_case.disk_size > MAX_SIZE 296 | || test_case.reserved_size > MAX_SIZE 297 | || test_case.mount_sequences.len() > MAX_MOUNT_SEQUENCES 298 | { 299 | return Corpus::Reject; 300 | } 301 | 302 | let temp_dir = tempfile::Builder::new() 303 | .prefix("fuse_fuzz_target") 304 
| .tempdir() 305 | .unwrap(); 306 | 307 | #[cfg(feature = "log")] 308 | eprintln!("create fs"); 309 | let disk = create_disk(temp_dir.path(), test_case.disk_size); 310 | if !create_redoxfs(disk, test_case.reserved_size) { 311 | // File system creation failed (e.g., due to insufficient space) so we bail out, still 312 | // exercising this code path is useful. 313 | return Corpus::Keep; 314 | } 315 | 316 | for mount_seq in test_case.mount_sequences.iter() { 317 | #[cfg(feature = "log")] 318 | eprintln!("mount fs: path {:?}, size{}", temp_dir.path(), test_case.disk_size); 319 | 320 | let disk = create_disk(temp_dir.path(), test_case.disk_size); 321 | let operations = mount_seq.operations.clone(); 322 | with_redoxfs_mount(temp_dir.path(), disk, mount_seq.squash, move |fs_path| { 323 | for operation in operations.iter() { 324 | #[cfg(feature = "log")] 325 | eprintln!("do operation {operation:?}"); 326 | 327 | let _result = do_operation(fs_path, operation); 328 | 329 | #[cfg(feature = "log")] 330 | eprintln!("operation result {:?}", _result.err()); 331 | } 332 | }); 333 | 334 | #[cfg(feature = "log")] 335 | eprintln!("unmounted fs"); 336 | } 337 | Corpus::Keep 338 | }); 339 | -------------------------------------------------------------------------------- /src/allocator.rs: -------------------------------------------------------------------------------- 1 | use alloc::{collections::BTreeSet, vec::Vec}; 2 | use core::{fmt, mem, ops, slice}; 3 | use endian_num::Le; 4 | 5 | use crate::{BlockAddr, BlockLevel, BlockMeta, BlockPtr, BlockTrait, BLOCK_SIZE}; 6 | 7 | pub const ALLOC_LIST_ENTRIES: usize = 8 | (BLOCK_SIZE as usize - mem::size_of::>()) / mem::size_of::(); 9 | 10 | /// The RedoxFS block allocator. This struct manages all "data" blocks in RedoxFS 11 | /// (i.e, all blocks that aren't reserved or part of the header chain). 
12 | /// 13 | /// [`Allocator`] can allocate blocks of many "levels"---that is, it can 14 | /// allocate multiple consecutive [`BLOCK_SIZE`] blocks in one operation. 15 | /// 16 | /// This reduces the amount of memory that the [`Allocator`] uses: 17 | /// Instead of storing the index of each free [`BLOCK_SIZE`] block, 18 | /// the `levels` array can keep track of higher-level blocks, splitting 19 | /// them when a smaller block is requested. 20 | /// 21 | /// Higher-level blocks also allow us to more efficiently allocate memory 22 | /// for large files. 23 | #[derive(Clone, Default)] 24 | pub struct Allocator { 25 | /// This array keeps track of all free blocks of each level, 26 | /// and is initialized using the AllocList chain when we open the filesystem. 27 | /// 28 | /// Every element of the outer array represents a block level: 29 | /// - item 0: free level 0 blocks (with size [`BLOCK_SIZE`]) 30 | /// - item 1: free level 1 blocks (with size 2*[`BLOCK_SIZE`]) 31 | /// - item 2: free level 2 blocks (with size 4*[`BLOCK_SIZE`]) 32 | /// ...and so on. 33 | /// 34 | /// Each inner array contains a list of free block indices, 35 | levels: Vec>, 36 | } 37 | 38 | impl Allocator { 39 | pub fn levels(&self) -> &Vec> { 40 | &self.levels 41 | } 42 | 43 | /// Count the number of free [`BLOCK_SIZE`] available to this [`Allocator`]. 44 | pub fn free(&self) -> u64 { 45 | let mut free = 0; 46 | for level in 0..self.levels.len() { 47 | let level_size = 1 << level; 48 | free += self.levels[level].len() as u64 * level_size; 49 | } 50 | free 51 | } 52 | 53 | /// Find a free block of the given level, mark it as "used", and return its address. 54 | /// Returns [`None`] if there are no free blocks with this level. 55 | pub fn allocate(&mut self, meta: BlockMeta) -> Option { 56 | // First, find the lowest level with a free block 57 | let mut free_opt = None; 58 | { 59 | let mut level = meta.level.0; 60 | // Start searching at the level we want. Smaller levels are too small! 
61 | while level < self.levels.len() { 62 | if let Some(&index) = self.levels[level].first() { 63 | // Find the index closest to the start of the filesystem 64 | free_opt = match free_opt { 65 | Some((free_level, free_index)) if free_index <= index => { 66 | Some((free_level, free_index)) 67 | } 68 | _ => Some((level, index)), 69 | }; 70 | } 71 | level += 1; 72 | } 73 | } 74 | 75 | // If a free block was found, split it until we find a usable block of the right level. 76 | // The left side of the split block is kept free, and the right side is allocated. 77 | let (mut level, index) = free_opt?; 78 | self.levels[level].remove(&index); 79 | while level > meta.level.0 { 80 | level -= 1; 81 | let level_size = 1 << level; 82 | self.levels[level].insert(index + level_size); 83 | } 84 | 85 | Some(unsafe { BlockAddr::new(index, meta) }) 86 | } 87 | 88 | /// Try to allocate the exact block specified, making all necessary splits. 89 | /// Returns [`None`] if this some (or all) of this block is already allocated. 90 | /// 91 | /// Note that [`BlockAddr`] encodes the blocks location _and_ level. 
92 | pub fn allocate_exact(&mut self, exact_addr: BlockAddr) -> Option { 93 | // This function only supports level 0 right now 94 | assert_eq!(exact_addr.level().0, 0); 95 | let exact_index = exact_addr.index(); 96 | 97 | let mut index_opt = None; 98 | 99 | // Go from the highest to the lowest level 100 | for level in (0..self.levels.len()).rev() { 101 | let level_size = 1 << level; 102 | 103 | // Split higher block if found 104 | if let Some(index) = index_opt.take() { 105 | self.levels[level].insert(index); 106 | self.levels[level].insert(index + level_size); 107 | } 108 | 109 | // Look for matching block and remove it 110 | for &start in self.levels[level].iter() { 111 | if start <= exact_index { 112 | let end = start + level_size; 113 | if end > exact_index { 114 | self.levels[level].remove(&start); 115 | index_opt = Some(start); 116 | break; 117 | } 118 | } 119 | } 120 | } 121 | 122 | Some(unsafe { BlockAddr::new(index_opt?, exact_addr.meta()) }) 123 | } 124 | 125 | /// Deallocate the given block, marking it "free" so that it can be re-used later. 126 | pub fn deallocate(&mut self, addr: BlockAddr) { 127 | // When we deallocate, we check if block we're deallocating has a free sibling. 128 | // If it does, we join the two to create one free block in the next (higher) level. 129 | // 130 | // We repeat this until we no longer have a sibling to join. 131 | let mut index = addr.index(); 132 | let mut level = addr.level().0; 133 | loop { 134 | while level >= self.levels.len() { 135 | self.levels.push(BTreeSet::new()); 136 | } 137 | 138 | let level_size = 1 << level; 139 | let next_size = level_size << 1; 140 | 141 | let mut found = false; 142 | // look at all free blocks in the current level... 
143 | for &level_index in self.levels[level].iter() { 144 | // - the block we just freed aligns with the next largest block, and 145 | // - the second block we're looking at is the right sibling of this block 146 | if index % next_size == 0 && index + level_size == level_index { 147 | // "alloc" the next highest block, repeat deallocation process. 148 | self.levels[level].remove(&level_index); 149 | found = true; 150 | break; 151 | // - the index of this block doesn't align with the next largest block, and 152 | // - the block we're looking at is the left neighbor of this block 153 | } else if level_index % next_size == 0 && level_index + level_size == index { 154 | // "alloc" the next highest block, repeat deallocation process. 155 | self.levels[level].remove(&level_index); 156 | index = level_index; // index moves to left block 157 | found = true; 158 | break; 159 | } 160 | } 161 | 162 | // We couldn't find a higher block, 163 | // deallocate this one and finish 164 | if !found { 165 | self.levels[level].insert(index); 166 | return; 167 | } 168 | 169 | // repeat deallocation process on the 170 | // higher-level block we just created. 171 | level += 1; 172 | } 173 | } 174 | } 175 | 176 | #[repr(C, packed)] 177 | #[derive(Clone, Copy, Default, Debug)] 178 | pub struct AllocEntry { 179 | /// The index of the first block this [`AllocEntry`] refers to 180 | index: Le, 181 | 182 | /// The number of blocks after (and including) `index` that are are free or used. 183 | /// If negative, they are used; if positive, they are free. 
184 | count: Le, 185 | } 186 | 187 | impl AllocEntry { 188 | pub fn new(index: u64, count: i64) -> Self { 189 | Self { 190 | index: index.into(), 191 | count: count.into(), 192 | } 193 | } 194 | 195 | pub fn allocate(addr: BlockAddr) -> Self { 196 | Self::new(addr.index(), -addr.level().blocks::()) 197 | } 198 | 199 | pub fn deallocate(addr: BlockAddr) -> Self { 200 | Self::new(addr.index(), addr.level().blocks::()) 201 | } 202 | 203 | pub fn index(&self) -> u64 { 204 | self.index.to_ne() 205 | } 206 | 207 | pub fn count(&self) -> i64 { 208 | self.count.to_ne() 209 | } 210 | 211 | pub fn is_null(&self) -> bool { 212 | self.count() == 0 213 | } 214 | } 215 | 216 | /// A node in the allocation chain. 217 | #[repr(C, packed)] 218 | pub struct AllocList { 219 | /// A pointer to the previous AllocList. 220 | /// If this is the null pointer, this is the first element of the chain. 221 | pub prev: BlockPtr, 222 | 223 | /// Allocation entries. 224 | pub entries: [AllocEntry; ALLOC_LIST_ENTRIES], 225 | } 226 | 227 | unsafe impl BlockTrait for AllocList { 228 | fn empty(level: BlockLevel) -> Option { 229 | if level.0 == 0 { 230 | Some(Self { 231 | prev: BlockPtr::default(), 232 | entries: [AllocEntry::default(); ALLOC_LIST_ENTRIES], 233 | }) 234 | } else { 235 | None 236 | } 237 | } 238 | } 239 | 240 | impl fmt::Debug for AllocList { 241 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 242 | let prev = self.prev; 243 | let entries: Vec<&AllocEntry> = self 244 | .entries 245 | .iter() 246 | .filter(|entry| entry.count() > 0) 247 | .collect(); 248 | f.debug_struct("AllocList") 249 | .field("prev", &prev) 250 | .field("entries", &entries) 251 | .finish() 252 | } 253 | } 254 | 255 | impl ops::Deref for AllocList { 256 | type Target = [u8]; 257 | fn deref(&self) -> &[u8] { 258 | unsafe { 259 | slice::from_raw_parts( 260 | self as *const AllocList as *const u8, 261 | mem::size_of::(), 262 | ) as &[u8] 263 | } 264 | } 265 | } 266 | 267 | impl ops::DerefMut for AllocList { 
// (continuation of `impl DerefMut for AllocList` — the struct and impl header are above this chunk)
    /// Borrow the allocation-log node's raw bytes mutably so it can be written to disk.
    fn deref_mut(&mut self) -> &mut [u8] {
        // SAFETY: assumes AllocList is a plain on-disk struct of exactly
        // mem::size_of::<AllocList>() bytes with no padding invariants —
        // definition is above this chunk; TODO confirm it is #[repr(C, packed)].
        unsafe {
            slice::from_raw_parts_mut(
                self as *mut AllocList as *mut u8,
                mem::size_of::<AllocList>(),
            ) as &mut [u8]
        }
    }
}

// An AllocList node must occupy exactly one disk block.
#[test]
fn alloc_node_size_test() {
    assert_eq!(mem::size_of::<AllocList>(), crate::BLOCK_SIZE as usize);
}

// Exercises allocate/deallocate round-trips and buddy-level merging.
#[test]
fn allocator_test() {
    let mut alloc = Allocator::default();

    // Fresh allocator has nothing to hand out.
    assert_eq!(alloc.allocate(BlockMeta::default()), None);

    // A single freed block is returned once, then the allocator is empty again.
    alloc.deallocate(unsafe { BlockAddr::new(1, BlockMeta::default()) });
    assert_eq!(
        alloc.allocate(BlockMeta::default()),
        Some(unsafe { BlockAddr::new(1, BlockMeta::default()) })
    );
    assert_eq!(alloc.allocate(BlockMeta::default()), None);

    // Free blocks 1023..2047: block 1023 cannot merge upward (its buddy is
    // absent), while 1024..2047 coalesce into one level-10 region at 1024.
    for addr in 1023..2048 {
        alloc.deallocate(unsafe { BlockAddr::new(addr, BlockMeta::default()) });
    }

    assert_eq!(alloc.levels.len(), 11);
    for level in 0..alloc.levels.len() {
        if level == 0 {
            assert_eq!(alloc.levels[level], [1023].into());
        } else if level == 10 {
            assert_eq!(alloc.levels[level], [1024].into());
        } else {
            assert_eq!(alloc.levels[level], [0u64; 0].into());
        }
    }

    // Reallocating drains the freed range in ascending order, splitting the
    // level-10 region back down as needed.
    for addr in 1023..2048 {
        assert_eq!(
            alloc.allocate(BlockMeta::default()),
            Some(unsafe { BlockAddr::new(addr, BlockMeta::default()) })
        );
    }
    assert_eq!(alloc.allocate(BlockMeta::default()), None);

    assert_eq!(alloc.levels.len(), 11);
    for level in 0..alloc.levels.len() {
        assert_eq!(alloc.levels[level], [0u64; 0].into());
    }
}
// -------------------------------------------------------------------------------- /src/archive.rs: --------------------------------------------------------------------------------

use std::fs;
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::MetadataExt;
use std::path::Path;

use crate::{Disk, FileSystem, Node, Transaction, TreePtr, BLOCK_SIZE};

/// Convert a redox `syscall::Error` into the equivalent `std::io::Error`.
fn syscall_err(err: syscall::Error) -> io::Error {
    io::Error::from_raw_os_error(err.errno)
}

/// Recursively copy the host directory tree at `parent_path` into the
/// filesystem under the node `parent_ptr`, preserving mode bits, ownership
/// and modification times.
///
/// Only directories, regular files and symlinks are supported; any other
/// file type aborts the archive with an error.
pub fn archive_at<D: Disk, P: AsRef<Path>>(
    tx: &mut Transaction<D>,
    parent_path: P,
    parent_ptr: TreePtr<Node>,
) -> io::Result<()> {
    for entry_res in fs::read_dir(parent_path)? {
        let entry = entry_res?;

        let metadata = entry.metadata()?;
        let file_type = metadata.file_type();

        // RedoxFS names are UTF-8; refuse anything else up front.
        let name = entry.file_name().into_string().map_err(|_| {
            io::Error::new(io::ErrorKind::InvalidData, "filename is not valid UTF-8")
        })?;

        let mode_type = if file_type.is_dir() {
            Node::MODE_DIR
        } else if file_type.is_file() {
            Node::MODE_FILE
        } else if file_type.is_symlink() {
            Node::MODE_SYMLINK
        } else {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!("Does not support parsing {:?}", file_type),
            ));
        };

        let node_ptr;
        {
            // Combine the node-type bits with the host permission bits.
            let mode = mode_type | (metadata.mode() as u16 & Node::MODE_PERM);
            let mut node = tx
                .create_node(
                    parent_ptr,
                    &name,
                    mode,
                    metadata.ctime() as u64,
                    metadata.ctime_nsec() as u32,
                )
                .map_err(syscall_err)?;

            node_ptr = node.ptr();

            // Only touch (and re-sync) the node when ownership differs from
            // the freshly created defaults.
            if node.data().uid() != metadata.uid() || node.data().gid() != metadata.gid() {
                node.data_mut().set_uid(metadata.uid());
                node.data_mut().set_gid(metadata.gid());
                tx.sync_tree(node).map_err(syscall_err)?;
            }
        }

        let path = entry.path();
        if file_type.is_dir() {
            archive_at(tx, path, node_ptr)?;
        } else if file_type.is_file() {
            let data = fs::read(path)?;
            let count = tx
                .write_node(
                    node_ptr,
                    0,
                    &data,
                    metadata.mtime() as u64,
                    metadata.mtime_nsec() as u32,
                )
                .map_err(syscall_err)?;
            // A short write here means the image is corrupt; treat as a bug.
            if count != data.len() {
                panic!("file write count {} != {}", count, data.len());
            }
        } else if file_type.is_symlink() {
            // Symlinks are stored as a file whose content is the target path.
            let destination = fs::read_link(path)?;
            let data = destination.as_os_str().as_bytes();
            let count = tx
                .write_node(
                    node_ptr,
                    0,
                    data,
                    metadata.mtime() as u64,
                    metadata.mtime_nsec() as u32,
                )
                .map_err(syscall_err)?;
            if count != data.len() {
                panic!("symlink write count {} != {}", count, data.len());
            }
        } else {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!("Does not support creating {:?}", file_type),
            ));
        }
    }

    Ok(())
}

/// Archive the host directory `parent_path` into `fs` starting at the root
/// node, then shrink the recorded filesystem size to the last used block.
///
/// Returns the total image size in bytes (reserved blocks included).
pub fn archive<D: Disk, P: AsRef<Path>>(fs: &mut FileSystem<D>, parent_path: P) -> io::Result<u64> {
    let end_block = fs
        .tx(|tx| {
            // Archive_at root node.
            //
            // Not every io::Error carries an OS errno (e.g. the InvalidData
            // error for non-UTF-8 filenames above), so fall back to EIO
            // instead of panicking on `unwrap()`.
            archive_at(tx, parent_path, TreePtr::root())
                .map_err(|err| syscall::Error::new(err.raw_os_error().unwrap_or(syscall::EIO)))?;

            // Squash alloc log
            tx.sync(true)?;

            let end_block = tx.header.size() / BLOCK_SIZE;
            /* TODO: Cut off any free blocks at the end of the filesystem
            let mut end_changed = true;
            while end_changed {
                end_changed = false;

                let allocator = fs.allocator();
                let levels = allocator.levels();
                for level in 0..levels.len() {
                    let level_size = 1 << level;
                    for &block in levels[level].iter() {
                        if block < end_block && block + level_size >= end_block {
                            end_block = block;
                            end_changed = true;
                        }
                    }
                }
            }
            */

            // Update header
            tx.header.size = (end_block * BLOCK_SIZE).into();
            tx.header_changed = true;
            tx.sync(false)?;

            Ok(end_block)
        })
        .map_err(syscall_err)?;

    // Account for the reserved (bootloader/header) blocks before the data area.
    Ok((fs.block + end_block) * BLOCK_SIZE)
}
// -------------------------------------------------------------------------------- /src/bin/ar.rs: --------------------------------------------------------------------------------

extern crate redoxfs;
extern crate syscall;
extern crate uuid;

use std::io::Read;
6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | use std::{env, fs, process}; 8 | 9 | use redoxfs::{archive, DiskFile, FileSystem}; 10 | use uuid::Uuid; 11 | 12 | fn main() { 13 | env_logger::init(); 14 | 15 | let mut args = env::args().skip(1); 16 | 17 | let disk_path = if let Some(path) = args.next() { 18 | path 19 | } else { 20 | println!("redoxfs-ar: no disk image provided"); 21 | println!("redoxfs-ar DISK FOLDER [BOOTLOADER]"); 22 | process::exit(1); 23 | }; 24 | 25 | let folder_path = if let Some(path) = args.next() { 26 | path 27 | } else { 28 | println!("redoxfs-ar: no folder provided"); 29 | println!("redoxfs-ar DISK FOLDER [BOOTLOADER]"); 30 | process::exit(1); 31 | }; 32 | 33 | let bootloader_path_opt = args.next(); 34 | 35 | let disk = match DiskFile::open(&disk_path) { 36 | Ok(disk) => disk, 37 | Err(err) => { 38 | println!("redoxfs-ar: failed to open image {}: {}", disk_path, err); 39 | process::exit(1); 40 | } 41 | }; 42 | 43 | let mut bootloader = vec![]; 44 | if let Some(bootloader_path) = bootloader_path_opt { 45 | match fs::File::open(&bootloader_path) { 46 | Ok(mut file) => match file.read_to_end(&mut bootloader) { 47 | Ok(_) => (), 48 | Err(err) => { 49 | println!( 50 | "redoxfs-ar: failed to read bootloader {}: {}", 51 | bootloader_path, err 52 | ); 53 | process::exit(1); 54 | } 55 | }, 56 | Err(err) => { 57 | println!( 58 | "redoxfs-ar: failed to open bootloader {}: {}", 59 | bootloader_path, err 60 | ); 61 | process::exit(1); 62 | } 63 | } 64 | }; 65 | 66 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 67 | match FileSystem::create_reserved( 68 | disk, 69 | None, 70 | &bootloader, 71 | ctime.as_secs(), 72 | ctime.subsec_nanos(), 73 | ) { 74 | Ok(mut fs) => { 75 | let size = match archive(&mut fs, &folder_path) { 76 | Ok(ok) => ok, 77 | Err(err) => { 78 | println!("redoxfs-ar: failed to archive {}: {}", folder_path, err); 79 | process::exit(1); 80 | } 81 | }; 82 | 83 | if let Err(err) = fs.disk.file.set_len(size) { 84 | 
println!( 85 | "redoxfs-ar: failed to truncate {} to {}: {}", 86 | disk_path, size, err 87 | ); 88 | process::exit(1); 89 | } 90 | 91 | let uuid = Uuid::from_bytes(fs.header.uuid()); 92 | println!( 93 | "redoxfs-ar: created filesystem on {}, reserved {} blocks, size {} MB, uuid {}", 94 | disk_path, 95 | fs.block, 96 | fs.header.size() / 1000 / 1000, 97 | uuid.hyphenated() 98 | ); 99 | } 100 | Err(err) => { 101 | println!( 102 | "redoxfs-ar: failed to create filesystem on {}: {}", 103 | disk_path, err 104 | ); 105 | process::exit(1); 106 | } 107 | }; 108 | } 109 | -------------------------------------------------------------------------------- /src/bin/clone.rs: -------------------------------------------------------------------------------- 1 | extern crate redoxfs; 2 | extern crate syscall; 3 | extern crate uuid; 4 | 5 | use std::io::Read; 6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | use std::{env, fs, process}; 8 | 9 | use redoxfs::{clone, DiskFile, FileSystem}; 10 | use uuid::Uuid; 11 | 12 | fn main() { 13 | env_logger::init(); 14 | 15 | let mut args = env::args().skip(1); 16 | 17 | let disk_path_old = if let Some(path) = args.next() { 18 | path 19 | } else { 20 | println!("redoxfs-clone: no old disk image provided"); 21 | println!("redoxfs-clone NEW-DISK OLD-DISK [BOOTLOADER]"); 22 | process::exit(1); 23 | }; 24 | 25 | let disk_path = if let Some(path) = args.next() { 26 | path 27 | } else { 28 | println!("redoxfs-clone: no new disk image provided"); 29 | println!("redoxfs-clone NEW-DISK OLD-DISK [BOOTLOADER]"); 30 | process::exit(1); 31 | }; 32 | 33 | let bootloader_path_opt = args.next(); 34 | 35 | // Open old disk in readonly mode 36 | let disk_old = match fs::OpenOptions::new() 37 | .read(true) 38 | .write(false) 39 | .open(&disk_path_old) 40 | .map(DiskFile::from) 41 | { 42 | Ok(disk) => disk, 43 | Err(err) => { 44 | println!( 45 | "redoxfs-clone: failed to open old disk image {}: {}", 46 | disk_path_old, err 47 | ); 48 | process::exit(1); 49 | } 50 | 
}; 51 | 52 | let mut fs_old = match FileSystem::open(disk_old, None, None, false) { 53 | Ok(fs) => fs, 54 | Err(err) => { 55 | println!( 56 | "redoxfs-clone: failed to open filesystem on {}: {}", 57 | disk_path_old, err 58 | ); 59 | process::exit(1); 60 | } 61 | }; 62 | 63 | let disk = match DiskFile::open(&disk_path) { 64 | Ok(disk) => disk, 65 | Err(err) => { 66 | println!( 67 | "redoxfs-clone: failed to open new disk image {}: {}", 68 | disk_path, err 69 | ); 70 | process::exit(1); 71 | } 72 | }; 73 | 74 | let mut bootloader = vec![]; 75 | if let Some(bootloader_path) = bootloader_path_opt { 76 | match fs::File::open(&bootloader_path) { 77 | Ok(mut file) => match file.read_to_end(&mut bootloader) { 78 | Ok(_) => (), 79 | Err(err) => { 80 | println!( 81 | "redoxfs-clone: failed to read bootloader {}: {}", 82 | bootloader_path, err 83 | ); 84 | process::exit(1); 85 | } 86 | }, 87 | Err(err) => { 88 | println!( 89 | "redoxfs-clone: failed to open bootloader {}: {}", 90 | bootloader_path, err 91 | ); 92 | process::exit(1); 93 | } 94 | } 95 | }; 96 | 97 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 98 | let mut fs = match FileSystem::create_reserved( 99 | disk, 100 | None, 101 | &bootloader, 102 | ctime.as_secs(), 103 | ctime.subsec_nanos(), 104 | ) { 105 | Ok(fs) => fs, 106 | Err(err) => { 107 | println!( 108 | "redoxfs-clone: failed to create filesystem on {}: {}", 109 | disk_path, err 110 | ); 111 | process::exit(1); 112 | } 113 | }; 114 | 115 | let size_old = fs_old.header.size(); 116 | let free_old = fs_old.allocator().free() * redoxfs::BLOCK_SIZE; 117 | let used_old = size_old - free_old; 118 | let mut last_percent = 0; 119 | let clone_res = clone(&mut fs_old, &mut fs, move |used| { 120 | let percent = (used * 100) / used_old; 121 | if percent != last_percent { 122 | eprint!( 123 | "\r{}%: {} MB/{} MB", 124 | percent, 125 | used / 1000 / 1000, 126 | used_old / 1000 / 1000 127 | ); 128 | last_percent = percent; 129 | } 130 | }); 131 | 
eprintln!(); 132 | match clone_res { 133 | Ok(()) => (), 134 | Err(err) => { 135 | println!( 136 | "redoxfs-clone: failed to clone {} to {}: {}", 137 | disk_path_old, disk_path, err 138 | ); 139 | process::exit(1); 140 | } 141 | } 142 | 143 | let uuid = Uuid::from_bytes(fs.header.uuid()); 144 | let size = fs.header.size(); 145 | let free = fs.allocator().free() * redoxfs::BLOCK_SIZE; 146 | let used = size - free; 147 | println!("redoxfs-clone: created filesystem on {}", disk_path,); 148 | println!("\treserved: {} blocks", fs.block); 149 | println!("\tuuid: {}", uuid.hyphenated()); 150 | println!("\tsize: {} MB", size / 1000 / 1000); 151 | println!("\tused: {} MB", used / 1000 / 1000); 152 | println!("\tfree: {} MB", free / 1000 / 1000); 153 | } 154 | -------------------------------------------------------------------------------- /src/bin/mkfs.rs: -------------------------------------------------------------------------------- 1 | extern crate redoxfs; 2 | extern crate uuid; 3 | 4 | use std::io::Read; 5 | use std::{env, fs, io, process, time}; 6 | 7 | use redoxfs::{DiskFile, FileSystem}; 8 | use termion::input::TermRead; 9 | use uuid::Uuid; 10 | 11 | fn usage() -> ! 
{ 12 | eprintln!("redoxfs-mkfs [--encrypt] DISK [BOOTLOADER]"); 13 | process::exit(1); 14 | } 15 | 16 | fn main() { 17 | env_logger::init(); 18 | 19 | let mut encrypt = false; 20 | let mut disk_path_opt = None; 21 | let mut bootloader_path_opt = None; 22 | for arg in env::args().skip(1) { 23 | if arg == "--encrypt" { 24 | encrypt = true; 25 | } else if disk_path_opt.is_none() { 26 | disk_path_opt = Some(arg); 27 | } else if bootloader_path_opt.is_none() { 28 | bootloader_path_opt = Some(arg); 29 | } else { 30 | eprintln!("redoxfs-mkfs: too many arguments provided"); 31 | usage(); 32 | } 33 | } 34 | 35 | let disk_path = if let Some(path) = disk_path_opt { 36 | path 37 | } else { 38 | eprintln!("redoxfs-mkfs: no disk image provided"); 39 | usage(); 40 | }; 41 | 42 | let disk = match DiskFile::open(&disk_path) { 43 | Ok(disk) => disk, 44 | Err(err) => { 45 | eprintln!("redoxfs-mkfs: failed to open image {}: {}", disk_path, err); 46 | process::exit(1); 47 | } 48 | }; 49 | 50 | let mut bootloader = vec![]; 51 | if let Some(bootloader_path) = bootloader_path_opt { 52 | match fs::File::open(&bootloader_path) { 53 | Ok(mut file) => match file.read_to_end(&mut bootloader) { 54 | Ok(_) => (), 55 | Err(err) => { 56 | eprintln!( 57 | "redoxfs-mkfs: failed to read bootloader {}: {}", 58 | bootloader_path, err 59 | ); 60 | process::exit(1); 61 | } 62 | }, 63 | Err(err) => { 64 | eprintln!( 65 | "redoxfs-mkfs: failed to open bootloader {}: {}", 66 | bootloader_path, err 67 | ); 68 | process::exit(1); 69 | } 70 | } 71 | }; 72 | 73 | let password_opt = if encrypt { 74 | eprint!("redoxfs-mkfs: password: "); 75 | 76 | let password = io::stdin() 77 | .read_passwd(&mut io::stderr()) 78 | .unwrap() 79 | .unwrap_or_default(); 80 | 81 | eprintln!(); 82 | 83 | if password.is_empty() { 84 | eprintln!("redoxfs-mkfs: empty password, giving up"); 85 | process::exit(1); 86 | } 87 | 88 | Some(password) 89 | } else { 90 | None 91 | }; 92 | 93 | let ctime = time::SystemTime::now() 94 | 
.duration_since(time::UNIX_EPOCH) 95 | .unwrap(); 96 | match FileSystem::create_reserved( 97 | disk, 98 | password_opt.as_ref().map(|x| x.as_bytes()), 99 | &bootloader, 100 | ctime.as_secs(), 101 | ctime.subsec_nanos(), 102 | ) { 103 | Ok(filesystem) => { 104 | let uuid = Uuid::from_bytes(filesystem.header.uuid()); 105 | eprintln!( 106 | "redoxfs-mkfs: created filesystem on {}, reserved {} blocks, size {} MB, uuid {}", 107 | disk_path, 108 | filesystem.block, 109 | filesystem.header.size() / 1000 / 1000, 110 | uuid.hyphenated() 111 | ); 112 | } 113 | Err(err) => { 114 | eprintln!( 115 | "redoxfs-mkfs: failed to create filesystem on {}: {}", 116 | disk_path, err 117 | ); 118 | process::exit(1); 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/bin/mount.rs: -------------------------------------------------------------------------------- 1 | extern crate libc; 2 | extern crate redoxfs; 3 | #[cfg(target_os = "redox")] 4 | extern crate syscall; 5 | extern crate uuid; 6 | 7 | use std::env; 8 | use std::fs::File; 9 | use std::io::{self, Read, Write}; 10 | use std::os::unix::io::{FromRawFd, RawFd}; 11 | use std::process; 12 | 13 | #[cfg(target_os = "redox")] 14 | use std::{mem::MaybeUninit, ptr::addr_of_mut, sync::atomic::Ordering}; 15 | 16 | use redoxfs::{mount, DiskCache, DiskFile, FileSystem}; 17 | use termion::input::TermRead; 18 | use uuid::Uuid; 19 | 20 | #[cfg(target_os = "redox")] 21 | extern "C" fn unmount_handler(_s: usize) { 22 | redoxfs::IS_UMT.store(1, Ordering::SeqCst); 23 | } 24 | 25 | #[cfg(target_os = "redox")] 26 | //set up a signal handler on redox, this implements unmounting. I have no idea what sa_flags is 27 | //for, so I put 2. I don't think 0,0 is a valid sa_mask. I don't know what i'm doing here. When u 28 | //send it a sigkill, it shuts off the filesystem 29 | fn setsig() { 30 | // TODO: High-level wrapper like the nix crate? 
31 | unsafe { 32 | let mut action = MaybeUninit::::uninit(); 33 | 34 | assert_eq!( 35 | libc::sigemptyset(addr_of_mut!((*action.as_mut_ptr()).sa_mask)), 36 | 0 37 | ); 38 | addr_of_mut!((*action.as_mut_ptr()).sa_flags).write(0); 39 | addr_of_mut!((*action.as_mut_ptr()).sa_sigaction).write(unmount_handler as usize); 40 | 41 | assert_eq!( 42 | libc::sigaction(libc::SIGTERM, action.as_ptr(), core::ptr::null_mut()), 43 | 0 44 | ); 45 | } 46 | } 47 | 48 | #[cfg(not(target_os = "redox"))] 49 | // on linux, this is implemented properly, so no need for this unscrupulous nonsense! 50 | fn setsig() {} 51 | 52 | fn fork() -> isize { 53 | unsafe { libc::fork() as isize } 54 | } 55 | 56 | fn pipe(pipes: &mut [i32; 2]) -> isize { 57 | unsafe { libc::pipe(pipes.as_mut_ptr()) as isize } 58 | } 59 | 60 | #[cfg(not(target_os = "redox"))] 61 | fn capability_mode() {} 62 | 63 | #[cfg(not(target_os = "redox"))] 64 | fn bootloader_password() -> Option> { 65 | None 66 | } 67 | 68 | #[cfg(target_os = "redox")] 69 | fn capability_mode() { 70 | libredox::call::setrens(0, 0).expect("redoxfs: failed to enter null namespace"); 71 | } 72 | 73 | #[cfg(target_os = "redox")] 74 | fn bootloader_password() -> Option> { 75 | use libredox::call::MmapArgs; 76 | 77 | let addr_env = env::var_os("REDOXFS_PASSWORD_ADDR")?; 78 | let size_env = env::var_os("REDOXFS_PASSWORD_SIZE")?; 79 | 80 | let addr = usize::from_str_radix( 81 | addr_env.to_str().expect("REDOXFS_PASSWORD_ADDR not valid"), 82 | 16, 83 | ) 84 | .expect("failed to parse REDOXFS_PASSWORD_ADDR"); 85 | 86 | let size = usize::from_str_radix( 87 | size_env.to_str().expect("REDOXFS_PASSWORD_SIZE not valid"), 88 | 16, 89 | ) 90 | .expect("failed to parse REDOXFS_PASSWORD_SIZE"); 91 | 92 | let mut password = Vec::with_capacity(size); 93 | unsafe { 94 | let aligned_size = size.next_multiple_of(syscall::PAGE_SIZE); 95 | 96 | let fd = libredox::Fd::open("memory:physical", libredox::flag::O_CLOEXEC, 0) 97 | .expect("failed to open physical memory file"); 
98 | 99 | let password_map = libredox::call::mmap(MmapArgs { 100 | addr: core::ptr::null_mut(), 101 | length: aligned_size, 102 | prot: libredox::flag::PROT_READ, 103 | flags: libredox::flag::MAP_SHARED, 104 | fd: fd.raw(), 105 | offset: addr as u64, 106 | }) 107 | .expect("failed to map REDOXFS_PASSWORD") 108 | .cast::(); 109 | 110 | for i in 0..size { 111 | password.push(password_map.add(i).read()); 112 | } 113 | 114 | let _ = libredox::call::munmap(password_map.cast(), aligned_size); 115 | } 116 | Some(password) 117 | } 118 | 119 | fn print_err_exit(err: impl AsRef) -> ! { 120 | eprintln!("redoxfs: {}", err.as_ref()); 121 | usage(); 122 | process::exit(1) 123 | } 124 | 125 | fn print_usage_exit() -> ! { 126 | usage(); 127 | process::exit(1) 128 | } 129 | 130 | fn usage() { 131 | eprintln!("redoxfs [--no-daemon|-d] [--uuid] [disk or uuid] [mountpoint] [block in hex]"); 132 | } 133 | 134 | enum DiskId { 135 | Path(String), 136 | Uuid(Uuid), 137 | } 138 | 139 | fn filesystem_by_path( 140 | path: &str, 141 | block_opt: Option, 142 | log_errors: bool, 143 | ) -> Option<(String, FileSystem>)> { 144 | log::debug!("opening {}", path); 145 | let attempts = 10; 146 | for attempt in 0..=attempts { 147 | let password_opt = if attempt > 0 { 148 | eprint!("redoxfs: password: "); 149 | 150 | let password = io::stdin() 151 | .read_passwd(&mut io::stderr()) 152 | .unwrap() 153 | .unwrap_or_default(); 154 | 155 | eprintln!(); 156 | 157 | if password.is_empty() { 158 | eprintln!("redoxfs: empty password, giving up"); 159 | 160 | // Password is empty, exit loop 161 | break; 162 | } 163 | 164 | Some(password.into_bytes()) 165 | } else { 166 | bootloader_password() 167 | }; 168 | 169 | match DiskFile::open(path).map(DiskCache::new) { 170 | Ok(disk) => { 171 | match redoxfs::FileSystem::open(disk, password_opt.as_deref(), block_opt, true) { 172 | Ok(filesystem) => { 173 | log::debug!( 174 | "opened filesystem on {} with uuid {}", 175 | path, 176 | 
Uuid::from_bytes(filesystem.header.uuid()).hyphenated() 177 | ); 178 | 179 | return Some((path.to_string(), filesystem)); 180 | } 181 | Err(err) => match err.errno { 182 | syscall::ENOKEY => { 183 | if password_opt.is_some() { 184 | eprintln!("redoxfs: incorrect password ({}/{})", attempt, attempts); 185 | } 186 | } 187 | _ => { 188 | if log_errors { 189 | log::error!("failed to open filesystem {}: {}", path, err); 190 | } 191 | break; 192 | } 193 | }, 194 | } 195 | } 196 | Err(err) => { 197 | if log_errors { 198 | log::error!("failed to open image {}: {}", path, err); 199 | } 200 | break; 201 | } 202 | } 203 | } 204 | None 205 | } 206 | 207 | #[cfg(not(target_os = "redox"))] 208 | fn filesystem_by_uuid( 209 | _uuid: &Uuid, 210 | _block_opt: Option, 211 | ) -> Option<(String, FileSystem>)> { 212 | None 213 | } 214 | 215 | #[cfg(target_os = "redox")] 216 | fn filesystem_by_uuid( 217 | uuid: &Uuid, 218 | block_opt: Option, 219 | ) -> Option<(String, FileSystem>)> { 220 | use std::fs; 221 | 222 | use redox_path::RedoxPath; 223 | 224 | match fs::read_dir("/scheme") { 225 | Ok(entries) => { 226 | for entry_res in entries { 227 | if let Ok(entry) = entry_res { 228 | if let Some(disk) = entry.path().to_str() { 229 | if RedoxPath::from_absolute(disk) 230 | .unwrap_or(RedoxPath::from_absolute("/")?) 
231 | .is_scheme_category("disk") 232 | { 233 | log::debug!("found scheme {}", disk); 234 | match fs::read_dir(disk) { 235 | Ok(entries) => { 236 | for entry_res in entries { 237 | if let Ok(entry) = entry_res { 238 | if let Ok(path) = 239 | entry.path().into_os_string().into_string() 240 | { 241 | log::debug!("found path {}", path); 242 | if let Some((path, filesystem)) = 243 | filesystem_by_path(&path, block_opt, false) 244 | { 245 | if &filesystem.header.uuid() == uuid.as_bytes() 246 | { 247 | log::debug!( 248 | "filesystem on {} matches uuid {}", 249 | path, 250 | uuid.hyphenated() 251 | ); 252 | return Some((path, filesystem)); 253 | } else { 254 | log::debug!( 255 | "filesystem on {} does not match uuid {}", 256 | path, 257 | uuid.hyphenated() 258 | ); 259 | } 260 | } 261 | } 262 | } 263 | } 264 | } 265 | Err(err) => { 266 | log::debug!("failed to list '{}': {}", disk, err); 267 | } 268 | } 269 | } 270 | } 271 | } 272 | } 273 | } 274 | Err(err) => { 275 | log::error!("failed to list schemes: {}", err); 276 | } 277 | } 278 | 279 | None 280 | } 281 | 282 | fn daemon( 283 | disk_id: &DiskId, 284 | mountpoint: &str, 285 | block_opt: Option, 286 | mut write: Option, 287 | ) -> ! 
{ 288 | setsig(); 289 | 290 | let filesystem_opt = match *disk_id { 291 | DiskId::Path(ref path) => filesystem_by_path(path, block_opt, true), 292 | DiskId::Uuid(ref uuid) => filesystem_by_uuid(uuid, block_opt), 293 | }; 294 | 295 | if let Some((path, filesystem)) = filesystem_opt { 296 | match mount(filesystem, mountpoint, |mounted_path| { 297 | capability_mode(); 298 | 299 | log::info!( 300 | "mounted filesystem on {} to {}", 301 | path, 302 | mounted_path.display() 303 | ); 304 | 305 | if let Some(ref mut write) = write { 306 | let _ = write.write(&[0]); 307 | } 308 | }) { 309 | Ok(()) => { 310 | process::exit(0); 311 | } 312 | Err(err) => { 313 | log::error!( 314 | "failed to mount {} to {}: {}", 315 | path, mountpoint, err 316 | ); 317 | } 318 | } 319 | } 320 | 321 | match *disk_id { 322 | DiskId::Path(ref path) => { 323 | log::error!("not able to mount path {}", path); 324 | } 325 | DiskId::Uuid(ref uuid) => { 326 | log::error!("not able to mount uuid {}", uuid.hyphenated()); 327 | } 328 | } 329 | 330 | if let Some(ref mut write) = write { 331 | let _ = write.write(&[1]); 332 | } 333 | 334 | process::exit(1); 335 | } 336 | 337 | fn main() { 338 | env_logger::init(); 339 | 340 | let mut args = env::args().skip(1); 341 | 342 | let mut daemonise = true; 343 | let mut disk_id: Option = None; 344 | let mut mountpoint: Option = None; 345 | let mut block_opt: Option = None; 346 | 347 | while let Some(arg) = args.next() { 348 | match arg.as_str() { 349 | "--no-daemon" | "-d" => daemonise = false, 350 | 351 | "--uuid" if disk_id.is_none() => { 352 | disk_id = Some(DiskId::Uuid( 353 | match args.next().as_deref().map(Uuid::parse_str) { 354 | Some(Ok(uuid)) => uuid, 355 | Some(Err(err)) => { 356 | print_err_exit(format!("invalid uuid '{}': {}", arg, err)) 357 | } 358 | None => print_err_exit("no uuid provided"), 359 | }, 360 | )); 361 | } 362 | 363 | disk if disk_id.is_none() => disk_id = Some(DiskId::Path(disk.to_owned())), 364 | 365 | mnt if disk_id.is_some() && 
mountpoint.is_none() => mountpoint = Some(mnt.to_owned()), 366 | 367 | opts if mountpoint.is_some() => match u64::from_str_radix(opts, 16) { 368 | Ok(block) => block_opt = Some(block), 369 | Err(err) => print_err_exit(format!("invalid block '{}': {}", opts, err)), 370 | }, 371 | 372 | _ => print_usage_exit(), 373 | } 374 | } 375 | 376 | let Some(disk_id) = disk_id else { 377 | print_err_exit("no disk provided"); 378 | }; 379 | 380 | let Some(mountpoint) = mountpoint else { 381 | print_err_exit("no mountpoint provided"); 382 | }; 383 | 384 | if daemonise { 385 | let mut pipes = [0; 2]; 386 | if pipe(&mut pipes) == 0 { 387 | let mut read = unsafe { File::from_raw_fd(pipes[0] as RawFd) }; 388 | let write = unsafe { File::from_raw_fd(pipes[1] as RawFd) }; 389 | 390 | let pid = fork(); 391 | if pid == 0 { 392 | drop(read); 393 | 394 | daemon(&disk_id, &mountpoint, block_opt, Some(write)); 395 | } else if pid > 0 { 396 | drop(write); 397 | 398 | let mut res = [0]; 399 | read.read_exact(&mut res).unwrap(); 400 | 401 | process::exit(res[0] as i32); 402 | } else { 403 | panic!("redoxfs: failed to fork"); 404 | } 405 | } else { 406 | panic!("redoxfs: failed to create pipe"); 407 | } 408 | } else { 409 | log::info!("running in foreground"); 410 | daemon(&disk_id, &mountpoint, block_opt, None); 411 | } 412 | } 413 | -------------------------------------------------------------------------------- /src/bin/resize.rs: -------------------------------------------------------------------------------- 1 | use std::{env, process}; 2 | 3 | use humansize::{format_size, BINARY, DECIMAL}; 4 | use redoxfs::{BlockAddr, BlockMeta, Disk, DiskFile, FileSystem}; 5 | use uuid::Uuid; 6 | 7 | fn resize(fs: &mut FileSystem, size_arg: String) -> Result<(), String> { 8 | let disk_size = fs 9 | .disk 10 | .size() 11 | .map_err(|err| format!("failed to read disk size: {}", err))?; 12 | 13 | // Find contiguous free region 14 | //TODO: better error management 15 | let mut last_free = None; 16 | let mut 
last_end = 0; 17 | fs.tx(|tx| { 18 | let mut alloc_ptr = tx.header.alloc; 19 | while !alloc_ptr.is_null() { 20 | let alloc = tx.read_block(alloc_ptr)?; 21 | alloc_ptr = alloc.data().prev; 22 | for entry in alloc.data().entries.iter() { 23 | let count = entry.count(); 24 | if count <= 0 { 25 | continue; 26 | } 27 | let end = entry.index() + count as u64; 28 | if end > last_end { 29 | last_free = Some(*entry); 30 | last_end = end; 31 | } 32 | } 33 | } 34 | Ok(()) 35 | }) 36 | .map_err(|err| format!("failed to read alloc log: {}", err))?; 37 | 38 | let old_size = fs.header.size(); 39 | let min_size = if let Some(entry) = last_free { 40 | entry.index() * redoxfs::BLOCK_SIZE 41 | } else { 42 | old_size 43 | }; 44 | let max_size = disk_size - (fs.block * redoxfs::BLOCK_SIZE); 45 | 46 | let new_size = match size_arg.to_lowercase().as_str() { 47 | "min" | "minimum" => min_size, 48 | "" | "max" | "maximum" => max_size, 49 | _ => match parse_size::parse_size(&size_arg) { 50 | Ok(new_size) => { 51 | if new_size < min_size { 52 | return Err(format!( 53 | "requested size {} is smaller than {} by {}", 54 | new_size, 55 | min_size, 56 | min_size - new_size 57 | )); 58 | } 59 | 60 | if new_size > max_size { 61 | return Err(format!( 62 | "requested size {} is larger than {} by {}", 63 | new_size, 64 | max_size, 65 | new_size - max_size 66 | )); 67 | } 68 | 69 | new_size 70 | } 71 | Err(err) => { 72 | return Err(format!( 73 | "failed to parse size argument {:?}: {}", 74 | size_arg, err 75 | )); 76 | } 77 | }, 78 | }; 79 | 80 | println!( 81 | "minimum size: {} ({})", 82 | format_size(min_size, DECIMAL), 83 | format_size(min_size, BINARY) 84 | ); 85 | println!( 86 | "maximum size: {} ({})", 87 | format_size(max_size, DECIMAL), 88 | format_size(max_size, BINARY) 89 | ); 90 | println!( 91 | "new size: {} ({})", 92 | format_size(new_size, DECIMAL), 93 | format_size(new_size, BINARY) 94 | ); 95 | 96 | let old_blocks = old_size / redoxfs::BLOCK_SIZE; 97 | let new_blocks = new_size / 
redoxfs::BLOCK_SIZE; 98 | let (start, end, shrink) = if new_size == old_size { 99 | println!("already requested size"); 100 | return Ok(()); 101 | } else if new_size < old_size { 102 | println!("shrinking by {}", old_size - new_size); 103 | (new_blocks, old_blocks, true) 104 | } else { 105 | println!("growing by {}", new_size - old_size); 106 | (old_blocks, new_blocks, false) 107 | }; 108 | 109 | // Allocate or deallocate blocks as needed 110 | unsafe { 111 | let allocator = fs.allocator_mut(); 112 | for index in start..end { 113 | if shrink { 114 | //TODO: replace assert with error? 115 | let addr = BlockAddr::new(index as u64, BlockMeta::default()); 116 | assert_eq!(allocator.allocate_exact(addr), Some(addr)); 117 | } else { 118 | let addr = BlockAddr::new(index as u64, BlockMeta::default()); 119 | allocator.deallocate(addr); 120 | } 121 | } 122 | } 123 | 124 | fs.tx(|tx| { 125 | // Update header 126 | tx.header.size = new_size.into(); 127 | tx.header_changed = true; 128 | 129 | // Sync with squash 130 | tx.sync(true)?; 131 | 132 | Ok(()) 133 | }) 134 | .map_err(|err| format!("transaction failed: {}", err)) 135 | } 136 | 137 | fn main() { 138 | env_logger::init(); 139 | 140 | let mut args = env::args().skip(1); 141 | 142 | let disk_path = if let Some(path) = args.next() { 143 | path 144 | } else { 145 | eprintln!("redoxfs-resize: no new disk image provided"); 146 | eprintln!("redoxfs-resize NEW-DISK [SIZE]"); 147 | process::exit(1); 148 | }; 149 | 150 | let size_arg = args.next().unwrap_or_default(); 151 | 152 | let disk = match DiskFile::open(&disk_path) { 153 | Ok(disk) => disk, 154 | Err(err) => { 155 | eprintln!( 156 | "redoxfs-resize: failed to open disk image {}: {}", 157 | disk_path, err 158 | ); 159 | process::exit(1); 160 | } 161 | }; 162 | 163 | let mut fs = match FileSystem::open(disk, None, None, true) { 164 | Ok(fs) => fs, 165 | Err(err) => { 166 | eprintln!( 167 | "redoxfs-resize: failed to open filesystem on {}: {}", 168 | disk_path, err 169 | ); 
170 | process::exit(1); 171 | } 172 | }; 173 | 174 | match resize(&mut fs, size_arg) { 175 | Ok(()) => {} 176 | Err(err) => { 177 | eprintln!( 178 | "redoxfs-resize: failed to resize filesystem on {}: {}", 179 | disk_path, err 180 | ); 181 | process::exit(1); 182 | } 183 | } 184 | 185 | let uuid = Uuid::from_bytes(fs.header.uuid()); 186 | let size = fs.header.size(); 187 | let free = fs.allocator().free() * redoxfs::BLOCK_SIZE; 188 | let used = size - free; 189 | println!("redoxfs-resize: resized filesystem on {}", disk_path); 190 | println!("\tuuid: {}", uuid.hyphenated()); 191 | println!( 192 | "\tsize: {} ({})", 193 | format_size(size, DECIMAL), 194 | format_size(size, BINARY) 195 | ); 196 | println!( 197 | "\tused: {} ({})", 198 | format_size(used, DECIMAL), 199 | format_size(used, BINARY) 200 | ); 201 | println!( 202 | "\tfree: {} ({})", 203 | format_size(free, DECIMAL), 204 | format_size(free, BINARY) 205 | ); 206 | } 207 | -------------------------------------------------------------------------------- /src/block.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, marker::PhantomData, mem, ops, slice}; 2 | use endian_num::Le; 3 | 4 | use crate::BLOCK_SIZE; 5 | 6 | const BLOCK_LIST_ENTRIES: usize = BLOCK_SIZE as usize / mem::size_of::>(); 7 | 8 | /// An address of a data block. 9 | /// 10 | /// This encodes a block's position _and_ [`BlockLevel`]: 11 | /// the first four bits of this `u64` encode the block's level, 12 | /// the next four bits indicates decompression level, 13 | /// the rest encode its index. 
14 | #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] 15 | pub struct BlockAddr(u64); 16 | 17 | impl BlockAddr { 18 | const INDEX_SHIFT: u64 = 8; 19 | const DECOMP_LEVEL_MASK: u64 = 0xF0; 20 | const DECOMP_LEVEL_SHIFT: u64 = 4; 21 | const LEVEL_MASK: u64 = 0xF; 22 | 23 | // Unsafe because this can create invalid blocks 24 | pub unsafe fn new(index: u64, meta: BlockMeta) -> Self { 25 | // Level must fit within LEVEL_MASK 26 | if meta.level.0 > Self::LEVEL_MASK as usize { 27 | panic!("block level too large"); 28 | } 29 | 30 | // Decomp level must fit within DECOMP_LEVEL_MASK 31 | let decomp_level = meta.decomp_level.unwrap_or_default(); 32 | if (decomp_level.0 << Self::DECOMP_LEVEL_SHIFT) > Self::DECOMP_LEVEL_MASK as usize { 33 | panic!("decompressed block level too large"); 34 | } 35 | 36 | // Index must not use the metadata bits 37 | let inner = index 38 | .checked_shl(Self::INDEX_SHIFT as u32) 39 | .expect("block index too large") 40 | | ((decomp_level.0 as u64) << Self::DECOMP_LEVEL_SHIFT) 41 | | (meta.level.0 as u64); 42 | Self(inner) 43 | } 44 | 45 | pub fn null(meta: BlockMeta) -> Self { 46 | unsafe { Self::new(0, meta) } 47 | } 48 | 49 | pub fn index(&self) -> u64 { 50 | // The first four bits store the level 51 | self.0 >> Self::INDEX_SHIFT 52 | } 53 | 54 | pub fn level(&self) -> BlockLevel { 55 | // The first four bits store the level 56 | BlockLevel((self.0 & Self::LEVEL_MASK) as usize) 57 | } 58 | 59 | pub fn decomp_level(&self) -> Option { 60 | let value = (self.0 & Self::DECOMP_LEVEL_MASK) >> Self::DECOMP_LEVEL_SHIFT; 61 | if value != 0 { 62 | Some(BlockLevel(value as usize)) 63 | } else { 64 | None 65 | } 66 | } 67 | 68 | pub fn meta(&self) -> BlockMeta { 69 | BlockMeta { 70 | level: self.level(), 71 | decomp_level: self.decomp_level(), 72 | } 73 | } 74 | 75 | pub fn is_null(&self) -> bool { 76 | self.index() == 0 77 | } 78 | } 79 | 80 | #[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] 81 | pub struct BlockMeta { 
82 | pub(crate) level: BlockLevel, 83 | pub(crate) decomp_level: Option, 84 | } 85 | 86 | impl BlockMeta { 87 | pub fn new(level: BlockLevel) -> Self { 88 | Self { 89 | level, 90 | decomp_level: None, 91 | } 92 | } 93 | 94 | pub fn new_compressed(level: BlockLevel, decomp_level: BlockLevel) -> Self { 95 | Self { 96 | level, 97 | decomp_level: Some(decomp_level), 98 | } 99 | } 100 | } 101 | 102 | /// The size of a block. 103 | /// 104 | /// Level 0 blocks are blocks of [`BLOCK_SIZE`] bytes. 105 | /// A level 1 block consists of two consecutive level 0 blocks. 106 | /// A level n block consists of two consecutive level n-1 blocks. 107 | /// 108 | /// See [`crate::Allocator`] docs for more details. 109 | #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] 110 | pub struct BlockLevel(pub(crate) usize); 111 | 112 | impl BlockLevel { 113 | /// Returns the smallest block level that can contain 114 | /// the given number of bytes. 115 | pub(crate) fn for_bytes(bytes: u64) -> Self { 116 | if bytes == 0 { 117 | return BlockLevel(0); 118 | } 119 | let level = bytes 120 | .div_ceil(BLOCK_SIZE) 121 | .next_power_of_two() 122 | .trailing_zeros() as usize; 123 | BlockLevel(level) 124 | } 125 | 126 | /// The number of [`BLOCK_SIZE`] blocks (i.e, level 0 blocks) 127 | /// in a block of this level 128 | pub fn blocks>(self) -> T { 129 | T::from(1u32 << self.0) 130 | } 131 | 132 | /// The number of bytes in a block of this level 133 | pub fn bytes(self) -> u64 { 134 | BLOCK_SIZE << self.0 135 | } 136 | } 137 | 138 | pub unsafe trait BlockTrait { 139 | /// Create an empty block of this type. 140 | fn empty(level: BlockLevel) -> Option 141 | where 142 | Self: Sized; 143 | } 144 | 145 | /// A [`BlockAddr`] and the data it points to. 
146 | #[derive(Clone, Copy, Debug, Default)] 147 | pub struct BlockData { 148 | addr: BlockAddr, 149 | data: T, 150 | } 151 | 152 | impl BlockData { 153 | pub fn new(addr: BlockAddr, data: T) -> Self { 154 | Self { addr, data } 155 | } 156 | 157 | pub fn addr(&self) -> BlockAddr { 158 | self.addr 159 | } 160 | 161 | pub fn data(&self) -> &T { 162 | &self.data 163 | } 164 | 165 | pub fn data_mut(&mut self) -> &mut T { 166 | &mut self.data 167 | } 168 | 169 | pub(crate) unsafe fn into_parts(self) -> (BlockAddr, T) { 170 | (self.addr, self.data) 171 | } 172 | 173 | /// Set the address of this [`BlockData`] to `addr`, returning this 174 | /// block's old address. This method does not update block data. 175 | /// 176 | /// `addr` must point to a block with the same level as this block. 177 | #[must_use = "don't forget to de-allocate old block address"] 178 | pub fn swap_addr(&mut self, addr: BlockAddr) -> BlockAddr { 179 | // Address levels must match 180 | assert_eq!(self.addr.level(), addr.level()); 181 | let old = self.addr; 182 | self.addr = addr; 183 | old 184 | } 185 | } 186 | 187 | impl BlockData { 188 | pub fn empty(addr: BlockAddr) -> Option { 189 | let empty = T::empty(addr.level())?; 190 | Some(Self::new(addr, empty)) 191 | } 192 | } 193 | 194 | impl> BlockData { 195 | pub fn create_ptr(&self) -> BlockPtr { 196 | BlockPtr { 197 | addr: self.addr.0.into(), 198 | hash: seahash::hash(self.data.deref()).into(), 199 | phantom: PhantomData, 200 | } 201 | } 202 | } 203 | 204 | #[repr(C, packed)] 205 | pub struct BlockList { 206 | pub ptrs: [BlockPtr; BLOCK_LIST_ENTRIES], 207 | } 208 | 209 | unsafe impl BlockTrait for BlockList { 210 | fn empty(level: BlockLevel) -> Option { 211 | if level.0 == 0 { 212 | Some(Self { 213 | ptrs: [BlockPtr::default(); BLOCK_LIST_ENTRIES], 214 | }) 215 | } else { 216 | None 217 | } 218 | } 219 | } 220 | 221 | impl BlockList { 222 | pub fn is_empty(&self) -> bool { 223 | self.ptrs.iter().all(|ptr| ptr.is_null()) 224 | } 225 | } 226 | 227 
| impl ops::Deref for BlockList { 228 | type Target = [u8]; 229 | fn deref(&self) -> &[u8] { 230 | unsafe { 231 | slice::from_raw_parts( 232 | self as *const BlockList as *const u8, 233 | mem::size_of::>(), 234 | ) as &[u8] 235 | } 236 | } 237 | } 238 | 239 | impl ops::DerefMut for BlockList { 240 | fn deref_mut(&mut self) -> &mut [u8] { 241 | unsafe { 242 | slice::from_raw_parts_mut( 243 | self as *mut BlockList as *mut u8, 244 | mem::size_of::>(), 245 | ) as &mut [u8] 246 | } 247 | } 248 | } 249 | 250 | /// An address of a data block, along with a checksum of its data. 251 | /// 252 | /// This encodes a block's position _and_ [`BlockLevel`]. 253 | /// the first four bits of `addr` encode the block's level, 254 | /// the rest encode its index. 255 | /// 256 | /// Also see [`BlockAddr`]. 257 | #[repr(C, packed)] 258 | pub struct BlockPtr { 259 | addr: Le, 260 | hash: Le, 261 | phantom: PhantomData, 262 | } 263 | 264 | impl BlockPtr { 265 | pub fn null(meta: BlockMeta) -> Self { 266 | Self { 267 | addr: BlockAddr::null(meta).0.into(), 268 | hash: 0.into(), 269 | phantom: PhantomData, 270 | } 271 | } 272 | 273 | pub fn addr(&self) -> BlockAddr { 274 | BlockAddr(self.addr.to_ne()) 275 | } 276 | 277 | pub fn hash(&self) -> u64 { 278 | self.hash.to_ne() 279 | } 280 | 281 | pub fn is_null(&self) -> bool { 282 | self.addr().is_null() 283 | } 284 | 285 | pub fn marker(level: u8) -> Self { 286 | assert!(level <= 0xF); 287 | Self { 288 | addr: (0xFFFF_FFFF_FFFF_FFF0 | (level as u64)).into(), 289 | hash: u64::MAX.into(), 290 | phantom: PhantomData, 291 | } 292 | } 293 | 294 | pub fn is_marker(&self) -> bool { 295 | (self.addr.to_ne() | 0xF) == u64::MAX && self.hash.to_ne() == u64::MAX 296 | } 297 | 298 | /// Cast BlockPtr to another type 299 | /// 300 | /// # Safety 301 | /// Unsafe because it can be used to transmute types 302 | pub unsafe fn cast(self) -> BlockPtr { 303 | BlockPtr { 304 | addr: self.addr, 305 | hash: self.hash, 306 | phantom: PhantomData, 307 | } 308 | } 
309 | 310 | #[must_use = "the returned pointer should usually be deallocated"] 311 | pub fn clear(&mut self) -> BlockPtr { 312 | let mut ptr = Self::default(); 313 | mem::swap(self, &mut ptr); 314 | ptr 315 | } 316 | } 317 | 318 | impl Clone for BlockPtr { 319 | fn clone(&self) -> Self { 320 | *self 321 | } 322 | } 323 | 324 | impl Copy for BlockPtr {} 325 | 326 | impl Default for BlockPtr { 327 | fn default() -> Self { 328 | Self { 329 | addr: 0.into(), 330 | hash: 0.into(), 331 | phantom: PhantomData, 332 | } 333 | } 334 | } 335 | 336 | impl fmt::Debug for BlockPtr { 337 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 338 | let addr = self.addr(); 339 | let hash = self.hash(); 340 | f.debug_struct("BlockPtr") 341 | .field("addr", &addr) 342 | .field("hash", &hash) 343 | .finish() 344 | } 345 | } 346 | 347 | #[repr(C, packed)] 348 | #[derive(Clone)] 349 | pub struct BlockRaw([u8; BLOCK_SIZE as usize]); 350 | 351 | unsafe impl BlockTrait for BlockRaw { 352 | fn empty(level: BlockLevel) -> Option { 353 | if level.0 == 0 { 354 | Some(Self([0; BLOCK_SIZE as usize])) 355 | } else { 356 | None 357 | } 358 | } 359 | } 360 | 361 | impl ops::Deref for BlockRaw { 362 | type Target = [u8]; 363 | fn deref(&self) -> &[u8] { 364 | &self.0 365 | } 366 | } 367 | 368 | impl ops::DerefMut for BlockRaw { 369 | fn deref_mut(&mut self) -> &mut [u8] { 370 | &mut self.0 371 | } 372 | } 373 | 374 | #[test] 375 | fn block_list_size_test() { 376 | assert_eq!(mem::size_of::>(), BLOCK_SIZE as usize); 377 | } 378 | 379 | #[test] 380 | fn block_raw_size_test() { 381 | assert_eq!(mem::size_of::(), BLOCK_SIZE as usize); 382 | } 383 | 384 | #[test] 385 | fn block_ptr_marker_test() { 386 | let ptr = BlockPtr::::marker(0); 387 | assert_eq!(ptr.addr().level().0, 0); 388 | assert!(ptr.is_marker()); 389 | 390 | let ptr = BlockPtr::::marker(2); 391 | assert_eq!(ptr.addr().level().0, 2); 392 | assert!(ptr.is_marker()); 393 | } 394 | 
--------------------------------------------------------------------------------
/src/clone.rs:
--------------------------------------------------------------------------------

use std::fs;
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;

use crate::{Disk, FileSystem, Node, Transaction, TreePtr, BLOCK_SIZE};

/// Map a syscall error onto an `io::Error` carrying the same errno.
fn syscall_err(err: syscall::Error) -> io::Error {
    io::Error::from_raw_os_error(err.errno)
}

/// Invoke `progress` with the number of bytes currently in use
/// (total size minus free space) in the destination filesystem.
fn tx_progress<D: Disk, F: FnMut(u64)>(tx: &mut Transaction<D>, progress: &mut F) {
    let size = tx.header.size();
    let free = tx.allocator.free() * BLOCK_SIZE;
    progress(size - free);
}

//TODO: handle hard links
/// Recursively copy every child of `parent_ptr_old` (a directory in the old
/// filesystem) under `parent_ptr` in the new filesystem.
///
/// `buf` is a scratch buffer reused for file contents; `progress` is invoked
/// once per copied node.
fn clone_at<D: Disk, E: Disk, F: FnMut(u64)>(
    tx_old: &mut Transaction<D>,
    parent_ptr_old: TreePtr<Node>,
    tx: &mut Transaction<E>,
    parent_ptr: TreePtr<Node>,
    buf: &mut [u8],
    progress: &mut F,
) -> syscall::Result<()> {
    let mut entries = Vec::new();
    tx_old.child_nodes(parent_ptr_old, &mut entries)?;
    for entry in entries {
        //TODO: return error instead?
        // Entries with non-UTF-8 names are silently skipped.
        let Some(name) = entry.name() else {
            continue;
        };
        let node_ptr_old = entry.node_ptr();
        let node_old = tx_old.read_tree(node_ptr_old)?;

        //TODO: this slows down the clone, but Redox has issues without this (Linux is fine)
        if tx.write_cache.len() > 64 {
            tx.sync(false)?;
        }

        let node_ptr = {
            // Recreate the node with the same mode, timestamps and ownership.
            let mode = node_old.data().mode();
            let (ctime, ctime_nsec) = node_old.data().ctime();
            let (mtime, mtime_nsec) = node_old.data().mtime();
            let mut node = tx.create_node(parent_ptr, &name, mode, ctime, ctime_nsec)?;
            node.data_mut().set_uid(node_old.data().uid());
            node.data_mut().set_gid(node_old.data().gid());
            node.data_mut().set_mtime(mtime, mtime_nsec);

            if !node_old.data().is_dir() {
                // Copy file contents chunk by chunk until EOF;
                // `write_node_inner` advances `offset` for us.
                let mut offset = 0;
                loop {
                    let count = tx_old.read_node_inner(&node_old, offset, buf)?;
                    if count == 0 {
                        break;
                    }
                    tx.write_node_inner(&mut node, &mut offset, &buf[..count])?;
                }
            }

            let node_ptr = node.ptr();
            tx.sync_tree(node)?;
            node_ptr
        };

        tx_progress(tx, progress);

        // Recurse into directories after the node itself has been synced.
        if node_old.data().is_dir() {
            clone_at(tx_old, node_ptr_old, tx, node_ptr, buf, progress)?;
        }
    }

    Ok(())
}

/// Copy the entire contents of `fs_old` into `fs`, reporting used-byte
/// counts through `progress`, then commit and squash the allocation log.
pub fn clone<D: Disk, E: Disk, F: FnMut(u64)>(
    fs_old: &mut FileSystem<D>,
    fs: &mut FileSystem<E>,
    mut progress: F,
) -> syscall::Result<()> {
    fs_old.tx(|tx_old| {
        let mut tx = Transaction::new(fs);

        // Clone at root node
        let mut buf = vec![0; 4 * 1024 * 1024];
        clone_at(
            tx_old,
            TreePtr::root(),
            &mut tx,
            TreePtr::root(),
            &mut buf,
            &mut progress,
        )?;

        // Commit and squash alloc log
        tx.commit(true)
    })
}
--------------------------------------------------------------------------------
/src/dir.rs:
--------------------------------------------------------------------------------

use core::{mem, ops, slice, str};

use
crate::{BlockLevel, BlockTrait, Node, TreePtr, BLOCK_SIZE, DIR_ENTRY_MAX_LENGTH};

/// On-disk directory entry: a node pointer plus a fixed-size,
/// NUL-padded name buffer.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct DirEntry {
    node_ptr: TreePtr<Node>,
    name: [u8; DIR_ENTRY_MAX_LENGTH],
}

impl DirEntry {
    /// Build an entry for `name`.
    ///
    /// NOTE(review): panics (slice length mismatch) if `name.len()` exceeds
    /// `DIR_ENTRY_MAX_LENGTH` — callers are expected to validate lengths.
    pub fn new(node_ptr: TreePtr<Node>, name: &str) -> DirEntry {
        let mut entry = DirEntry {
            node_ptr,
            ..Default::default()
        };

        entry.name[..name.len()].copy_from_slice(name.as_bytes());

        entry
    }

    pub fn node_ptr(&self) -> TreePtr<Node> {
        self.node_ptr
    }

    // Length of the name up to the first NUL byte (or the whole buffer
    // if no NUL is present).
    fn name_len(&self) -> usize {
        let mut len = 0;
        while len < self.name.len() {
            if self.name[len] == 0 {
                break;
            }
            len += 1;
        }
        len
    }

    /// The entry name as UTF-8, or `None` if the stored bytes are invalid.
    pub fn name(&self) -> Option<&str> {
        let len = self.name_len();
        //TODO: report utf8 error?
        str::from_utf8(&self.name[..len]).ok()
    }

    // Serialized layout: the node pointer bytes, one name-length byte,
    // then `name_len` bytes of name (no NUL padding).
    // 4 bytes TreePtr
    // 1 byte name_len
    const SERIALIZED_PREFIX_SIZE: usize = mem::size_of::<TreePtr<Node>>() + 1;

    pub fn serialized_size(&self) -> usize {
        DirEntry::SERIALIZED_PREFIX_SIZE + self.name_len()
    }

    /// Serialize into `buf`, returning the number of bytes written, or
    /// `None` if the buffer is too small.
    fn serialize_into(&self, buf: &mut [u8]) -> Option<usize> {
        let required = self.serialized_size();
        if buf.len() < required {
            return None;
        }

        buf[0..4].copy_from_slice(&self.node_ptr().to_bytes());
        buf[4] = self.name_len() as u8;
        buf[5..5 + self.name_len()].copy_from_slice(&self.name[..self.name_len()]);

        Some(required)
    }

    /// Deserialize one entry from the front of `buf`, returning the entry
    /// and the number of bytes consumed.
    fn deserialize_from(buf: &[u8]) -> Result<(Self, usize), &'static str> {
        // `<=` because a valid entry always has at least one name byte.
        if buf.len() <= DirEntry::SERIALIZED_PREFIX_SIZE {
            return Err("Buffer too small");
        }

        let node_ptr: TreePtr<Node> =
            TreePtr::from_bytes(buf[0..4].try_into().expect("Slice must be 4 bytes long"));
        let name_len = buf[4] as usize;

        // Empty names and oversized names are both invalid on disk.
        if name_len < 1 || name_len > DIR_ENTRY_MAX_LENGTH {
            return Err("Invalid name length");
        }

        if buf.len() < DirEntry::SERIALIZED_PREFIX_SIZE + name_len {
            return Err("Buffer too small");
        }

        let mut name = [0u8; DIR_ENTRY_MAX_LENGTH];
        name[..name_len].copy_from_slice(
            &buf[DirEntry::SERIALIZED_PREFIX_SIZE..DirEntry::SERIALIZED_PREFIX_SIZE + name_len],
        );

        Ok((
            DirEntry { node_ptr, name },
            DirEntry::SERIALIZED_PREFIX_SIZE + name_len,
        ))
    }
}

impl Default for DirEntry {
    fn default() -> Self {
        Self {
            node_ptr: TreePtr::default(),
            name: [0; DIR_ENTRY_MAX_LENGTH],
        }
    }
}

/// A directory block: a 4-byte header (entry count + used-byte count)
/// followed by a packed sequence of serialized [`DirEntry`] values.
// BUGFIX(review): this struct is reinterpreted as raw bytes (see the Deref
// impl below) and written to disk, so it needs a defined layout; without a
// repr attribute Rust's field ordering is unspecified.
#[repr(C, packed)]
pub struct DirList {
    count: u16,
    entry_bytes_len: u16,
    entry_bytes: [u8; BLOCK_SIZE as usize - 4],
}

unsafe impl BlockTrait for DirList {
    fn empty(level: BlockLevel) -> Option<Self> {
        // A directory list only fits exactly in a level 0 block
        if level.0 == 0 {
            Some(Self {
                count: 0,
                entry_bytes_len: 0,
                entry_bytes: [0; BLOCK_SIZE as usize - 4],
            })
        } else {
            None
        }
    }
}

impl DirList {
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Iterate over the deserialized entries in on-disk order.
    pub fn entries(&self) -> DirEntryIterator<'_> {
        DirEntryIterator {
            dir_list: self,
            emit_count: 0,
            position: 0,
        }
    }

    /// Byte offset of the serialized entry named `name`, if present.
    /// Assumes `entry_bytes` holds `count` well-formed entries.
    fn entry_position_for_name(&self, name: &str) -> Option<usize> {
        let name_len = name.len();
        let mut position = 0;
        let mut entry_id = 0;

        while entry_id < self.count {
            // Byte 4 of each serialized entry is its name length.
            let entry_name_len = self.entry_bytes[position + 4] as usize;
            if entry_name_len == name_len {
                let start = DirEntry::SERIALIZED_PREFIX_SIZE + position;
                let entry_name = &self.entry_bytes[start..start + entry_name_len];
                if entry_name == name.as_bytes() {
                    return Some(position);
                }
            }
            position += DirEntry::SERIALIZED_PREFIX_SIZE + entry_name_len;
            entry_id += 1;
        }
        None
    }

    pub fn find_entry(&self, name: &str) -> Option<DirEntry> {
        if let Some(position) = self.entry_position_for_name(name) {
            let (entry, _) = DirEntry::deserialize_from(&self.entry_bytes[position..]).unwrap();
            return Some(entry);
        }
        None
    }

    /// Remove the entry named `name`, compacting the byte buffer.
    /// Returns whether an entry was removed.
    pub fn remove_entry(&mut self, name: &str) -> bool {
        if let Some(position) = self.entry_position_for_name(name) {
            let entry_size =
                DirEntry::SERIALIZED_PREFIX_SIZE + self.entry_bytes[position + 4] as usize;
            let remaining_size = self.entry_bytes_len as usize - position - entry_size;
            // Shift any entries after the removed one down over it.
            if remaining_size > 0 {
                self.entry_bytes.copy_within(
                    position + entry_size..self.entry_bytes_len as usize,
                    position,
                );
            }
            self.entry_bytes_len -= entry_size as u16;
            self.count -= 1;
            return true;
        }
        false
    }

    /// Call `f` with each entry's raw 4-byte node pointer and name bytes,
    /// without deserializing full [`DirEntry`] values.
    pub fn for_each_entry<F>(&self, mut f: F)
    where
        F: FnMut(&[u8; 4], &[u8]),
    {
        let mut position = 0;
        let mut entry_id = 0;

        while entry_id < self.count {
            let node_ptr_bytes = &self.entry_bytes[position..position + 4];
            //let node_ptr = TreePtr::<Node>::from_bytes(node_ptr_bytes.try_into().unwrap());
            let entry_name_len = self.entry_bytes[position + 4] as usize;
            let start = DirEntry::SERIALIZED_PREFIX_SIZE + position;
            let entry_name = &self.entry_bytes[start..start + entry_name_len];

            f(node_ptr_bytes.try_into().unwrap(), entry_name);

            position += DirEntry::SERIALIZED_PREFIX_SIZE + entry_name_len;
            entry_id += 1;
        }
    }

    /// Append `entry` at the end of the buffer.
    /// Returns `false` if there is not enough space left.
    pub fn append(&mut self, entry: &DirEntry) -> bool {
        let entry_bytes_len = self.entry_bytes_len as usize;
        if let Some(size) = entry.serialize_into(&mut self.entry_bytes[entry_bytes_len..]) {
            self.count += 1;
            self.entry_bytes_len += size as u16;
            return true;
        }
        false
    }

    pub fn entry_count(&self) -> usize {
        self.count as usize
    }
}

impl ops::Deref for DirList {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        // SAFETY: DirList is #[repr(C, packed)] plain data, so viewing it
        // as its raw bytes over its full size is well defined.
        unsafe {
            slice::from_raw_parts(
                self as *const DirList as *const u8,
                mem::size_of::<DirList>(),
            ) as &[u8]
        }
    }
}

impl ops::DerefMut for DirList {
    fn deref_mut(&mut self) -> &mut [u8] {
        // SAFETY: as in Deref; any byte pattern is structurally valid.
        unsafe {
            slice::from_raw_parts_mut(self as *mut DirList as *mut u8, mem::size_of::<DirList>())
                as &mut [u8]
        }
    }
}

/// Iterator over the entries of a [`DirList`].
pub struct DirEntryIterator<'a> {
    dir_list: &'a DirList,
    /// Number of entries yielded so far.
    emit_count: usize,
    /// Byte offset of the next entry in `entry_bytes`.
    position: usize,
}

impl Iterator for DirEntryIterator<'_> {
    type Item = DirEntry;

    fn next(&mut self) -> Option<DirEntry> {
        if self.emit_count < self.dir_list.entry_count() {
            let position = self.position;
            let (entry, bytes_read) =
                DirEntry::deserialize_from(&self.dir_list.entry_bytes[position..]).unwrap();

            self.emit_count += 1;
            self.position += bytes_read;

            Some(entry)
        } else {
            None
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use alloc::format;

    #[test]
    fn dir_list_size_test() {
        use core::ops::Deref;
        assert_eq!(
            DirList::empty(BlockLevel(0)).unwrap().deref().len(),
            BLOCK_SIZE as usize
        );
    }

    #[test]
    fn test_append() {
        let mut dir_list = DirList::empty(BlockLevel(0)).unwrap();
        let dirent = DirEntry::new(TreePtr::new(123), "test000");

        assert!(dir_list.append(&dirent));
        assert_eq!(dir_list.entry_count(), 1);
        assert_eq!(dir_list.entry_bytes_len as usize, dirent.serialized_size());

        // Fill the list completely, then verify the next append fails.
        let max_entries = dir_list.entry_bytes.len() / dirent.serialized_size();
        for i in 1..max_entries {
            let dirent = DirEntry::new(TreePtr::new(123), format!("test{i:03}").as_str());
            assert!(dir_list.append(&dirent), "Failed on iteration {i}");
        }
        let dirent = DirEntry::new(TreePtr::new(123),
format!("test{max_entries}").as_str());
        assert!(!dir_list.append(&dirent));

        for (i, entry) in dir_list.entries().enumerate() {
            assert_eq!(entry.name().unwrap(), format!("test{i:03}"));
        }
    }
}
--------------------------------------------------------------------------------
/src/disk/cache.rs:
--------------------------------------------------------------------------------

use std::cmp;
use std::collections::{HashMap, VecDeque};
use syscall::error::Result;

use crate::disk::Disk;
use crate::BLOCK_SIZE;

/// Copy as many bytes as fit from `src` into `dest`; returns the count.
fn copy_memory(src: &[u8], dest: &mut [u8]) -> usize {
    let len = cmp::min(src.len(), dest.len());
    // Safe slice copy; the previous `unsafe { ptr::copy(..) }` was
    // unnecessary because `src` and `dest` are distinct borrows and
    // therefore cannot overlap.
    dest[..len].copy_from_slice(&src[..len]);
    len
}

/// A write-through LRU cache of [`BLOCK_SIZE`] blocks in front of another
/// [`Disk`]. Reads are served from the cache only when every requested
/// block is present; otherwise the whole request falls through to the
/// inner disk and repopulates the cache.
pub struct DiskCache<T> {
    inner: T,
    /// Cached block contents, keyed by block index.
    cache: HashMap<u64, [u8; BLOCK_SIZE as usize]>,
    /// Eviction queue: least recently inserted index at the front.
    order: VecDeque<u64>,
    /// Maximum number of cached blocks.
    size: usize,
}

impl<T: Disk> DiskCache<T> {
    pub fn new(inner: T) -> Self {
        // 16 MB cache
        let size = 16 * 1024 * 1024 / BLOCK_SIZE as usize;
        DiskCache {
            inner,
            cache: HashMap::with_capacity(size),
            order: VecDeque::with_capacity(size),
            size,
        }
    }

    fn insert(&mut self, i: u64, data: [u8; BLOCK_SIZE as usize]) {
        // BUGFIX: re-inserting an already-cached block used to push a
        // duplicate index onto `order`. Duplicates consumed queue capacity
        // and made eviction drop a block that had just been refreshed while
        // a stale index remained in the queue. Instead, move the block to
        // the back of the queue when it is already cached.
        if self.cache.insert(i, data).is_some() {
            if let Some(pos) = self.order.iter().position(|&block| block == i) {
                self.order.remove(pos);
            }
        } else {
            // Make room for the newly cached block.
            while self.order.len() >= self.size {
                let removed = self.order.pop_front().unwrap();
                self.cache.remove(&removed);
            }
        }

        self.order.push_back(i);
    }
}

impl<T: Disk> Disk for DiskCache<T> {
    unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        // println!("Cache read at {}", block);

        // First try to serve the request entirely from the cache.
        let mut read = 0;
        let mut failed = false;
        for i in 0..buffer.len().div_ceil(BLOCK_SIZE as usize) {
            let block_i = block + i as u64;

            let buffer_i = i * BLOCK_SIZE as usize;
            let buffer_j = cmp::min(buffer_i + BLOCK_SIZE as usize, buffer.len());
            let buffer_slice = &mut buffer[buffer_i..buffer_j];

            if let Some(cache_buf) = self.cache.get_mut(&block_i) {
                read += copy_memory(cache_buf, buffer_slice);
            } else {
                failed = true;
                break;
            }
        }

        if failed {
            // At least one block was missing: read the whole span from the
            // inner disk and (re)populate the cache from it.
            self.inner.read_at(block, buffer)?;

            read = 0;
            for i in 0..buffer.len().div_ceil(BLOCK_SIZE as usize) {
                let block_i = block + i as u64;

                let buffer_i = i * BLOCK_SIZE as usize;
                let buffer_j = cmp::min(buffer_i + BLOCK_SIZE as usize, buffer.len());
                let buffer_slice = &buffer[buffer_i..buffer_j];

                let mut cache_buf = [0; BLOCK_SIZE as usize];
                read += copy_memory(buffer_slice, &mut cache_buf);
                self.insert(block_i, cache_buf);
            }
        }

        Ok(read)
    }

    unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        //TODO: Write only blocks that have changed
        // println!("Cache write at {}", block);

        // Write through to the inner disk first, then refresh the cache.
        self.inner.write_at(block, buffer)?;

        let mut written = 0;
        for i in 0..buffer.len().div_ceil(BLOCK_SIZE as usize) {
            let block_i = block + i as u64;

            let buffer_i = i * BLOCK_SIZE as usize;
            let buffer_j = cmp::min(buffer_i + BLOCK_SIZE as usize, buffer.len());
            let buffer_slice = &buffer[buffer_i..buffer_j];

            let mut cache_buf = [0; BLOCK_SIZE as usize];
            written += copy_memory(buffer_slice, &mut cache_buf);
            self.insert(block_i, cache_buf);
        }

        Ok(written)
    }

    fn size(&mut self) -> Result<u64> {
        self.inner.size()
    }
}
--------------------------------------------------------------------------------
/src/disk/file.rs:
--------------------------------------------------------------------------------

use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom};
use std::os::unix::fs::FileExt;
use std::path::Path;

use syscall::error::{Error, Result, EIO};

use crate::disk::Disk;
use crate::BLOCK_SIZE;

/// A [`Disk`] backed by a regular file, using positioned reads and writes.
pub struct DiskFile {
    pub file: File,
}
/// Log an I/O failure to stderr and convert it into a syscall `EIO` error.
trait ResultExt {
    type T;
    fn or_eio(self) -> Result<Self::T>;
}
impl<T> ResultExt for Result<T> {
    type T = T;
    fn or_eio(self) -> Result<T> {
        match self {
            Ok(t) => Ok(t),
            Err(err) => {
                eprintln!("RedoxFS: IO ERROR: {err}");
                Err(Error::new(EIO))
            }
        }
    }
}
impl<T> ResultExt for std::io::Result<T> {
    type T = T;
    fn or_eio(self) -> Result<T> {
        match self {
            Ok(t) => Ok(t),
            Err(err) => {
                eprintln!("RedoxFS: IO ERROR: {err}");
                Err(Error::new(EIO))
            }
        }
    }
}

impl DiskFile {
    /// Open an existing image for read/write access.
    pub fn open(path: impl AsRef<Path>) -> Result<DiskFile> {
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .open(path)
            .or_eio()?;
        Ok(DiskFile { file })
    }

    /// Create (or open) an image and set its length to `size` bytes.
    pub fn create(path: impl AsRef<Path>, size: u64) -> Result<DiskFile> {
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(path)
            .or_eio()?;
        file.set_len(size).or_eio()?;
        Ok(DiskFile { file })
    }
}

impl Disk for DiskFile {
    unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        // Positioned read at the block offset.
        // NOTE(review): may return a short count — confirm that callers
        // tolerate partial reads.
        self.file.read_at(buffer, block * BLOCK_SIZE).or_eio()
    }

    unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        self.file.write_at(buffer, block * BLOCK_SIZE).or_eio()
    }

    fn size(&mut self) -> Result<u64> {
        // Seeking to the end yields the file length in bytes.
        self.file.seek(SeekFrom::End(0)).or_eio()
    }
}

impl From<File> for DiskFile {
    fn from(file: File) -> Self {
        Self { file }
    }
}
--------------------------------------------------------------------------------
/src/disk/io.rs:
--------------------------------------------------------------------------------

use std::io::{Read, Seek, SeekFrom, Write};
use syscall::error::{Error, Result, EIO};

use crate::disk::Disk;
use crate::BLOCK_SIZE;

// Log the I/O error and convert it to a syscall EIO error.
macro_rules! try_disk {
    ($expr:expr) => {
        match $expr {
            Ok(val) => val,
            Err(err) => {
                eprintln!("Disk I/O Error: {}", err);
                return Err(Error::new(EIO));
            }
        }
    };
}

/// A [`Disk`] adapter over any seekable reader/writer.
pub struct DiskIo<T>(pub T);

impl<T: Read + Write + Seek> Disk for DiskIo<T> {
    unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        try_disk!(self.0.seek(SeekFrom::Start(block * BLOCK_SIZE)));
        let count = try_disk!(self.0.read(buffer));
        Ok(count)
    }

    unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        try_disk!(self.0.seek(SeekFrom::Start(block * BLOCK_SIZE)));
        let count = try_disk!(self.0.write(buffer));
        Ok(count)
    }

    fn size(&mut self) -> Result<u64> {
        let size = try_disk!(self.0.seek(SeekFrom::End(0)));
        Ok(size)
    }
}
--------------------------------------------------------------------------------
/src/disk/memory.rs:
--------------------------------------------------------------------------------

use syscall::error::{Error, Result, EIO};

use crate::disk::Disk;
use crate::BLOCK_SIZE;

/// An in-memory [`Disk`], mainly useful for tests.
pub struct DiskMemory {
    data: Vec<u8>,
}

impl DiskMemory {
    /// A zero-filled memory disk of `size` bytes.
    pub fn new(size: u64) -> DiskMemory {
        DiskMemory {
            data: vec![0; size as usize],
        }
    }
}

impl Disk for DiskMemory {
    unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        let offset = (block * BLOCK_SIZE) as usize;
        let end = offset + buffer.len();
        // Any access past the end of the backing buffer is an I/O error.
        if end > self.data.len() {
            return Err(Error::new(EIO));
        }
        buffer.copy_from_slice(&self.data[offset..end]);
        Ok(buffer.len())
    }

    unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        let offset = (block * BLOCK_SIZE) as usize;
        let end = offset + buffer.len();
        if end > self.data.len() {
            return Err(Error::new(EIO));
        }
        self.data[offset..end].copy_from_slice(buffer);
        Ok(buffer.len())
    }

| fn size(&mut self) -> Result { 40 | Ok(self.data.len() as u64) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/disk/mod.rs: -------------------------------------------------------------------------------- 1 | use syscall::error::Result; 2 | 3 | #[cfg(feature = "std")] 4 | pub use self::cache::DiskCache; 5 | #[cfg(feature = "std")] 6 | pub use self::file::DiskFile; 7 | #[cfg(feature = "std")] 8 | pub use self::io::DiskIo; 9 | #[cfg(feature = "std")] 10 | pub use self::memory::DiskMemory; 11 | #[cfg(feature = "std")] 12 | pub use self::sparse::DiskSparse; 13 | 14 | #[cfg(feature = "std")] 15 | mod cache; 16 | #[cfg(feature = "std")] 17 | mod file; 18 | #[cfg(feature = "std")] 19 | mod io; 20 | #[cfg(feature = "std")] 21 | mod memory; 22 | #[cfg(feature = "std")] 23 | mod sparse; 24 | 25 | /// A disk 26 | pub trait Disk { 27 | /// Read blocks from disk 28 | /// 29 | /// # Safety 30 | /// Unsafe to discourage use, use filesystem wrappers instead 31 | unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result; 32 | 33 | /// Write blocks from disk 34 | /// 35 | /// # Safety 36 | /// Unsafe to discourage use, use filesystem wrappers instead 37 | unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result; 38 | 39 | /// Get size of disk in bytes 40 | fn size(&mut self) -> Result; 41 | } 42 | -------------------------------------------------------------------------------- /src/disk/sparse.rs: -------------------------------------------------------------------------------- 1 | use std::fs::{File, OpenOptions}; 2 | use std::io::{Read, Seek, SeekFrom, Write}; 3 | use std::path::Path; 4 | use syscall::error::{Error, Result, EIO}; 5 | 6 | use crate::disk::Disk; 7 | use crate::BLOCK_SIZE; 8 | 9 | macro_rules! 
try_disk { 10 | ($expr:expr) => { 11 | match $expr { 12 | Ok(val) => val, 13 | Err(err) => { 14 | eprintln!("Disk I/O Error: {}", err); 15 | return Err(Error::new(EIO)); 16 | } 17 | } 18 | }; 19 | } 20 | 21 | pub struct DiskSparse { 22 | pub file: File, 23 | pub max_size: u64, 24 | } 25 | 26 | impl DiskSparse { 27 | pub fn create>(path: P, max_size: u64) -> Result { 28 | let file = try_disk!(OpenOptions::new() 29 | .read(true) 30 | .write(true) 31 | .create(true) 32 | .open(path)); 33 | Ok(DiskSparse { file, max_size }) 34 | } 35 | } 36 | 37 | impl Disk for DiskSparse { 38 | unsafe fn read_at(&mut self, block: u64, buffer: &mut [u8]) -> Result { 39 | try_disk!(self.file.seek(SeekFrom::Start(block * BLOCK_SIZE))); 40 | let count = try_disk!(self.file.read(buffer)); 41 | Ok(count) 42 | } 43 | 44 | unsafe fn write_at(&mut self, block: u64, buffer: &[u8]) -> Result { 45 | try_disk!(self.file.seek(SeekFrom::Start(block * BLOCK_SIZE))); 46 | let count = try_disk!(self.file.write(buffer)); 47 | Ok(count) 48 | } 49 | 50 | fn size(&mut self) -> Result { 51 | Ok(self.max_size) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/filesystem.rs: -------------------------------------------------------------------------------- 1 | use aes::Aes128; 2 | use alloc::{boxed::Box, collections::VecDeque, vec}; 3 | use syscall::error::{Error, Result, EKEYREJECTED, ENOENT, ENOKEY}; 4 | use xts_mode::{get_tweak_default, Xts128}; 5 | 6 | #[cfg(feature = "std")] 7 | use crate::{AllocEntry, AllocList, BlockData, BlockTrait, Key, KeySlot, Node, Salt, TreeList}; 8 | use crate::{ 9 | Allocator, BlockAddr, BlockLevel, BlockMeta, Disk, Header, Transaction, BLOCK_SIZE, 10 | HEADER_RING, RECORD_SIZE, 11 | }; 12 | 13 | fn compress_cache() -> Box<[u8]> { 14 | vec![0; lz4_flex::block::get_maximum_output_size(RECORD_SIZE as usize)].into_boxed_slice() 15 | } 16 | 17 | /// A file system 18 | pub struct FileSystem { 19 | //TODO: make private 20 | pub 
disk: D, 21 | //TODO: make private 22 | pub block: u64, 23 | //TODO: make private 24 | pub header: Header, 25 | pub(crate) allocator: Allocator, 26 | pub(crate) cipher_opt: Option>, 27 | pub(crate) compress_cache: Box<[u8]>, 28 | } 29 | 30 | impl FileSystem { 31 | /// Open a file system on a disk 32 | pub fn open( 33 | mut disk: D, 34 | password_opt: Option<&[u8]>, 35 | block_opt: Option, 36 | squash: bool, 37 | ) -> Result { 38 | for ring_block in block_opt.map_or(0..65536, |x| x..x + 1) { 39 | let mut header = Header::default(); 40 | unsafe { disk.read_at(ring_block, &mut header)? }; 41 | 42 | // Skip invalid headers 43 | if !header.valid() { 44 | continue; 45 | } 46 | 47 | let block = ring_block - (header.generation() % HEADER_RING); 48 | for i in 0..HEADER_RING { 49 | let mut other_header = Header::default(); 50 | unsafe { disk.read_at(block + i, &mut other_header)? }; 51 | 52 | // Skip invalid headers 53 | if !other_header.valid() { 54 | continue; 55 | } 56 | 57 | // If this is a newer header, use it 58 | if other_header.generation() > header.generation() { 59 | header = other_header; 60 | } 61 | } 62 | 63 | let cipher_opt = match password_opt { 64 | Some(password) => { 65 | if !header.encrypted() { 66 | // Header not encrypted but password provided 67 | return Err(Error::new(EKEYREJECTED)); 68 | } 69 | match header.cipher(password) { 70 | Some(cipher) => Some(cipher), 71 | None => { 72 | // Header encrypted with a different password 73 | return Err(Error::new(ENOKEY)); 74 | } 75 | } 76 | } 77 | None => { 78 | if header.encrypted() { 79 | // Header encrypted but no password provided 80 | return Err(Error::new(ENOKEY)); 81 | } 82 | None 83 | } 84 | }; 85 | 86 | let mut fs = FileSystem { 87 | disk, 88 | block, 89 | header, 90 | allocator: Allocator::default(), 91 | cipher_opt, 92 | compress_cache: compress_cache(), 93 | }; 94 | 95 | unsafe { fs.reset_allocator()? 
}; 96 | 97 | // Squash allocations and sync 98 | Transaction::new(&mut fs).commit(squash)?; 99 | 100 | return Ok(fs); 101 | } 102 | 103 | Err(Error::new(ENOENT)) 104 | } 105 | 106 | /// Create a file system on a disk 107 | #[cfg(feature = "std")] 108 | pub fn create( 109 | disk: D, 110 | password_opt: Option<&[u8]>, 111 | ctime: u64, 112 | ctime_nsec: u32, 113 | ) -> Result { 114 | Self::create_reserved(disk, password_opt, &[], ctime, ctime_nsec) 115 | } 116 | 117 | /// Create a file system on a disk, with reserved data at the beginning 118 | /// Reserved data will be zero padded up to the nearest block 119 | /// We need to pass ctime and ctime_nsec in order to initialize the unix timestamps 120 | #[cfg(feature = "std")] 121 | pub fn create_reserved( 122 | mut disk: D, 123 | password_opt: Option<&[u8]>, 124 | reserved: &[u8], 125 | ctime: u64, 126 | ctime_nsec: u32, 127 | ) -> Result { 128 | let disk_size = disk.size()?; 129 | let disk_blocks = disk_size / BLOCK_SIZE; 130 | let block_offset = (reserved.len() as u64).div_ceil(BLOCK_SIZE); 131 | if disk_blocks < (block_offset + HEADER_RING + 4) { 132 | return Err(Error::new(syscall::error::ENOSPC)); 133 | } 134 | let fs_blocks = disk_blocks - block_offset; 135 | 136 | // Fill reserved data, pad with zeroes 137 | for block in 0..block_offset as usize { 138 | let mut data = [0; BLOCK_SIZE as usize]; 139 | 140 | let mut i = 0; 141 | while i < data.len() && block * BLOCK_SIZE as usize + i < reserved.len() { 142 | data[i] = reserved[block * BLOCK_SIZE as usize + i]; 143 | i += 1; 144 | } 145 | 146 | unsafe { 147 | disk.write_at(block as u64, &data)?; 148 | } 149 | } 150 | 151 | let mut header = Header::new(fs_blocks * BLOCK_SIZE); 152 | 153 | let cipher_opt = match password_opt { 154 | Some(password) => { 155 | //TODO: handle errors 156 | header.key_slots[0] = KeySlot::new( 157 | password, 158 | Salt::new().unwrap(), 159 | (Key::new().unwrap(), Key::new().unwrap()), 160 | ) 161 | .unwrap(); 162 | 
Some(header.key_slots[0].cipher(password).unwrap()) 163 | } 164 | None => None, 165 | }; 166 | 167 | let mut fs = FileSystem { 168 | disk, 169 | block: block_offset, 170 | header, 171 | allocator: Allocator::default(), 172 | cipher_opt, 173 | compress_cache: compress_cache(), 174 | }; 175 | 176 | // Write header generation zero 177 | let count = unsafe { fs.disk.write_at(fs.block, &fs.header)? }; 178 | if count != core::mem::size_of_val(&fs.header) { 179 | // Wrote wrong number of bytes 180 | #[cfg(feature = "log")] 181 | log::error!("CREATE: WRONG NUMBER OF BYTES"); 182 | return Err(Error::new(syscall::error::EIO)); 183 | } 184 | 185 | // Set tree and alloc pointers and write header generation one 186 | fs.tx(|tx| unsafe { 187 | let tree = BlockData::new( 188 | BlockAddr::new(HEADER_RING + 1, BlockMeta::default()), 189 | TreeList::empty(BlockLevel::default()).unwrap(), 190 | ); 191 | 192 | let mut alloc = BlockData::new( 193 | BlockAddr::new(HEADER_RING + 2, BlockMeta::default()), 194 | AllocList::empty(BlockLevel::default()).unwrap(), 195 | ); 196 | 197 | let alloc_free = fs_blocks - (HEADER_RING + 4); 198 | alloc.data_mut().entries[0] = AllocEntry::new(HEADER_RING + 4, alloc_free as i64); 199 | 200 | tx.header.tree = tx.write_block(tree)?; 201 | tx.header.alloc = tx.write_block(alloc)?; 202 | tx.header_changed = true; 203 | 204 | Ok(()) 205 | })?; 206 | 207 | unsafe { 208 | fs.reset_allocator()?; 209 | } 210 | 211 | fs.tx(|tx| unsafe { 212 | let mut root = BlockData::new( 213 | BlockAddr::new(HEADER_RING + 3, BlockMeta::default()), 214 | Node::new(Node::MODE_DIR | 0o755, 0, 0, ctime, ctime_nsec), 215 | ); 216 | root.data_mut().set_links(1); 217 | let root_ptr = tx.write_block(root)?; 218 | assert_eq!(tx.insert_tree(root_ptr)?.id(), 1); 219 | Ok(()) 220 | })?; 221 | 222 | // Make sure everything is synced and squash allocations 223 | Transaction::new(&mut fs).commit(true)?; 224 | 225 | Ok(fs) 226 | } 227 | 228 | /// start a filesystem transaction, required for 
making any changes 229 | pub fn tx) -> Result, T>(&mut self, f: F) -> Result { 230 | let mut tx = Transaction::new(self); 231 | let t = f(&mut tx)?; 232 | tx.commit(false)?; 233 | Ok(t) 234 | } 235 | 236 | pub fn allocator(&self) -> &Allocator { 237 | &self.allocator 238 | } 239 | 240 | /// Unsafe as it can corrupt the filesystem 241 | pub unsafe fn allocator_mut(&mut self) -> &mut Allocator { 242 | &mut self.allocator 243 | } 244 | 245 | /// Reset allocator to state stored on disk 246 | /// 247 | /// # Safety 248 | /// Unsafe, it must only be called when opening the filesystem 249 | unsafe fn reset_allocator(&mut self) -> Result<()> { 250 | self.allocator = Allocator::default(); 251 | 252 | // To avoid having to update all prior alloc blocks, there is only a previous pointer 253 | // This means we need to roll back all allocations. Currently we do this by reading the 254 | // alloc log into a buffer to reverse it. 255 | let mut allocs = VecDeque::new(); 256 | self.tx(|tx| { 257 | let mut alloc_ptr = tx.header.alloc; 258 | while !alloc_ptr.is_null() { 259 | let alloc = tx.read_block(alloc_ptr)?; 260 | alloc_ptr = alloc.data().prev; 261 | allocs.push_front(alloc); 262 | } 263 | Ok(()) 264 | })?; 265 | 266 | for alloc in allocs { 267 | for entry in alloc.data().entries.iter() { 268 | let index = entry.index(); 269 | let count = entry.count(); 270 | if count < 0 { 271 | for i in 0..-count { 272 | //TODO: replace assert with error? 
273 | let addr = BlockAddr::new(index + i as u64, BlockMeta::default()); 274 | assert_eq!(self.allocator.allocate_exact(addr), Some(addr)); 275 | } 276 | } else { 277 | for i in 0..count { 278 | let addr = BlockAddr::new(index + i as u64, BlockMeta::default()); 279 | self.allocator.deallocate(addr); 280 | } 281 | } 282 | } 283 | } 284 | 285 | Ok(()) 286 | } 287 | 288 | pub(crate) fn decrypt(&mut self, data: &mut [u8], addr: BlockAddr) -> bool { 289 | if let Some(ref cipher) = self.cipher_opt { 290 | cipher.decrypt_area( 291 | data, 292 | BLOCK_SIZE as usize, 293 | addr.index().into(), 294 | get_tweak_default, 295 | ); 296 | true 297 | } else { 298 | // Do nothing if encryption is disabled 299 | false 300 | } 301 | } 302 | 303 | pub(crate) fn encrypt(&mut self, data: &mut [u8], addr: BlockAddr) -> bool { 304 | if let Some(ref cipher) = self.cipher_opt { 305 | cipher.encrypt_area( 306 | data, 307 | BLOCK_SIZE as usize, 308 | addr.index().into(), 309 | get_tweak_default, 310 | ); 311 | true 312 | } else { 313 | // Do nothing if encryption is disabled 314 | false 315 | } 316 | } 317 | } 318 | -------------------------------------------------------------------------------- /src/header.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Deref, DerefMut}; 2 | use core::{fmt, mem, slice}; 3 | use endian_num::Le; 4 | 5 | use aes::Aes128; 6 | use xts_mode::{get_tweak_default, Xts128}; 7 | 8 | use crate::{AllocList, BlockPtr, KeySlot, Tree, BLOCK_SIZE, SIGNATURE, VERSION}; 9 | 10 | pub const HEADER_RING: u64 = 256; 11 | 12 | /// The header of the filesystem 13 | #[derive(Clone, Copy)] 14 | #[repr(C, packed)] 15 | pub struct Header { 16 | /// Signature, should be SIGNATURE 17 | pub signature: [u8; 8], 18 | /// Version, should be VERSION 19 | pub version: Le, 20 | /// Disk ID, a 128-bit unique identifier 21 | pub uuid: [u8; 16], 22 | /// Disk size, in number of BLOCK_SIZE sectors 23 | pub size: Le, 24 | /// Generation of 
header 25 | pub generation: Le, 26 | /// Block of first tree node 27 | pub tree: BlockPtr, 28 | /// Block of last alloc node 29 | pub alloc: BlockPtr, 30 | /// Key slots 31 | pub key_slots: [KeySlot; 64], 32 | /// Padding 33 | pub padding: [u8; BLOCK_SIZE as usize - 3176], 34 | /// encrypted hash of header data without hash, set to hash and padded if disk is not encrypted 35 | pub encrypted_hash: [u8; 16], 36 | /// hash of header data without hash 37 | pub hash: Le, 38 | } 39 | 40 | impl Header { 41 | #[cfg(feature = "std")] 42 | pub fn new(size: u64) -> Header { 43 | let uuid = uuid::Uuid::new_v4(); 44 | let mut header = Header { 45 | signature: *SIGNATURE, 46 | version: VERSION.into(), 47 | uuid: *uuid.as_bytes(), 48 | size: size.into(), 49 | ..Default::default() 50 | }; 51 | header.update_hash(None); 52 | header 53 | } 54 | 55 | pub fn valid(&self) -> bool { 56 | if &self.signature != SIGNATURE { 57 | // Signature does not match 58 | return false; 59 | } 60 | 61 | if self.version.to_ne() != VERSION { 62 | // Version does not match 63 | return false; 64 | } 65 | 66 | if self.hash.to_ne() != self.create_hash() { 67 | // Hash does not match 68 | return false; 69 | } 70 | 71 | // All tests passed, header is valid 72 | true 73 | } 74 | 75 | pub fn uuid(&self) -> [u8; 16] { 76 | self.uuid 77 | } 78 | 79 | pub fn size(&self) -> u64 { 80 | self.size.to_ne() 81 | } 82 | 83 | pub fn generation(&self) -> u64 { 84 | self.generation.to_ne() 85 | } 86 | 87 | fn create_hash(&self) -> u64 { 88 | // Calculate part of header to hash (everything before the hashes) 89 | let end = mem::size_of_val(self) 90 | - mem::size_of_val(&{ self.hash }) 91 | - mem::size_of_val(&{ self.encrypted_hash }); 92 | seahash::hash(&self[..end]) 93 | } 94 | 95 | fn create_encrypted_hash(&self, cipher_opt: Option<&Xts128>) -> [u8; 16] { 96 | let mut encrypted_hash = [0; 16]; 97 | for (i, b) in self.hash.to_le_bytes().iter().enumerate() { 98 | encrypted_hash[i] = *b; 99 | } 100 | if let Some(cipher) = 
cipher_opt { 101 | let mut block = aes::Block::from(encrypted_hash); 102 | cipher.encrypt_area( 103 | &mut block, 104 | BLOCK_SIZE as usize, 105 | self.generation().into(), 106 | get_tweak_default, 107 | ); 108 | encrypted_hash = block.into(); 109 | } 110 | encrypted_hash 111 | } 112 | 113 | pub fn encrypted(&self) -> bool { 114 | (self.encrypted_hash) != self.create_encrypted_hash(None) 115 | } 116 | 117 | pub fn cipher(&self, password: &[u8]) -> Option> { 118 | let hash = self.create_encrypted_hash(None); 119 | for slot in self.key_slots.iter() { 120 | //TODO: handle errors 121 | let cipher = slot.cipher(password).unwrap(); 122 | let mut block = aes::Block::from(self.encrypted_hash); 123 | cipher.decrypt_area( 124 | &mut block, 125 | BLOCK_SIZE as usize, 126 | self.generation().into(), 127 | get_tweak_default, 128 | ); 129 | if block == aes::Block::from(hash) { 130 | return Some(cipher); 131 | } 132 | } 133 | None 134 | } 135 | 136 | fn update_hash(&mut self, cipher_opt: Option<&Xts128>) { 137 | self.hash = self.create_hash().into(); 138 | // Make sure to do this second, it relies on the hash being up to date 139 | self.encrypted_hash = self.create_encrypted_hash(cipher_opt); 140 | } 141 | 142 | pub fn update(&mut self, cipher_opt: Option<&Xts128>) -> u64 { 143 | let mut generation = self.generation(); 144 | generation += 1; 145 | self.generation = generation.into(); 146 | self.update_hash(cipher_opt); 147 | generation 148 | } 149 | } 150 | 151 | impl Default for Header { 152 | fn default() -> Self { 153 | Self { 154 | signature: [0; 8], 155 | version: 0.into(), 156 | uuid: [0; 16], 157 | size: 0.into(), 158 | generation: 0.into(), 159 | tree: BlockPtr::::default(), 160 | alloc: BlockPtr::::default(), 161 | key_slots: [KeySlot::default(); 64], 162 | padding: [0; BLOCK_SIZE as usize - 3176], 163 | encrypted_hash: [0; 16], 164 | hash: 0.into(), 165 | } 166 | } 167 | } 168 | 169 | impl fmt::Debug for Header { 170 | fn fmt(&self, f: &mut fmt::Formatter) -> 
fmt::Result { 171 | let signature = self.signature; 172 | let version = self.version; 173 | let uuid = self.uuid; 174 | let size = self.size; 175 | let generation = self.generation; 176 | let tree = self.tree; 177 | let alloc = self.alloc; 178 | let hash = self.hash; 179 | f.debug_struct("Header") 180 | .field("signature", &signature) 181 | .field("version", &version) 182 | .field("uuid", &uuid) 183 | .field("size", &size) 184 | .field("generation", &generation) 185 | .field("tree", &tree) 186 | .field("alloc", &alloc) 187 | .field("hash", &hash) 188 | .finish() 189 | } 190 | } 191 | 192 | impl Deref for Header { 193 | type Target = [u8]; 194 | fn deref(&self) -> &[u8] { 195 | unsafe { 196 | slice::from_raw_parts(self as *const Header as *const u8, mem::size_of::
()) 197 | as &[u8] 198 | } 199 | } 200 | } 201 | 202 | impl DerefMut for Header { 203 | fn deref_mut(&mut self) -> &mut [u8] { 204 | unsafe { 205 | slice::from_raw_parts_mut(self as *mut Header as *mut u8, mem::size_of::
()) 206 | as &mut [u8] 207 | } 208 | } 209 | } 210 | 211 | #[test] 212 | fn header_not_valid_test() { 213 | assert_eq!(Header::default().valid(), false); 214 | } 215 | 216 | #[test] 217 | fn header_size_test() { 218 | assert_eq!(mem::size_of::
(), BLOCK_SIZE as usize); 219 | } 220 | 221 | #[test] 222 | fn header_hash_test() { 223 | let mut header = Header::default(); 224 | assert_eq!(header.create_hash(), 0xe81ffcb86026ff96); 225 | header.update_hash(None); 226 | assert_eq!(header.hash.to_ne(), 0xe81ffcb86026ff96); 227 | assert_eq!( 228 | header.encrypted_hash, 229 | [0x96, 0xff, 0x26, 0x60, 0xb8, 0xfc, 0x1f, 0xe8, 0, 0, 0, 0, 0, 0, 0, 0] 230 | ); 231 | } 232 | 233 | #[cfg(feature = "std")] 234 | #[test] 235 | fn header_valid_test() { 236 | assert_eq!(Header::new(0).valid(), true); 237 | } 238 | -------------------------------------------------------------------------------- /src/key.rs: -------------------------------------------------------------------------------- 1 | use aes::{ 2 | cipher::{BlockDecrypt, BlockEncrypt, KeyInit}, 3 | Aes128, 4 | }; 5 | use xts_mode::Xts128; 6 | 7 | // The raw key, keep secret! 8 | #[repr(transparent)] 9 | pub struct Key([u8; 16]); 10 | 11 | impl Key { 12 | /// Generate a random key 13 | #[cfg(feature = "std")] 14 | pub fn new() -> Result { 15 | let mut bytes = [0; 16]; 16 | getrandom::getrandom(&mut bytes)?; 17 | Ok(Self(bytes)) 18 | } 19 | 20 | pub fn encrypt(&self, password_aes: &Aes128) -> EncryptedKey { 21 | let mut block = aes::Block::from(self.0); 22 | password_aes.encrypt_block(&mut block); 23 | EncryptedKey(block.into()) 24 | } 25 | 26 | pub fn into_aes(self) -> Aes128 { 27 | Aes128::new(&aes::Block::from(self.0)) 28 | } 29 | } 30 | 31 | /// The encrypted key, encrypted with AES using the salt and password 32 | #[derive(Clone, Copy, Default)] 33 | #[repr(transparent)] 34 | pub struct EncryptedKey([u8; 16]); 35 | 36 | impl EncryptedKey { 37 | pub fn decrypt(&self, password_aes: &Aes128) -> Key { 38 | let mut block = aes::Block::from(self.0); 39 | password_aes.decrypt_block(&mut block); 40 | Key(block.into()) 41 | } 42 | } 43 | 44 | /// Salt used to prevent rainbow table attacks on the encryption password 45 | #[derive(Clone, Copy, Default)] 46 | 
#[repr(transparent)] 47 | pub struct Salt([u8; 16]); 48 | 49 | impl Salt { 50 | /// Generate a random salt 51 | #[cfg(feature = "std")] 52 | pub fn new() -> Result { 53 | let mut bytes = [0; 16]; 54 | getrandom::getrandom(&mut bytes)?; 55 | Ok(Self(bytes)) 56 | } 57 | } 58 | 59 | /// The key slot, containing the salt and encrypted key that are used with one password 60 | #[derive(Clone, Copy, Default)] 61 | #[repr(C, packed)] 62 | pub struct KeySlot { 63 | salt: Salt, 64 | // Two keys for AES XTS 128 65 | encrypted_keys: (EncryptedKey, EncryptedKey), 66 | } 67 | 68 | impl KeySlot { 69 | /// Get the password AES key (generated from the password and salt, encrypts the real key) 70 | pub fn password_aes(password: &[u8], salt: &Salt) -> Result { 71 | let mut key = Key([0; 16]); 72 | 73 | let mut params_builder = argon2::ParamsBuilder::new(); 74 | params_builder.output_len(key.0.len())?; 75 | 76 | let argon2 = argon2::Argon2::new( 77 | argon2::Algorithm::Argon2id, 78 | argon2::Version::V0x13, 79 | params_builder.params()?, 80 | ); 81 | 82 | argon2.hash_password_into(password, &salt.0, &mut key.0)?; 83 | 84 | Ok(key.into_aes()) 85 | } 86 | 87 | /// Create a new key slot from a password, salt, and encryption key 88 | pub fn new(password: &[u8], salt: Salt, keys: (Key, Key)) -> Result { 89 | let password_aes = Self::password_aes(password, &salt)?; 90 | Ok(Self { 91 | salt, 92 | encrypted_keys: (keys.0.encrypt(&password_aes), keys.1.encrypt(&password_aes)), 93 | }) 94 | } 95 | 96 | /// Get the encryption cipher from this key slot 97 | pub fn cipher(&self, password: &[u8]) -> Result, argon2::Error> { 98 | let password_aes = Self::password_aes(password, &self.salt)?; 99 | Ok(Xts128::new( 100 | self.encrypted_keys.0.decrypt(&password_aes).into_aes(), 101 | self.encrypted_keys.1.decrypt(&password_aes).into_aes(), 102 | )) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/lib.rs: 
--------------------------------------------------------------------------------
#![crate_name = "redoxfs"]
#![crate_type = "lib"]
#![cfg_attr(not(feature = "std"), no_std)]
// Used often in generating redox_syscall errors
#![allow(clippy::or_fun_call)]
#![allow(unexpected_cfgs)]

extern crate alloc;

use core::sync::atomic::AtomicUsize;

// The alloc log grows by 1 block about every 21 generations
pub const ALLOC_GC_THRESHOLD: u64 = 1024;
pub const BLOCK_SIZE: u64 = 4096;
// A record is 4KiB << 5 = 128KiB
pub const RECORD_LEVEL: usize = 5;
pub const RECORD_SIZE: u64 = BLOCK_SIZE << RECORD_LEVEL;
pub const SIGNATURE: &[u8; 8] = b"RedoxFS\0";
pub const VERSION: u64 = 8;
pub const DIR_ENTRY_MAX_LENGTH: usize = 252;

pub static IS_UMT: AtomicUsize = AtomicUsize::new(0);

pub use self::allocator::{AllocEntry, AllocList, Allocator, ALLOC_LIST_ENTRIES};
#[cfg(feature = "std")]
pub use self::archive::{archive, archive_at};
pub use self::block::{
    BlockAddr, BlockData, BlockLevel, BlockList, BlockMeta, BlockPtr, BlockRaw, BlockTrait,
};
#[cfg(feature = "std")]
pub use self::clone::clone;
pub use self::dir::{DirEntry, DirList};
pub use self::disk::*;
pub use self::filesystem::FileSystem;
pub use self::header::{Header, HEADER_RING};
pub use self::key::{Key, KeySlot, Salt};
#[cfg(feature = "std")]
pub use self::mount::mount;
pub use self::node::{Node, NodeFlags, NodeLevel, NodeLevelData};
pub use self::record::RecordRaw;
pub use self::transaction::Transaction;
pub use self::tree::{Tree, TreeData, TreeList, TreePtr};
#[cfg(feature = "std")]
pub use self::unmount::unmount_path;

mod allocator;
#[cfg(feature = "std")]
mod archive;
mod block;
#[cfg(feature = "std")]
mod clone;
mod dir;
mod disk;
mod filesystem;
mod header;
mod htree;
mod key;
#[cfg(all(feature = "std", not(fuzzing)))]
mod mount;
#[cfg(all(feature = "std", fuzzing))]
pub mod mount;
mod node;
mod record;
mod transaction;
mod tree;
#[cfg(feature = "std")]
mod unmount;

#[cfg(all(feature = "std", test))]
mod tests;
--------------------------------------------------------------------------------
/src/mount/fuse.rs:
--------------------------------------------------------------------------------
extern crate fuser;

use std::cmp;
use std::ffi::OsStr;
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

use self::fuser::MountOption;
use self::fuser::TimeOrNow;
use crate::mount::fuse::TimeOrNow::Now;
use crate::mount::fuse::TimeOrNow::SpecificTime;

use crate::{filesystem, Disk, Node, Transaction, TreeData, TreePtr, BLOCK_SIZE};

use self::fuser::{
    FileAttr, FileType, Filesystem, ReplyAttr, ReplyCreate, ReplyData, ReplyDirectory, ReplyEmpty,
    ReplyEntry, ReplyStatfs, ReplyWrite, Request, Session,
};
use std::time::Duration;

const TTL: Duration = Duration::new(1, 0); // 1 second

const NULL_TIME: Duration = Duration::new(0, 0);

/// Mount `filesystem` at `mountpoint` via FUSE, invoke `callback` with the
/// mount path once the session exists, then run the session to completion.
/// A final squashing commit runs on unmount regardless of session outcome.
pub fn mount<D, P, T, F>(
    mut filesystem: filesystem::FileSystem<D>,
    mountpoint: P,
    callback: F,
) -> io::Result<T>
where
    D: Disk,
    P: AsRef<Path>,
    F: FnOnce(&Path) -> T,
{
    let mountpoint = mountpoint.as_ref();

    // One of the uses of this redoxfs fuse wrapper is to populate a filesystem
    // while building the Redox OS kernel. This means that we need to write on
    // a filesystem that belongs to `root`, which in turn means that we need to
    // be `root`, thus that we need to allow `root` to have access.
    let defer_permissions = [MountOption::CUSTOM("defer_permissions".to_owned())];

    let res = {
        let mut session = Session::new(
            Fuse {
                fs: &mut filesystem,
            },
            mountpoint,
            if cfg!(target_os = "macos") {
                &defer_permissions
            } else {
                &[]
            },
        )?;

        let res = callback(mountpoint);

        session.run()?;

        res
    };

    // Squash allocations and sync on unmount
    let _ = Transaction::new(&mut filesystem).commit(true);

    Ok(res)
}
43 | let defer_permissions = [MountOption::CUSTOM("defer_permissions".to_owned())]; 44 | 45 | let res = { 46 | let mut session = Session::new( 47 | Fuse { 48 | fs: &mut filesystem, 49 | }, 50 | mountpoint, 51 | if cfg!(target_os = "macos") { 52 | &defer_permissions 53 | } else { 54 | &[] 55 | }, 56 | )?; 57 | 58 | let res = callback(mountpoint); 59 | 60 | session.run()?; 61 | 62 | res 63 | }; 64 | 65 | // Squash allocations and sync on unmount 66 | let _ = Transaction::new(&mut filesystem).commit(true); 67 | 68 | Ok(res) 69 | } 70 | 71 | pub struct Fuse<'f, D: Disk> { 72 | pub fs: &'f mut filesystem::FileSystem, 73 | } 74 | 75 | fn node_attr(node: &TreeData) -> FileAttr { 76 | FileAttr { 77 | ino: node.id() as u64, 78 | size: node.data().size(), 79 | // Blocks is in 512 byte blocks, not in our block size 80 | blocks: node.data().blocks() * (BLOCK_SIZE / 512), 81 | blksize: 512, 82 | atime: SystemTime::UNIX_EPOCH + Duration::new(node.data().atime().0, node.data().atime().1), 83 | mtime: SystemTime::UNIX_EPOCH + Duration::new(node.data().mtime().0, node.data().mtime().1), 84 | ctime: SystemTime::UNIX_EPOCH + Duration::new(node.data().ctime().0, node.data().ctime().1), 85 | crtime: UNIX_EPOCH + NULL_TIME, 86 | kind: if node.data().is_dir() { 87 | FileType::Directory 88 | } else if node.data().is_symlink() { 89 | FileType::Symlink 90 | } else { 91 | FileType::RegularFile 92 | }, 93 | perm: node.data().mode() & Node::MODE_PERM, 94 | nlink: node.data().links(), 95 | uid: node.data().uid(), 96 | gid: node.data().gid(), 97 | rdev: 0, 98 | flags: 0, 99 | } 100 | } 101 | 102 | impl Filesystem for Fuse<'_, D> { 103 | fn lookup(&mut self, _req: &Request, parent_id: u64, name: &OsStr, reply: ReplyEntry) { 104 | let parent_ptr = TreePtr::new(parent_id as u32); 105 | match self 106 | .fs 107 | .tx(|tx| tx.find_node(parent_ptr, name.to_str().unwrap())) 108 | { 109 | Ok(node) => { 110 | reply.entry(&TTL, &node_attr(&node), 0); 111 | } 112 | Err(err) => { 113 | 
reply.error(err.errno); 114 | } 115 | } 116 | } 117 | 118 | fn getattr(&mut self, _req: &Request, node_id: u64, reply: ReplyAttr) { 119 | let node_ptr = TreePtr::::new(node_id as u32); 120 | match self.fs.tx(|tx| tx.read_tree(node_ptr)) { 121 | Ok(node) => { 122 | reply.attr(&TTL, &node_attr(&node)); 123 | } 124 | Err(err) => { 125 | reply.error(err.errno); 126 | } 127 | } 128 | } 129 | 130 | fn setattr( 131 | &mut self, 132 | _req: &Request, 133 | node_id: u64, 134 | mode: Option, 135 | uid: Option, 136 | gid: Option, 137 | size: Option, 138 | atime: Option, 139 | mtime: Option, 140 | _ctime: Option, 141 | _fh: Option, 142 | _crtime: Option, 143 | _chgtime: Option, 144 | _bkuptime: Option, 145 | _flags: Option, 146 | reply: ReplyAttr, 147 | ) { 148 | let node_ptr = TreePtr::::new(node_id as u32); 149 | 150 | let mut node = match self.fs.tx(|tx| tx.read_tree(node_ptr)) { 151 | Ok(ok) => ok, 152 | Err(err) => { 153 | reply.error(err.errno); 154 | return; 155 | } 156 | }; 157 | let mut node_changed = false; 158 | 159 | if let Some(mode) = mode { 160 | if node.data().mode() & Node::MODE_PERM != mode as u16 & Node::MODE_PERM { 161 | let new_mode = 162 | (node.data().mode() & Node::MODE_TYPE) | (mode as u16 & Node::MODE_PERM); 163 | node.data_mut().set_mode(new_mode); 164 | node_changed = true; 165 | } 166 | } 167 | 168 | if let Some(uid) = uid { 169 | if node.data().uid() != uid { 170 | node.data_mut().set_uid(uid); 171 | node_changed = true; 172 | } 173 | } 174 | 175 | if let Some(gid) = gid { 176 | if node.data().gid() != gid { 177 | node.data_mut().set_gid(gid); 178 | node_changed = true; 179 | } 180 | } 181 | 182 | if let Some(atime) = atime { 183 | let atime_c = match atime { 184 | SpecificTime(st) => st.duration_since(UNIX_EPOCH).unwrap(), 185 | Now => SystemTime::now().duration_since(UNIX_EPOCH).unwrap(), 186 | }; 187 | node.data_mut() 188 | .set_atime(atime_c.as_secs(), atime_c.subsec_nanos()); 189 | node_changed = true; 190 | } 191 | 192 | if let Some(mtime) = 
mtime { 193 | let mtime_c = match mtime { 194 | SpecificTime(st) => st.duration_since(UNIX_EPOCH).unwrap(), 195 | Now => SystemTime::now().duration_since(UNIX_EPOCH).unwrap(), 196 | }; 197 | node.data_mut() 198 | .set_mtime(mtime_c.as_secs(), mtime_c.subsec_nanos()); 199 | node_changed = true; 200 | } 201 | 202 | if let Some(size) = size { 203 | match self.fs.tx(|tx| tx.truncate_node_inner(&mut node, size)) { 204 | Ok(ok) => { 205 | if ok { 206 | node_changed = true; 207 | } 208 | } 209 | Err(err) => { 210 | reply.error(err.errno); 211 | return; 212 | } 213 | } 214 | } 215 | 216 | let attr = node_attr(&node); 217 | 218 | if node_changed { 219 | if let Err(err) = self.fs.tx(|tx| tx.sync_tree(node)) { 220 | reply.error(err.errno); 221 | return; 222 | } 223 | } 224 | 225 | reply.attr(&TTL, &attr); 226 | } 227 | 228 | fn read( 229 | &mut self, 230 | _req: &Request, 231 | node_id: u64, 232 | _fh: u64, 233 | offset: i64, 234 | size: u32, 235 | _flags: i32, 236 | _lock_owner: Option, 237 | reply: ReplyData, 238 | ) { 239 | let node_ptr = TreePtr::::new(node_id as u32); 240 | 241 | let atime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 242 | let mut data = vec![0; size as usize]; 243 | match self.fs.tx(|tx| { 244 | tx.read_node( 245 | node_ptr, 246 | cmp::max(0, offset) as u64, 247 | &mut data, 248 | atime.as_secs(), 249 | atime.subsec_nanos(), 250 | ) 251 | }) { 252 | Ok(count) => { 253 | reply.data(&data[..count]); 254 | } 255 | Err(err) => { 256 | reply.error(err.errno); 257 | } 258 | } 259 | } 260 | 261 | fn write( 262 | &mut self, 263 | _req: &Request, 264 | node_id: u64, 265 | _fh: u64, 266 | offset: i64, 267 | data: &[u8], 268 | _write_flags: u32, 269 | _flags: i32, 270 | _lock_owner: Option, 271 | reply: ReplyWrite, 272 | ) { 273 | let node_ptr = TreePtr::::new(node_id as u32); 274 | 275 | let mtime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 276 | match self.fs.tx(|tx| { 277 | tx.write_node( 278 | node_ptr, 279 | cmp::max(0, offset) as 
u64, 280 | data, 281 | mtime.as_secs(), 282 | mtime.subsec_nanos(), 283 | ) 284 | }) { 285 | Ok(count) => { 286 | reply.written(count as u32); 287 | } 288 | Err(err) => { 289 | reply.error(err.errno); 290 | } 291 | } 292 | } 293 | 294 | fn flush(&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: ReplyEmpty) { 295 | reply.ok(); 296 | } 297 | 298 | fn fsync(&mut self, _req: &Request, _ino: u64, _fh: u64, _datasync: bool, reply: ReplyEmpty) { 299 | reply.ok(); 300 | } 301 | 302 | fn readdir( 303 | &mut self, 304 | _req: &Request, 305 | parent_id: u64, 306 | _fh: u64, 307 | offset: i64, 308 | mut reply: ReplyDirectory, 309 | ) { 310 | let parent_ptr = TreePtr::new(parent_id as u32); 311 | let mut children = Vec::new(); 312 | match self.fs.tx(|tx| tx.child_nodes(parent_ptr, &mut children)) { 313 | Ok(()) => { 314 | let mut i; 315 | let skip; 316 | if offset == 0 { 317 | skip = 0; 318 | i = 0; 319 | let _full = reply.add(parent_id, i, FileType::Directory, "."); 320 | 321 | i += 1; 322 | let _full = reply.add( 323 | //TODO: get parent? 
324 | parent_id, 325 | i, 326 | FileType::Directory, 327 | "..", 328 | ); 329 | i += 1; 330 | } else { 331 | i = offset + 1; 332 | skip = offset as usize - 1; 333 | } 334 | 335 | for child in children.iter().skip(skip) { 336 | //TODO: make it possible to get file type from directory entry 337 | let node = match self.fs.tx(|tx| tx.read_tree(child.node_ptr())) { 338 | Ok(ok) => ok, 339 | Err(err) => { 340 | reply.error(err.errno); 341 | return; 342 | } 343 | }; 344 | 345 | let full = reply.add( 346 | child.node_ptr().id() as u64, 347 | i, 348 | if node.data().is_dir() { 349 | FileType::Directory 350 | } else { 351 | FileType::RegularFile 352 | }, 353 | child.name().unwrap(), 354 | ); 355 | 356 | if full { 357 | break; 358 | } 359 | 360 | i += 1; 361 | } 362 | reply.ok(); 363 | } 364 | Err(err) => { 365 | reply.error(err.errno); 366 | } 367 | } 368 | } 369 | 370 | fn create( 371 | &mut self, 372 | _req: &Request, 373 | parent_id: u64, 374 | name: &OsStr, 375 | mode: u32, 376 | _umask: u32, 377 | _flags: i32, 378 | reply: ReplyCreate, 379 | ) { 380 | let parent_ptr = TreePtr::::new(parent_id as u32); 381 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 382 | match self.fs.tx(|tx| { 383 | tx.create_node( 384 | parent_ptr, 385 | name.to_str().unwrap(), 386 | Node::MODE_FILE | (mode as u16 & Node::MODE_PERM), 387 | ctime.as_secs(), 388 | ctime.subsec_nanos(), 389 | ) 390 | }) { 391 | Ok(node) => { 392 | // println!("Create {:?}:{:o}:{:o}", node.1.name(), node.1.mode, mode); 393 | reply.created(&TTL, &node_attr(&node), 0, 0, 0); 394 | } 395 | Err(error) => { 396 | reply.error(error.errno); 397 | } 398 | } 399 | } 400 | 401 | fn mkdir( 402 | &mut self, 403 | _req: &Request, 404 | parent_id: u64, 405 | name: &OsStr, 406 | mode: u32, 407 | _umask: u32, 408 | reply: ReplyEntry, 409 | ) { 410 | let parent_ptr = TreePtr::::new(parent_id as u32); 411 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 412 | match self.fs.tx(|tx| { 413 | 
tx.create_node( 414 | parent_ptr, 415 | name.to_str().unwrap(), 416 | Node::MODE_DIR | (mode as u16 & Node::MODE_PERM), 417 | ctime.as_secs(), 418 | ctime.subsec_nanos(), 419 | ) 420 | }) { 421 | Ok(node) => { 422 | // println!("Mkdir {:?}:{:o}:{:o}", node.1.name(), node.1.mode, mode); 423 | reply.entry(&TTL, &node_attr(&node), 0); 424 | } 425 | Err(error) => { 426 | reply.error(error.errno); 427 | } 428 | } 429 | } 430 | 431 | fn rmdir(&mut self, _req: &Request, parent_id: u64, name: &OsStr, reply: ReplyEmpty) { 432 | let parent_ptr = TreePtr::::new(parent_id as u32); 433 | match self 434 | .fs 435 | .tx(|tx| tx.remove_node(parent_ptr, name.to_str().unwrap(), Node::MODE_DIR)) 436 | { 437 | Ok(_) => { 438 | reply.ok(); 439 | } 440 | Err(err) => { 441 | reply.error(err.errno); 442 | } 443 | } 444 | } 445 | 446 | fn unlink(&mut self, _req: &Request, parent_id: u64, name: &OsStr, reply: ReplyEmpty) { 447 | let parent_ptr = TreePtr::::new(parent_id as u32); 448 | match self 449 | .fs 450 | .tx(|tx| tx.remove_node(parent_ptr, name.to_str().unwrap(), Node::MODE_FILE)) 451 | { 452 | Ok(_) => { 453 | reply.ok(); 454 | } 455 | Err(err) => { 456 | reply.error(err.errno); 457 | } 458 | } 459 | } 460 | 461 | fn statfs(&mut self, _req: &Request, _ino: u64, reply: ReplyStatfs) { 462 | let bsize = BLOCK_SIZE; 463 | let blocks = self.fs.header.size() / bsize; 464 | let bfree = self.fs.allocator().free(); 465 | reply.statfs(blocks, bfree, bfree, 0, 0, bsize as u32, 256, 0); 466 | } 467 | 468 | fn symlink( 469 | &mut self, 470 | _req: &Request, 471 | parent_id: u64, 472 | name: &OsStr, 473 | link: &Path, 474 | reply: ReplyEntry, 475 | ) { 476 | let parent_ptr = TreePtr::::new(parent_id as u32); 477 | let ctime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 478 | match self.fs.tx(|tx| { 479 | let node = tx.create_node( 480 | parent_ptr, 481 | name.to_str().unwrap(), 482 | Node::MODE_SYMLINK | 0o777, 483 | ctime.as_secs(), 484 | ctime.subsec_nanos(), 485 | )?; 486 | 487 | 
let mtime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 488 | tx.write_node( 489 | node.ptr(), 490 | 0, 491 | link.as_os_str().as_bytes(), 492 | mtime.as_secs(), 493 | mtime.subsec_nanos(), 494 | )?; 495 | 496 | Ok(node) 497 | }) { 498 | Ok(node) => { 499 | reply.entry(&TTL, &node_attr(&node), 0); 500 | } 501 | Err(error) => { 502 | reply.error(error.errno); 503 | } 504 | } 505 | } 506 | 507 | fn readlink(&mut self, _req: &Request, node_id: u64, reply: ReplyData) { 508 | let node_ptr = TreePtr::::new(node_id as u32); 509 | let atime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 510 | let mut data = vec![0; 4096]; 511 | match self.fs.tx(|tx| { 512 | tx.read_node( 513 | node_ptr, 514 | 0, 515 | &mut data, 516 | atime.as_secs(), 517 | atime.subsec_nanos(), 518 | ) 519 | }) { 520 | Ok(count) => { 521 | reply.data(&data[..count]); 522 | } 523 | Err(err) => { 524 | reply.error(err.errno); 525 | } 526 | } 527 | } 528 | 529 | fn rename( 530 | &mut self, 531 | _req: &Request, 532 | orig_parent: u64, 533 | orig_name: &OsStr, 534 | new_parent: u64, 535 | new_name: &OsStr, 536 | _flags: u32, 537 | reply: ReplyEmpty, 538 | ) { 539 | let orig_parent_ptr = TreePtr::::new(orig_parent as u32); 540 | let orig_name = orig_name.to_str().expect("name is not utf-8"); 541 | let new_parent_ptr = TreePtr::::new(new_parent as u32); 542 | let new_name = new_name.to_str().expect("name is not utf-8"); 543 | 544 | // TODO: improve performance 545 | match self 546 | .fs 547 | .tx(|tx| tx.rename_node(orig_parent_ptr, orig_name, new_parent_ptr, new_name)) 548 | { 549 | Ok(()) => reply.ok(), 550 | Err(err) => reply.error(err.errno), 551 | } 552 | } 553 | } 554 | -------------------------------------------------------------------------------- /src/mount/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(not(target_os = "redox"), not(fuzzing)))] 2 | mod fuse; 3 | #[cfg(all(not(target_os = "redox"), fuzzing))] 4 | pub mod fuse; 
5 | 6 | #[cfg(not(target_os = "redox"))] 7 | pub use self::fuse::mount; 8 | 9 | #[cfg(target_os = "redox")] 10 | mod redox; 11 | 12 | #[cfg(target_os = "redox")] 13 | pub use self::redox::mount; 14 | -------------------------------------------------------------------------------- /src/mount/redox/mod.rs: -------------------------------------------------------------------------------- 1 | use redox_scheme::{scheme::SchemeSync, RequestKind, Response, SignalBehavior, Socket}; 2 | use std::io; 3 | use std::path::Path; 4 | use std::sync::atomic::Ordering; 5 | 6 | use crate::{Disk, FileSystem, Transaction, IS_UMT}; 7 | 8 | use self::scheme::FileScheme; 9 | 10 | pub mod resource; 11 | pub mod scheme; 12 | 13 | //FIXME: mut callback is not mut 14 | #[allow(unused_mut)] 15 | 16 | pub fn mount(filesystem: FileSystem, mountpoint: P, mut callback: F) -> io::Result 17 | where 18 | D: Disk, 19 | P: AsRef, 20 | F: FnOnce(&Path) -> T, 21 | { 22 | let mountpoint = mountpoint.as_ref(); 23 | let socket = Socket::create(&format!("{}", mountpoint.display()))?; 24 | 25 | let mounted_path = format!("/scheme/{}", mountpoint.display()); 26 | let res = callback(Path::new(&mounted_path)); 27 | 28 | let mut scheme = FileScheme::new(format!("{}", mountpoint.display()), filesystem, &socket); 29 | while IS_UMT.load(Ordering::SeqCst) == 0 { 30 | let req = match socket.next_request(SignalBehavior::Restart)? { 31 | None => break, 32 | Some(req) => { 33 | match req.kind() { 34 | RequestKind::Call(r) => r, 35 | RequestKind::SendFd(sendfd_request) => { 36 | let result = scheme.on_sendfd(&sendfd_request); 37 | let response = Response::new(result, sendfd_request); 38 | 39 | if !socket.write_response(response, SignalBehavior::Restart)? { 40 | break; 41 | } 42 | continue; 43 | } 44 | _ => { 45 | // TODO: Redoxfs does not yet support asynchronous file IO. It might still make 46 | // sense to implement cancellation for huge buffers, e.g. 
dd bs=1G 47 | continue; 48 | } 49 | } 50 | } 51 | }; 52 | let response = req.handle_sync(&mut scheme); 53 | 54 | if !socket.write_response(response, SignalBehavior::Restart)? { 55 | break; 56 | } 57 | } 58 | 59 | // Squash allocations and sync on unmount 60 | let _ = Transaction::new(&mut scheme.fs).commit(true); 61 | 62 | Ok(res) 63 | } 64 | -------------------------------------------------------------------------------- /src/mount/redox/resource.rs: -------------------------------------------------------------------------------- 1 | use std::slice; 2 | use std::time::{SystemTime, UNIX_EPOCH}; 3 | 4 | use alloc::collections::BTreeMap; 5 | use libredox::call::MmapArgs; 6 | use range_tree::RangeTree; 7 | 8 | use syscall::data::{Stat, TimeSpec}; 9 | use syscall::dirent::{DirEntry, DirentBuf, DirentKind}; 10 | use syscall::error::{Error, Result, EBADF, EINVAL, EISDIR, ENOTDIR, EPERM}; 11 | use syscall::flag::{ 12 | MapFlags, F_GETFL, F_SETFL, MODE_PERM, O_ACCMODE, O_APPEND, O_RDONLY, O_RDWR, O_WRONLY, 13 | PROT_READ, PROT_WRITE, 14 | }; 15 | use syscall::{EBADFD, PAGE_SIZE}; 16 | 17 | use crate::{Disk, Node, Transaction, TreePtr, BLOCK_SIZE}; 18 | 19 | pub type Fmaps = BTreeMap; 20 | 21 | pub trait Resource { 22 | fn parent_ptr_opt(&self) -> Option>; 23 | 24 | fn node_ptr(&self) -> TreePtr; 25 | 26 | fn uid(&self) -> u32; 27 | 28 | fn set_path(&mut self, path: &str); 29 | 30 | fn read(&mut self, buf: &mut [u8], offset: u64, tx: &mut Transaction) -> Result; 31 | 32 | fn write(&mut self, buf: &[u8], offset: u64, tx: &mut Transaction) -> Result; 33 | 34 | fn fsize(&mut self, tx: &mut Transaction) -> Result; 35 | 36 | fn fmap( 37 | &mut self, 38 | fmaps: &mut Fmaps, 39 | flags: MapFlags, 40 | size: usize, 41 | offset: u64, 42 | tx: &mut Transaction, 43 | ) -> Result; 44 | 45 | fn funmap( 46 | &mut self, 47 | fmaps: &mut Fmaps, 48 | offset: u64, 49 | size: usize, 50 | tx: &mut Transaction, 51 | ) -> Result<()>; 52 | 53 | fn fchmod(&mut self, mode: u16, tx: &mut 
Transaction) -> Result<()> { 54 | let mut node = tx.read_tree(self.node_ptr())?; 55 | 56 | if node.data().uid() == self.uid() || self.uid() == 0 { 57 | let old_mode = node.data().mode(); 58 | let new_mode = (old_mode & !MODE_PERM) | (mode & MODE_PERM); 59 | if old_mode != new_mode { 60 | node.data_mut().set_mode(new_mode); 61 | tx.sync_tree(node)?; 62 | } 63 | 64 | Ok(()) 65 | } else { 66 | Err(Error::new(EPERM)) 67 | } 68 | } 69 | 70 | fn fchown(&mut self, uid: u32, gid: u32, tx: &mut Transaction) -> Result<()> { 71 | let mut node = tx.read_tree(self.node_ptr())?; 72 | 73 | let old_uid = node.data().uid(); 74 | if old_uid == self.uid() || self.uid() == 0 { 75 | let mut node_changed = false; 76 | 77 | if uid as i32 != -1 { 78 | if uid != old_uid { 79 | node.data_mut().set_uid(uid); 80 | node_changed = true; 81 | } 82 | } 83 | 84 | if gid as i32 != -1 { 85 | let old_gid = node.data().gid(); 86 | if gid != old_gid { 87 | node.data_mut().set_gid(gid); 88 | node_changed = true; 89 | } 90 | } 91 | 92 | if node_changed { 93 | tx.sync_tree(node)?; 94 | } 95 | 96 | Ok(()) 97 | } else { 98 | Err(Error::new(EPERM)) 99 | } 100 | } 101 | 102 | fn fcntl(&mut self, cmd: usize, arg: usize) -> Result; 103 | 104 | fn path(&self) -> &str; 105 | 106 | fn stat(&self, stat: &mut Stat, tx: &mut Transaction) -> Result<()> { 107 | let node = tx.read_tree(self.node_ptr())?; 108 | 109 | let ctime = node.data().ctime(); 110 | let mtime = node.data().mtime(); 111 | let atime = node.data().atime(); 112 | *stat = Stat { 113 | st_dev: 0, // TODO 114 | st_ino: node.id() as u64, 115 | st_mode: node.data().mode(), 116 | st_nlink: node.data().links(), 117 | st_uid: node.data().uid(), 118 | st_gid: node.data().gid(), 119 | st_size: node.data().size(), 120 | st_blksize: 512, 121 | // Blocks is in 512 byte blocks, not in our block size 122 | st_blocks: node.data().blocks() * (BLOCK_SIZE / 512), 123 | st_mtime: mtime.0, 124 | st_mtime_nsec: mtime.1, 125 | st_atime: atime.0, 126 | st_atime_nsec: atime.1, 
127 | st_ctime: ctime.0, 128 | st_ctime_nsec: ctime.1, 129 | }; 130 | 131 | Ok(()) 132 | } 133 | 134 | fn sync(&mut self, fmaps: &mut Fmaps, tx: &mut Transaction) -> Result<()>; 135 | 136 | fn truncate(&mut self, len: u64, tx: &mut Transaction) -> Result<()>; 137 | 138 | fn utimens(&mut self, times: &[TimeSpec], tx: &mut Transaction) -> Result<()>; 139 | 140 | fn getdents<'buf>( 141 | &mut self, 142 | buf: DirentBuf<&'buf mut [u8]>, 143 | opaque_offset: u64, 144 | tx: &mut Transaction, 145 | ) -> Result>; 146 | } 147 | 148 | pub struct Entry { 149 | pub node_ptr: TreePtr, 150 | pub name: String, 151 | } 152 | 153 | pub struct DirResource { 154 | path: String, 155 | parent_ptr_opt: Option>, 156 | node_ptr: TreePtr, 157 | data: Option>, 158 | uid: u32, 159 | } 160 | 161 | impl DirResource { 162 | pub fn new( 163 | path: String, 164 | parent_ptr_opt: Option>, 165 | node_ptr: TreePtr, 166 | data: Option>, 167 | uid: u32, 168 | ) -> DirResource { 169 | DirResource { 170 | path, 171 | parent_ptr_opt, 172 | node_ptr, 173 | data, 174 | uid, 175 | } 176 | } 177 | } 178 | 179 | impl Resource for DirResource { 180 | fn parent_ptr_opt(&self) -> Option> { 181 | self.parent_ptr_opt 182 | } 183 | 184 | fn node_ptr(&self) -> TreePtr { 185 | self.node_ptr 186 | } 187 | 188 | fn uid(&self) -> u32 { 189 | self.uid 190 | } 191 | 192 | fn set_path(&mut self, path: &str) { 193 | self.path = path.to_string(); 194 | } 195 | 196 | fn read(&mut self, _buf: &mut [u8], _offset: u64, _tx: &mut Transaction) -> Result { 197 | Err(Error::new(EISDIR)) 198 | } 199 | 200 | fn write(&mut self, _buf: &[u8], _offset: u64, _tx: &mut Transaction) -> Result { 201 | Err(Error::new(EBADF)) 202 | } 203 | 204 | fn fsize(&mut self, _tx: &mut Transaction) -> Result { 205 | Ok(self.data.as_ref().ok_or(Error::new(EBADF))?.len() as u64) 206 | } 207 | 208 | fn fmap( 209 | &mut self, 210 | _fmaps: &mut Fmaps, 211 | _flags: MapFlags, 212 | _size: usize, 213 | _offset: u64, 214 | _tx: &mut Transaction, 215 | ) -> 
Result { 216 | Err(Error::new(EBADF)) 217 | } 218 | fn funmap( 219 | &mut self, 220 | _fmaps: &mut Fmaps, 221 | _offset: u64, 222 | _size: usize, 223 | _tx: &mut Transaction, 224 | ) -> Result<()> { 225 | Err(Error::new(EBADF)) 226 | } 227 | 228 | fn fcntl(&mut self, _cmd: usize, _arg: usize) -> Result { 229 | Err(Error::new(EBADF)) 230 | } 231 | 232 | fn path(&self) -> &str { 233 | &self.path 234 | } 235 | 236 | fn sync(&mut self, _fmaps: &mut Fmaps, _tx: &mut Transaction) -> Result<()> { 237 | Err(Error::new(EBADF)) 238 | } 239 | 240 | fn truncate(&mut self, _len: u64, _tx: &mut Transaction) -> Result<()> { 241 | Err(Error::new(EBADF)) 242 | } 243 | 244 | fn utimens(&mut self, _times: &[TimeSpec], _tx: &mut Transaction) -> Result<()> { 245 | Err(Error::new(EBADF)) 246 | } 247 | 248 | fn getdents<'buf>( 249 | &mut self, 250 | mut buf: DirentBuf<&'buf mut [u8]>, 251 | opaque_offset: u64, 252 | tx: &mut Transaction, 253 | ) -> Result> { 254 | match &self.data { 255 | Some(data) => { 256 | let opaque_offset = opaque_offset as usize; 257 | for (idx, entry) in data.iter().enumerate().skip(opaque_offset) { 258 | let child = tx.read_tree(entry.node_ptr)?; 259 | let result = buf.entry(DirEntry { 260 | inode: child.id() as u64, 261 | next_opaque_id: idx as u64 + 1, 262 | name: &entry.name, 263 | kind: match child.data().mode() & Node::MODE_TYPE { 264 | Node::MODE_DIR => DirentKind::Directory, 265 | Node::MODE_FILE => DirentKind::Regular, 266 | Node::MODE_SYMLINK => DirentKind::Symlink, 267 | //TODO: more types? 
268 | _ => DirentKind::Unspecified, 269 | }, 270 | }); 271 | if let Err(err) = result { 272 | if err.errno == EINVAL && idx > opaque_offset { 273 | // POSIX allows partial result of getdents 274 | break; 275 | } else { 276 | return Err(err); 277 | } 278 | } 279 | } 280 | Ok(buf) 281 | } 282 | None => Err(Error::new(EBADF)), 283 | } 284 | } 285 | } 286 | 287 | #[derive(Debug)] 288 | pub struct Fmap { 289 | rc: usize, 290 | flags: MapFlags, 291 | last_page_tail: u16, 292 | } 293 | 294 | impl Fmap { 295 | pub unsafe fn new( 296 | node_ptr: TreePtr, 297 | flags: MapFlags, 298 | unaligned_size: usize, 299 | offset: u64, 300 | base: *mut u8, 301 | tx: &mut Transaction, 302 | ) -> Result { 303 | // Memory provided to fmap must be page aligned and sized 304 | let aligned_size = unaligned_size.next_multiple_of(syscall::PAGE_SIZE); 305 | 306 | let address = base.add(offset as usize); 307 | //println!("ADDR {:p} {:p}", base, address); 308 | 309 | // Read buffer from disk 310 | let atime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 311 | 312 | let buf = slice::from_raw_parts_mut(address, unaligned_size); 313 | 314 | let count = match tx.read_node(node_ptr, offset, buf, atime.as_secs(), atime.subsec_nanos()) 315 | { 316 | Ok(ok) => ok, 317 | Err(err) => { 318 | let _ = libredox::call::munmap(address.cast(), aligned_size); 319 | return Err(err); 320 | } 321 | }; 322 | 323 | // Make sure remaining data is zeroed 324 | buf[count..].fill(0_u8); 325 | 326 | Ok(Self { 327 | rc: 1, 328 | flags, 329 | last_page_tail: (unaligned_size % PAGE_SIZE) as u16, 330 | }) 331 | } 332 | 333 | pub unsafe fn sync( 334 | &mut self, 335 | node_ptr: TreePtr, 336 | base: *mut u8, 337 | offset: u64, 338 | size: usize, 339 | tx: &mut Transaction, 340 | ) -> Result<()> { 341 | if self.flags & PROT_WRITE == PROT_WRITE { 342 | let mtime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 343 | tx.write_node( 344 | node_ptr, 345 | offset, 346 | unsafe { 
core::slice::from_raw_parts(base.add(offset as usize), size) }, 347 | mtime.as_secs(), 348 | mtime.subsec_nanos(), 349 | )?; 350 | } 351 | Ok(()) 352 | } 353 | } 354 | 355 | pub struct FileResource { 356 | path: String, 357 | parent_ptr_opt: Option>, 358 | node_ptr: TreePtr, 359 | flags: usize, 360 | uid: u32, 361 | } 362 | #[derive(Debug)] 363 | pub struct FileMmapInfo { 364 | base: *mut u8, 365 | size: usize, 366 | ranges: RangeTree, 367 | pub open_fds: usize, 368 | } 369 | impl Default for FileMmapInfo { 370 | fn default() -> Self { 371 | Self { 372 | base: core::ptr::null_mut(), 373 | size: 0, 374 | ranges: RangeTree::new(), 375 | open_fds: 0, 376 | } 377 | } 378 | } 379 | 380 | impl FileResource { 381 | pub fn new( 382 | path: String, 383 | parent_ptr_opt: Option>, 384 | node_ptr: TreePtr, 385 | flags: usize, 386 | uid: u32, 387 | ) -> FileResource { 388 | FileResource { 389 | path, 390 | parent_ptr_opt, 391 | node_ptr, 392 | flags, 393 | uid, 394 | } 395 | } 396 | } 397 | 398 | impl Resource for FileResource { 399 | fn parent_ptr_opt(&self) -> Option> { 400 | self.parent_ptr_opt 401 | } 402 | 403 | fn node_ptr(&self) -> TreePtr { 404 | self.node_ptr 405 | } 406 | 407 | fn uid(&self) -> u32 { 408 | self.uid 409 | } 410 | 411 | fn set_path(&mut self, path: &str) { 412 | self.path = path.to_string(); 413 | } 414 | 415 | fn read(&mut self, buf: &mut [u8], offset: u64, tx: &mut Transaction) -> Result { 416 | if self.flags & O_ACCMODE != O_RDWR && self.flags & O_ACCMODE != O_RDONLY { 417 | return Err(Error::new(EBADF)); 418 | } 419 | let atime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 420 | tx.read_node( 421 | self.node_ptr, 422 | offset, 423 | buf, 424 | atime.as_secs(), 425 | atime.subsec_nanos(), 426 | ) 427 | } 428 | 429 | fn write(&mut self, buf: &[u8], offset: u64, tx: &mut Transaction) -> Result { 430 | if self.flags & O_ACCMODE != O_RDWR && self.flags & O_ACCMODE != O_WRONLY { 431 | return Err(Error::new(EBADF)); 432 | } 433 | let 
effective_offset = if self.flags & O_APPEND == O_APPEND { 434 | let node = tx.read_tree(self.node_ptr)?; 435 | node.data().size() 436 | } else { 437 | offset 438 | }; 439 | let mtime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 440 | tx.write_node( 441 | self.node_ptr, 442 | effective_offset, 443 | buf, 444 | mtime.as_secs(), 445 | mtime.subsec_nanos(), 446 | ) 447 | } 448 | 449 | fn fsize(&mut self, tx: &mut Transaction) -> Result { 450 | let node = tx.read_tree(self.node_ptr)?; 451 | Ok(node.data().size()) 452 | } 453 | 454 | fn fmap( 455 | &mut self, 456 | fmaps: &mut Fmaps, 457 | flags: MapFlags, 458 | unaligned_size: usize, 459 | offset: u64, 460 | tx: &mut Transaction, 461 | ) -> Result { 462 | //dbg!(&self.fmaps); 463 | let accmode = self.flags & O_ACCMODE; 464 | if flags.contains(PROT_READ) && !(accmode == O_RDWR || accmode == O_RDONLY) { 465 | return Err(Error::new(EBADF)); 466 | } 467 | if flags.contains(PROT_WRITE) && !(accmode == O_RDWR || accmode == O_WRONLY) { 468 | return Err(Error::new(EBADF)); 469 | } 470 | 471 | let aligned_size = unaligned_size.next_multiple_of(PAGE_SIZE); 472 | 473 | // TODO: PROT_EXEC? It is however unenforcable without restricting anonymous mmap, since a 474 | // program can always map anonymous RW-, read from a file, then remap as R-E. But it might 475 | // be usable as a hint, prohibiting direct executable mmaps at least. 476 | 477 | // TODO: Pass entry directory to Resource trait functions, since the node_ptr can be 478 | // obtained by the caller. 479 | let fmap_info = fmaps 480 | .get_mut(&self.node_ptr.id()) 481 | .ok_or(Error::new(EBADFD))?; 482 | 483 | let new_size = (offset as usize + aligned_size).next_multiple_of(PAGE_SIZE); 484 | if new_size > fmap_info.size { 485 | fmap_info.base = if fmap_info.base.is_null() { 486 | unsafe { 487 | libredox::call::mmap(MmapArgs { 488 | length: new_size, 489 | // PRIVATE/SHARED doesn't matter once the pages are passed in the fmap 490 | // handler. 
491 | prot: libredox::flag::PROT_READ | libredox::flag::PROT_WRITE, 492 | flags: libredox::flag::MAP_PRIVATE, 493 | 494 | offset: 0, 495 | fd: !0, 496 | addr: core::ptr::null_mut(), 497 | })? as *mut u8 498 | } 499 | } else { 500 | unsafe { 501 | syscall::syscall5( 502 | syscall::SYS_MREMAP, 503 | fmap_info.base as usize, 504 | fmap_info.size, 505 | 0, 506 | new_size, 507 | syscall::MremapFlags::empty().bits() | (PROT_READ | PROT_WRITE).bits(), 508 | )? as *mut u8 509 | } 510 | }; 511 | fmap_info.size = new_size; 512 | } 513 | 514 | let affected_fmaps = fmap_info 515 | .ranges 516 | .remove_and_unused(offset..offset + aligned_size as u64); 517 | 518 | for (range, v_opt) in affected_fmaps { 519 | //dbg!(&range); 520 | if let Some(mut fmap) = v_opt { 521 | fmap.rc += 1; 522 | fmap.flags |= flags; 523 | //FIXME: Use result? 524 | let _ = fmap_info 525 | .ranges 526 | .insert(range.start, range.end - range.start, fmap); 527 | } else { 528 | let map = unsafe { 529 | Fmap::new( 530 | self.node_ptr, 531 | flags, 532 | unaligned_size, 533 | offset, 534 | fmap_info.base, 535 | tx, 536 | )? 537 | }; 538 | //FIXME: Use result? 
539 | let _ = fmap_info.ranges.insert(offset, aligned_size as u64, map); 540 | } 541 | } 542 | //dbg!(&self.fmaps); 543 | 544 | Ok(fmap_info.base as usize + offset as usize) 545 | } 546 | 547 | fn funmap( 548 | &mut self, 549 | fmaps: &mut Fmaps, 550 | offset: u64, 551 | size: usize, 552 | tx: &mut Transaction, 553 | ) -> Result<()> { 554 | let fmap_info = fmaps 555 | .get_mut(&self.node_ptr.id()) 556 | .ok_or(Error::new(EBADFD))?; 557 | 558 | //dbg!(&self.fmaps); 559 | //dbg!(self.fmaps.conflicts(offset..offset + size as u64).collect::>()); 560 | #[allow(unused_mut)] 561 | let mut affected_fmaps = fmap_info.ranges.remove(offset..offset + size as u64); 562 | 563 | for (range, mut fmap) in affected_fmaps { 564 | fmap.rc = fmap.rc.checked_sub(1).unwrap(); 565 | 566 | //log::info!("SYNCING {}..{}", range.start, range.end); 567 | unsafe { 568 | fmap.sync( 569 | self.node_ptr, 570 | fmap_info.base, 571 | range.start, 572 | (range.end - range.start) as usize, 573 | tx, 574 | )?; 575 | } 576 | 577 | if fmap.rc > 0 { 578 | //FIXME: Use result? 
579 | let _ = fmap_info 580 | .ranges 581 | .insert(range.start, range.end - range.start, fmap); 582 | } 583 | } 584 | //dbg!(&self.fmaps); 585 | 586 | Ok(()) 587 | } 588 | 589 | fn fcntl(&mut self, cmd: usize, arg: usize) -> Result { 590 | match cmd { 591 | F_GETFL => Ok(self.flags), 592 | F_SETFL => { 593 | self.flags = (self.flags & O_ACCMODE) | (arg & !O_ACCMODE); 594 | Ok(0) 595 | } 596 | _ => Err(Error::new(EINVAL)), 597 | } 598 | } 599 | 600 | fn path(&self) -> &str { 601 | &self.path 602 | } 603 | 604 | fn sync(&mut self, fmaps: &mut Fmaps, tx: &mut Transaction) -> Result<()> { 605 | if let Some(fmap_info) = fmaps.get_mut(&self.node_ptr.id()) { 606 | for (range, fmap) in fmap_info.ranges.iter_mut() { 607 | unsafe { 608 | fmap.sync( 609 | self.node_ptr, 610 | fmap_info.base, 611 | range.start, 612 | (range.end - range.start) as usize, 613 | tx, 614 | )?; 615 | } 616 | } 617 | } 618 | 619 | Ok(()) 620 | } 621 | 622 | fn truncate(&mut self, len: u64, tx: &mut Transaction) -> Result<()> { 623 | if self.flags & O_ACCMODE == O_RDWR || self.flags & O_ACCMODE == O_WRONLY { 624 | let mtime = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); 625 | tx.truncate_node(self.node_ptr, len, mtime.as_secs(), mtime.subsec_nanos())?; 626 | Ok(()) 627 | } else { 628 | Err(Error::new(EBADF)) 629 | } 630 | } 631 | 632 | fn utimens(&mut self, times: &[TimeSpec], tx: &mut Transaction) -> Result<()> { 633 | let mut node = tx.read_tree(self.node_ptr)?; 634 | 635 | if node.data().uid() == self.uid || self.uid == 0 { 636 | if let &[atime, mtime] = times { 637 | let mut node_changed = false; 638 | 639 | let old_mtime = node.data().mtime(); 640 | let new_mtime = (mtime.tv_sec as u64, mtime.tv_nsec as u32); 641 | if old_mtime != new_mtime { 642 | node.data_mut().set_mtime(new_mtime.0, new_mtime.1); 643 | node_changed = true; 644 | } 645 | 646 | let old_atime = node.data().atime(); 647 | let new_atime = (atime.tv_sec as u64, atime.tv_nsec as u32); 648 | if old_atime != new_atime { 649 
| node.data_mut().set_atime(new_atime.0, new_atime.1); 650 | node_changed = true; 651 | } 652 | 653 | if node_changed { 654 | tx.sync_tree(node)?; 655 | } 656 | } 657 | Ok(()) 658 | } else { 659 | Err(Error::new(EPERM)) 660 | } 661 | } 662 | 663 | fn getdents<'buf>( 664 | &mut self, 665 | _buf: DirentBuf<&'buf mut [u8]>, 666 | _opaque_offset: u64, 667 | _tx: &mut Transaction, 668 | ) -> Result> { 669 | Err(Error::new(ENOTDIR)) 670 | } 671 | } 672 | 673 | impl Drop for FileResource { 674 | fn drop(&mut self) { 675 | /* 676 | if !self.fmaps.is_empty() { 677 | eprintln!( 678 | "redoxfs: file {} still has {} fmaps!", 679 | self.path, 680 | self.fmaps.len() 681 | ); 682 | } 683 | */ 684 | } 685 | } 686 | 687 | impl range_tree::Value for Fmap { 688 | type K = u64; 689 | 690 | fn try_merge_forward(self, other: &Self) -> core::result::Result { 691 | if self.rc == other.rc && self.flags == other.flags && self.last_page_tail == 0 { 692 | Ok(self) 693 | } else { 694 | Err(self) 695 | } 696 | } 697 | fn try_merge_backwards(self, other: &Self) -> core::result::Result { 698 | if self.rc == other.rc && self.flags == other.flags && other.last_page_tail == 0 { 699 | Ok(self) 700 | } else { 701 | Err(self) 702 | } 703 | } 704 | #[allow(unused_variables)] 705 | fn split( 706 | self, 707 | prev_range: Option>, 708 | range: core::ops::Range, 709 | next_range: Option>, 710 | ) -> (Option, Self, Option) { 711 | ( 712 | prev_range.map(|_range| Fmap { 713 | rc: self.rc, 714 | flags: self.flags, 715 | last_page_tail: 0, 716 | }), 717 | Fmap { 718 | rc: self.rc, 719 | flags: self.flags, 720 | last_page_tail: if next_range.is_none() { 721 | self.last_page_tail 722 | } else { 723 | 0 724 | }, 725 | }, 726 | next_range.map(|_range| Fmap { 727 | rc: self.rc, 728 | flags: self.flags, 729 | last_page_tail: self.last_page_tail, 730 | }), 731 | ) 732 | } 733 | } 734 | -------------------------------------------------------------------------------- /src/node.rs: 
-------------------------------------------------------------------------------- 1 | use core::{fmt, mem, ops, slice}; 2 | use endian_num::Le; 3 | 4 | use crate::{BlockLevel, BlockList, BlockPtr, BlockTrait, RecordRaw, BLOCK_SIZE, RECORD_LEVEL}; 5 | 6 | bitflags::bitflags! { 7 | pub struct NodeFlags: u32 { 8 | const INLINE_DATA = 0x1; 9 | } 10 | } 11 | 12 | /// An index into a [`Node`]'s block table. 13 | pub enum NodeLevel { 14 | L0(usize), 15 | L1(usize, usize), 16 | L2(usize, usize, usize), 17 | L3(usize, usize, usize, usize), 18 | L4(usize, usize, usize, usize, usize), 19 | } 20 | 21 | impl NodeLevel { 22 | // Warning: this uses constant record offsets, make sure to sync with Node 23 | 24 | /// Return the [`NodeLevel`] of the record with the given index. 25 | /// - the first 128 are level 0, 26 | /// - the next 64*256 are level 1, 27 | /// - ...and so on. 28 | pub fn new(mut record_offset: u64) -> Option { 29 | // 1 << 8 = 256, this is the number of entries in a BlockList 30 | const SHIFT: u64 = 8; 31 | const NUM: u64 = 1 << SHIFT; 32 | const MASK: u64 = NUM - 1; 33 | 34 | const L0: u64 = 128; 35 | if record_offset < L0 { 36 | return Some(Self::L0((record_offset & MASK) as usize)); 37 | } else { 38 | record_offset -= L0; 39 | } 40 | 41 | const L1: u64 = 64 * NUM; 42 | if record_offset < L1 { 43 | return Some(Self::L1( 44 | ((record_offset >> SHIFT) & MASK) as usize, 45 | (record_offset & MASK) as usize, 46 | )); 47 | } else { 48 | record_offset -= L1; 49 | } 50 | 51 | const L2: u64 = 32 * NUM * NUM; 52 | if record_offset < L2 { 53 | return Some(Self::L2( 54 | ((record_offset >> (2 * SHIFT)) & MASK) as usize, 55 | ((record_offset >> SHIFT) & MASK) as usize, 56 | (record_offset & MASK) as usize, 57 | )); 58 | } else { 59 | record_offset -= L2; 60 | } 61 | 62 | const L3: u64 = 16 * NUM * NUM * NUM; 63 | if record_offset < L3 { 64 | return Some(Self::L3( 65 | ((record_offset >> (3 * SHIFT)) & MASK) as usize, 66 | ((record_offset >> (2 * SHIFT)) & MASK) as usize, 67 
| ((record_offset >> SHIFT) & MASK) as usize, 68 | (record_offset & MASK) as usize, 69 | )); 70 | } else { 71 | record_offset -= L3; 72 | } 73 | 74 | const L4: u64 = 12 * NUM * NUM * NUM * NUM; 75 | if record_offset < L4 { 76 | Some(Self::L4( 77 | ((record_offset >> (4 * SHIFT)) & MASK) as usize, 78 | ((record_offset >> (3 * SHIFT)) & MASK) as usize, 79 | ((record_offset >> (2 * SHIFT)) & MASK) as usize, 80 | ((record_offset >> SHIFT) & MASK) as usize, 81 | (record_offset & MASK) as usize, 82 | )) 83 | } else { 84 | None 85 | } 86 | } 87 | } 88 | 89 | type BlockListL1 = BlockList; 90 | type BlockListL2 = BlockList; 91 | type BlockListL3 = BlockList; 92 | type BlockListL4 = BlockList; 93 | 94 | #[repr(C, packed)] 95 | pub struct NodeLevelData { 96 | /// The first 128 blocks of this file. 97 | /// 98 | /// Total size: 128 * RECORD_SIZE (16 MiB, 128 KiB each) 99 | pub level0: [BlockPtr; 128], 100 | 101 | /// The next 64 * 256 blocks of this file, 102 | /// stored behind 64 level one tables. 103 | /// 104 | /// Total size: 64 * 256 * RECORD_SIZE (2 GiB, 32 MiB each) 105 | pub level1: [BlockPtr; 64], 106 | 107 | /// The next 32 * 256 * 256 blocks of this file, 108 | /// stored behind 32 level two tables. 109 | /// Each level two table points to 256 level one tables. 110 | /// 111 | /// Total size: 32 * 256 * 256 * RECORD_SIZE (256 GiB, 8 GiB each) 112 | pub level2: [BlockPtr; 32], 113 | 114 | /// The next 16 * 256 * 256 * 256 blocks of this file, 115 | /// stored behind 16 level three tables. 116 | /// 117 | /// Total size: 16 * 256 * 256 * 256 * RECORD_SIZE (32 TiB, 2 TiB each) 118 | pub level3: [BlockPtr; 16], 119 | 120 | /// The next 8 * 256 * 256 * 256 * 256 blocks of this file, 121 | /// stored behind 8 level four tables. 
122 | /// 123 | /// Total size: 8 * 256 * 256 * 256 * 256 * RECORD_SIZE (4 PiB, 512 TiB each) 124 | pub level4: [BlockPtr; 8], 125 | } 126 | 127 | impl Default for NodeLevelData { 128 | fn default() -> Self { 129 | Self { 130 | level0: [BlockPtr::default(); 128], 131 | level1: [BlockPtr::default(); 64], 132 | level2: [BlockPtr::default(); 32], 133 | level3: [BlockPtr::default(); 16], 134 | level4: [BlockPtr::default(); 8], 135 | } 136 | } 137 | } 138 | 139 | /// A file/folder node 140 | #[repr(C, packed)] 141 | pub struct Node { 142 | /// This node's type & permissions. 143 | /// - four most significant bits are the node's type 144 | /// - next four bits are permissions for the node's user 145 | /// - next four bits are permissions for the node's group 146 | /// - four least significant bits are permissions for everyone else 147 | pub mode: Le, 148 | 149 | /// The uid that owns this file 150 | pub uid: Le, 151 | 152 | /// The gid that owns this file 153 | pub gid: Le, 154 | 155 | /// The number of hard links to this file 156 | pub links: Le, 157 | 158 | /// The length of this file, in bytes 159 | pub size: Le, 160 | /// The disk usage of this file, in blocks 161 | pub blocks: Le, 162 | 163 | /// Creation time 164 | pub ctime: Le, 165 | pub ctime_nsec: Le, 166 | 167 | /// Modification time 168 | pub mtime: Le, 169 | pub mtime_nsec: Le, 170 | 171 | /// Access time 172 | pub atime: Le, 173 | pub atime_nsec: Le, 174 | 175 | /// Record level 176 | pub record_level: Le, 177 | 178 | /// Flags 179 | pub flags: Le, 180 | 181 | /// Padding 182 | pub padding: [u8; BLOCK_SIZE as usize - 4042], 183 | 184 | /// Level data, should not be used directly so inline data can be supported 185 | pub(crate) level_data: NodeLevelData, 186 | } 187 | 188 | unsafe impl BlockTrait for Node { 189 | fn empty(level: BlockLevel) -> Option { 190 | if level.0 == 0 { 191 | Some(Self::default()) 192 | } else { 193 | None 194 | } 195 | } 196 | } 197 | 198 | impl Default for Node { 199 | fn default() 
-> Self { 200 | Self { 201 | mode: 0.into(), 202 | uid: 0.into(), 203 | gid: 0.into(), 204 | links: 0.into(), 205 | size: 0.into(), 206 | // This node counts as a block 207 | blocks: 1.into(), 208 | ctime: 0.into(), 209 | ctime_nsec: 0.into(), 210 | mtime: 0.into(), 211 | mtime_nsec: 0.into(), 212 | atime: 0.into(), 213 | atime_nsec: 0.into(), 214 | record_level: 0.into(), 215 | flags: 0.into(), 216 | padding: [0; BLOCK_SIZE as usize - 4042], 217 | level_data: NodeLevelData::default(), 218 | } 219 | } 220 | } 221 | 222 | impl Node { 223 | pub const MODE_TYPE: u16 = 0xF000; 224 | pub const MODE_FILE: u16 = 0x8000; 225 | pub const MODE_DIR: u16 = 0x4000; 226 | pub const MODE_SYMLINK: u16 = 0xA000; 227 | pub const MODE_SOCK: u16 = 0xC000; 228 | 229 | /// Mask for node permission bits 230 | pub const MODE_PERM: u16 = 0x0FFF; 231 | pub const MODE_EXEC: u16 = 0o1; 232 | pub const MODE_WRITE: u16 = 0o2; 233 | pub const MODE_READ: u16 = 0o4; 234 | 235 | /// Create a new, empty node with the given metadata 236 | pub fn new(mode: u16, uid: u32, gid: u32, ctime: u64, ctime_nsec: u32) -> Self { 237 | Self { 238 | mode: mode.into(), 239 | uid: uid.into(), 240 | gid: gid.into(), 241 | links: 0.into(), 242 | ctime: ctime.into(), 243 | ctime_nsec: ctime_nsec.into(), 244 | mtime: ctime.into(), 245 | mtime_nsec: ctime_nsec.into(), 246 | atime: ctime.into(), 247 | atime_nsec: ctime_nsec.into(), 248 | record_level: if mode & Self::MODE_TYPE == Self::MODE_FILE { 249 | // Files take on record level 250 | RECORD_LEVEL as u32 251 | } else { 252 | // Folders do not 253 | 0 254 | } 255 | .into(), 256 | flags: if mode & Self::MODE_TYPE == Self::MODE_DIR { 257 | // Directories must not use inline data (until h-tree supports it) 258 | NodeFlags::empty() 259 | } else { 260 | NodeFlags::INLINE_DATA 261 | } 262 | .bits() 263 | .into(), 264 | ..Default::default() 265 | } 266 | } 267 | 268 | /// This node's type & permissions. 
269 | /// - four most significant bits are the node's type 270 | /// - next four bits are permissions for the node's user 271 | /// - next four bits are permissions for the node's group 272 | /// - four least significant bits are permissions for everyone else 273 | pub fn mode(&self) -> u16 { 274 | self.mode.to_ne() 275 | } 276 | 277 | /// The uid that owns this file 278 | pub fn uid(&self) -> u32 { 279 | self.uid.to_ne() 280 | } 281 | 282 | /// The gid that owns this file 283 | pub fn gid(&self) -> u32 { 284 | self.gid.to_ne() 285 | } 286 | 287 | /// The number of links to this file 288 | /// (directory entries, symlinks, etc) 289 | pub fn links(&self) -> u32 { 290 | self.links.to_ne() 291 | } 292 | 293 | /// The length of this file, in bytes. 294 | pub fn size(&self) -> u64 { 295 | self.size.to_ne() 296 | } 297 | 298 | /// The disk usage of this file, in blocks. 299 | pub fn blocks(&self) -> u64 { 300 | self.blocks.to_ne() 301 | } 302 | 303 | pub fn ctime(&self) -> (u64, u32) { 304 | (self.ctime.to_ne(), self.ctime_nsec.to_ne()) 305 | } 306 | 307 | pub fn mtime(&self) -> (u64, u32) { 308 | (self.mtime.to_ne(), self.mtime_nsec.to_ne()) 309 | } 310 | 311 | pub fn atime(&self) -> (u64, u32) { 312 | (self.atime.to_ne(), self.atime_nsec.to_ne()) 313 | } 314 | 315 | pub fn record_level(&self) -> BlockLevel { 316 | BlockLevel(self.record_level.to_ne() as usize) 317 | } 318 | 319 | pub fn flags(&self) -> NodeFlags { 320 | NodeFlags::from_bits_retain(self.flags.to_ne()) 321 | } 322 | 323 | pub fn set_mode(&mut self, mode: u16) { 324 | self.mode = mode.into(); 325 | } 326 | 327 | pub fn set_uid(&mut self, uid: u32) { 328 | self.uid = uid.into(); 329 | } 330 | 331 | pub fn set_gid(&mut self, gid: u32) { 332 | self.gid = gid.into(); 333 | } 334 | 335 | pub fn set_links(&mut self, links: u32) { 336 | self.links = links.into(); 337 | } 338 | 339 | pub fn set_size(&mut self, size: u64) { 340 | self.size = size.into(); 341 | } 342 | 343 | pub fn set_blocks(&mut self, blocks: 
u64) { 344 | self.blocks = blocks.into(); 345 | } 346 | 347 | pub fn set_mtime(&mut self, mtime: u64, mtime_nsec: u32) { 348 | self.mtime = mtime.into(); 349 | self.mtime_nsec = mtime_nsec.into(); 350 | } 351 | 352 | pub fn set_atime(&mut self, atime: u64, atime_nsec: u32) { 353 | self.atime = atime.into(); 354 | self.atime_nsec = atime_nsec.into(); 355 | } 356 | 357 | pub fn set_flags(&mut self, flags: NodeFlags) { 358 | self.flags = flags.bits().into(); 359 | } 360 | 361 | pub fn has_inline_data(&self) -> bool { 362 | self.flags().contains(NodeFlags::INLINE_DATA) 363 | } 364 | 365 | pub fn inline_data(&self) -> Option<&[u8]> { 366 | if self.has_inline_data() { 367 | Some(unsafe { 368 | slice::from_raw_parts( 369 | &self.level_data as *const NodeLevelData as *const u8, 370 | mem::size_of::(), 371 | ) 372 | }) 373 | } else { 374 | None 375 | } 376 | } 377 | 378 | pub fn inline_data_mut(&mut self) -> Option<&mut [u8]> { 379 | if self.has_inline_data() { 380 | Some(unsafe { 381 | slice::from_raw_parts_mut( 382 | &mut self.level_data as *mut NodeLevelData as *mut u8, 383 | mem::size_of::(), 384 | ) 385 | }) 386 | } else { 387 | None 388 | } 389 | } 390 | 391 | pub fn level_data(&self) -> Option<&NodeLevelData> { 392 | if !self.has_inline_data() { 393 | Some(&self.level_data) 394 | } else { 395 | None 396 | } 397 | } 398 | 399 | pub fn level_data_mut(&mut self) -> Option<&mut NodeLevelData> { 400 | if !self.has_inline_data() { 401 | Some(&mut self.level_data) 402 | } else { 403 | None 404 | } 405 | } 406 | 407 | pub fn is_dir(&self) -> bool { 408 | self.mode() & Self::MODE_TYPE == Self::MODE_DIR 409 | } 410 | 411 | pub fn is_file(&self) -> bool { 412 | self.mode() & Self::MODE_TYPE == Self::MODE_FILE 413 | } 414 | 415 | pub fn is_symlink(&self) -> bool { 416 | self.mode() & Self::MODE_TYPE == Self::MODE_SYMLINK 417 | } 418 | 419 | pub fn is_sock(&self) -> bool { 420 | self.mode() & Self::MODE_SOCK == Self::MODE_SOCK 421 | } 422 | 423 | /// Tests if UID is the owner of 
that file, only true when uid=0 or when the UID stored in metadata is equal to the UID you supply 424 | pub fn owner(&self, uid: u32) -> bool { 425 | uid == 0 || self.uid() == uid 426 | } 427 | 428 | /// Tests if the current user has enough permissions to view the file, op is the operation, 429 | /// like read and write, these modes are MODE_EXEC, MODE_READ, and MODE_WRITE 430 | pub fn permission(&self, uid: u32, gid: u32, op: u16) -> bool { 431 | let mut perm = self.mode() & 0o7; 432 | if self.uid() == uid { 433 | // If self.mode is 101100110, >> 6 would be 000000101 434 | // 0o7 is octal for 111, or, when expanded to 9 digits is 000000111 435 | perm |= (self.mode() >> 6) & 0o7; 436 | // Since we erased the GID and OTHER bits when >>6'ing, |= will keep those bits in place. 437 | } 438 | if self.gid() == gid || gid == 0 { 439 | perm |= (self.mode() >> 3) & 0o7; 440 | } 441 | if uid == 0 { 442 | //set the `other` bits to 111 443 | perm |= 0o7; 444 | } 445 | perm & op == op 446 | } 447 | } 448 | 449 | impl fmt::Debug for Node { 450 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 451 | let mode = self.mode; 452 | let uid = self.uid; 453 | let gid = self.gid; 454 | let links = self.links; 455 | let size = self.size; 456 | let blocks = self.blocks; 457 | let ctime = self.ctime; 458 | let ctime_nsec = self.ctime_nsec; 459 | let mtime = self.mtime; 460 | let mtime_nsec = self.mtime_nsec; 461 | let atime = self.atime; 462 | let atime_nsec = self.atime_nsec; 463 | f.debug_struct("Node") 464 | .field("mode", &mode) 465 | .field("uid", &uid) 466 | .field("gid", &gid) 467 | .field("links", &links) 468 | .field("size", &size) 469 | .field("blocks", &blocks) 470 | .field("ctime", &ctime) 471 | .field("ctime_nsec", &ctime_nsec) 472 | .field("mtime", &mtime) 473 | .field("mtime_nsec", &mtime_nsec) 474 | .field("atime", &atime) 475 | .field("atime_nsec", &atime_nsec) 476 | //TODO: level0/1/2/3 477 | .finish() 478 | } 479 | } 480 | 481 | impl ops::Deref for Node { 482 | 
type Target = [u8]; 483 | fn deref(&self) -> &[u8] { 484 | unsafe { 485 | slice::from_raw_parts(self as *const Node as *const u8, mem::size_of::()) 486 | as &[u8] 487 | } 488 | } 489 | } 490 | 491 | impl ops::DerefMut for Node { 492 | fn deref_mut(&mut self) -> &mut [u8] { 493 | unsafe { 494 | slice::from_raw_parts_mut(self as *mut Node as *mut u8, mem::size_of::()) 495 | as &mut [u8] 496 | } 497 | } 498 | } 499 | 500 | #[test] 501 | fn node_size_test() { 502 | assert_eq!(mem::size_of::(), crate::BLOCK_SIZE as usize); 503 | } 504 | 505 | #[test] 506 | fn node_inline_data_test() { 507 | let mut node = Node::default(); 508 | assert!(!node.has_inline_data()); 509 | assert!(node.inline_data().is_none()); 510 | assert!(node.inline_data_mut().is_none()); 511 | assert!(node.level_data().is_some()); 512 | assert!(node.level_data_mut().is_some()); 513 | 514 | node.set_flags(NodeFlags::INLINE_DATA); 515 | assert!(node.has_inline_data()); 516 | assert!(node.level_data().is_none()); 517 | assert!(node.level_data_mut().is_none()); 518 | 519 | let node_addr = &node as *const Node as usize; 520 | let meta_size = 128; 521 | { 522 | let inline_data = node.inline_data().unwrap(); 523 | let inline_data_addr = inline_data.as_ptr() as usize; 524 | assert_eq!(node_addr + meta_size, inline_data_addr); 525 | assert_eq!(inline_data.len(), (crate::BLOCK_SIZE as usize) - meta_size); 526 | } 527 | { 528 | let inline_data = node.inline_data_mut().unwrap(); 529 | let inline_data_addr = inline_data.as_ptr() as usize; 530 | assert_eq!(node_addr + meta_size, inline_data_addr); 531 | assert_eq!(inline_data.len(), (crate::BLOCK_SIZE as usize) - meta_size); 532 | } 533 | } 534 | 535 | #[cfg(kani)] 536 | #[kani::proof] 537 | fn check_node_level() { 538 | let offset = kani::any(); 539 | NodeLevel::new(offset); 540 | } 541 | 542 | #[cfg(kani)] 543 | #[kani::proof] 544 | fn check_node_perms() { 545 | let mode = 0o750; 546 | 547 | let uid = kani::any(); 548 | let gid = kani::any(); 549 | 550 | let ctime = 
kani::any(); 551 | let ctime_nsec = kani::any(); 552 | 553 | let node = Node::new(mode, uid, gid, ctime, ctime_nsec); 554 | 555 | let root_uid = 0; 556 | let root_gid = 0; 557 | 558 | let other_uid = kani::any(); 559 | kani::assume(other_uid != uid); 560 | kani::assume(other_uid != root_uid); 561 | let other_gid = kani::any(); 562 | kani::assume(other_gid != gid); 563 | kani::assume(other_gid != root_gid); 564 | 565 | assert!(node.owner(uid)); 566 | assert!(node.permission(uid, gid, 0o7)); 567 | assert!(node.permission(uid, gid, 0o5)); 568 | assert!(node.permission(uid, other_gid, 0o7)); 569 | assert!(node.permission(uid, other_gid, 0o5)); 570 | assert!(!node.permission(other_uid, gid, 0o7)); 571 | assert!(node.permission(other_uid, gid, 0o5)); 572 | 573 | assert!(node.owner(root_uid)); 574 | assert!(node.permission(root_uid, root_gid, 0o7)); 575 | assert!(node.permission(root_uid, root_gid, 0o5)); 576 | assert!(node.permission(root_uid, other_gid, 0o7)); 577 | assert!(node.permission(root_uid, other_gid, 0o5)); 578 | assert!(!node.permission(other_uid, root_gid, 0o7)); 579 | assert!(node.permission(other_uid, root_gid, 0o5)); 580 | 581 | assert!(!node.owner(other_uid)); 582 | assert!(!node.permission(other_uid, other_gid, 0o7)); 583 | assert!(!node.permission(other_uid, other_gid, 0o5)); 584 | } 585 | -------------------------------------------------------------------------------- /src/record.rs: -------------------------------------------------------------------------------- 1 | use alloc::{boxed::Box, vec}; 2 | use core::ops; 3 | 4 | use crate::{BlockLevel, BlockTrait, RECORD_LEVEL}; 5 | 6 | //TODO: this is a box to prevent stack overflows 7 | #[derive(Clone)] 8 | pub struct RecordRaw(pub(crate) Box<[u8]>); 9 | 10 | unsafe impl BlockTrait for RecordRaw { 11 | fn empty(level: BlockLevel) -> Option { 12 | if level.0 <= RECORD_LEVEL { 13 | Some(Self(vec![0; level.bytes() as usize].into_boxed_slice())) 14 | } else { 15 | None 16 | } 17 | } 18 | } 19 | 20 | impl 
ops::Deref for RecordRaw { 21 | type Target = [u8]; 22 | fn deref(&self) -> &[u8] { 23 | &self.0 24 | } 25 | } 26 | 27 | impl ops::DerefMut for RecordRaw { 28 | fn deref_mut(&mut self) -> &mut [u8] { 29 | &mut self.0 30 | } 31 | } 32 | 33 | #[test] 34 | fn record_raw_size_test() { 35 | for level_i in 0..RECORD_LEVEL { 36 | let level = BlockLevel(level_i); 37 | assert_eq!( 38 | RecordRaw::empty(level).unwrap().len(), 39 | level.bytes() as usize 40 | ); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/tree.rs: -------------------------------------------------------------------------------- 1 | use core::{marker::PhantomData, mem, ops, slice}; 2 | use endian_num::Le; 3 | 4 | use crate::{BlockLevel, BlockPtr, BlockRaw, BlockTrait}; 5 | 6 | // 1 << 8 = 256, this is the number of entries in a TreeList 7 | const TREE_LIST_SHIFT: u32 = 8; 8 | const TREE_LIST_ENTRIES: usize = (1 << TREE_LIST_SHIFT) - 2; 9 | 10 | /// A tree with 4 levels 11 | pub type Tree = TreeList>>>; 12 | 13 | /// A [`TreePtr`] and the contents of the block it references. 14 | #[derive(Clone, Copy, Debug, Default)] 15 | pub struct TreeData { 16 | /// The value of the [`TreePtr`] 17 | id: u32, 18 | 19 | // The data 20 | data: T, 21 | } 22 | 23 | impl TreeData { 24 | pub fn new(id: u32, data: T) -> Self { 25 | Self { id, data } 26 | } 27 | 28 | pub fn id(&self) -> u32 { 29 | self.id 30 | } 31 | 32 | pub fn data(&self) -> &T { 33 | &self.data 34 | } 35 | 36 | pub fn data_mut(&mut self) -> &mut T { 37 | &mut self.data 38 | } 39 | 40 | pub fn into_data(self) -> T { 41 | self.data 42 | } 43 | 44 | pub fn ptr(&self) -> TreePtr { 45 | TreePtr { 46 | id: self.id.into(), 47 | phantom: PhantomData, 48 | } 49 | } 50 | } 51 | 52 | /// A list of pointers to blocks of type `T`. 53 | /// This is one level of a [`Tree`], defined above. 
54 | #[repr(C, packed)] 55 | pub struct TreeList { 56 | pub ptrs: [BlockPtr; TREE_LIST_ENTRIES], 57 | pub full_flags: [u128; 2], 58 | } 59 | 60 | impl TreeList { 61 | pub fn tree_list_is_full(&self) -> bool { 62 | self.full_flags[1] == u128::MAX & !(3 << 126) && self.full_flags[0] == u128::MAX 63 | } 64 | 65 | pub fn tree_list_is_empty(&self) -> bool { 66 | for ptr in self.ptrs.iter() { 67 | if !ptr.is_null() { 68 | return false; 69 | } 70 | } 71 | true 72 | } 73 | 74 | pub fn branch_is_full(&self, index: usize) -> bool { 75 | assert!(index < TREE_LIST_ENTRIES); 76 | let shift = index % 128; 77 | let full_flags_index = index / 128; 78 | self.full_flags[full_flags_index] & (1 << shift) != 0 79 | } 80 | 81 | pub fn set_branch_full(&mut self, index: usize, full: bool) { 82 | assert!(index < TREE_LIST_ENTRIES); 83 | let shift = index % 128; 84 | let full_flags_index = index / 128; 85 | 86 | if full { 87 | self.full_flags[full_flags_index] |= 1 << shift; 88 | } else { 89 | self.full_flags[full_flags_index] &= !(1 << shift); 90 | } 91 | } 92 | } 93 | 94 | unsafe impl BlockTrait for TreeList { 95 | fn empty(level: BlockLevel) -> Option { 96 | if level.0 == 0 { 97 | Some(Self { 98 | ptrs: [BlockPtr::default(); TREE_LIST_ENTRIES], 99 | full_flags: [0; 2], 100 | }) 101 | } else { 102 | None 103 | } 104 | } 105 | } 106 | 107 | impl ops::Deref for TreeList { 108 | type Target = [u8]; 109 | fn deref(&self) -> &[u8] { 110 | unsafe { 111 | slice::from_raw_parts( 112 | self as *const TreeList as *const u8, 113 | mem::size_of::>(), 114 | ) as &[u8] 115 | } 116 | } 117 | } 118 | 119 | impl ops::DerefMut for TreeList { 120 | fn deref_mut(&mut self) -> &mut [u8] { 121 | unsafe { 122 | slice::from_raw_parts_mut( 123 | self as *mut TreeList as *mut u8, 124 | mem::size_of::>(), 125 | ) as &mut [u8] 126 | } 127 | } 128 | } 129 | 130 | /// A pointer to an entry in a [`Tree`]. 
#[repr(C, packed)]
pub struct TreePtr<T> {
    /// Entry id, stored little-endian on disk. Zero is the null pointer.
    id: Le<u32>,
    /// Marks the pointee type without storing a value of it.
    phantom: PhantomData<T>,
}

impl<T> TreePtr<T> {
    /// Get a [`TreePtr`] to the filesystem root
    /// directory's node (always id 1).
    pub fn root() -> Self {
        Self::new(1)
    }

    /// Create a pointer to the tree entry with the given `id`.
    pub fn new(id: u32) -> Self {
        Self {
            id: id.into(),
            phantom: PhantomData,
        }
    }

    /// Create a [`TreePtr`] from [`Tree`] indices,
    /// where `indexes` is `(i3, i2, i1, i0)`:
    /// - `i3` is the index into the level 3 table,
    /// - `i2` is the index into the level 2 table at `i3`
    /// - ...and so on.
    ///
    /// Each index must fit in `TREE_LIST_SHIFT` (8) bits; a larger value
    /// would silently bleed into the neighboring index, so this invariant
    /// is checked in debug builds.
    pub fn from_indexes(indexes: (usize, usize, usize, usize)) -> Self {
        const SHIFT: u32 = TREE_LIST_SHIFT;
        debug_assert!(indexes.0 < (1 << SHIFT));
        debug_assert!(indexes.1 < (1 << SHIFT));
        debug_assert!(indexes.2 < (1 << SHIFT));
        debug_assert!(indexes.3 < (1 << SHIFT));
        let id = ((indexes.0 << (3 * SHIFT)) as u32)
            | ((indexes.1 << (2 * SHIFT)) as u32)
            | ((indexes.2 << SHIFT) as u32)
            | (indexes.3 as u32);
        Self {
            id: id.into(),
            phantom: PhantomData,
        }
    }

    /// The raw entry id in native endianness.
    pub fn id(&self) -> u32 {
        self.id.to_ne()
    }

    /// Returns true for the null pointer (id 0), which references no entry.
    pub fn is_null(&self) -> bool {
        self.id() == 0
    }

    /// Get the indices of this [`TreePtr`] in a [`Tree`].
    /// Returns `(i3, i2, i1, i0)`:
    /// - `i3` is the index into the level 3 table,
    /// - `i2` is the index into the level 2 table at `i3`
    /// - ...and so on.
181 | pub fn indexes(&self) -> (usize, usize, usize, usize) { 182 | const SHIFT: u32 = TREE_LIST_SHIFT; 183 | const NUM: u32 = 1 << SHIFT; 184 | const MASK: u32 = NUM - 1; 185 | let id = self.id(); 186 | 187 | let i3 = ((id >> (3 * SHIFT)) & MASK) as usize; 188 | let i2 = ((id >> (2 * SHIFT)) & MASK) as usize; 189 | let i1 = ((id >> SHIFT) & MASK) as usize; 190 | let i0 = (id & MASK) as usize; 191 | 192 | (i3, i2, i1, i0) 193 | } 194 | 195 | pub fn to_bytes(&self) -> [u8; 4] { 196 | self.id.to_le_bytes() 197 | } 198 | 199 | pub fn from_bytes(bytes: [u8; 4]) -> Self { 200 | let val = u32::from_le_bytes(bytes); 201 | Self { 202 | id: Le(val), 203 | phantom: PhantomData, 204 | } 205 | } 206 | } 207 | 208 | impl Clone for TreePtr { 209 | fn clone(&self) -> Self { 210 | *self 211 | } 212 | } 213 | 214 | impl Copy for TreePtr {} 215 | 216 | impl Default for TreePtr { 217 | fn default() -> Self { 218 | Self { 219 | id: 0.into(), 220 | phantom: PhantomData, 221 | } 222 | } 223 | } 224 | 225 | #[cfg(test)] 226 | mod tests { 227 | use crate::{BlockAddr, BlockData, BlockMeta}; 228 | 229 | use super::*; 230 | 231 | #[test] 232 | fn tree_list_size_test() { 233 | assert_eq!( 234 | mem::size_of::>(), 235 | crate::BLOCK_SIZE as usize 236 | ); 237 | } 238 | 239 | #[test] 240 | fn tree_list_is_full_test() { 241 | let mut tree_list = TreeList::::empty(BlockLevel::default()).unwrap(); 242 | assert!(!tree_list.tree_list_is_full()); 243 | 244 | for i in 0..TREE_LIST_ENTRIES { 245 | assert!(!tree_list.branch_is_full(i)); 246 | tree_list.set_branch_full(i, true); 247 | assert!(tree_list.branch_is_full(i)); 248 | } 249 | 250 | assert!(tree_list.tree_list_is_full()); 251 | 252 | for i in 0..TREE_LIST_ENTRIES { 253 | assert!(tree_list.branch_is_full(i)); 254 | tree_list.set_branch_full(i, false); 255 | assert!(!tree_list.branch_is_full(i)); 256 | } 257 | } 258 | 259 | fn mock_block(addr: u64) -> BlockPtr { 260 | let block_addr = unsafe { BlockAddr::new(addr, BlockMeta::default()) }; 261 | 
BlockData::empty(block_addr).unwrap().create_ptr() 262 | } 263 | 264 | #[test] 265 | fn tree_list_is_empty() { 266 | let mut tree_list = TreeList::::empty(BlockLevel::default()).unwrap(); 267 | assert!(tree_list.tree_list_is_empty()); 268 | 269 | tree_list.ptrs[3] = mock_block(123); 270 | assert!(!tree_list.tree_list_is_empty()); 271 | } 272 | 273 | #[test] 274 | fn tree_ptr_to_and_from_bytes() { 275 | let ptr: TreePtr = TreePtr::new(123456); 276 | let bytes = ptr.to_bytes(); 277 | let ptr2: TreePtr = TreePtr::from_bytes(bytes); 278 | assert_eq!(ptr.id(), ptr2.id()); 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /src/unmount.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs, 3 | io::{self}, 4 | process::{Command, ExitStatus}, 5 | }; 6 | 7 | fn unmount_linux_path(mount_path: &str) -> io::Result { 8 | // Different distributions can have various fusermount binaries. Try 9 | // them all. 10 | let commands = ["fusermount", "fusermount3"]; 11 | 12 | for command in commands { 13 | let status = Command::new(command).arg("-u").arg(mount_path).status(); 14 | if status.is_ok() { 15 | return status; 16 | } 17 | if let Err(ref e) = status { 18 | if e.kind() == io::ErrorKind::NotFound { 19 | continue; 20 | } 21 | } 22 | } 23 | 24 | // Unmounting failed since no suitable command was found 25 | Err(std::io::Error::new( 26 | io::ErrorKind::NotFound, 27 | format!( 28 | "Unable to locate any fusermount binaries. Tried {:?}. Is fuse installed?", 29 | commands 30 | ), 31 | )) 32 | } 33 | 34 | pub fn unmount_path(mount_path: &str) -> Result<(), io::Error> { 35 | if cfg!(target_os = "redox") { 36 | fs::remove_file(format!(":{}", mount_path))? 
37 | } else { 38 | let status_res = if cfg!(target_os = "linux") { 39 | unmount_linux_path(mount_path) 40 | } else { 41 | Command::new("umount").arg(mount_path).status() 42 | }; 43 | 44 | let status = status_res?; 45 | if !status.success() { 46 | return Err(io::Error::new( 47 | io::ErrorKind::Other, 48 | "redoxfs umount failed", 49 | )); 50 | } 51 | } 52 | 53 | Ok(()) 54 | } 55 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CARGO_ARGS=(--release) 4 | TARGET=target/release 5 | export RUST_BACKTRACE=full 6 | export RUST_LOG=info 7 | 8 | function cleanup { 9 | sync 10 | fusermount -u image || true 11 | fusermount3 -u image || true 12 | } 13 | 14 | trap 'cleanup' ERR 15 | 16 | set -eEx 17 | 18 | cleanup 19 | 20 | redoxer test -- --lib -- --nocapture 21 | cargo test --lib --no-default-features -- --nocapture 22 | cargo test --lib -- --nocapture 23 | cargo build "${CARGO_ARGS[@]}" 24 | 25 | rm -f image.bin 26 | fallocate -l 1G image.bin 27 | time "${TARGET}/redoxfs-mkfs" image.bin 28 | 29 | mkdir -p image 30 | "${TARGET}/redoxfs" image.bin image 31 | 32 | df -h image 33 | ls -lah image 34 | 35 | mkdir image/test 36 | time cp -r src image/test/src 37 | 38 | dd if=/dev/urandom of=image/test/random bs=1M count=256 39 | dd if=image/test/random of=/dev/null bs=1M count=256 40 | 41 | time truncate --size=256M image/test/sparse 42 | dd if=image/test/sparse of=/dev/null bs=1M count=256 43 | 44 | dd if=/dev/zero of=image/test/zero bs=1M count=256 45 | dd if=image/test/zero of=/dev/null bs=1M count=256 46 | 47 | ls -lah image/test 48 | 49 | df -h image 50 | 51 | rm image/test/random 52 | rm image/test/sparse 53 | rm image/test/zero 54 | rm -rf image/test/src 55 | rmdir image/test 56 | 57 | df -h image 58 | ls -lah image 59 | 60 | cleanup 61 | 62 | "${TARGET}/redoxfs" image.bin image 63 | 64 | df -h image 65 | ls -lah image 
66 | 67 | cleanup 68 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::cargo::CommandCargoExt; 2 | 3 | use core::panic::AssertUnwindSafe; 4 | use redoxfs::{unmount_path, DirEntry, DiskMemory, DiskSparse, FileSystem, Node, TreePtr}; 5 | 6 | use std::panic::catch_unwind; 7 | use std::path::Path; 8 | use std::process::Command; 9 | use std::sync::atomic::AtomicUsize; 10 | use std::sync::atomic::Ordering::Relaxed; 11 | use std::thread::sleep; 12 | use std::time::Duration; 13 | use std::{env, fs, time}; 14 | 15 | static IMAGE_SEQ: AtomicUsize = AtomicUsize::new(0); 16 | 17 | fn with_redoxfs(callback: F) -> T 18 | where 19 | T: Send + Sync + 'static, 20 | F: FnOnce(&str) -> T + Send + Sync + 'static, 21 | { 22 | let disk_path = format!("image{}.bin", IMAGE_SEQ.fetch_add(1, Relaxed)); 23 | 24 | { 25 | let disk = DiskSparse::create(dbg!(&disk_path), 1024 * 1024 * 1024).unwrap(); 26 | let ctime = dbg!(time::SystemTime::now().duration_since(time::UNIX_EPOCH)).unwrap(); 27 | FileSystem::create(disk, None, ctime.as_secs(), ctime.subsec_nanos()).unwrap(); 28 | } 29 | let res = callback(&disk_path); 30 | 31 | dbg!(fs::remove_file(dbg!(disk_path))).unwrap(); 32 | 33 | res 34 | } 35 | 36 | fn with_mounted(callback: F) -> T 37 | where 38 | T: Send + Sync + 'static, 39 | F: FnOnce(&Path) -> T + Send + Sync + 'static, 40 | { 41 | let mount_path_o = format!("image{}", IMAGE_SEQ.fetch_add(1, Relaxed)); 42 | let mount_path = mount_path_o.clone(); 43 | 44 | let res = with_redoxfs(move |fs| { 45 | // At redox, we mount on /scheme/ path, no need an empty dir 46 | if cfg!(not(target_os = "redox")) { 47 | if !Path::new(&mount_path).exists() { 48 | dbg!(fs::create_dir(dbg!(&mount_path))).unwrap(); 49 | } 50 | } else { 51 | //FIXME: cargo_bin is broken when cross compiling. 
This is redoxer specific workaround 52 | env::set_var( 53 | "CARGO_BIN_EXE_redoxfs", 54 | "/root/target/x86_64-unknown-redox/debug/redoxfs", 55 | ); 56 | } 57 | let mut mount_cmd = Command::cargo_bin("redoxfs").expect("unable to find mount bin"); 58 | mount_cmd.arg("-d").arg(dbg!(&fs)).arg(dbg!(&mount_path)); 59 | let mut child = mount_cmd.spawn().expect("mount failed to run"); 60 | 61 | let real_path = if cfg!(target_os = "redox") { 62 | let real_path = dbg!(Path::new("/scheme").join(&mount_path)); 63 | let mut tries = 0; 64 | loop { 65 | if real_path.exists() { 66 | break; 67 | } 68 | tries += 1; 69 | if tries == 10 { 70 | panic!("Fail to wait for mount") 71 | } 72 | println!("{tries}"); 73 | sleep(Duration::from_millis(500)); 74 | } 75 | real_path 76 | } else { 77 | sleep(Duration::from_millis(200)); 78 | let r = Path::new(".").join(&mount_path); 79 | r 80 | }; 81 | 82 | let res = catch_unwind(AssertUnwindSafe(|| callback(&real_path))); 83 | 84 | sleep(Duration::from_millis(200)); 85 | 86 | child.kill().expect("Can't kill"); 87 | 88 | if cfg!(target_os = "redox") { 89 | unmount_path(&mount_path).unwrap(); 90 | } else { 91 | if !dbg!(Command::new("sync").status()).unwrap().success() { 92 | panic!("sync failed"); 93 | } 94 | 95 | if !unmount_path(&mount_path).is_ok() { 96 | // There seems to be a race condition where the device can be busy when trying to unmount. 97 | // So, we pause for a moment and retry. There will still be an error output to the logs 98 | // for the first failed attempt. 
99 | sleep(Duration::from_millis(200)); 100 | if !unmount_path(&mount_path).is_ok() { 101 | panic!("umount failed"); 102 | } 103 | } 104 | } 105 | 106 | res.expect("Test failed") 107 | }); 108 | 109 | if cfg!(not(target_os = "redox")) { 110 | dbg!(fs::remove_dir(dbg!(mount_path_o))).unwrap(); 111 | } 112 | 113 | res 114 | } 115 | 116 | #[test] 117 | fn simple() { 118 | with_mounted(|path| { 119 | dbg!(fs::create_dir(&path.join("test"))).unwrap(); 120 | }) 121 | } 122 | 123 | #[test] 124 | fn create_and_remove_file() { 125 | with_mounted(|path| { 126 | let file_name = "test_file.txt"; 127 | let file_path = path.join(file_name); 128 | 129 | // Create the file 130 | fs::write(&file_path, "Hello, world!").unwrap(); 131 | assert!(fs::exists(&file_path).unwrap()); 132 | 133 | // Read the file 134 | let contents = fs::read_to_string(&file_path).unwrap(); 135 | assert_eq!(contents, "Hello, world!"); 136 | 137 | // Remove the file 138 | fs::remove_file(&file_path).unwrap(); 139 | assert!(!fs::exists(&file_path).unwrap()); 140 | }); 141 | } 142 | 143 | #[test] 144 | fn create_and_remove_directory() { 145 | with_mounted(|path| { 146 | let dir_name = "test_dir"; 147 | let dir_path = path.join(dir_name); 148 | 149 | // Create the directory 150 | fs::create_dir(&dir_path).expect(&format!("cannot create dir {}", &dir_path.display())); 151 | assert!(fs::exists(&dir_path).unwrap()); 152 | 153 | // Check that the directory is empty 154 | let entries: Vec<_> = fs::read_dir(&dir_path) 155 | .unwrap() 156 | .map(|e| e.unwrap().file_name()) 157 | .collect(); 158 | assert!(entries.is_empty()); 159 | 160 | // Add a file to the directory 161 | let file_name = "test_file.txt"; 162 | let file_path = dir_path.join(file_name); 163 | fs::write(&file_path, "Hello, world!").unwrap(); 164 | 165 | // Check that the dir cannot be removed when not empty 166 | let error = fs::remove_dir(&dir_path); 167 | assert!(error.is_err()); 168 | assert_eq!( 169 | error.unwrap_err().kind(), 170 | 
std::io::ErrorKind::DirectoryNotEmpty 171 | ); 172 | 173 | // Remove the file 174 | fs::remove_file(&file_path).unwrap(); 175 | 176 | // Remove the directory 177 | fs::remove_dir(&dir_path).unwrap(); 178 | assert!(!fs::exists(&dir_path).unwrap()); 179 | }); 180 | } 181 | 182 | #[test] 183 | fn create_and_remove_symlink() { 184 | with_mounted(|path| { 185 | let real_file = "real_file.txt"; 186 | let real_path = path.join(real_file); 187 | let symlink_file = "symlink_to_real_file.txt"; 188 | let symlink_path = path.join(symlink_file); 189 | 190 | // Create the real file 191 | fs::write(&real_path, "Hello, world!").unwrap(); 192 | 193 | // Create the symmlink according to the platform 194 | #[cfg(unix)] 195 | std::os::unix::fs::symlink(&real_file, &symlink_path).unwrap(); 196 | 197 | #[cfg(windows)] 198 | std::os::windows::fs::symlink_file(&real_file, &symlink_path).unwrap(); 199 | 200 | // Check that the symlink exists and points to the correct target 201 | let exists = fs::exists(&symlink_path); 202 | assert!( 203 | exists.is_ok() && exists.unwrap(), 204 | "Symlink should exist but was: {:?}", 205 | fs::exists(&symlink_path) 206 | ); 207 | let symlink_metadata = fs::symlink_metadata(&symlink_path).unwrap(); 208 | assert!(symlink_metadata.file_type().is_symlink()); 209 | let target = fs::read_link(&symlink_path).unwrap(); 210 | assert_eq!(target.to_str().unwrap(), real_file); 211 | assert_eq!(fs::read(&symlink_path).unwrap(), b"Hello, world!"); 212 | 213 | // Confirm the symlink cannot be removed as a directory 214 | let error = fs::remove_dir(&symlink_path); 215 | assert!(error.is_err()); 216 | assert_eq!(error.unwrap_err().kind(), std::io::ErrorKind::NotADirectory); 217 | 218 | // Remove the symlink 219 | fs::remove_file(&symlink_path).unwrap(); 220 | assert!(!fs::exists(&symlink_path).unwrap()); 221 | }); 222 | } 223 | 224 | #[cfg(target_os = "redox")] 225 | #[test] 226 | fn mmap() { 227 | //TODO 228 | with_mounted(|path| { 229 | use std::slice; 230 | 231 | let 
path = dbg!(path.join("test")); 232 | 233 | let mmap_inner = |write: bool| { 234 | let fd = dbg!(libredox::call::open( 235 | path.to_str().unwrap(), 236 | libredox::flag::O_CREAT | libredox::flag::O_RDWR | libredox::flag::O_CLOEXEC, 237 | 0, 238 | )) 239 | .unwrap(); 240 | 241 | let map = unsafe { 242 | slice::from_raw_parts_mut( 243 | dbg!(libredox::call::mmap(libredox::call::MmapArgs { 244 | fd, 245 | offset: 0, 246 | length: 128, 247 | prot: libredox::flag::PROT_READ | libredox::flag::PROT_WRITE, 248 | flags: libredox::flag::MAP_SHARED, 249 | addr: core::ptr::null_mut(), 250 | })) 251 | .unwrap() as *mut u8, 252 | 128, 253 | ) 254 | }; 255 | 256 | // Maps should be available after closing 257 | assert_eq!(dbg!(libredox::call::close(fd)), Ok(())); 258 | 259 | for i in 0..128 { 260 | if write { 261 | map[i as usize] = i; 262 | } 263 | assert_eq!(map[i as usize], i); 264 | } 265 | 266 | //TODO: add msync 267 | unsafe { 268 | assert_eq!( 269 | dbg!(libredox::call::munmap(map.as_mut_ptr().cast(), map.len())), 270 | Ok(()) 271 | ); 272 | } 273 | }; 274 | 275 | mmap_inner(true); 276 | mmap_inner(false); 277 | }) 278 | } 279 | 280 | // TODO: When increasing the total_count to 8000, the Allocator's deallocate() function surfaces as "slow" according to flamegraph. This 281 | // appears to be the result of bulk deleting in this test, but I would bet that any filesystem that has lived for a long time would 282 | // start to see degraded performance due to this. 
283 | #[test] 284 | fn many_create_write_list_find_read_delete() { 285 | let disk = DiskMemory::new(1024 * 1024 * 1024); 286 | let ctime = time::SystemTime::now() 287 | .duration_since(time::UNIX_EPOCH) 288 | .unwrap(); 289 | let mut fs = FileSystem::create(disk, None, ctime.as_secs(), ctime.subsec_nanos()).unwrap(); 290 | let tree_ptr = TreePtr::::root(); 291 | let total_count = 3000; 292 | 293 | // Create a bunch of files 294 | for i in 0..total_count { 295 | let result = fs.tx(|tx| { 296 | tx.create_node( 297 | tree_ptr, 298 | &format!("file{i:05}"), 299 | Node::MODE_FILE | 0644, 300 | 1, 301 | 0, 302 | ) 303 | }); 304 | if result.is_err() { 305 | println!("Failure on create iteration {i}"); 306 | } 307 | 308 | let file_node = result.unwrap(); 309 | let result = fs.tx(|tx| { 310 | tx.write_node( 311 | file_node.ptr(), 312 | 0, 313 | format!("Hello World! #{i}").as_bytes(), 314 | ctime.as_secs(), 315 | ctime.subsec_nanos(), 316 | ) 317 | }); 318 | if result.is_err() { 319 | println!("Failure on write iteration {i}"); 320 | } 321 | assert!(result.unwrap() > 0) 322 | } 323 | 324 | // Confirm that they can be listed 325 | { 326 | let mut children = Vec::::with_capacity(total_count); 327 | let _ = fs.tx(|tx| tx.child_nodes(tree_ptr, &mut children)).unwrap(); 328 | assert_eq!( 329 | children.len(), 330 | total_count, 331 | "The list of children should match the number of files created." 
332 | ); 333 | let mut children: Vec = children 334 | .iter() 335 | .map(|entry| entry.name().unwrap_or_default().to_string()) 336 | .collect(); 337 | children.sort(); 338 | 339 | for i in 0..total_count { 340 | let expected = format!("file{i:05}"); 341 | let idx = children.binary_search(&expected); 342 | assert!(idx.is_ok(), "Children did not contain '{}'", expected); 343 | } 344 | } 345 | 346 | // Find and read the files 347 | for i in 0..total_count { 348 | let result = fs.tx(|tx| tx.find_node(tree_ptr, &format!("file{i:05}"))); 349 | if result.is_err() { 350 | println!("Failure on find node iteration {i}"); 351 | } 352 | 353 | let file_node = result.unwrap(); 354 | let offset = 0; 355 | let mut buf = [0_u8; 32]; 356 | let result = fs.tx(|tx| { 357 | tx.read_node( 358 | file_node.ptr(), 359 | offset, 360 | &mut buf, 361 | ctime.as_secs(), 362 | ctime.subsec_nanos(), 363 | ) 364 | }); 365 | if result.is_err() { 366 | println!("Failure on read iteration {i}"); 367 | } 368 | let size = result.unwrap(); 369 | let body = std::str::from_utf8(&buf[..size]).unwrap(); 370 | assert_eq!(body, format!("Hello World! 
#{i}")); 371 | } 372 | 373 | // Delete all the files 374 | for i in 0..total_count { 375 | let file_name = format!("file{i:05}"); 376 | let result = fs.tx(|tx| tx.remove_node(tree_ptr, &file_name, Node::MODE_FILE)); 377 | if result.is_err() { 378 | println!("Failure on delete iteration {i}"); 379 | result.unwrap(); 380 | } 381 | let result = fs.tx(|tx| tx.find_node(tree_ptr, &file_name)); 382 | if !result.is_err() || result.err().unwrap().errno != syscall::error::ENOENT { 383 | println!("Failure on delete verification iteration {i}"); 384 | assert!(false, "Deletion appears to ahve failred"); 385 | } 386 | } 387 | } 388 | 389 | #[test] 390 | fn many_write_read_delete_mounted() { 391 | with_mounted(|path| { 392 | let total_count = 500; 393 | 394 | for i in 0..total_count { 395 | fs::write( 396 | &path.join(format!("file{}", i)), 397 | format!("Hello, number {i}!"), 398 | ) 399 | .unwrap(); 400 | } 401 | 402 | // Confirm each of the created files can be found and read 403 | for i in 0..total_count { 404 | let contents = fs::read_to_string(&path.join(format!("file{}", i))).unwrap(); 405 | assert_eq!(contents, format!("Hello, number {i}!")); 406 | } 407 | 408 | // Remove all the files 409 | for i in 0..total_count { 410 | let file_path = path.join(format!("file{i}")); 411 | assert!(fs::exists(&file_path).unwrap()); 412 | fs::remove_file(&file_path).unwrap(); 413 | assert!(!fs::exists(&file_path).unwrap()); 414 | } 415 | }); 416 | } 417 | --------------------------------------------------------------------------------