├── .github └── workflows │ └── verify.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md └── src ├── error.rs ├── io ├── buffer.rs ├── mod.rs └── ring.rs ├── lib.rs ├── map.rs └── os ├── mod.rs ├── unix ├── mach.rs ├── mod.rs └── posix │ ├── memfd.rs │ └── mod.rs └── windows.rs /.github/workflows/verify.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | push: 4 | branches: [main] 5 | tags: 6 | - "v*.*.*" 7 | 8 | name: Verify 9 | 10 | jobs: 11 | test_linux: 12 | name: Test Linux 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout sources 16 | uses: actions/checkout@v2 17 | - name: Install stable toolchain 18 | uses: actions-rs/toolchain@v1 19 | with: 20 | profile: minimal 21 | toolchain: stable 22 | override: true 23 | - name: Run cargo test 24 | uses: actions-rs/cargo@v1 25 | with: 26 | command: test 27 | 28 | test_windows: 29 | name: Test Windows 30 | runs-on: windows-latest 31 | steps: 32 | - name: Checkout sources 33 | uses: actions/checkout@v2 34 | - name: Install stable toolchain 35 | uses: actions-rs/toolchain@v1 36 | with: 37 | profile: minimal 38 | toolchain: stable 39 | override: true 40 | - name: Run cargo test 41 | uses: actions-rs/cargo@v1 42 | with: 43 | command: test 44 | 45 | test_macos: 46 | name: Test MacOS 47 | runs-on: macos-latest 48 | steps: 49 | - name: Checkout sources 50 | uses: actions/checkout@v2 51 | - name: Install stable toolchain 52 | uses: actions-rs/toolchain@v1 53 | with: 54 | profile: minimal 55 | toolchain: stable 56 | override: true 57 | - name: Run cargo test 58 | uses: actions-rs/cargo@v1 59 | with: 60 | command: test 61 | 62 | # For test vs build https://github.com/cross-rs/cross?tab=readme-ov-file#supported-targets 63 | 64 | test_cross: 65 | name: Test Android and Musl 66 | strategy: 67 | matrix: 68 | target: 69 | - aarch64-linux-android 70 | - x86_64-unknown-linux-musl 71 | runs-on: ubuntu-latest 72 | steps: 73 | - name: 
Checkout sources 74 | uses: actions/checkout@v2 75 | - name: Install stable toolchain 76 | uses: actions-rs/toolchain@v1 77 | with: 78 | profile: minimal 79 | toolchain: stable 80 | override: true 81 | - name: Install Cross 82 | run: cargo install cross --git https://github.com/cross-rs/cross --rev 085092c 83 | - name: Cross Test 84 | run: cross test --target ${{ matrix.target }} 85 | 86 | build_cross: 87 | name: Build BSDs 88 | strategy: 89 | matrix: 90 | target: 91 | - x86_64-unknown-freebsd 92 | - x86_64-unknown-netbsd 93 | runs-on: ubuntu-latest 94 | steps: 95 | - name: Checkout sources 96 | uses: actions/checkout@v2 97 | - name: Install stable toolchain 98 | uses: actions-rs/toolchain@v1 99 | with: 100 | profile: minimal 101 | toolchain: stable 102 | override: true 103 | - name: Install Cross 104 | run: cargo install cross --git https://github.com/cross-rs/cross --rev 085092c 105 | - name: Cross Test 106 | run: cross build --target ${{ matrix.target }} 107 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /target 3 | **/*.rs.bk 4 | Cargo.lock 5 | *~ 6 | *.swp 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
6 | 7 | ## [Unreleased] 8 | 9 | ## [0.6.3] - 2024-03-13 10 | - Update some `Size` functions to be `const` 11 | - Add or improve `AsMut` and `AsRef` implementations 12 | - Add `Ring::clear()` method 13 | - Add passthrough implementations for `Read` and `Write` when possible 14 | - Improve the `BufWriter::into_inner()` 15 | 16 | ## [0.6.2] - 2024-03-08 17 | - Update dependencies 18 | 19 | ## [0.6.1] - 2024-03-07 20 | - Add error conversion 21 | 22 | ## [0.6.0] - 2024-03-06 23 | - Update page advice 24 | - Reduce unsafe areas [@peamaeq](https://github.com/peamaeq) 25 | 26 | ## [0.5.1] - 2022-04-02 27 | - Fix circular buffer for non-power-of-2 sizes [@mbehr1](https://github.com/mbehr1) 28 | 29 | ## [0.5.0] - 2021-12-24 30 | - Remove deprecated code 31 | - Update to 2021 rust edition 32 | 33 | ## [0.4.4] - 2021-02-20 34 | 35 | ### Changed 36 | - Fix POSIX path translation for shared memory FD [@calebzulawski](https://github.com/calebzulawski). 37 | - Remove dependency on `rand` [@calebzulawski](https://github.com/calebzulawski). 38 | 39 | ## [0.4.3] - 2020-11-06 40 | ### Added 41 | - Testing for Android, FreeBSD, and Solaris by [@calebzulawski](https://github.com/calebzulawski). 42 | - Volatile and unaligned reads and writes for Span and SpanMut 43 | - Start a CHANGELOG 44 | 45 | ### Changed 46 | - Improve FreeBSD shared memory FD for ring buffers by [@calebzulawski](https://github.com/calebzulawski). 47 | - Fix Solaris and BSDs temp paths for shared memory FD [@calebzulawski](https://github.com/calebzulawski). 
48 | - Fix Windows 32-bit size handling 49 | 50 | ## [0.4.2] - 2020-10-06 51 | ### Added 52 | - Add `os` and `io` optional features 53 | 54 | ### Changes 55 | - Documentation improvements 56 | - Stopped using deprecated examples where possible 57 | - Return the `File` object when `open`ing from a path 58 | 59 | [Unreleased]: https://github.com/kalamay/vmap-rs/compare/v0.6.3...HEAD 60 | [0.6.3]: https://github.com/kalamay/vmap-rs/compare/v0.6.2...v0.6.3 61 | [0.6.2]: https://github.com/kalamay/vmap-rs/compare/v0.6.1...v0.6.2 62 | [0.6.1]: https://github.com/kalamay/vmap-rs/compare/v0.6.0...v0.6.1 63 | [0.6.0]: https://github.com/kalamay/vmap-rs/compare/v0.5.1...v0.6.0 64 | [0.5.1]: https://github.com/kalamay/vmap-rs/compare/v0.5.0...v0.5.1 65 | [0.5.0]: https://github.com/kalamay/vmap-rs/compare/v0.4.4...v0.5.0 66 | [0.4.4]: https://github.com/kalamay/vmap-rs/compare/v0.4.3...v0.4.4 67 | [0.4.3]: https://github.com/kalamay/vmap-rs/compare/v0.4.2...v0.4.3 68 | [0.4.2]: https://github.com/kalamay/vmap-rs/compare/v0.4.1...v0.4.2 69 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vmap" 3 | version = "0.6.3" 4 | authors = ["Jeremy Larkin "] 5 | license = "MIT" 6 | repository = "https://github.com/kalamay/vmap-rs" 7 | documentation = "https://docs.rs/vmap" 8 | description = "Cross-platform library for fast and safe memory-mapped IO and boundary-free ring buffer." 
9 | keywords = ["mmap", "io", "file", "circular-buffer", "ring-buffer"] 10 | edition = "2021" 11 | 12 | [features] 13 | default = ["all"] 14 | all = ["io", "os"] 15 | io = [] 16 | os = [] 17 | 18 | [dependencies] 19 | system_error = "0.2" 20 | 21 | [target.'cfg(unix)'.dependencies] 22 | libc = "0.2" 23 | 24 | [target.'cfg(windows)'.dependencies] 25 | winapi = { version = "0.3", features = ["std", "basetsd", "minwindef", "sysinfoapi", "handleapi", "memoryapi", "fileapi"] } 26 | 27 | [dev-dependencies] 28 | tempdir = "0.3" 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jeremy Larkin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Verify](https://github.com/kalamay/vmap-rs/workflows/Verify/badge.svg) 2 | 3 | # vmap-rs 4 | A cross-platform library for fast and safe memory-mapped IO and boundary-free 5 | ring buffer. 6 | 7 | This library defines a convenient API for reading and writing to files 8 | using the hosts virtual memory system, as well as allocating memory and 9 | creating circular memory regions. The design of the API strives to 10 | both minimize the frequency of mapping system calls while still retaining 11 | safe access. Critically, it never attempts the own the `File` object used 12 | for mapping. That is, it never clones it or in any way retains it. While 13 | this has some implications for the API (i.e. [`.flush()`]), it cannot cause 14 | bugs outside of this library through `File`'s leaky abstraction when cloned 15 | and then closed. 16 | 17 | The [`Map`] and [`MapMut`] types are primary means for allocating virtual 18 | memory regions, both for a file and anonymously. Generally, the 19 | [`Map::with_options()`] and [`MapMut::with_options()`] are used to specify 20 | the mapping requirements. See [`Options`] for more information. 21 | 22 | The [`MapMut`] type maintains interior mutability for the mapped memory, 23 | while the [`Map`] is read-only. However, it is possible to convert between 24 | these types ([`.into_map_mut()`] and [`.into_map()`]) assuming the proper 25 | [`Options`] are specified. 26 | 27 | Additionally, a variety of buffer implementations are provided in the 28 | [`vmap::io`] module. The [`Ring`] and [`InfiniteRing`] use cross-platform 29 | optimzed circular memory mapping to remove the typical boundary problem 30 | with most circular buffers. 
This ensures all ranges of the underlying byte 31 | buffer can be viewed as a single byte slice, event when the value wraps 32 | back around to the beginning of the buffer. The [`BufReader`] and [`BufWriter`] 33 | implement buffered I/O using a [`Ring`] as a backing layer. 34 | 35 | Take a look at the [Documentation](https://docs.rs/vmap/) for details! 36 | 37 | # Examples 38 | 39 | ```rust 40 | use vmap::Map; 41 | use std::{fs, str}; 42 | 43 | let path = "example"; 44 | 45 | // Write some test data 46 | fs::write(&path, b"this is a test")?; 47 | 48 | // Map the first 4 bytes 49 | let (map, file) = Map::with_options().len(4).open(&path)?; 50 | assert_eq!(Ok("this"), str::from_utf8(&map[..])); 51 | 52 | // Reuse the file to map a different region 53 | let map = Map::with_options().offset(10).len(4).map(&file)?; 54 | assert_eq!(Ok("test"), str::from_utf8(&map[..])); 55 | ``` 56 | 57 | If opened properly, the [`Map`] can be moved into a [`MapMut`] and modifications 58 | to the underlying file can be performed: 59 | 60 | ```rust 61 | use vmap::Map; 62 | use std::{fs, str}; 63 | 64 | let path = "example"; 65 | 66 | // Write some test data 67 | fs::write(&path, b"this is a test")?; 68 | 69 | // Open with write permissions so the Map can be converted into a MapMut 70 | let (map, file) = Map::with_options().write().len(14).open(&path)?; 71 | assert_eq!(Ok("this is a test"), str::from_utf8(&map[..])); 72 | 73 | // Move the Map into a MapMut 74 | // ... 
we could have started with MapMut::with_options() 75 | let mut map = map.into_map_mut()?; 76 | map[..4].clone_from_slice(b"that"); 77 | 78 | // Flush the changes to disk synchronously 79 | map.flush(&file, Flush::Sync)?; 80 | 81 | // Move the MapMut back into a Map 82 | let map = map.into_map()?; 83 | assert_eq!(Ok("that is a test"), str::from_utf8(&map[..])); 84 | ``` 85 | 86 | ## Ring Buffer 87 | 88 | The [`vmap`] library contains a [`Ring`] that constructs a circular memory 89 | allocation where values can wrap from around from the end of the buffer back 90 | to the beginning with sequential memory addresses. The [`InfiniteRing`] is 91 | similar, however it allows writes to overwrite reads. 92 | 93 | ```rust 94 | use vmap::io::{Ring, SeqWrite}; 95 | use std::io::{BufRead, Read, Write}; 96 | 97 | let mut buf = Ring::new(4000).unwrap(); 98 | let mut i = 1; 99 | 100 | // Fill up the buffer with lines. 101 | while buf.write_len() > 20 { 102 | write!(&mut buf, "this is test line {}\n", i)?; 103 | i += 1; 104 | } 105 | 106 | // No more space is available. 107 | assert!(write!(&mut buf, "this is test line {}\n", i).is_err()); 108 | 109 | let mut line = String::new(); 110 | 111 | // Read the first line written. 112 | let len = buf.read_line(&mut line)?; 113 | assert_eq!(line, "this is test line 1\n"); 114 | 115 | line.clear(); 116 | 117 | // Read the second line written. 118 | let len = buf.read_line(&mut line)?; 119 | assert_eq!(line, "this is test line 2\n"); 120 | 121 | // Now there is enough space to write more. 
122 | write!(&mut buf, "this is test line {}\n", i)?; 123 | ``` 124 | 125 | [`.flush()`]: https://docs.rs/vmap/0.6.3/vmap/struct.MapMut.html#method.flush 126 | [`.into_map()`]: https://docs.rs/vmap/0.6.3/vmap/struct.MapMut.html#method.into_map 127 | [`.into_map_mut()`]: https://docs.rs/vmap/0.6.3/vmap/struct.Map.html#method.into_map_mut 128 | [`BufReader`]: https://docs.rs/vmap/0.6.3/vmap/io/struct.BufReader.html 129 | [`BufWriter`]: https://docs.rs/vmap/0.6.3/vmap/io/struct.BufWriter.html 130 | [`InfiniteRing`]: https://docs.rs/vmap/0.6.3/vmap/io/struct.InfiniteRing.html 131 | [`Map::with_options()`]: https://docs.rs/vmap/0.6.3/vmap/struct.Map.html#method.with_options 132 | [`MapMut::with_options()`]: https://docs.rs/vmap/0.6.3/vmap/struct.MapMut.html#method.with_options 133 | [`MapMut`]: https://docs.rs/vmap/0.6.3/vmap/struct.MapMut.html 134 | [`Map`]: https://docs.rs/vmap/0.6.3/vmap/struct.Map.html 135 | [`Options`]: https://docs.rs/vmap/0.6.3/vmap/struct.Options.html 136 | [`Ring`]: https://docs.rs/vmap/0.6.3/vmap/io/struct.Ring.html 137 | [`vmap::io`]: https://docs.rs/vmap/0.6.3/vmap/io/index.html 138 | [`vmap`]: https://docs.rs/vmap/ 139 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | //! Types for working with various map operation errors. 2 | 3 | use std::{fmt, io}; 4 | 5 | /// A specialized `Result` type for map operations. 6 | pub type Result = std::result::Result; 7 | 8 | /// A specialiazed `Result` type for conversion operations. 9 | /// 10 | /// The origin `self` type is consumed When converting between [`Map`] 11 | /// and [`MapMut`] types. The `ConvertResult` returns the original input 12 | /// value on failure so that it isn't necessarily dropped. This allows 13 | /// a failure to be handled while stil maintaining the existing mapping. 
14 | /// 15 | /// [`Map`]: struct.Map.html 16 | /// [`MapMut`]: struct.MapMut.html 17 | pub type ConvertResult = std::result::Result; 18 | 19 | impl From<(Error, F)> for Error { 20 | /// Converts the `(Error, F)` tuple from a [`ConvertResult`] result into 21 | /// an [`Error`], dropping the failed map in the process. 22 | /// 23 | /// [`ConvertResult`]: type.ConvertResult.html 24 | /// [`Error`]: type.Error.html 25 | fn from(value: (Error, F)) -> Error { 26 | value.0 27 | } 28 | } 29 | 30 | /// A list specifying general categories of map errors. 31 | /// 32 | /// These errors can be converted into `std::io::Error` values. 33 | /// 34 | /// # Examples 35 | /// 36 | /// ```should_panic 37 | /// fn create_map() -> vmap::Result { 38 | /// /// ... 39 | /// # use vmap::{Error, Operation, Input}; 40 | /// # Err(Error::input(Operation::MapFile, Input::InvalidRange)) 41 | /// } 42 | /// 43 | /// fn main() -> std::io::Result<()> { 44 | /// let map = create_map()?; 45 | /// println!("len = {}\n", map.len()); 46 | /// Ok(()) 47 | /// } 48 | /// ``` 49 | pub struct Error { 50 | repr: Repr, 51 | op: Operation, 52 | } 53 | 54 | enum Repr { 55 | Io(io::Error), 56 | Input(Input), 57 | System(system_error::Error), 58 | } 59 | 60 | impl Error { 61 | /// Returns an error that wraps a `std::io::Error` along with an [`Operation`]. 62 | /// 63 | /// # Examples 64 | /// 65 | /// ``` 66 | /// use std::io::ErrorKind; 67 | /// use vmap::{Error, Operation}; 68 | /// 69 | /// println!("I/O error: {:?}", Error::io( 70 | /// Operation::MapFile, 71 | /// ErrorKind::NotFound.into(), 72 | /// )); 73 | /// ``` 74 | /// 75 | /// [`Operation`]: enum.Operation.html 76 | pub fn io(op: Operation, err: io::Error) -> Self { 77 | Self { 78 | repr: Repr::Io(err), 79 | op, 80 | } 81 | } 82 | 83 | /// Returns an error that wraps an [`Input`] type along with an [`Operation`]. 
84 | /// 85 | /// # Examples 86 | /// 87 | /// ``` 88 | /// use vmap::{Error, Operation, Input}; 89 | /// 90 | /// println!("Input error: {:?}", Error::input( 91 | /// Operation::MapFile, 92 | /// Input::InvalidRange, 93 | /// )); 94 | /// ``` 95 | /// 96 | /// [`Input`]: enum.Input.html 97 | /// [`Operation`]: enum.Operation.html 98 | pub fn input(op: Operation, input: Input) -> Self { 99 | Self { 100 | repr: Repr::Input(input), 101 | op, 102 | } 103 | } 104 | 105 | /// Returns an error that wraps a `system_error::Error` along with an [`Operation`]. 106 | /// 107 | /// # Examples 108 | /// 109 | /// ``` 110 | /// use std::io::ErrorKind; 111 | /// use vmap::{Error, Operation}; 112 | /// 113 | /// println!("System error: {:?}", Error::system( 114 | /// Operation::MapFile, 115 | /// system_error::Error::last_os_error() 116 | /// )); 117 | /// ``` 118 | /// 119 | /// [`system_error::Error`]: https://docs.rs/system_error/0.1.1/system_error/struct.Error.html 120 | /// [`Operation`]: enum.Operation.html 121 | pub fn system(op: Operation, err: system_error::Error) -> Self { 122 | Self { 123 | repr: Repr::System(err), 124 | op, 125 | } 126 | } 127 | 128 | /// Returns an error that wraps a [`system_error::KernelCode`] along with an [`Operation`]. 129 | /// 130 | /// # Examples 131 | /// 132 | /// ``` 133 | /// use vmap::{Error, Operation}; 134 | /// 135 | /// println!("Kernel error: {:?}", Error::kernel( 136 | /// Operation::RingAllocate, 137 | /// 1, 138 | /// )); 139 | /// ``` 140 | /// 141 | /// [`system_error::KernelCode`]: https://docs.rs/system_error/0.1.1/system_error/type.KernelCode.html 142 | /// [`Operation`]: enum.Operation.html 143 | pub fn kernel(op: Operation, code: system_error::KernelCode) -> Self { 144 | Self::system(op, system_error::Error::from_raw_kernel_error(code)) 145 | } 146 | 147 | /// Returns an error representing the last OS error which occurred. 148 | /// 149 | /// This function reads the value of `errno` for the target platform (e.g. 
150 | /// `GetLastError` on Windows) and will return a corresponding instance of 151 | /// `Error` for the error code. 152 | /// 153 | /// # Examples 154 | /// 155 | /// ``` 156 | /// use vmap::{Error, Operation}; 157 | /// 158 | /// println!("last OS error: {:?}", Error::last_os_error(Operation::MapFile)); 159 | /// ``` 160 | pub fn last_os_error(op: Operation) -> Self { 161 | Self::system(op, system_error::Error::last_os_error()) 162 | } 163 | 164 | /// Returns the OS error that this error represents (if any). 165 | /// 166 | /// If this `Error` was constructed via `last_os_error`, then this function 167 | /// will return `Some`, otherwise it will return `None`. 168 | /// 169 | /// # Examples 170 | /// 171 | /// ``` 172 | /// use vmap::{Error, Input, Operation}; 173 | /// 174 | /// fn print_os_error(err: &Error) { 175 | /// if let Some(raw_os_err) = err.raw_os_error() { 176 | /// println!("raw OS error: {:?}", raw_os_err); 177 | /// } else { 178 | /// println!("Not an OS error"); 179 | /// } 180 | /// } 181 | /// 182 | /// // Will print "raw OS error: ...". 183 | /// print_os_error(&Error::last_os_error(Operation::MapFile)); 184 | /// // Will print "Not an OS error". 185 | /// print_os_error(&Error::input(Operation::MapFile, Input::InvalidRange)); 186 | /// ``` 187 | pub fn raw_os_error(&self) -> Option { 188 | match &self.repr { 189 | Repr::Io(err) => err.raw_os_error(), 190 | Repr::Input(_) => None, 191 | Repr::System(err) => err.raw_os_error(), 192 | } 193 | } 194 | 195 | /// Returns the corresponding `std::io::ErrorKind` for this error. 196 | /// 197 | /// # Examples 198 | /// 199 | /// ``` 200 | /// use std::io::ErrorKind; 201 | /// use vmap::{Error, Operation}; 202 | /// 203 | /// fn print_error(err: Error) { 204 | /// println!("{:?}", err.kind()); 205 | /// } 206 | /// 207 | /// // Will print "Other". 208 | /// print_error(Error::last_os_error(Operation::MapFile)); 209 | /// // Will print "NotFound". 
210 | /// print_error(Error::io(Operation::MapFile, ErrorKind::NotFound.into())); 211 | /// ``` 212 | pub fn kind(&self) -> io::ErrorKind { 213 | match self.repr { 214 | Repr::Io(ref err) => err.kind(), 215 | Repr::Input(_) => io::ErrorKind::InvalidInput, 216 | Repr::System(ref err) => err.kind(), 217 | } 218 | } 219 | 220 | /// Returns the corresponding [`Operation`] that cuased the error. 221 | /// 222 | /// # Examples 223 | /// 224 | /// ``` 225 | /// use vmap::{Error, Operation}; 226 | /// 227 | /// fn print_operation(err: Error) { 228 | /// println!("{:?}", err.operation()); 229 | /// } 230 | /// 231 | /// // Will print "MapFile". 232 | /// print_operation(Error::last_os_error(Operation::MapFile)); 233 | /// ``` 234 | /// 235 | /// [`Operation`]: enum.Operation.html 236 | pub fn operation(&self) -> Operation { 237 | self.op 238 | } 239 | } 240 | 241 | impl std::error::Error for Error { 242 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 243 | match self.repr { 244 | Repr::Io(ref err) => Some(err), 245 | Repr::Input(_) => None, 246 | Repr::System(_) => None, 247 | } 248 | } 249 | } 250 | 251 | impl From for Error { 252 | fn from(err: std::io::Error) -> Self { 253 | Self { 254 | repr: Repr::Io(err), 255 | op: Operation::None, 256 | } 257 | } 258 | } 259 | 260 | impl From for std::io::Error { 261 | fn from(err: Error) -> Self { 262 | match err.repr { 263 | Repr::Io(io) => io, 264 | Repr::Input(v) => Self::new(io::ErrorKind::InvalidInput, v.as_str()), 265 | Repr::System(sys) => sys.into(), 266 | } 267 | } 268 | } 269 | 270 | impl fmt::Debug for Error { 271 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 272 | let (field, value) = match self.repr { 273 | Repr::Io(ref err) => ("io", err as &dyn fmt::Debug), 274 | Repr::Input(ref input) => ("input", input as &dyn fmt::Debug), 275 | Repr::System(ref err) => ("system", err as &dyn fmt::Debug), 276 | }; 277 | fmt.debug_struct("Error") 278 | .field("op", &self.op) 279 | .field("kind", 
&self.kind()) 280 | .field(field, value) 281 | .finish() 282 | } 283 | } 284 | 285 | impl fmt::Display for Error { 286 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 287 | let value = match self.repr { 288 | Repr::Io(ref err) => err as &dyn fmt::Display, 289 | Repr::Input(ref input) => input as &dyn fmt::Display, 290 | Repr::System(ref err) => err as &dyn fmt::Display, 291 | }; 292 | if let Some(op) = self.op.as_str() { 293 | write!(fmt, "failed to {}, {}", op, value) 294 | } else { 295 | value.fmt(fmt) 296 | } 297 | } 298 | } 299 | 300 | /// A list specifying general categories of erroneous operations. 301 | /// 302 | /// This list is intended to grow over time and it is not recommended to 303 | /// exhaustively match against it. 304 | /// 305 | /// It is used with the [`Error`] type. 306 | /// 307 | /// [`Error`]: struct.Error.html 308 | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 309 | #[non_exhaustive] 310 | pub enum Operation { 311 | /// The operation failed while attempting to map a file. 312 | MapFile, 313 | /// A map file handle failed to open. 314 | MapFileHandle, 315 | /// The view for a map file handle could not be created. 316 | MapFileView, 317 | /// The operation failed while attempting to allocate an anonymous mapping. 318 | MapAnonymous, 319 | /// An anonymous mapping handle failed to open. 320 | MapAnonymousHandle, 321 | /// The view for an anonymouse mapping handle could not be created. 322 | MapAnonymousView, 323 | /// A pointer could not be unmapped. 324 | Unmap, 325 | /// The [`Protect`] could not be applied to the provided memory region. 326 | /// 327 | /// [`Protect`]: ../enum.Protect.html 328 | Protect, 329 | /// The [`Advise`] could not be applied to the provided memory region. 330 | /// 331 | /// [`Advise`]: ../enum.Advise.html 332 | Advise, 333 | /// The physical page could not be locked into memory. 334 | Lock, 335 | /// The physical page could not be unlocked from memory. 
336 | Unlock, 337 | /// A flush cannot be perfomed for the provided input. 338 | Flush, 339 | /// The full address space for a ring could not be allocated. 340 | RingAllocate, 341 | /// The full address space for a ring could not be deallocated. 342 | RingDeallocate, 343 | /// A virtual mapping entry could not be created. 344 | RingEntry, 345 | /// The mapping for the first half of the ring failed to allocate. 346 | RingPrimary, 347 | /// The mapping for the second half of the ring failed to allocate. 348 | RingSecondary, 349 | /// A temporary memory file descriptor failed to open. 350 | MemoryFd, 351 | /// Used for pure I/O errors to simplify wrapping a `std::io::Error` into an 352 | /// 353 | /// [`Error`]: struct.Error.html 354 | None, 355 | } 356 | 357 | impl Operation { 358 | /// Returns a display message fragment describing the `Operation` type. 359 | /// 360 | /// The result of `as_str` is used to `Display` the `Operation`. 361 | /// 362 | /// # Examples 363 | /// 364 | /// ``` 365 | /// use vmap::Operation; 366 | /// 367 | /// fn print_operation(op: Operation) { 368 | /// println!("failed to {}", op.as_str().unwrap()); 369 | /// } 370 | /// 371 | /// // Will print "failed to map file". 
372 | /// print_operation(Operation::MapFile); 373 | /// ``` 374 | pub fn as_str(&self) -> Option<&'static str> { 375 | match *self { 376 | Operation::MapFile => Some("map file"), 377 | Operation::MapFileHandle => Some("map file handle"), 378 | Operation::MapFileView => Some("map file view"), 379 | Operation::MapAnonymous => Some("map anonymous"), 380 | Operation::MapAnonymousHandle => Some("map anonymous handle"), 381 | Operation::MapAnonymousView => Some("map anonymous view"), 382 | Operation::Unmap => Some("unmap"), 383 | Operation::Protect => Some("protect mapped memory"), 384 | Operation::Advise => Some("advise mapped memory"), 385 | Operation::Lock => Some("lock mapped memory"), 386 | Operation::Unlock => Some("unlock mapped memory"), 387 | Operation::Flush => Some("flush mapped memory"), 388 | Operation::RingAllocate => Some("allocate full ring"), 389 | Operation::RingDeallocate => Some("deallocate full ring"), 390 | Operation::RingEntry => Some("make ring memory entry"), 391 | Operation::RingPrimary => Some("map ring first half"), 392 | Operation::RingSecondary => Some("map ring second half"), 393 | Operation::MemoryFd => Some("open memory fd"), 394 | Operation::None => None, 395 | } 396 | } 397 | } 398 | 399 | impl fmt::Display for Operation { 400 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 401 | fmt.write_str(self.as_str().unwrap_or("")) 402 | } 403 | } 404 | 405 | /// A list specifying general categories of input mapping errors. 406 | /// 407 | /// This list is intended to grow over time and it is not recommended to 408 | /// exhaustively match against it. 409 | /// 410 | /// It is used with the [`Error`] type. 411 | /// 412 | /// [`Error`]: struct.Error.html 413 | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 414 | #[non_exhaustive] 415 | pub enum Input { 416 | /// The range of the requested file or bytes is invalid. 
417 | InvalidRange, 418 | } 419 | 420 | impl Input { 421 | /// Returns a display message fragment describing the `Input` type. 422 | /// 423 | /// The result of `as_str` is used to `Display` the `Input`. 424 | /// 425 | /// # Examples 426 | /// 427 | /// ``` 428 | /// use vmap::{Input, Operation}; 429 | /// 430 | /// fn print_input(op: Operation, input: Input) { 431 | /// println!("failed to {}, {}", op.as_str().unwrap(), input.as_str()); 432 | /// } 433 | /// 434 | /// // Will print "failed to map file, invalid range" 435 | /// print_input(Operation::MapFile, Input::InvalidRange); 436 | /// ``` 437 | pub fn as_str(&self) -> &'static str { 438 | match *self { 439 | Input::InvalidRange => "invalid range", 440 | } 441 | } 442 | } 443 | 444 | impl fmt::Display for Input { 445 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 446 | fmt.write_str(self.as_str()) 447 | } 448 | } 449 | -------------------------------------------------------------------------------- /src/io/buffer.rs: -------------------------------------------------------------------------------- 1 | use super::{Ring, SeqRead, SeqWrite}; 2 | use crate::Result; 3 | 4 | use std::{ 5 | fmt, 6 | io::{self, BufRead, ErrorKind, Read, Write}, 7 | ops::{Deref, DerefMut}, 8 | }; 9 | 10 | /// The `BufReader` adds buffering to any reader using a specialized buffer. 11 | /// 12 | /// This is very similar `std::io::BufReader`, but it uses a [`Ring`] for the 13 | /// internal buffer, and it provides a configurable low water mark. 
14 | /// 15 | /// # Examples 16 | /// 17 | /// ``` 18 | /// use vmap::io::BufReader; 19 | /// # use std::io::prelude::*; 20 | /// # use std::net::{TcpListener, TcpStream}; 21 | /// 22 | /// # fn main() -> std::io::Result<()> { 23 | /// # let srv = TcpListener::bind("127.0.0.1:0")?; 24 | /// let sock = TcpStream::connect(srv.local_addr().unwrap())?; 25 | /// # let (mut cli, _addr) = srv.accept()?; 26 | /// let mut buf = BufReader::new(sock, 4000).expect("failed to create buffer"); 27 | /// # cli.write_all(b"hello\nworld\n")?; 28 | /// let mut line = String::new(); 29 | /// let len = buf.read_line(&mut line)?; 30 | /// assert_eq!(line, "hello\n"); 31 | /// # Ok(()) 32 | /// # } 33 | /// ``` 34 | pub struct BufReader { 35 | buf: Ring, 36 | inner: R, 37 | lowat: usize, 38 | } 39 | 40 | impl BufReader { 41 | /// Creates a new `BufReader`. 42 | pub fn new(inner: R, capacity: usize) -> Result { 43 | Ok(Self { 44 | buf: Ring::new(capacity)?, 45 | inner, 46 | lowat: 0, 47 | }) 48 | } 49 | 50 | /// Get the low-water level. 51 | #[inline] 52 | pub fn lowat(&self) -> usize { 53 | self.lowat 54 | } 55 | 56 | /// Set the low-water level. 57 | /// 58 | /// When the internal buffer content length drops to this level or below, a 59 | /// subsequent call to `fill_buffer()` will request more from the inner reader. 60 | /// 61 | /// If it desired for `fill_buffer()` to always request a `read()`, you 62 | /// may use: 63 | /// 64 | /// ``` 65 | /// # use vmap::io::BufReader; 66 | /// # fn main() -> std::io::Result<()> { 67 | /// let mut buf = BufReader::new(std::io::stdin(), 4096)?; 68 | /// buf.set_lowat(usize::MAX); 69 | /// # Ok(()) 70 | /// # } 71 | /// ``` 72 | #[inline] 73 | pub fn set_lowat(&mut self, val: usize) { 74 | self.lowat = val 75 | } 76 | 77 | /// Gets a reference to the underlying reader. 78 | #[inline] 79 | pub fn get_ref(&self) -> &R { 80 | &self.inner 81 | } 82 | 83 | /// Gets a mutable reference to the underlying reader. 
84 | #[inline] 85 | pub fn get_mut(&mut self) -> &mut R { 86 | &mut self.inner 87 | } 88 | 89 | /// Returns a reference to the internally buffered data. 90 | #[inline] 91 | pub fn buffer(&self) -> &[u8] { 92 | self.buf.as_read_slice(std::usize::MAX) 93 | } 94 | 95 | /// Unwraps this `BufReader`, returning the underlying reader. 96 | pub fn into_inner(self) -> R { 97 | self.inner 98 | } 99 | } 100 | 101 | impl Deref for BufReader { 102 | type Target = R; 103 | 104 | #[inline] 105 | fn deref(&self) -> &Self::Target { 106 | self.get_ref() 107 | } 108 | } 109 | 110 | impl DerefMut for BufReader { 111 | #[inline] 112 | fn deref_mut(&mut self) -> &mut Self::Target { 113 | self.get_mut() 114 | } 115 | } 116 | 117 | impl AsRef for BufReader 118 | where 119 | R: Read, 120 | as Deref>::Target: AsRef, 121 | { 122 | fn as_ref(&self) -> &R { 123 | self.deref() 124 | } 125 | } 126 | 127 | impl AsMut for BufReader 128 | where 129 | R: Read, 130 | as Deref>::Target: AsMut, 131 | { 132 | fn as_mut(&mut self) -> &mut R { 133 | self.deref_mut() 134 | } 135 | } 136 | 137 | impl Read for BufReader { 138 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 139 | // If the reader has been dequeued and the destination buffer is larger 140 | // than the internal buffer, then read directly into the destination. 141 | if self.buf.read_len() == 0 && buf.len() >= self.buf.write_capacity() { 142 | return self.inner.read(buf); 143 | } 144 | let nread = { 145 | let mut rem = self.fill_buf()?; 146 | rem.read(buf)? 
147 | }; 148 | self.consume(nread); 149 | Ok(nread) 150 | } 151 | } 152 | 153 | impl Write for BufReader { 154 | #[inline] 155 | fn write(&mut self, buf: &[u8]) -> io::Result { 156 | self.inner.write(buf) 157 | } 158 | 159 | #[inline] 160 | fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result { 161 | self.inner.write_vectored(bufs) 162 | } 163 | 164 | #[inline] 165 | fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { 166 | self.inner.write_all(buf) 167 | } 168 | 169 | #[inline] 170 | fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> { 171 | self.inner.write_fmt(fmt) 172 | } 173 | 174 | #[inline] 175 | fn flush(&mut self) -> io::Result<()> { 176 | self.inner.flush() 177 | } 178 | } 179 | 180 | impl BufRead for BufReader { 181 | fn fill_buf(&mut self) -> io::Result<&[u8]> { 182 | if self.buf.read_len() <= self.lowat { 183 | let n = self.inner.read(self.buf.as_write_slice(std::usize::MAX))?; 184 | self.buf.feed(n); 185 | } 186 | Ok(self.buffer()) 187 | } 188 | 189 | fn consume(&mut self, amt: usize) { 190 | self.buf.consume(amt); 191 | } 192 | } 193 | 194 | /// The `BufWriter` adds buffering to any writer using a specialized buffer. 195 | /// 196 | /// This is very similar `std::io::BufWriter`, but it uses a [`Ring`] for the 197 | /// internal the buffer. 
198 | /// 199 | /// # Examples 200 | /// 201 | /// ``` 202 | /// use vmap::io::{BufReader, BufWriter}; 203 | /// # use std::io::prelude::*; 204 | /// # use std::net::{TcpListener, TcpStream}; 205 | /// 206 | /// # fn main() -> std::io::Result<()> { 207 | /// # let srv = TcpListener::bind("127.0.0.1:0")?; 208 | /// let recv = TcpStream::connect(srv.local_addr().unwrap())?; 209 | /// let send = /* accepted socked */ 210 | /// # srv.accept()?.0; 211 | /// 212 | /// let mut wr = BufWriter::new(send, 4000).unwrap(); 213 | /// wr.write_all(b"hello\nworld\n")?; 214 | /// wr.flush()?; 215 | /// 216 | /// let mut rd = BufReader::new(recv, 4000).unwrap(); 217 | /// let mut line = String::new(); 218 | /// let len = rd.read_line(&mut line)?; 219 | /// assert_eq!(line, "hello\n"); 220 | /// # Ok(()) 221 | /// # } 222 | /// ``` 223 | pub struct BufWriter { 224 | buf: Ring, 225 | inner: W, 226 | panicked: bool, 227 | } 228 | 229 | impl BufWriter { 230 | /// Creates a new `BufWriter`. 231 | pub fn new(inner: W, capacity: usize) -> Result { 232 | Ok(Self::from_parts(inner, Ring::new(capacity)?)) 233 | } 234 | 235 | /// Creates a new `BufWriter` using an allocated, and possibly populated, 236 | /// [`Ring`] instance. Consider calling [`Ring::clear()`] prior if the 237 | /// contents of the ring should be discarded. 238 | pub fn from_parts(inner: W, buf: Ring) -> Self { 239 | Self { 240 | buf, 241 | inner, 242 | panicked: false, 243 | } 244 | } 245 | 246 | /// Gets a reference to the underlying writer. 247 | #[inline] 248 | pub fn get_ref(&self) -> &W { 249 | &self.inner 250 | } 251 | 252 | /// Gets a mutable reference to the underlying writer. 253 | #[inline] 254 | pub fn get_mut(&mut self) -> &mut W { 255 | &mut self.inner 256 | } 257 | 258 | /// Unwraps this `BufWriter`, returning the underlying writer. 259 | /// 260 | /// On `Err`, the result is a tuple combining the error that occurred while 261 | /// flusing the buffer, and the buffer object. 
262 | /// 263 | /// # Examples 264 | /// 265 | /// ``` 266 | /// use std::io::{self, Write, ErrorKind}; 267 | /// use vmap::io::BufWriter; 268 | /// 269 | /// struct ErringWriter(usize); 270 | /// impl Write for ErringWriter { 271 | /// fn write(&mut self, buf: &[u8]) -> io::Result { 272 | /// // eventually fails with BrokenPipe 273 | /// # match self.0.min(buf.len()) { 274 | /// # 0 => Err(ErrorKind::BrokenPipe.into()), 275 | /// # n => { self.0 -= n; Ok(n) }, 276 | /// # } 277 | /// } 278 | /// fn flush(&mut self) -> io::Result<()> { Ok(()) } 279 | /// } 280 | /// 281 | /// # fn main() -> vmap::Result<()> { 282 | /// let mut stream = BufWriter::new(ErringWriter(6), 4096)?; 283 | /// stream.write_all(b"hello\nworld\n")?; 284 | /// 285 | /// // flush the buffer and get the original stream back 286 | /// let stream = match stream.into_inner() { 287 | /// Ok(s) => s, 288 | /// Err(e) => { 289 | /// assert_eq!(e.error().kind(), ErrorKind::BrokenPipe); 290 | /// 291 | /// // You can forcefully obtain the stream, however it is in an 292 | /// // failing state. 293 | /// let (recovered_writer, ring) = e.into_inner().into_parts(); 294 | /// assert_eq!(ring.unwrap().as_ref(), b"world\n"); 295 | /// recovered_writer 296 | /// } 297 | /// }; 298 | /// # Ok(()) 299 | /// # } 300 | /// ``` 301 | pub fn into_inner(mut self) -> std::result::Result> { 302 | match self.flush_buf() { 303 | Err(e) => Err(IntoInnerError(self, e)), 304 | Ok(()) => Ok(self.into_parts().0), 305 | } 306 | } 307 | 308 | /// Disassembles this [`BufWriter`] into the underlying writer and the [`Ring`] 309 | /// used for buffering, containing any buffered but unwritted data. 310 | /// 311 | /// If the underlying writer panicked during the previous write, the [`Ring`] 312 | /// will be wrapped in a [`WriterPanicked`] error. In this case, the content 313 | /// still buffered within the [`Ring`] may or may not have been written. 
314 | /// 315 | /// # Example 316 | /// 317 | /// ``` 318 | /// use std::io::{self, Write}; 319 | /// use std::panic::{catch_unwind, AssertUnwindSafe}; 320 | /// use vmap::io::BufWriter; 321 | /// 322 | /// struct PanickingWriter; 323 | /// impl Write for PanickingWriter { 324 | /// fn write(&mut self, buf: &[u8]) -> io::Result { panic!() } 325 | /// fn flush(&mut self) -> io::Result<()> { panic!() } 326 | /// } 327 | /// 328 | /// # fn main() -> vmap::Result<()> { 329 | /// let mut stream = BufWriter::new(PanickingWriter, 4096)?; 330 | /// stream.write_all(b"testing")?; 331 | /// let result = catch_unwind(AssertUnwindSafe(|| { 332 | /// stream.flush().unwrap() 333 | /// })); 334 | /// assert!(result.is_err()); 335 | /// let (recovered_writer, ring) = stream.into_parts(); 336 | /// assert!(matches!(recovered_writer, PanickingWriter)); 337 | /// assert_eq!(ring.unwrap_err().into_inner().as_ref(), b"testing"); 338 | /// # Ok(()) 339 | /// # } 340 | /// ``` 341 | pub fn into_parts(self) -> (W, std::result::Result) { 342 | // SAFETY: forget(self) prevents double dropping inner and buf. 
343 | let inner = unsafe { std::ptr::read(&self.inner) }; 344 | let buf = unsafe { std::ptr::read(&self.buf) }; 345 | let buf = if self.panicked { 346 | Err(WriterPanicked(buf)) 347 | } else { 348 | Ok(buf) 349 | }; 350 | 351 | std::mem::forget(self); 352 | 353 | (inner, buf) 354 | } 355 | 356 | fn flush_buf(&mut self) -> io::Result<()> { 357 | loop { 358 | if self.buf.is_empty() { 359 | break Ok(()); 360 | } 361 | 362 | self.panicked = true; 363 | let r = self.inner.write(self.buf.as_read_slice(std::usize::MAX)); 364 | self.panicked = false; 365 | 366 | match r { 367 | Ok(0) => { 368 | break Err(ErrorKind::WriteZero.into()); 369 | } 370 | Ok(n) => self.buf.consume(n), 371 | Err(ref e) if e.kind() == ErrorKind::Interrupted => {} 372 | Err(e) => break Err(e), 373 | } 374 | } 375 | } 376 | } 377 | 378 | impl Deref for BufWriter { 379 | type Target = W; 380 | 381 | #[inline] 382 | fn deref(&self) -> &Self::Target { 383 | self.get_ref() 384 | } 385 | } 386 | 387 | impl DerefMut for BufWriter { 388 | #[inline] 389 | fn deref_mut(&mut self) -> &mut Self::Target { 390 | self.get_mut() 391 | } 392 | } 393 | 394 | impl AsRef for BufWriter 395 | where 396 | W: Write, 397 | as Deref>::Target: AsRef, 398 | { 399 | fn as_ref(&self) -> &W { 400 | self.deref() 401 | } 402 | } 403 | 404 | impl AsMut for BufWriter 405 | where 406 | W: Write, 407 | as Deref>::Target: AsMut, 408 | { 409 | fn as_mut(&mut self) -> &mut W { 410 | self.deref_mut() 411 | } 412 | } 413 | 414 | impl Drop for BufWriter { 415 | fn drop(&mut self) { 416 | if !self.panicked { 417 | let _r = self.flush_buf(); 418 | } 419 | } 420 | } 421 | 422 | impl Write for BufWriter { 423 | fn write(&mut self, buf: &[u8]) -> io::Result { 424 | if buf.len() > self.buf.write_len() { 425 | self.flush_buf()?; 426 | } 427 | if buf.len() >= self.buf.write_len() { 428 | self.panicked = true; 429 | let r = self.inner.write(buf); 430 | self.panicked = false; 431 | r 432 | } else { 433 | self.buf.write(buf) 434 | } 435 | } 436 | 437 | 
fn flush(&mut self) -> io::Result<()> { 438 | self.flush_buf().and_then(|()| self.get_mut().flush()) 439 | } 440 | } 441 | 442 | impl Read for BufWriter { 443 | #[inline] 444 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 445 | self.inner.read(buf) 446 | } 447 | 448 | #[inline] 449 | fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { 450 | self.inner.read_vectored(bufs) 451 | } 452 | 453 | #[inline] 454 | fn read_to_end(&mut self, buf: &mut Vec) -> io::Result { 455 | self.inner.read_to_end(buf) 456 | } 457 | 458 | #[inline] 459 | fn read_to_string(&mut self, buf: &mut String) -> io::Result { 460 | self.inner.read_to_string(buf) 461 | } 462 | 463 | #[inline] 464 | fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { 465 | self.inner.read_exact(buf) 466 | } 467 | } 468 | 469 | /// An error returned by [`BufWriter::into_inner`] which combines an error that 470 | /// happened while writing out the buffer, and the buffered writer object 471 | /// which may be used to recover from the condition. 472 | pub struct IntoInnerError(BufWriter, io::Error); 473 | 474 | impl IntoInnerError { 475 | /// Returns the error which caused the call to [`BufWriter::into_inner()`] 476 | /// to fail. 477 | pub fn error(&self) -> &io::Error { 478 | &self.1 479 | } 480 | 481 | /// Consumes the [`IntoInnerError`] and returns the buffered writer which 482 | /// received the error. 483 | pub fn into_inner(self) -> BufWriter { 484 | self.0 485 | } 486 | 487 | /// Consumes the [`IntoInnerError`] and returns the error which caused the call to 488 | /// [`BufWriter::into_inner()`] to fail. Unlike `error`, this can be used to 489 | /// obtain ownership of the underlying error. 490 | pub fn into_error(self) -> io::Error { 491 | self.1 492 | } 493 | 494 | /// Consumes the [`IntoInnerError`] and returns the error which caused the call to 495 | /// [`BufWriter::into_inner()`] to fail, and the underlying writer. 
496 | pub fn into_parts(self) -> (io::Error, BufWriter) { 497 | (self.1, self.0) 498 | } 499 | } 500 | 501 | /// Error returned for the buffered data from `BufWriter::into_parts` when the underlying 502 | /// writer has previously panicked. The contents of the buffer may be partially written. 503 | pub struct WriterPanicked(Ring); 504 | 505 | impl WriterPanicked { 506 | /// Returns the [`Ring`] with possibly unwritten data. 507 | pub fn into_inner(self) -> Ring { 508 | self.0 509 | } 510 | 511 | const DESCRIPTION: &'static str = "writer panicked, unwritten data may remain"; 512 | } 513 | 514 | impl fmt::Display for WriterPanicked { 515 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 516 | write!(f, "{}", Self::DESCRIPTION) 517 | } 518 | } 519 | 520 | impl fmt::Debug for WriterPanicked { 521 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 522 | f.debug_struct("WriterPanicked") 523 | .field( 524 | "buffer", 525 | &format_args!("{}/{}", self.0.write_len(), self.0.write_capacity()), 526 | ) 527 | .finish() 528 | } 529 | } 530 | -------------------------------------------------------------------------------- /src/io/mod.rs: -------------------------------------------------------------------------------- 1 | //! Read/Write types for buffering. 2 | //! 3 | //! Both the [`Ring`] and [`InfiniteRing`] are fixed size anonymous allocations 4 | //! utilizing circular address mappinng. The circular mapping ensures that 5 | //! the entire readable or writable slice may always be addressed as a single, 6 | //! contiguous allocation. However, these two types differ in one key way: 7 | //! the [`Ring`] may only written to as readable space is consumed, whereas 8 | //! the [`InfiniteRing`] is always writable and will overwrite unconsumed 9 | //! space as needed. 
10 | 11 | mod ring; 12 | pub use self::ring::*; 13 | 14 | mod buffer; 15 | pub use self::buffer::*; 16 | 17 | use std::cmp; 18 | use std::io::{self, BufRead}; 19 | use std::slice; 20 | 21 | /// Common input trait for all buffers. 22 | pub trait SeqRead: BufRead { 23 | /// Get the mapped readable pointer without any offset. 24 | fn as_read_ptr(&self) -> *const u8; 25 | 26 | /// Get the offset from the read pointer for the current read position. 27 | fn read_offset(&self) -> usize; 28 | 29 | /// Get the total number of readable bytes after the read offset. 30 | fn read_len(&self) -> usize; 31 | 32 | /// Test if all read bytes have been consumed. 33 | #[inline] 34 | fn is_empty(&self) -> bool { 35 | self.read_len() == 0 36 | } 37 | 38 | /// Get an immutable slice covering the read region of the buffer. 39 | #[inline] 40 | fn as_read_slice(&self, max: usize) -> &[u8] { 41 | unsafe { 42 | slice::from_raw_parts( 43 | self.as_read_ptr().add(self.read_offset()), 44 | cmp::min(self.read_len(), max), 45 | ) 46 | } 47 | } 48 | 49 | /// Perform a read and consume from the read slice. 50 | fn read_from(&mut self, into: &mut [u8]) -> io::Result { 51 | let len = { 52 | let src = self.as_read_slice(into.len()); 53 | let len = src.len(); 54 | into[..len].copy_from_slice(src); 55 | len 56 | }; 57 | self.consume(len); 58 | Ok(len) 59 | } 60 | } 61 | 62 | /// Common output trait for all buffers. 63 | pub trait SeqWrite { 64 | /// Get the mapped writable pointer without any offset. 65 | fn as_write_ptr(&mut self) -> *mut u8; 66 | 67 | /// Get the offset from the write pointer for the current read position. 68 | fn write_offset(&self) -> usize; 69 | 70 | /// Get the total number of bytes that may be written after the current write offset. 71 | fn write_len(&self) -> usize; 72 | 73 | /// Gets the number of bytes that the buffer has currently allocated space for. 74 | fn write_capacity(&self) -> usize; 75 | 76 | /// Bump the write offset after writing into the writable slice. 
77 | /// 78 | /// This is a low-level call intended to be used for common write behavior. 79 | /// While this is safe to call improperly (without having written), it would 80 | /// result in stale information in the buffer. 81 | fn feed(&mut self, len: usize); 82 | 83 | /// Test if there is no room for furthur writes. 84 | #[inline] 85 | fn is_full(&self) -> bool { 86 | self.write_len() == 0 87 | } 88 | 89 | /// Get a mutable slice covering the write region of the buffer. 90 | #[inline] 91 | fn as_write_slice(&mut self, max: usize) -> &mut [u8] { 92 | unsafe { 93 | slice::from_raw_parts_mut( 94 | self.as_write_ptr().add(self.write_offset()), 95 | cmp::min(self.write_len(), max), 96 | ) 97 | } 98 | } 99 | 100 | /// Perform a write and feed into the write slice. 101 | fn write_into(&mut self, from: &[u8]) -> io::Result { 102 | let len = { 103 | let dst = self.as_write_slice(from.len()); 104 | let len = dst.len(); 105 | dst.copy_from_slice(&from[..len]); 106 | len 107 | }; 108 | self.feed(len); 109 | Ok(len) 110 | } 111 | } 112 | 113 | #[cfg(test)] 114 | mod tests { 115 | use crate::os; 116 | 117 | use super::super::Size; 118 | use super::{InfiniteRing, Ring, SeqRead, SeqWrite}; 119 | use std::io::{BufRead, Write}; 120 | 121 | #[test] 122 | fn size() { 123 | let sz = Size::alloc(); 124 | let mut buf = Ring::new(1000).expect("failed to create buffer"); 125 | assert_eq!(buf.write_capacity(), sz.size(1)); 126 | assert_eq!(buf.read_len(), 0); 127 | assert_eq!(buf.write_len(), sz.size(1)); 128 | 129 | let bytes = String::from("test").into_bytes(); 130 | buf.write_all(&bytes).expect("failed to write all bytes"); 131 | assert_eq!(buf.write_capacity(), sz.size(1)); 132 | assert_eq!(buf.read_len(), 4); 133 | assert_eq!(buf.write_len(), sz.size(1) - 4); 134 | } 135 | 136 | #[test] 137 | fn wrap() { 138 | let mut buf = Ring::new(1000).expect("failed to create ring buffer"); 139 | // pick some bytes that won't fit evenly in the capacity 140 | let bytes = 
b"anthropomorphologically"; 141 | let n = buf.write_capacity() / bytes.len(); 142 | for _ in 0..n { 143 | buf.write_all(bytes).expect("failed to write"); 144 | } 145 | assert_eq!(buf.read_len(), n * bytes.len()); 146 | buf.consume((n - 1) * bytes.len()); 147 | assert_eq!(buf.read_len(), bytes.len()); 148 | buf.write_all(bytes).expect("failed to write"); 149 | assert_eq!(buf.read_len(), 2 * bytes.len()); 150 | 151 | let cmp = b"anthropomorphologicallyanthropomorphologically"; 152 | assert_eq!(buf.as_read_slice(cmp.len()), &cmp[..]); 153 | } 154 | 155 | #[test] 156 | fn ring_sizes_vs_page_size() { 157 | let mut did_fail = false; 158 | let page_size = os::system_info().0 as usize; 159 | for pages in (2..10).rev() { 160 | for offset in [-1i32, 0, 1].iter() { 161 | // was failing with v0.5.0 at 2*page_size + 1! 162 | let ring_size: usize = ((pages * page_size) as i32 + offset) as usize; 163 | // let ring_size:usize = offset+(2*os::system_info().0) as usize; 164 | let mut buf = Ring::new(ring_size).expect("failed to create ring buffer"); 165 | // we write two byte pattern starting with 0x1001 166 | let start_val = 0x1001usize; 167 | let word_size = u16::BITS as usize / 8; 168 | 169 | let n = buf.write_capacity() / word_size; // bytes per u16 170 | for val in start_val..start_val + n { 171 | buf.write_all(&((val & 0xffff) as u16).to_ne_bytes()) 172 | .expect("failed to write"); 173 | } 174 | assert_eq!(buf.read_len(), n * word_size); 175 | let mut mismatched = false; 176 | for expected in start_val..start_val + n { 177 | let read = buf.as_read_slice(word_size); 178 | let read_u16 = u16::from_ne_bytes(read[0..2].try_into().unwrap()); 179 | let expe = &((expected & 0xffff) as u16).to_ne_bytes(); 180 | let read_page = (expected - start_val) * word_size / page_size; 181 | if read != expe { 182 | if !mismatched { 183 | println!( 184 | "mismatch (page #{}): expected 0x{:x} got 0x{:x}, ring_size={}, alloc_size={}", 185 | read_page, 186 | expected, read_u16, 187 | ring_size, 188 
| Size::alloc().round(ring_size) 189 | ); 190 | mismatched = true; 191 | did_fail = true; 192 | } 193 | } else { 194 | if mismatched { 195 | println!( 196 | "mismatch end at (page #{}) val 0x{:x}, ring_size={}, alloc_size={}", 197 | read_page, 198 | expected, 199 | ring_size, 200 | Size::alloc().round(ring_size) 201 | ); 202 | mismatched = false; 203 | } 204 | } 205 | buf.consume(2); 206 | } 207 | } 208 | } 209 | assert!(!did_fail); 210 | } 211 | 212 | #[test] 213 | fn overwrite() { 214 | let mut ring = InfiniteRing::new(1000).expect("failed to create ring"); 215 | // pick some bytes that won't fit evenly in the capacity 216 | let bytes = b"anthropomorphologically"; 217 | let n = 2 * ring.write_capacity() / bytes.len() + 1; 218 | for _ in 0..n { 219 | ring.write_all(bytes).expect("failed to write"); 220 | } 221 | assert_eq!(ring.read_len(), ring.write_capacity()); 222 | 223 | let cmp = b"anthropomorphologicallyanthropomorphologically"; 224 | let end = bytes.len() - (ring.write_capacity() % bytes.len()); 225 | assert_eq!(ring.as_read_slice(10), &cmp[end..(end + 10)]); 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /src/io/ring.rs: -------------------------------------------------------------------------------- 1 | use super::{SeqRead, SeqWrite}; 2 | use crate::os::{map_ring, unmap_ring}; 3 | use crate::{Result, Size}; 4 | 5 | use std::cmp; 6 | use std::io::{self, BufRead, Read, Write}; 7 | use std::ops::Deref; 8 | 9 | /// Fixed-size reliable read/write buffer with sequential address mapping. 10 | /// 11 | /// This uses a circular address mapping scheme. That is, for any buffer of 12 | /// size `N`, the pointer address range of `0..N` maps to the same physical 13 | /// memory as the range `N..2*N`. This guarantees that the entire read or 14 | /// write range may be addressed as a single sequence of bytes. 15 | /// 16 | /// Unlike the [`InfiniteRing`], this type otherise acts as a "normal" buffer. 
17 | /// Writes fill up the buffer, and when full, no furthur writes may be 18 | /// performed until a read occurs. The writable length sequence is the capacity 19 | /// of the buffer, less any pending readable bytes. 20 | /// 21 | /// # Examples 22 | /// 23 | /// ``` 24 | /// use vmap::io::{Ring, SeqWrite}; 25 | /// use std::io::{BufRead, Read, Write}; 26 | /// 27 | /// # fn main() -> std::io::Result<()> { 28 | /// let mut buf = Ring::new(4000).unwrap(); 29 | /// let mut i = 1; 30 | /// 31 | /// // Fill up the buffer with lines. 32 | /// while buf.write_len() > 20 { 33 | /// write!(&mut buf, "this is test line {}\n", i)?; 34 | /// i += 1; 35 | /// } 36 | /// 37 | /// // No more space is available. 38 | /// assert!(write!(&mut buf, "this is test line {}\n", i).is_err()); 39 | /// 40 | /// let mut line = String::new(); 41 | /// 42 | /// // Read the first line written. 43 | /// let len = buf.read_line(&mut line)?; 44 | /// assert_eq!(line, "this is test line 1\n"); 45 | /// 46 | /// line.clear(); 47 | /// 48 | /// // Read the second line written. 49 | /// let len = buf.read_line(&mut line)?; 50 | /// assert_eq!(line, "this is test line 2\n"); 51 | /// 52 | /// // Now there is enough space to write more. 53 | /// write!(&mut buf, "this is test line {}\n", i)?; 54 | /// # Ok(()) 55 | /// # } 56 | /// ``` 57 | #[derive(Debug)] 58 | pub struct Ring { 59 | ptr: *mut u8, 60 | len: usize, 61 | rpos: u64, 62 | wpos: u64, 63 | } 64 | 65 | impl Ring { 66 | /// Constructs a new buffer instance. 67 | /// 68 | /// The hint is a minimum size for the buffer. This size will be rounded up 69 | /// to the nearest page size for the actual capacity. The allocation will 70 | /// occupy double the space in the virtual memory table, but the physical 71 | /// memory usage will remain at the desired capacity. 
72 | pub fn new(hint: usize) -> Result { 73 | let len = Size::alloc().round(hint); 74 | let ptr = map_ring(len)?; 75 | Ok(Self { 76 | ptr, 77 | len, 78 | rpos: 0, 79 | wpos: 0, 80 | }) 81 | } 82 | 83 | /// Clears the buffer, resetting the filled region to empty. 84 | /// 85 | /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. 86 | pub fn clear(&mut self) { 87 | self.rpos = 0; 88 | self.wpos = 0; 89 | } 90 | } 91 | 92 | impl Drop for Ring { 93 | fn drop(&mut self) { 94 | unsafe { unmap_ring(self.ptr, self.write_capacity()) }.unwrap_or_default(); 95 | } 96 | } 97 | 98 | impl SeqRead for Ring { 99 | fn as_read_ptr(&self) -> *const u8 { 100 | self.ptr 101 | } 102 | 103 | fn read_offset(&self) -> usize { 104 | self.rpos as usize % self.len 105 | } 106 | 107 | fn read_len(&self) -> usize { 108 | (self.wpos - self.rpos) as usize 109 | } 110 | } 111 | 112 | impl SeqWrite for Ring { 113 | fn as_write_ptr(&mut self) -> *mut u8 { 114 | self.ptr 115 | } 116 | 117 | fn write_offset(&self) -> usize { 118 | self.wpos as usize % self.len 119 | } 120 | 121 | fn write_len(&self) -> usize { 122 | self.write_capacity() - self.read_len() 123 | } 124 | 125 | fn write_capacity(&self) -> usize { 126 | self.len 127 | } 128 | 129 | fn feed(&mut self, len: usize) { 130 | self.wpos += cmp::min(len, self.write_len()) as u64; 131 | } 132 | } 133 | 134 | impl BufRead for Ring { 135 | fn fill_buf(&mut self) -> io::Result<&[u8]> { 136 | Ok(self.as_read_slice(std::usize::MAX)) 137 | } 138 | 139 | fn consume(&mut self, len: usize) { 140 | self.rpos += cmp::min(len, self.read_len()) as u64; 141 | } 142 | } 143 | 144 | impl Read for Ring { 145 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 146 | self.read_from(buf) 147 | } 148 | } 149 | 150 | impl Write for Ring { 151 | fn write(&mut self, buf: &[u8]) -> io::Result { 152 | self.write_into(buf) 153 | } 154 | 155 | fn flush(&mut self) -> io::Result<()> { 156 | Ok(()) 157 | } 158 | } 159 | 160 | 
impl Deref for Ring { 161 | type Target = [u8]; 162 | 163 | #[inline] 164 | fn deref(&self) -> &Self::Target { 165 | self.as_read_slice(usize::MAX) 166 | } 167 | } 168 | 169 | impl AsRef<[u8]> for Ring 170 | where 171 | ::Target: AsRef<[u8]>, 172 | { 173 | fn as_ref(&self) -> &[u8] { 174 | self.deref() 175 | } 176 | } 177 | 178 | /// Fixed-size lossy read/write buffer with sequential address mapping. 179 | /// 180 | /// This uses a circular address mapping scheme. That is, for any buffer of 181 | /// size `N`, the pointer address range of `0..N` maps to the same physical 182 | /// memory as the range `N..2*N`. This guarantees that the entire read or 183 | /// write range may be addressed as a single sequence of bytes. 184 | /// 185 | /// Unlike the [`Ring`], writes to this type may evict bytes from the read side 186 | /// of the queue. The writeable size is always equal to the overall capacity of 187 | /// the buffer. 188 | /// 189 | /// # Examples 190 | /// 191 | /// ``` 192 | /// use vmap::io::{InfiniteRing, SeqRead, SeqWrite}; 193 | /// use std::io::{BufRead, Read, Write}; 194 | /// 195 | /// # fn main() -> std::io::Result<()> { 196 | /// let mut buf = InfiniteRing::new(4000).unwrap(); 197 | /// let mut i = 1; 198 | /// let mut total = 0; 199 | /// while total < buf.write_capacity() { 200 | /// let tmp = format!("this is test line {}\n", i); 201 | /// write!(buf, "{}", tmp); 202 | /// total += tmp.len(); 203 | /// i += 1; 204 | /// } 205 | /// 206 | /// // skip over the overwritten tail 207 | /// buf.consume(20 - buf.read_offset()); 208 | /// 209 | /// // read the next line 210 | /// let mut line = String::new(); 211 | /// let len = buf.read_line(&mut line)?; 212 | /// 213 | /// assert_eq!(len, 20); 214 | /// assert_eq!(&line[line.len()-20..], "this is test line 2\n"); 215 | /// # Ok(()) 216 | /// # } 217 | /// ``` 218 | #[derive(Debug)] 219 | pub struct InfiniteRing { 220 | ptr: *mut u8, 221 | len: usize, 222 | rlen: u64, 223 | wpos: u64, 224 | } 225 | 226 | 
impl InfiniteRing { 227 | /// Constructs a new ring buffer instance. 228 | /// 229 | /// The hint is a minimum size for the buffer. This size will be rounded up 230 | /// to the nearest page size for the actual capacity. The allocation will 231 | /// occupy double the space in the virtual memory table, but the physical 232 | /// memory usage will remain at the desired capacity. 233 | pub fn new(hint: usize) -> Result { 234 | let len = Size::alloc().round(hint); 235 | let ptr = map_ring(len)?; 236 | Ok(Self { 237 | ptr, 238 | len, 239 | rlen: 0, 240 | wpos: 0, 241 | }) 242 | } 243 | } 244 | 245 | impl Drop for InfiniteRing { 246 | fn drop(&mut self) { 247 | unsafe { unmap_ring(self.ptr, self.write_capacity()) }.unwrap_or_default() 248 | } 249 | } 250 | 251 | impl SeqRead for InfiniteRing { 252 | fn as_read_ptr(&self) -> *const u8 { 253 | self.ptr 254 | } 255 | fn read_offset(&self) -> usize { 256 | (self.wpos - self.rlen) as usize % self.len 257 | } 258 | fn read_len(&self) -> usize { 259 | self.rlen as usize 260 | } 261 | } 262 | 263 | impl SeqWrite for InfiniteRing { 264 | fn as_write_ptr(&mut self) -> *mut u8 { 265 | self.ptr 266 | } 267 | fn write_offset(&self) -> usize { 268 | self.wpos as usize % self.len 269 | } 270 | fn write_len(&self) -> usize { 271 | self.write_capacity() 272 | } 273 | fn write_capacity(&self) -> usize { 274 | self.len 275 | } 276 | fn feed(&mut self, len: usize) { 277 | self.wpos += cmp::min(len, self.write_len()) as u64; 278 | self.rlen = cmp::min(self.rlen + len as u64, self.len as u64); 279 | } 280 | } 281 | 282 | impl BufRead for InfiniteRing { 283 | fn fill_buf(&mut self) -> io::Result<&[u8]> { 284 | Ok(self.as_read_slice(std::usize::MAX)) 285 | } 286 | 287 | fn consume(&mut self, len: usize) { 288 | self.rlen -= cmp::min(len, self.read_len()) as u64; 289 | } 290 | } 291 | 292 | impl Read for InfiniteRing { 293 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 294 | self.read_from(buf) 295 | } 296 | } 297 | 298 | impl Write for 
InfiniteRing { 299 | fn write(&mut self, buf: &[u8]) -> io::Result { 300 | self.write_into(buf) 301 | } 302 | 303 | fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { 304 | let len = { 305 | let dst = self.as_write_slice(buf.len()); 306 | let len = dst.len(); 307 | let tail = buf.len() - len; 308 | dst.copy_from_slice(&buf[tail..]); 309 | len 310 | }; 311 | self.feed(len); 312 | Ok(()) 313 | } 314 | 315 | fn flush(&mut self) -> io::Result<()> { 316 | Ok(()) 317 | } 318 | } 319 | 320 | impl Deref for InfiniteRing { 321 | type Target = [u8]; 322 | 323 | #[inline] 324 | fn deref(&self) -> &Self::Target { 325 | self.as_read_slice(usize::MAX) 326 | } 327 | } 328 | 329 | impl AsRef<[u8]> for InfiniteRing 330 | where 331 | ::Target: AsRef<[u8]>, 332 | { 333 | fn as_ref(&self) -> &[u8] { 334 | self.deref() 335 | } 336 | } 337 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A cross-platform library for fast and safe memory-mapped IO and boundary-free 2 | //! ring buffer. 3 | //! 4 | //! This library defines a convenient API for reading and writing to files 5 | //! using the hosts virtual memory system, as well as allocating memory and 6 | //! creating circular memory regions. The design of the API strives to 7 | //! both minimize the frequency of mapping system calls while still retaining 8 | //! safe access. Critically, it never attempts the own the `File` object used 9 | //! for mapping. That is, it never clones it or in any way retains it. While 10 | //! this has some implications for the API (i.e. [`.flush()`]), it cannot cause 11 | //! bugs outside of this library through `File`'s leaky abstraction when cloned 12 | //! and then closed. 13 | //! 14 | //! The [`Map`] and [`MapMut`] types are primary means for allocating virtual 15 | //! memory regions, both for a file and anonymously. Generally, the 16 | //! 
[`Map::with_options()`] and [`MapMut::with_options()`] are used to specify 17 | //! the mapping requirements. See [`Options`] for more information. 18 | //! 19 | //! The [`MapMut`] type maintains interior mutability for the mapped memory, 20 | //! while the [`Map`] is read-only. However, it is possible to convert between 21 | //! these types ([`.into_map_mut()`] and [`.into_map()`]) assuming the proper 22 | //! [`Options`] are specified. 23 | //! 24 | //! Additionally, a variety of buffer implementations are provided in the 25 | //! [`vmap::io`] module. The [`Ring`] and [`InfiniteRing`] use cross-platform 26 | //! optimzed circular memory mapping to remove the typical boundary problem 27 | //! with most circular buffers. This ensures all ranges of the underlying byte 28 | //! buffer can be viewed as a single byte slice, event when the value wraps 29 | //! back around to the beginning of the buffer. The [`BufReader`] and [`BufWriter`] 30 | //! implement buffered I/O using a [`Ring`] as a backing layer. 31 | //! 32 | //! # Examples 33 | //! 34 | //! ``` 35 | //! use vmap::Map; 36 | //! use std::path::PathBuf; 37 | //! use std::str::from_utf8; 38 | //! # use std::fs; 39 | //! 40 | //! # fn main() -> vmap::Result<()> { 41 | //! # let tmp = tempdir::TempDir::new("vmap")?; 42 | //! let path: PathBuf = /* path to file */ 43 | //! # tmp.path().join("example"); 44 | //! # fs::write(&path, b"this is a test")?; 45 | //! 46 | //! // Map the first 4 bytes 47 | //! let (map, file) = Map::with_options().len(4).open(&path)?; 48 | //! assert_eq!(Ok("this"), from_utf8(&map[..])); 49 | //! 50 | //! // Reuse the file to map a different region 51 | //! let map = Map::with_options().offset(10).len(4).map(&file)?; 52 | //! assert_eq!(Ok("test"), from_utf8(&map[..])); 53 | //! # Ok(()) 54 | //! # } 55 | //! ``` 56 | //! 57 | //! If opened properly, the `Map` can be moved into a `MapMut` and modifications 58 | //! to the underlying file can be performed: 59 | //! 60 | //! ``` 61 | //! 
use vmap::{Map, Flush}; 62 | //! use std::path::PathBuf; 63 | //! use std::str::from_utf8; 64 | //! # use std::fs; 65 | //! 66 | //! # fn main() -> vmap::Result<()> { 67 | //! # let tmp = tempdir::TempDir::new("vmap")?; 68 | //! let path: PathBuf = /* path to file */ 69 | //! # tmp.path().join("example"); 70 | //! # fs::write(&path, b"this is a test")?; 71 | //! 72 | //! // Open with write permissions so the Map can be converted into a MapMut 73 | //! let (map, file) = Map::with_options().write().len(14).open(&path)?; 74 | //! assert_eq!(Ok("this is a test"), from_utf8(&map[..])); 75 | //! 76 | //! // Move the Map into a MapMut 77 | //! // ... we could have started with MapMut::with_options() 78 | //! let mut map = map.into_map_mut()?; 79 | //! map[..4].clone_from_slice(b"that"); 80 | //! 81 | //! // Flush the changes to disk synchronously 82 | //! map.flush(&file, Flush::Sync)?; 83 | //! 84 | //! // Move the MapMut back into a Map 85 | //! let map = map.into_map()?; 86 | //! assert_eq!(Ok("that is a test"), from_utf8(&map[..])); 87 | //! # Ok(()) 88 | //! # } 89 | //! ``` 90 | //! 91 | //! This library contains a [`Ring`] that constructs a circular memory 92 | //! allocation where values can wrap from around from the end of the buffer back 93 | //! to the beginning with sequential memory addresses. The [`InfiniteRing`] is 94 | //! similar, however it allows writes to overwrite reads. 95 | //! 96 | //! ``` 97 | //! use vmap::io::{Ring, SeqWrite}; 98 | //! use std::io::{BufRead, Read, Write}; 99 | //! 100 | //! # fn main() -> std::io::Result<()> { 101 | //! let mut buf = Ring::new(4000).unwrap(); 102 | //! let mut i = 1; 103 | //! 104 | //! // Fill up the buffer with lines. 105 | //! while buf.write_len() > 20 { 106 | //! write!(&mut buf, "this is test line {}\n", i)?; 107 | //! i += 1; 108 | //! } 109 | //! 110 | //! // No more space is available. 111 | //! assert!(write!(&mut buf, "this is test line {}\n", i).is_err()); 112 | //! 113 | //! 
let mut line = String::new(); 114 | //! 115 | //! // Read the first line written. 116 | //! let len = buf.read_line(&mut line)?; 117 | //! assert_eq!(line, "this is test line 1\n"); 118 | //! 119 | //! line.clear(); 120 | //! 121 | //! // Read the second line written. 122 | //! let len = buf.read_line(&mut line)?; 123 | //! assert_eq!(line, "this is test line 2\n"); 124 | //! 125 | //! // Now there is enough space to write more. 126 | //! write!(&mut buf, "this is test line {}\n", i)?; 127 | //! # Ok(()) 128 | //! # } 129 | //! ``` 130 | //! 131 | //! [`.flush()`]: struct.MapMut.html#method.flush 132 | //! [`.into_map()`]: struct.MapMut.html#method.into_map 133 | //! [`.into_map_mut()`]: struct.Map.html#method.into_map_mut 134 | //! [`BufReader`]: io/struct.BufReader.html 135 | //! [`BufWriter`]: io/struct.BufWriter.html 136 | //! [`InfiniteRing`]: io/struct.InfiniteRing.html 137 | //! [`Map::with_options()`]: struct.Map.html#method.with_options 138 | //! [`MapMut::with_options()`]: struct.MapMut.html#method.with_options 139 | //! [`MapMut`]: struct.MapMut.html 140 | //! [`Map`]: struct.Map.html 141 | //! [`Options`]: struct.Options.html 142 | //! [`Ring`]: io/struct.Ring.html 143 | //! [`vmap::io`]: io/index.html 144 | 145 | #![deny(missing_docs)] 146 | 147 | use std::ops::{Deref, DerefMut}; 148 | use std::sync::atomic::{AtomicUsize, Ordering}; 149 | use std::{mem, ptr}; 150 | 151 | #[cfg(feature = "os")] 152 | pub mod os; 153 | 154 | #[cfg(not(feature = "os"))] 155 | mod os; 156 | 157 | mod error; 158 | pub use self::error::{ConvertResult, Error, Input, Operation, Result}; 159 | 160 | mod map; 161 | pub use self::map::{Map, MapMut, Options}; 162 | 163 | #[cfg(feature = "io")] 164 | pub mod io; 165 | 166 | /// Protection level for a page. 167 | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 168 | pub enum Protect { 169 | /// The page(s) may only be read from. 170 | ReadOnly, 171 | /// The page(s) may be read from and written to. 
172 | ReadWrite, 173 | /// Like `ReadWrite`, but changes are not shared. 174 | ReadCopy, 175 | /// The page(s) may be read from and executed. 176 | ReadExec, 177 | } 178 | 179 | /// Desired behavior when flushing write changes. 180 | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 181 | pub enum Flush { 182 | /// Request dirty pages to be written immediately and block until completed. 183 | Sync, 184 | /// Request dirty pages to be written but do not wait for completion. 185 | Async, 186 | } 187 | 188 | /// Hint for the access pattern of the underlying mapping. 189 | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 190 | pub enum Advise { 191 | /// Use the system default behavior. 192 | Normal, 193 | /// The map will be accessed in a sequential manner. 194 | Sequential, 195 | /// The map will be accessed in a random manner. 196 | Random, 197 | /// The map is expected to be accessed soon. 198 | WillNeed, 199 | /// The map is not expected to be accessed soon. 200 | WillNotNeed, 201 | } 202 | 203 | /// Byte extent type used for length and resize options. 204 | /// 205 | /// For usage information, see the [`.len()`] or [`.resize()`] methods of the 206 | /// [`Options`] builder type. 207 | /// 208 | /// [`.len()`]: struct.Options.html#method.len 209 | /// [`.resize()`]: struct.Options.html#method.resize 210 | /// [`Options`]: struct.Options.html 211 | pub enum Extent { 212 | /// A dynamic extent that implies the end byte position of an underlying 213 | /// file resource or anonymous allocation. 214 | End, 215 | /// A static extent that refers to an exact byte position. 216 | Exact(usize), 217 | /// A dynamic extent that refers to a byte position of at least a particular 218 | /// offset. 219 | Min(usize), 220 | /// A dynamic extent that refers to a byte position of no greater than a 221 | /// particular offset.
222 | Max(usize), 223 | } 224 | 225 | impl From<usize> for Extent { 226 | fn from(v: usize) -> Self { 227 | Self::Exact(v) 228 | } 229 | } 230 | 231 | /// Gets a cached version of the system page size. 232 | /// 233 | /// # Examples 234 | /// 235 | /// ``` 236 | /// let page_size = vmap::page_size(); 237 | /// println!("the system page size is {} bytes", page_size); 238 | /// assert!(page_size >= 4096); 239 | /// ``` 240 | pub fn page_size() -> usize { 241 | let size = PAGE_SIZE.load(Ordering::Relaxed); 242 | if size == 0 { 243 | load_system_info().0 as usize 244 | } else { 245 | size 246 | } 247 | } 248 | 249 | /// Gets a cached version of the system allocation granularity size. 250 | /// 251 | /// On Windows this value is typically 64k. Otherwise it is the same as the 252 | /// page size. 253 | /// 254 | /// # Examples 255 | /// 256 | /// ``` 257 | /// let alloc_size = vmap::allocation_size(); 258 | /// println!("the system allocation granularity is {} bytes", alloc_size); 259 | /// if cfg!(windows) { 260 | /// assert!(alloc_size >= 65536); 261 | /// } else { 262 | /// assert!(alloc_size >= 4096); 263 | /// } 264 | /// ``` 265 | pub fn allocation_size() -> usize { 266 | let size = ALLOC_SIZE.load(Ordering::Relaxed); 267 | if size == 0 { 268 | load_system_info().1 as usize 269 | } else { 270 | size 271 | } 272 | } 273 | 274 | static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); 275 | static ALLOC_SIZE: AtomicUsize = AtomicUsize::new(0); 276 | 277 | #[inline] 278 | fn load_system_info() -> (u32, u32) { 279 | let (page, alloc) = self::os::system_info(); 280 | PAGE_SIZE.store(page as usize, Ordering::Relaxed); 281 | ALLOC_SIZE.store(alloc as usize, Ordering::Relaxed); 282 | (page, alloc) 283 | } 284 | 285 | /// Type for calculating system page or allocation size information.
286 | /// 287 | /// # Examples 288 | /// 289 | /// ``` 290 | /// let size = vmap::Size::alloc(); 291 | /// let pages = size.count(200); 292 | /// assert_eq!(pages, 1); 293 | /// 294 | /// let round = size.round(200); 295 | /// println!("200 bytes requires a {} byte mapping", round); 296 | /// 297 | /// let count = size.count(10000); 298 | /// println!("10000 bytes requires {} pages", count); 299 | /// 300 | /// let size = size.size(3); 301 | /// println!("3 pages are {} bytes", size); 302 | /// ``` 303 | #[derive(Copy, Clone)] 304 | pub struct Size(usize); 305 | 306 | impl Size { 307 | /// Creates a type for calculating page numbers and byte offsets. 308 | /// 309 | /// The size is determined from the system's configured page size. 310 | /// This value is cached making it very cheap to construct. 311 | #[inline] 312 | pub fn page() -> Self { 313 | unsafe { Self::with_size(page_size()) } 314 | } 315 | 316 | /// Creates a type for calculating allocation numbers and byte offsets. 317 | /// 318 | /// The size is determined from the system's configured allocation 319 | /// granularity. This value is cached making it very cheap to construct. 320 | #[inline] 321 | pub fn alloc() -> Self { 322 | unsafe { Self::with_size(allocation_size()) } 323 | } 324 | 325 | /// Creates a type for calculating allocation numbers and byte offsets 326 | /// using a known size. 327 | /// 328 | /// # Safety 329 | /// 330 | /// The size *must* be a power-of-2. To successfully map pages, the size 331 | /// must also be a multiple of the actual system allocation granularity. 332 | /// Hypothetically this could be used to simulate larger page sizes, but 333 | /// this has no bearing on the TLB cache.
334 | /// 335 | /// # Examples 336 | /// 337 | /// ``` 338 | /// use vmap::Size; 339 | /// 340 | /// let sys = vmap::allocation_size(); 341 | /// let size = unsafe { Size::with_size(sys << 2) }; 342 | /// assert_eq!(size.round(1), sys << 2); // probably 16384 343 | /// ``` 344 | #[inline] 345 | pub unsafe fn with_size(size: usize) -> Self { 346 | Size(size) 347 | } 348 | 349 | /// Round a byte size up to the nearest unit size. 350 | /// 351 | /// # Examples 352 | /// 353 | /// ``` 354 | /// use vmap::Size; 355 | /// 356 | /// let sys = vmap::page_size(); 357 | /// let size = Size::page(); 358 | /// assert_eq!(size.round(0), 0); 359 | /// assert_eq!(size.round(1), sys); // probably 4096 360 | /// assert_eq!(size.round(sys-1), sys); // probably 4096 361 | /// assert_eq!(size.round(sys), sys); // probably 4096 362 | /// assert_eq!(size.round(sys+1), sys*2); // probably 8192 363 | /// ``` 364 | #[inline] 365 | pub const fn round(&self, len: usize) -> usize { 366 | self.truncate(len + self.0 - 1) 367 | } 368 | 369 | /// Round a byte size down to the nearest unit size. 370 | /// 371 | /// # Examples 372 | /// 373 | /// ``` 374 | /// use vmap::Size; 375 | /// 376 | /// let sys = vmap::page_size(); 377 | /// let size = Size::page(); 378 | /// assert_eq!(size.truncate(0), 0); 379 | /// assert_eq!(size.truncate(1), 0); 380 | /// assert_eq!(size.truncate(sys-1), 0); 381 | /// assert_eq!(size.truncate(sys), sys); // probably 4096 382 | /// assert_eq!(size.truncate(sys+1), sys); // probably 4096 383 | /// ``` 384 | #[inline] 385 | pub const fn truncate(&self, len: usize) -> usize { 386 | len & !(self.0 - 1) 387 | } 388 | 389 | /// Calculate the byte offset from size unit containing the position. 
390 | /// 391 | /// # Examples 392 | /// 393 | /// ``` 394 | /// use vmap::Size; 395 | /// 396 | /// let sys = vmap::page_size(); 397 | /// let size = Size::page(); 398 | /// assert_eq!(size.offset(1), 1); 399 | /// assert_eq!(size.offset(sys-1), sys-1); 400 | /// assert_eq!(size.offset(sys*2 + 123), 123); 401 | /// ``` 402 | #[inline] 403 | pub const fn offset(&self, len: usize) -> usize { 404 | len & (self.0 - 1) 405 | } 406 | 407 | /// Convert a unit count into a byte size. 408 | /// 409 | /// # Examples 410 | /// 411 | /// ``` 412 | /// use vmap::Size; 413 | /// 414 | /// let sys = vmap::page_size(); 415 | /// let size = Size::page(); 416 | /// assert_eq!(size.size(0), 0); 417 | /// assert_eq!(size.size(1), sys); // probably 4096 418 | /// assert_eq!(size.size(2), sys*2); // probably 8192 419 | /// ``` 420 | #[inline] 421 | pub const fn size(&self, count: u32) -> usize { 422 | (count as usize) << self.0.trailing_zeros() 423 | } 424 | 425 | /// Convert a byte size into the number of units necessary to contain it. 426 | /// 427 | /// # Examples 428 | /// 429 | /// ``` 430 | /// use vmap::Size; 431 | /// 432 | /// let sys = vmap::page_size(); 433 | /// let size = Size::page(); 434 | /// assert_eq!(size.count(0), 0); 435 | /// assert_eq!(size.count(1), 1); 436 | /// assert_eq!(size.count(sys-1), 1); 437 | /// assert_eq!(size.count(sys), 1); 438 | /// assert_eq!(size.count(sys+1), 2); 439 | /// assert_eq!(size.count(sys*2), 2); 440 | /// ``` 441 | #[inline] 442 | pub const fn count(&self, len: usize) -> u32 { 443 | (self.round(len) >> self.0.trailing_zeros()) as u32 444 | } 445 | 446 | /// Calculates the unit bounds for a pointer and length. 447 | /// 448 | /// # Safety 449 | /// 450 | /// There is no verification that the pointer is a mapped page nor that 451 | /// the calculated offset may be dereferenced.
452 | #[inline] 453 | pub unsafe fn bounds(&self, ptr: *mut u8, len: usize) -> (*mut u8, usize) { 454 | let off = self.offset(ptr as usize); 455 | (ptr.offset(-(off as isize)), self.round(len + off)) 456 | } 457 | } 458 | 459 | impl Default for Size { 460 | fn default() -> Self { 461 | Self::alloc() 462 | } 463 | } 464 | 465 | /// General trait for working with any memory-safe representation of a 466 | /// contiguous region of arbitrary memory. 467 | pub trait Span: Deref + Sized + sealed::Span { 468 | /// Get the length of the allocated region. 469 | fn len(&self) -> usize; 470 | 471 | /// Get the pointer to the start of the allocated region. 472 | fn as_ptr(&self) -> *const u8; 473 | 474 | /// Tests if the span covers zero bytes. 475 | #[inline] 476 | fn is_empty(&self) -> bool { 477 | self.len() == 0 478 | } 479 | 480 | /// Performs a volatile read of the value at a given offset. 481 | /// 482 | /// Volatile operations are intended to act on I/O memory, and are 483 | /// guaranteed to not be elided or reordered by the compiler across 484 | /// other volatile operations. 485 | #[inline] 486 | fn read_volatile(&self, offset: usize) -> T { 487 | assert_capacity::(offset, self.len()); 488 | assert_alignment::(offset, self.as_ptr()); 489 | unsafe { ptr::read_volatile(self.as_ptr().add(offset) as *const T) } 490 | } 491 | 492 | /// Performs an unaligned read of the value at a given offset. 493 | #[inline] 494 | fn read_unaligned(&self, offset: usize) -> T { 495 | assert_capacity::(offset, self.len()); 496 | unsafe { ptr::read_unaligned(self.as_ptr().add(offset) as *const T) } 497 | } 498 | } 499 | 500 | /// General trait for working with any memory-safe representation of a 501 | /// contiguous region of arbitrary mutable memory. 502 | pub trait SpanMut: Span + DerefMut { 503 | /// Get a mutable pointer to the start of the allocated region. 504 | fn as_mut_ptr(&mut self) -> *mut u8; 505 | 506 | /// Performs a volatile write of the value at a given offset. 
507 | /// 508 | /// Volatile operations are intended to act on I/O memory, and are 509 | /// guaranteed to not be elided or reordered by the compiler across 510 | /// other volatile operations. 511 | #[inline] 512 | fn write_volatile(&mut self, offset: usize, value: T) { 513 | assert_capacity::(offset, self.len()); 514 | assert_alignment::(offset, self.as_ptr()); 515 | unsafe { ptr::write_volatile(self.as_mut_ptr().add(offset) as *mut T, value) } 516 | } 517 | 518 | /// Performs an unaligned write of the value at a given offset. 519 | #[inline] 520 | fn write_unaligned(&mut self, offset: usize, value: T) { 521 | assert_capacity::(offset, self.len()); 522 | unsafe { ptr::write_unaligned(self.as_mut_ptr().add(offset) as *mut T, value) } 523 | } 524 | } 525 | 526 | impl<'a> Span for &'a [u8] { 527 | #[inline] 528 | fn len(&self) -> usize { 529 | <[u8]>::len(self) 530 | } 531 | 532 | #[inline] 533 | fn as_ptr(&self) -> *const u8 { 534 | <[u8]>::as_ptr(self) 535 | } 536 | } 537 | 538 | impl<'a> Span for &'a mut [u8] { 539 | #[inline] 540 | fn len(&self) -> usize { 541 | <[u8]>::len(self) 542 | } 543 | 544 | #[inline] 545 | fn as_ptr(&self) -> *const u8 { 546 | <[u8]>::as_ptr(self) 547 | } 548 | } 549 | 550 | impl<'a> SpanMut for &'a mut [u8] { 551 | #[inline] 552 | fn as_mut_ptr(&mut self) -> *mut u8 { 553 | <[u8]>::as_mut_ptr(self) 554 | } 555 | } 556 | 557 | mod sealed { 558 | pub trait Span {} 559 | 560 | impl Span for super::Map {} 561 | impl Span for super::MapMut {} 562 | impl<'a> Span for &'a [u8] {} 563 | impl<'a> Span for &'a mut [u8] {} 564 | 565 | pub trait FromPtr { 566 | unsafe fn from_ptr(ptr: *mut u8, len: usize) -> Self; 567 | } 568 | 569 | pub trait Scalar: Default {} 570 | 571 | impl Scalar for u8 {} 572 | impl Scalar for i8 {} 573 | impl Scalar for u16 {} 574 | impl Scalar for i16 {} 575 | impl Scalar for u32 {} 576 | impl Scalar for i32 {} 577 | impl Scalar for u64 {} 578 | impl Scalar for i64 {} 579 | impl Scalar for u128 {} 580 | impl Scalar for 
i128 {} 581 | impl Scalar for usize {} 582 | impl Scalar for isize {} 583 | impl Scalar for f32 {} 584 | impl Scalar for f64 {} 585 | } 586 | 587 | #[inline] 588 | fn assert_alignment(offset: usize, ptr: *const u8) { 589 | if unsafe { ptr.add(offset) } as usize % mem::align_of::() != 0 { 590 | panic!( 591 | "offset improperly aligned: the requirement is {} but the offset is +{}/-{}", 592 | mem::align_of::(), 593 | ptr as usize % mem::align_of::(), 594 | mem::align_of::() - (ptr as usize % mem::align_of::()), 595 | ) 596 | } 597 | } 598 | 599 | #[inline] 600 | fn assert_capacity(offset: usize, len: usize) { 601 | if offset + mem::size_of::() > len { 602 | panic!( 603 | "index out of bounds: the len is {} but the index is {}", 604 | len, 605 | offset + mem::size_of::() 606 | ) 607 | } 608 | } 609 | 610 | #[cfg(test)] 611 | mod tests { 612 | use std::fs; 613 | use std::path::PathBuf; 614 | use std::str::from_utf8; 615 | 616 | use super::*; 617 | 618 | #[test] 619 | fn allocation_size() { 620 | let sz = unsafe { Size::with_size(4096) }; 621 | assert_eq!(sz.round(0), 0); 622 | assert_eq!(sz.round(1), 4096); 623 | assert_eq!(sz.round(4095), 4096); 624 | assert_eq!(sz.round(4096), 4096); 625 | assert_eq!(sz.round(4097), 8192); 626 | assert_eq!(sz.truncate(0), 0); 627 | assert_eq!(sz.truncate(1), 0); 628 | assert_eq!(sz.truncate(4095), 0); 629 | assert_eq!(sz.truncate(4096), 4096); 630 | assert_eq!(sz.truncate(4097), 4096); 631 | assert_eq!(sz.size(0), 0); 632 | assert_eq!(sz.size(1), 4096); 633 | assert_eq!(sz.size(2), 8192); 634 | assert_eq!(sz.count(0), 0); 635 | assert_eq!(sz.count(1), 1); 636 | assert_eq!(sz.count(4095), 1); 637 | assert_eq!(sz.count(4096), 1); 638 | assert_eq!(sz.count(4097), 2); 639 | assert_eq!(sz.count(8192), 2); 640 | assert_eq!(sz.offset(0), 0); 641 | assert_eq!(sz.offset(1), 1); 642 | assert_eq!(sz.offset(4095), 4095); 643 | assert_eq!(sz.offset(4096), 0); 644 | assert_eq!(sz.offset(4097), 1); 645 | } 646 | 647 | #[test] 648 | fn alloc_min() -> 
Result<()> { 649 | let sz = Size::alloc(); 650 | 651 | let mut map = MapMut::with_options().len(Extent::Min(100)).alloc()?; 652 | assert_eq!(map.len(), sz.round(100)); 653 | assert_eq!(Ok("\0\0\0\0\0"), from_utf8(&map[..5])); 654 | 655 | map[..5].clone_from_slice(b"hello"); 656 | assert_eq!(Ok("hello"), from_utf8(&map[..5])); 657 | Ok(()) 658 | } 659 | 660 | #[test] 661 | fn alloc_exact() -> Result<()> { 662 | let mut map = MapMut::with_options().len(5).alloc()?; 663 | assert_eq!(map.len(), 5); 664 | assert_eq!(Ok("\0\0\0\0\0"), from_utf8(&map[..])); 665 | 666 | map[..5].clone_from_slice(b"hello"); 667 | assert_eq!(Ok("hello"), from_utf8(&map[..])); 668 | Ok(()) 669 | } 670 | 671 | #[test] 672 | fn alloc_offset() -> Result<()> { 673 | // map to the offset of the last 5 bytes of an allocation size, but map 6 bytes 674 | let off = Size::alloc().size(1) - 5; 675 | let mut map = MapMut::with_options().offset(off).len(6).alloc()?; 676 | 677 | // force the page after the 5 bytes to be read-only 678 | unsafe { os::protect(map.as_mut_ptr().add(5), 1, Protect::ReadOnly)? 
}; 679 | 680 | assert_eq!(map.len(), 6); 681 | assert_eq!(Ok("\0\0\0\0\0\0"), from_utf8(&map[..])); 682 | 683 | // writing one more byte will segfault 684 | map[..5].clone_from_slice(b"hello"); 685 | assert_eq!(Ok("hello\0"), from_utf8(&map[..])); 686 | Ok(()) 687 | } 688 | 689 | #[test] 690 | fn read_end() -> Result<()> { 691 | let (_tmp, path, len) = write_default("read_end")?; 692 | let (map, _) = Map::with_options().offset(29).open(&path)?; 693 | assert!(map.len() >= 30); 694 | assert_eq!(len - 29, map.len()); 695 | assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..30])); 696 | Ok(()) 697 | } 698 | 699 | #[test] 700 | fn read_min() -> Result<()> { 701 | let (_tmp, path, len) = write_default("read_min")?; 702 | let (map, _) = Map::with_options() 703 | .offset(29) 704 | .len(Extent::Min(30)) 705 | .open(&path)?; 706 | println!("path = {:?}, len = {}, map = {}", path, len, map.len()); 707 | assert!(map.len() >= 30); 708 | assert_eq!(len - 29, map.len()); 709 | assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..30])); 710 | Ok(()) 711 | } 712 | 713 | #[test] 714 | fn read_max() -> Result<()> { 715 | let (_tmp, path, _len) = write_default("read_max")?; 716 | let (map, _) = Map::with_options() 717 | .offset(29) 718 | .len(Extent::Max(30)) 719 | .open(&path)?; 720 | assert!(map.len() == 30); 721 | assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..])); 722 | Ok(()) 723 | } 724 | 725 | #[test] 726 | fn read_exact() -> Result<()> { 727 | let (_tmp, path, _len) = write_default("read_exact")?; 728 | let (map, _) = Map::with_options().offset(29).len(30).open(&path)?; 729 | assert!(map.len() == 30); 730 | assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..])); 731 | Ok(()) 732 | } 733 | 734 | #[test] 735 | fn copy() -> Result<()> { 736 | let (_tmp, path, _len) = write_default("copy")?; 737 | let (mut map, _) = MapMut::with_options() 738 | .offset(29) 739 | .len(30) 740 | .copy() 741 | .open(&path)?; 742 | 
assert_eq!(map.len(), 30); 743 | assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..])); 744 | 745 | map[..4].clone_from_slice(b"nice"); 746 | assert_eq!(Ok("nice and safe memory-mapped IO"), from_utf8(&map[..])); 747 | Ok(()) 748 | } 749 | 750 | #[test] 751 | fn write_into_mut() -> Result<()> { 752 | let tmp = tempdir::TempDir::new("vmap")?; 753 | let path: PathBuf = tmp.path().join("write_into_mut"); 754 | fs::write(&path, "this is a test").expect("failed to write file"); 755 | 756 | let (map, _) = Map::with_options().write().resize(16).open(&path)?; 757 | assert_eq!(16, map.len()); 758 | assert_eq!(Ok("this is a test"), from_utf8(&map[..14])); 759 | assert_eq!(Ok("this is a test\0\0"), from_utf8(&map[..])); 760 | 761 | let mut map = map.into_map_mut()?; 762 | map[..4].clone_from_slice(b"that"); 763 | assert_eq!(Ok("that is a test"), from_utf8(&map[..14])); 764 | assert_eq!(Ok("that is a test\0\0"), from_utf8(&map[..])); 765 | 766 | let map = map.into_map()?; 767 | assert_eq!(Ok("that is a test"), from_utf8(&map[..14])); 768 | assert_eq!(Ok("that is a test\0\0"), from_utf8(&map[..])); 769 | 770 | let (map, _) = Map::with_options().open(&path)?; 771 | assert_eq!(16, map.len()); 772 | assert_eq!(Ok("that is a test"), from_utf8(&map[..14])); 773 | assert_eq!(Ok("that is a test\0\0"), from_utf8(&map[..])); 774 | 775 | Ok(()) 776 | } 777 | 778 | #[test] 779 | fn truncate() -> Result<()> { 780 | let tmp = tempdir::TempDir::new("vmap")?; 781 | let path: PathBuf = tmp.path().join("truncate"); 782 | fs::write(&path, "this is a test").expect("failed to write file"); 783 | 784 | let (map, _) = Map::with_options() 785 | .write() 786 | .truncate(true) 787 | .resize(16) 788 | .open(&path)?; 789 | assert_eq!(16, map.len()); 790 | assert_eq!(Ok("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"), from_utf8(&map[..])); 791 | Ok(()) 792 | } 793 | 794 | type WriteResult = Result<(tempdir::TempDir, PathBuf, usize)>; 795 | 796 | fn write_tmp(name: &'static str, msg: &'static str) -> 
WriteResult { 797 | let tmp = tempdir::TempDir::new("vmap")?; 798 | let path: PathBuf = tmp.path().join(name); 799 | fs::write(&path, msg)?; 800 | Ok((tmp, path, msg.len())) 801 | } 802 | 803 | fn write_default(name: &'static str) -> WriteResult { 804 | write_tmp( 805 | name, 806 | "A cross-platform library for fast and safe memory-mapped IO in Rust", 807 | ) 808 | } 809 | 810 | #[test] 811 | fn volatile() -> Result<()> { 812 | let tmp = tempdir::TempDir::new("vmap")?; 813 | let path: PathBuf = tmp.path().join("volatile"); 814 | 815 | let (mut map, _) = MapMut::with_options() 816 | .write() 817 | .truncate(true) 818 | .create(true) 819 | .resize(16) 820 | .open(&path)?; 821 | assert_eq!(16, map.len()); 822 | 823 | assert_eq!(0u64, map.read_volatile(0)); 824 | assert_eq!(0u64, map.read_volatile(8)); 825 | 826 | map.write_volatile(0, 0xc3a5c85c97cb3127u64); 827 | map.write_volatile(8, 0xb492b66fbe98f273u64); 828 | 829 | assert_eq!(0xc3a5c85c97cb3127u64, map.read_volatile(0)); 830 | assert_eq!(0xb492b66fbe98f273u64, map.read_volatile(8)); 831 | 832 | let (map, _) = Map::with_options().open(&path)?; 833 | assert_eq!(16, map.len()); 834 | assert_eq!(0xc3a5c85c97cb3127u64, map.read_volatile(0)); 835 | assert_eq!(0xb492b66fbe98f273u64, map.read_volatile(8)); 836 | 837 | Ok(()) 838 | } 839 | 840 | #[test] 841 | fn unaligned() -> Result<()> { 842 | let tmp = tempdir::TempDir::new("vmap")?; 843 | let path: PathBuf = tmp.path().join("unaligned"); 844 | 845 | let (mut map, _) = MapMut::with_options() 846 | .write() 847 | .truncate(true) 848 | .create(true) 849 | .resize(17) 850 | .open(&path)?; 851 | assert_eq!(17, map.len()); 852 | 853 | assert_eq!(0u64, map.read_unaligned(1)); 854 | assert_eq!(0u64, map.read_unaligned(9)); 855 | 856 | map.write_unaligned(1, 0xc3a5c85c97cb3127u64); 857 | map.write_unaligned(9, 0xb492b66fbe98f273u64); 858 | 859 | assert_eq!(0xc3a5c85c97cb3127u64, map.read_unaligned(1)); 860 | assert_eq!(0xb492b66fbe98f273u64, map.read_unaligned(9)); 861 | 862 
| let (map, _) = Map::with_options().open(&path)?; 863 | assert_eq!(17, map.len()); 864 | assert_eq!(0xc3a5c85c97cb3127u64, map.read_unaligned(1)); 865 | assert_eq!(0xb492b66fbe98f273u64, map.read_unaligned(9)); 866 | 867 | Ok(()) 868 | } 869 | } 870 | -------------------------------------------------------------------------------- /src/map.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::fs::{File, OpenOptions}; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::path::Path; 5 | use std::slice; 6 | use std::{cmp, fmt, io, marker}; 7 | 8 | use crate::os::{advise, flush, lock, map_anon, map_file, protect, unlock, unmap}; 9 | use crate::sealed::FromPtr; 10 | use crate::{ 11 | Advise, ConvertResult, Error, Extent, Flush, Input, Operation, Protect, Result, Size, Span, 12 | SpanMut, 13 | }; 14 | 15 | /// Allocation of one or more read-only sequential pages. 16 | /// 17 | /// # Examples 18 | /// 19 | /// ``` 20 | /// use vmap::{Map, Advise}; 21 | /// use std::path::PathBuf; 22 | /// use std::str::from_utf8; 23 | /// 24 | /// # fn main() -> vmap::Result<()> { 25 | /// # let tmp = tempdir::TempDir::new("vmap")?; 26 | /// let path: PathBuf = /* path to file */ 27 | /// # tmp.path().join("example"); 28 | /// # std::fs::write(&path, "A cross-platform library for fast and safe memory-mapped IO in Rust")?; 29 | /// let (map, file) = Map::with_options().offset(29).len(30).open(&path)?; 30 | /// map.advise(Advise::Sequential)?; 31 | /// assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map[..])); 32 | /// assert_eq!(Ok("safe"), from_utf8(&map[9..13])); 33 | /// # Ok(()) 34 | /// # } 35 | /// ``` 36 | pub struct Map(MapMut); 37 | 38 | impl Map { 39 | /// Returns a new [`Options`] object to create a read-only `Map`. 40 | /// 41 | /// When used to [`.open()`] a path or [`.map()`] a file, the default 42 | /// [`Options`] object is assumed to cover the entire file. 
43 | /// 44 | /// See the [`Options`] type for details on options for modifying the file 45 | /// size, specifying offset positions, and selecting specific lengths. 46 | /// 47 | /// # Examples 48 | /// 49 | /// ``` 50 | /// use vmap::Map; 51 | /// use std::path::PathBuf; 52 | /// use std::str::from_utf8; 53 | /// 54 | /// # fn main() -> vmap::Result<()> { 55 | /// # let tmp = tempdir::TempDir::new("vmap")?; 56 | /// let path: PathBuf = /* path to file */ 57 | /// # tmp.path().join("example"); 58 | /// # std::fs::write(&path, "A cross-platform library for fast and safe memory-mapped IO in Rust")?; 59 | /// let (map, file) = Map::with_options() 60 | /// .offset(29) 61 | /// .len(30) 62 | /// .open(&path)?; 63 | /// assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map)); 64 | /// # Ok(()) 65 | /// # } 66 | /// ``` 67 | pub fn with_options() -> Options { 68 | Options::new() 69 | } 70 | 71 | /// Transfer ownership of the map into a mutable map. 72 | /// 73 | /// This will change the protection of the mapping. If the original file 74 | /// was not opened with write permissions, this will error. 
75 | /// 76 | /// # Examples 77 | /// 78 | /// ``` 79 | /// use vmap::Map; 80 | /// use std::fs::OpenOptions; 81 | /// use std::path::PathBuf; 82 | /// use std::str::from_utf8; 83 | /// # use std::fs; 84 | /// 85 | /// # fn main() -> vmap::Result<()> { 86 | /// # let tmp = tempdir::TempDir::new("vmap")?; 87 | /// let path: PathBuf = /* path to file */ 88 | /// # tmp.path().join("example"); 89 | /// # fs::write(&path, b"this is a test")?; 90 | /// 91 | /// // Map the beginning of the file 92 | /// let (map, file) = Map::with_options().write().len(14).open(path)?; 93 | /// assert_eq!(Ok("this is a test"), from_utf8(&map[..])); 94 | /// 95 | /// let mut map = map.into_map_mut()?; 96 | /// map[..4].clone_from_slice(b"that"); 97 | /// assert_eq!(Ok("that is a test"), from_utf8(&map[..])); 98 | /// # Ok(()) 99 | /// # } 100 | /// ``` 101 | pub fn into_map_mut(self) -> ConvertResult { 102 | let (ptr, len) = unsafe { Size::page().bounds(self.0.ptr, self.0.len) }; 103 | match unsafe { protect(ptr, len, Protect::ReadWrite) } { 104 | Ok(()) => Ok(self.0), 105 | Err(err) => Err((err, self)), 106 | } 107 | } 108 | 109 | /// Updates the advise for the entire mapped region.. 110 | pub fn advise(&self, adv: Advise) -> Result<()> { 111 | self.0.advise(adv) 112 | } 113 | 114 | /// Updates the advise for a specific range of the mapped region. 115 | pub fn advise_range(&self, off: usize, len: usize, adv: Advise) -> Result<()> { 116 | self.0.advise_range(off, len, adv) 117 | } 118 | 119 | /// Lock all mapped physical pages into memory. 120 | pub fn lock(&self) -> Result<()> { 121 | self.0.lock() 122 | } 123 | 124 | /// Lock a range of physical pages into memory. 125 | pub fn lock_range(&self, off: usize, len: usize) -> Result<()> { 126 | self.0.lock_range(off, len) 127 | } 128 | 129 | /// Unlock all mapped physical pages into memory. 130 | pub fn unlock(&self) -> Result<()> { 131 | self.0.unlock() 132 | } 133 | 134 | /// Unlock a range of physical pages into memory. 
135 | pub fn unlock_range(&self, off: usize, len: usize) -> Result<()> { 136 | self.0.unlock_range(off, len) 137 | } 138 | } 139 | 140 | impl FromPtr for Map { 141 | unsafe fn from_ptr(ptr: *mut u8, len: usize) -> Self { 142 | Self(MapMut::from_ptr(ptr, len)) 143 | } 144 | } 145 | 146 | impl Span for Map { 147 | #[inline] 148 | fn len(&self) -> usize { 149 | self.0.len() 150 | } 151 | 152 | #[inline] 153 | fn as_ptr(&self) -> *const u8 { 154 | self.0.as_ptr() 155 | } 156 | } 157 | 158 | impl Deref for Map { 159 | type Target = [u8]; 160 | 161 | #[inline] 162 | fn deref(&self) -> &[u8] { 163 | unsafe { slice::from_raw_parts(self.0.ptr, self.0.len) } 164 | } 165 | } 166 | 167 | impl AsRef<[u8]> for Map 168 | where 169 | ::Target: AsRef<[u8]>, 170 | { 171 | #[inline] 172 | fn as_ref(&self) -> &[u8] { 173 | self.deref() 174 | } 175 | } 176 | 177 | impl TryFrom for Map { 178 | type Error = (Error, MapMut); 179 | 180 | fn try_from(map: MapMut) -> ConvertResult { 181 | map.into_map() 182 | } 183 | } 184 | 185 | impl fmt::Debug for Map { 186 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 187 | fmt.debug_struct("Map") 188 | .field("ptr", &self.0.ptr) 189 | .field("len", &self.0.len) 190 | .finish() 191 | } 192 | } 193 | 194 | /// Allocation of one or more read-write sequential pages. 195 | #[derive(Debug)] 196 | pub struct MapMut { 197 | ptr: *mut u8, 198 | len: usize, 199 | } 200 | 201 | impl MapMut { 202 | /// Returns a new `Options` object to create a writable `MapMut`. 203 | /// 204 | /// When used to [`.open()`] a path or [`.map()`] a file, the default 205 | /// [`Options`] object is assumed to cover the entire file. 206 | /// 207 | /// See the [`Options`] type for details on options for modifying the file 208 | /// size, specifying offset positions, and selecting specific lengths. 
209 | /// 210 | /// # Examples 211 | /// 212 | /// ``` 213 | /// use vmap::{MapMut, Flush}; 214 | /// use std::path::PathBuf; 215 | /// use std::str::from_utf8; 216 | /// 217 | /// # fn main() -> vmap::Result<()> { 218 | /// # let tmp = tempdir::TempDir::new("vmap")?; 219 | /// let path: PathBuf = /* path to file */ 220 | /// # tmp.path().join("example"); 221 | /// # std::fs::write(&path, "A cross-platform library for fast and safe memory-mapped IO in Rust")?; 222 | /// let (mut map, file) = MapMut::with_options() 223 | /// .offset(29) 224 | /// .len(30) 225 | /// .open(&path)?; 226 | /// assert_eq!(Ok("fast and safe memory-mapped IO"), from_utf8(&map)); 227 | /// map[..4].clone_from_slice(b"nice"); 228 | /// 229 | /// map.flush_range(&file, 0, 4, Flush::Sync); 230 | /// 231 | /// assert_eq!(Ok("nice and safe memory-mapped IO"), from_utf8(&map)); 232 | /// # Ok(()) 233 | /// # } 234 | /// ``` 235 | pub fn with_options() -> Options { 236 | let mut opts = Options::new(); 237 | opts.write(); 238 | opts 239 | } 240 | 241 | /// Create a new anonymous mapping at least as large as the hint. 242 | /// 243 | /// # Examples 244 | /// 245 | /// ``` 246 | /// use vmap::{MapMut, Protect}; 247 | /// use std::str::from_utf8; 248 | /// 249 | /// # fn main() -> vmap::Result<()> { 250 | /// let mut map = MapMut::new(200)?; 251 | /// map[..4].clone_from_slice(b"test"); 252 | /// assert_eq!(Ok("test"), from_utf8(&map[..4])); 253 | /// # Ok(()) 254 | /// # } 255 | /// ``` 256 | pub fn new(hint: usize) -> Result<Self> { 257 | Self::with_options().len(Extent::Min(hint)).alloc() 258 | } 259 | 260 | /// Transfer ownership of the map into an immutable map. 261 | /// 262 | /// This will change the protection of the mapping. If the original file 263 | /// was not opened with write permissions, this will error.
264 | /// 265 | /// # Examples 266 | /// 267 | /// ``` 268 | /// use vmap::MapMut; 269 | /// use std::fs::OpenOptions; 270 | /// use std::path::PathBuf; 271 | /// use std::str::from_utf8; 272 | /// # use std::fs; 273 | /// 274 | /// # fn main() -> vmap::Result<()> { 275 | /// # let tmp = tempdir::TempDir::new("vmap")?; 276 | /// let path: PathBuf = /* path to file */ 277 | /// # tmp.path().join("example"); 278 | /// # fs::write(&path, b"this is a test")?; 279 | /// let (mut map, file) = MapMut::with_options().len(14).open(&path)?; 280 | /// assert_eq!(Ok("this is a test"), from_utf8(&map[..])); 281 | /// 282 | /// map[..4].clone_from_slice(b"that"); 283 | /// 284 | /// let map = map.into_map()?; 285 | /// assert_eq!(Ok("that is a test"), from_utf8(&map[..])); 286 | /// # Ok(()) 287 | /// # } 288 | /// ``` 289 | pub fn into_map(self) -> ConvertResult { 290 | let (ptr, len) = unsafe { Size::page().bounds(self.ptr, self.len) }; 291 | match unsafe { protect(ptr, len, Protect::ReadWrite) } { 292 | Ok(()) => Ok(Map(self)), 293 | Err(err) => Err((err, self)), 294 | } 295 | } 296 | 297 | /// Writes modifications back to the filesystem. 298 | /// 299 | /// Flushes will happen automatically, but this will invoke a flush and 300 | /// return any errors with doing so. 301 | pub fn flush(&self, file: &File, mode: Flush) -> Result<()> { 302 | unsafe { 303 | let (ptr, len) = Size::page().bounds(self.ptr, self.len); 304 | flush(ptr, file, len, mode) 305 | } 306 | } 307 | 308 | /// Writes modifications back to the filesystem for a sub-range of the map. 309 | /// 310 | /// Flushes will happen automatically, but this will invoke a flush and 311 | /// return any errors with doing so. 
312 | pub fn flush_range(&self, file: &File, off: usize, len: usize, mode: Flush) -> Result<()> { 313 | if off + len > self.len { 314 | Err(Error::input(Operation::Flush, Input::InvalidRange)) 315 | } else { 316 | unsafe { 317 | let (ptr, len) = Size::page().bounds(self.ptr.add(off), len); 318 | flush(ptr, file, len, mode) 319 | } 320 | } 321 | } 322 | 323 | /// Updates the advise for the entire mapped region.. 324 | pub fn advise(&self, adv: Advise) -> Result<()> { 325 | unsafe { 326 | let (ptr, len) = Size::page().bounds(self.ptr, self.len); 327 | advise(ptr, len, adv) 328 | } 329 | } 330 | 331 | /// Updates the advise for a specific range of the mapped region. 332 | pub fn advise_range(&self, off: usize, len: usize, adv: Advise) -> Result<()> { 333 | if off + len > self.len { 334 | Err(Error::input(Operation::Advise, Input::InvalidRange)) 335 | } else { 336 | unsafe { 337 | let (ptr, len) = Size::page().bounds(self.ptr.add(off), len); 338 | advise(ptr, len, adv) 339 | } 340 | } 341 | } 342 | 343 | /// Lock all mapped physical pages into memory. 344 | pub fn lock(&self) -> Result<()> { 345 | unsafe { 346 | let (ptr, len) = Size::page().bounds(self.ptr, self.len); 347 | lock(ptr, len) 348 | } 349 | } 350 | 351 | /// Lock a range of physical pages into memory. 352 | pub fn lock_range(&self, off: usize, len: usize) -> Result<()> { 353 | if off + len > self.len { 354 | Err(Error::input(Operation::Lock, Input::InvalidRange)) 355 | } else { 356 | unsafe { 357 | let (ptr, len) = Size::page().bounds(self.ptr.add(off), len); 358 | lock(ptr, len) 359 | } 360 | } 361 | } 362 | 363 | /// Unlock all mapped physical pages into memory. 364 | pub fn unlock(&self) -> Result<()> { 365 | unsafe { 366 | let (ptr, len) = Size::page().bounds(self.ptr, self.len); 367 | unlock(ptr, len) 368 | } 369 | } 370 | 371 | /// Unlock a range of physical pages into memory. 
372 | pub fn unlock_range(&self, off: usize, len: usize) -> Result<()> { 373 | if off + len > self.len { 374 | Err(Error::input(Operation::Unlock, Input::InvalidRange)) 375 | } else { 376 | unsafe { 377 | let (ptr, len) = Size::page().bounds(self.ptr.add(off), len); 378 | unlock(ptr, len) 379 | } 380 | } 381 | } 382 | } 383 | 384 | impl FromPtr for MapMut { 385 | unsafe fn from_ptr(ptr: *mut u8, len: usize) -> Self { 386 | Self { ptr, len } 387 | } 388 | } 389 | 390 | impl Span for MapMut { 391 | #[inline] 392 | fn len(&self) -> usize { 393 | self.len 394 | } 395 | 396 | #[inline] 397 | fn as_ptr(&self) -> *const u8 { 398 | self.ptr 399 | } 400 | } 401 | 402 | impl SpanMut for MapMut { 403 | #[inline] 404 | fn as_mut_ptr(&mut self) -> *mut u8 { 405 | self.ptr 406 | } 407 | } 408 | 409 | impl Drop for MapMut { 410 | fn drop(&mut self) { 411 | unsafe { 412 | if self.len > 0 { 413 | let (ptr, len) = Size::alloc().bounds(self.ptr, self.len); 414 | unmap(ptr, len).unwrap_or_default(); 415 | } 416 | } 417 | } 418 | } 419 | 420 | impl Deref for MapMut { 421 | type Target = [u8]; 422 | 423 | #[inline] 424 | fn deref(&self) -> &[u8] { 425 | unsafe { slice::from_raw_parts(self.ptr, self.len) } 426 | } 427 | } 428 | 429 | impl DerefMut for MapMut { 430 | #[inline] 431 | fn deref_mut(&mut self) -> &mut [u8] { 432 | unsafe { slice::from_raw_parts_mut(self.ptr, self.len) } 433 | } 434 | } 435 | 436 | impl AsRef<[u8]> for MapMut 437 | where 438 | ::Target: AsRef<[u8]>, 439 | { 440 | #[inline] 441 | fn as_ref(&self) -> &[u8] { 442 | self.deref() 443 | } 444 | } 445 | 446 | impl AsMut<[u8]> for MapMut 447 | where 448 | ::Target: AsMut<[u8]>, 449 | { 450 | #[inline] 451 | fn as_mut(&mut self) -> &mut [u8] { 452 | self.deref_mut() 453 | } 454 | } 455 | 456 | impl TryFrom for MapMut { 457 | type Error = (Error, Map); 458 | 459 | fn try_from(map: Map) -> ConvertResult { 460 | map.into_map_mut() 461 | } 462 | } 463 | 464 | /// Options and flags which can be used to configure how a map 
is allocated. 465 | /// 466 | /// This builder exposes the ability to configure how a [`Map`] or a [`MapMut`] 467 | /// is allocated. These options can be used to either map a file or allocate 468 | /// an anonymous memory region. For file-based operations, a `std::fs::OpenOptions` 469 | /// value is maintained to match the desired abilities between the mapping and 470 | /// the underlying resource. This allows the creation, truncation, and resizing 471 | /// of a file to be coordinated when allocating a named map. For both mapping 472 | /// and anonymous allocations the option can also specify an offset and a 473 | /// mapping length. 474 | /// 475 | /// The `T` must either be a [`Map`] or a [`MapMut`]. Generally, this will be 476 | /// created by [`Map::with_options()`] or [`MapMut::with_options()`], then 477 | /// chain calls to methods to set each option, then call either [`.open()`], 478 | /// [`.map()`], or [`.alloc()`]. This will return a [`Result`] with the correct 479 | /// [`Map`] or [`MapMut`] inside. Additionally, there are [`.open_if()`] and 480 | /// [`.map_if()`] variations which instead return a [`Result`] containing an 481 | /// `Option`. These return `Ok(None)` if the attempted range lies outside 482 | /// of the file rather than an `Err`. 483 | /// 484 | /// Without specifying a size, the options defaults to either the full size of 485 | /// the file when using [`.open()`] or [`.map()`]. When using [`.alloc()`], the default 486 | /// size will be a single unit of allocation granularity. 
487 | /// 488 | /// [`Map`]: struct.Map.html 489 | /// [`MapMut`]: struct.MapMut.html 490 | /// [`Map::with_options()`]: struct.Map.html#method.with_options 491 | /// [`MapMut::with_options()`]: struct.MapMut.html#method.with_options 492 | /// [`.open()`]: #method.open 493 | /// [`.open_if()`]: #method.open_if 494 | /// [`.map()`]: #method.map 495 | /// [`.map_if()`]: #method.map_if 496 | /// [`.alloc()`]: #method.alloc 497 | /// [`Result`]: type.Result.html 498 | pub struct Options { 499 | open_options: OpenOptions, 500 | resize: Extent, 501 | len: Extent, 502 | offset: usize, 503 | protect: Protect, 504 | truncate: bool, 505 | _marker: marker::PhantomData T>, 506 | } 507 | 508 | impl Options { 509 | /// Creates a new [`Options`] value with a default state. 510 | /// 511 | /// Generally, [`Map::with_options()`] or [`MapMut::with_options()`] is the 512 | /// preferred way to create options. 513 | /// 514 | /// [`Options`]: struct.Options.html 515 | /// [`Map::with_options()`]: struct.Map.html#method.with_options 516 | /// [`MapMut::with_options()`]: struct.MapMut.html#method.with_options 517 | pub fn new() -> Self { 518 | let mut open_options = OpenOptions::new(); 519 | open_options.read(true); 520 | Self { 521 | open_options, 522 | resize: Extent::End, 523 | len: Extent::End, 524 | offset: 0, 525 | protect: Protect::ReadOnly, 526 | truncate: false, 527 | _marker: marker::PhantomData, 528 | } 529 | } 530 | 531 | /// Sets the option for write access. 532 | /// 533 | /// This is applied automatically when using [`MapMut::with_options()`]. 534 | /// This can be useful with [`Map`] when there is a future intent to call 535 | /// [`Map::into_map_mut()`]. 
536 | /// 537 | /// # Examples 538 | /// 539 | /// ``` 540 | /// use vmap::Map; 541 | /// 542 | /// # fn main() -> vmap::Result<()> { 543 | /// let (map, file) = Map::with_options().open("README.md")?; 544 | /// assert!(map.into_map_mut().is_err()); 545 | /// 546 | /// let (map, file) = Map::with_options().write().open("README.md")?; 547 | /// assert!(map.into_map_mut().is_ok()); 548 | /// # Ok(()) 549 | /// # } 550 | /// ``` 551 | /// 552 | /// [`MapMut::with_options()`]: struct.MapMut.html#method.with_options 553 | /// [`Map`]: struct.Map.html 554 | /// [`Map::into_map_mut()`]: struct.Map.html#method.into_map_mut 555 | pub fn write(&mut self) -> &mut Self { 556 | self.open_options.write(true); 557 | self.protect = Protect::ReadWrite; 558 | self 559 | } 560 | 561 | /// Sets the option for copy-on-write access. 562 | /// 563 | /// This efficiently implements a copy to an underlying modifiable 564 | /// resource. The allocated memory can be shared between multiple 565 | /// unmodified instances, and the copy operation is deferred until the 566 | /// first write. When used for an anonymous allocation, the deffered copy 567 | /// can be used in a child process. 
568 | /// 569 | /// # Examples 570 | /// 571 | /// ``` 572 | /// use vmap::MapMut; 573 | /// 574 | /// # fn main() -> vmap::Result<()> { 575 | /// let (mut map1, file) = MapMut::with_options().copy().open("README.md")?; 576 | /// let (mut map2, _) = MapMut::with_options().copy().open("README.md")?; 577 | /// let first = map1[0]; 578 | /// 579 | /// map1[0] = b'X'; 580 | /// 581 | /// assert_eq!(first, map2[0]); 582 | /// # Ok(()) 583 | /// # } 584 | /// ``` 585 | /// 586 | /// [`MapMut::with_options()`]: struct.MapMut.html#method.with_options 587 | /// [`Map`]: struct.Map.html 588 | /// [`Map::into_map_mut()`]: struct.Map.html#method.into_map_mut 589 | pub fn copy(&mut self) -> &mut Self { 590 | self.open_options.write(false); 591 | self.protect = Protect::ReadCopy; 592 | self 593 | } 594 | 595 | /// Sets the option to create a new file, or open it if it already exists. 596 | /// 597 | /// This only applies when using [`.open()`] or [`.open_if()`]. In order for the 598 | /// file to be created, [`.write()`] access must be used. 
599 | /// 600 | /// # Examples 601 | /// 602 | /// ``` 603 | /// use vmap::{Map, MapMut}; 604 | /// use std::path::PathBuf; 605 | /// 606 | /// # fn main() -> vmap::Result<()> { 607 | /// # let tmp = tempdir::TempDir::new("vmap")?; 608 | /// let path: PathBuf = /* path to file */ 609 | /// # tmp.path().join("example"); 610 | /// let (mut map, file) = MapMut::with_options().create(true).resize(100).open(&path)?; 611 | /// assert_eq!(100, map.len()); 612 | /// assert_eq!(b"\0\0\0\0", &map[..4]); 613 | /// 614 | /// map[..4].clone_from_slice(b"test"); 615 | /// 616 | /// let (map, file) = Map::with_options().open(&path)?; 617 | /// assert_eq!(100, map.len()); 618 | /// assert_eq!(b"test", &map[..4]); 619 | /// # Ok(()) 620 | /// # } 621 | /// ``` 622 | /// 623 | /// [`.open()`]: #method.open 624 | /// [`.open_if()`]: #method.open_if 625 | /// [`.write()`]: #method.write 626 | pub fn create(&mut self, create: bool) -> &mut Self { 627 | self.open_options.create(create); 628 | self 629 | } 630 | 631 | /// Sets the option to create a new file, failing if it already exists. 632 | /// 633 | /// This option is useful because it is atomic. Otherwise between checking 634 | /// whether a file exists and creating a new one, the file may have been 635 | /// created by another process (a TOCTOU race condition / attack). 636 | /// 637 | /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are 638 | /// ignored. 639 | /// 640 | /// This only applies when using [`.open()`] or [`.open_if()`]. In order for the 641 | /// file to be created, [`.write()`] access must be used. 
642 | /// 643 | /// # Examples 644 | /// 645 | /// ``` 646 | /// use vmap::MapMut; 647 | /// use std::path::PathBuf; 648 | /// 649 | /// # fn main() -> vmap::Result<()> { 650 | /// # let tmp = tempdir::TempDir::new("vmap")?; 651 | /// let path: PathBuf = /* path to file */ 652 | /// # tmp.path().join("example"); 653 | /// 654 | /// let (map, file) = MapMut::with_options().create_new(true).resize(10).open(&path)?; 655 | /// assert_eq!(10, map.len()); 656 | /// assert!(MapMut::with_options().create_new(true).open(&path).is_err()); 657 | /// # Ok(()) 658 | /// # } 659 | /// ``` 660 | /// 661 | /// [`.create()`]: #method.create 662 | /// [`.truncate()`]: #method.truncate 663 | /// [`.open()`]: #method.open 664 | /// [`.open_if()`]: #method.open_if 665 | /// [`.write()`]: #method.write 666 | pub fn create_new(&mut self, create_new: bool) -> &mut Self { 667 | self.open_options.create_new(create_new); 668 | self 669 | } 670 | 671 | /// Sets the option for truncating a previous file. 672 | /// 673 | /// If a file is successfully opened with this option set it will truncate 674 | /// the file to 0 length if it already exists. Given that the file will now 675 | /// be empty, a [`.resize()`] should be used. 676 | /// 677 | /// In order for the file to be truncated, [`.write()`] access must be used. 
678 | /// 679 | /// # Examples 680 | /// 681 | /// ``` 682 | /// use vmap::MapMut; 683 | /// use std::path::PathBuf; 684 | /// 685 | /// # fn main() -> vmap::Result<()> { 686 | /// # let tmp = tempdir::TempDir::new("vmap")?; 687 | /// let path: PathBuf = /* path to file */ 688 | /// # tmp.path().join("example"); 689 | /// 690 | /// { 691 | /// let (mut map, file) = MapMut::with_options() 692 | /// .create(true) 693 | /// .truncate(true) 694 | /// .resize(4) 695 | /// .open(&path)?; 696 | /// assert_eq!(b"\0\0\0\0", &map[..]); 697 | /// map[..4].clone_from_slice(b"test"); 698 | /// assert_eq!(b"test", &map[..]); 699 | /// } 700 | /// 701 | /// let (mut map, file) = MapMut::with_options().truncate(true).resize(4).open(&path)?; 702 | /// assert_eq!(b"\0\0\0\0", &map[..]); 703 | /// # Ok(()) 704 | /// # } 705 | /// ``` 706 | /// 707 | /// [`.resize()`]: #method.resize 708 | /// [`.write()`]: #method.write 709 | pub fn truncate(&mut self, truncate: bool) -> &mut Self { 710 | self.open_options.truncate(truncate); 711 | self.truncate = truncate; 712 | self 713 | } 714 | 715 | /// Sets the byte offset into the mapping. 716 | /// 717 | /// For file-based mappings, the offset defines the starting byte range 718 | /// from the beginning of the resource. This must be within the range of 719 | /// the file. 
720 | /// 721 | /// # Examples 722 | /// 723 | /// ``` 724 | /// use vmap::Map; 725 | /// use std::path::PathBuf; 726 | /// use std::str::from_utf8; 727 | /// use std::fs; 728 | /// 729 | /// # fn main() -> vmap::Result<()> { 730 | /// # let tmp = tempdir::TempDir::new("vmap")?; 731 | /// let path: PathBuf = /* path to file */ 732 | /// # tmp.path().join("example"); 733 | /// fs::write(&path, b"this is a test")?; 734 | /// 735 | /// let (map, file) = Map::with_options().offset(10).open(path)?; 736 | /// assert_eq!(Ok("test"), from_utf8(&map[..])); 737 | /// # Ok(()) 738 | /// # } 739 | /// ``` 740 | pub fn offset(&mut self, offset: usize) -> &mut Self { 741 | self.offset = offset; 742 | self 743 | } 744 | 745 | /// Sets the byte length extent of the mapping. 746 | /// 747 | /// For file-based mappings, this length must be available in the 748 | /// underlying resource, including any [`.offset()`]. When not specified, 749 | /// the default length is implied to be [`Extent::End`]. 750 | /// 751 | /// # Length with `Extent::End` 752 | /// 753 | /// With this value, the length extent is set to the end of the underlying 754 | /// resource. This is the default if no `.len()` is applied, but this can 755 | /// be set to override a prior setting if desired. 756 | /// 757 | /// For anonymous mappings, it is generally preferred to use a different 758 | /// extent strategy. Without setting any other extent, the default length 759 | /// is a single allocation unit of granularity. 760 | /// 761 | /// # Length with `Extent::Exact` 762 | /// 763 | /// Using an exact extent option will instruct the map to cover an exact 764 | /// byte length. That is, it will not consider the length of the underlying 765 | /// resource, if any. For file-based mappings, this length must be 766 | /// available in the file. For anonymous mappings, this is the minimum size 767 | /// that will be allocated, however, the resulting map will be sized 768 | /// exactly to this size. 
769 | /// 770 | /// A `usize` may be used as an [`Extent::Exact`] through the `usize` 771 | /// implementation of [`Into`]. 772 | /// 773 | /// ``` 774 | /// use vmap::{Map, MapMut}; 775 | /// use std::path::PathBuf; 776 | /// use std::str::from_utf8; 777 | /// use std::fs; 778 | /// 779 | /// # fn main() -> vmap::Result<()> { 780 | /// # let tmp = tempdir::TempDir::new("vmap")?; 781 | /// let path: PathBuf = /* path to file */ 782 | /// # tmp.path().join("example"); 783 | /// fs::write(&path, b"this is a test")?; 784 | /// 785 | /// let (map, file) = Map::with_options() 786 | /// .len(4) // or .len(Extent::Exaxt(4)) 787 | /// .open(&path)?; 788 | /// assert_eq!(Ok("this"), from_utf8(&map[..])); 789 | /// 790 | /// let mut anon = MapMut::with_options() 791 | /// .len(4) 792 | /// .alloc()?; 793 | /// assert_eq!(4, anon.len()); 794 | /// # Ok(()) 795 | /// # } 796 | /// ``` 797 | /// 798 | /// # Length with `Extent::Min` 799 | /// 800 | /// The minimum extent strategy creates a mapping that is at least the 801 | /// desired byte length, but may be larger. When applied to a file-based 802 | /// mapping, this ensures that the resulting memory region covers a minimum 803 | /// extent, but otherwise covers to the end of the file. For an anonymous 804 | /// map, this ensures the allocated region meets the minimum size required, 805 | /// but allows accessing the remaining allocated space that would otherwise 806 | /// be unusable. 
807 | /// 808 | /// ``` 809 | /// use vmap::{Extent, Map, MapMut, Size}; 810 | /// use std::path::PathBuf; 811 | /// use std::str::from_utf8; 812 | /// use std::fs; 813 | /// 814 | /// # fn main() -> vmap::Result<()> { 815 | /// # let tmp = tempdir::TempDir::new("vmap")?; 816 | /// let path: PathBuf = /* path to file */ 817 | /// # tmp.path().join("example"); 818 | /// fs::write(&path, b"this is a test")?; 819 | /// 820 | /// let (map, file) = Map::with_options() 821 | /// .offset(5) 822 | /// .len(Extent::Min(4)) 823 | /// .open(&path)?; 824 | /// assert_eq!(9, map.len()); 825 | /// assert_eq!(Ok("is a test"), from_utf8(&map[..])); 826 | /// 827 | /// assert!( 828 | /// Map::with_options() 829 | /// .len(Extent::Min(100)) 830 | /// .open_if(&path)? 831 | /// .0 832 | /// .is_none() 833 | /// ); 834 | /// 835 | /// let mut anon = MapMut::with_options() 836 | /// .len(Extent::Min(2000)) 837 | /// .alloc()?; 838 | /// assert_eq!(Size::alloc().size(1), anon.len()); 839 | /// # Ok(()) 840 | /// # } 841 | /// ``` 842 | /// 843 | /// # Length with `Extent::Max` 844 | /// 845 | /// The maximum extent strategy creates a mapping that is no larger than 846 | /// the desired byte length, but may be smaller. 
When applied to a file- 847 | /// based mapping, this will ensure that the resulting 848 | /// 849 | /// ``` 850 | /// use vmap::{Extent, Map, MapMut}; 851 | /// use std::path::PathBuf; 852 | /// use std::str::from_utf8; 853 | /// use std::fs; 854 | /// 855 | /// # fn main() -> vmap::Result<()> { 856 | /// # let tmp = tempdir::TempDir::new("vmap")?; 857 | /// let path: PathBuf = /* path to file */ 858 | /// # tmp.path().join("example"); 859 | /// fs::write(&path, b"this is a test")?; 860 | /// 861 | /// let (map, file) = Map::with_options() 862 | /// .offset(5) 863 | /// .len(Extent::Max(100)) 864 | /// .open(&path)?; 865 | /// assert_eq!(9, map.len()); 866 | /// assert_eq!(Ok("is a test"), from_utf8(&map[..])); 867 | /// 868 | /// let mut anon = MapMut::with_options() 869 | /// .len(Extent::Max(2000)) 870 | /// .alloc()?; 871 | /// assert_eq!(2000, anon.len()); 872 | /// # Ok(()) 873 | /// # } 874 | /// ``` 875 | /// 876 | /// [`Into`]: enum.Extent.html#impl-From 877 | /// [`Extent::End`]: enum.Extent.html#variant.End 878 | /// [`Extent::Exact`]: enum.Extent.html#variant.Exact 879 | /// [`Extent::Min`]: enum.Extent.html#variant.Min 880 | /// [`Extent::Max`]: enum.Extent.html#variant.Max 881 | /// [`.offset()`]: #method.offset 882 | /// [`.len_min()`]: #method.len_min 883 | /// [`.len_max()`]: #method.len_max 884 | pub fn len>(&mut self, value: E) -> &mut Self { 885 | self.len = value.into(); 886 | self 887 | } 888 | 889 | /// Sets the option to resize the file prior to mapping. 890 | /// 891 | /// When mapping to a file using [`.open()`], [`.open_if()`], [`.map()`], 892 | /// or [`.map_if()`] this options conditionally adjusts the length of the 893 | /// underlying resource to the desired size by calling [`.set_len()`] on 894 | /// the [`File`]. 895 | /// 896 | /// In order for the file to be resized, [`.write()`] access must be used. 897 | /// 898 | /// This has no affect on anonymous mappings. 
899 | /// 900 | /// # Resize with `Extent::End` 901 | /// 902 | /// This implies resizing to the current size of the file. In other words, 903 | /// no resize is performed, and this is the default strategy. 904 | /// 905 | /// # Resize with `Extent::Exact` 906 | /// 907 | /// Using an exact extent option will instruct the map to cover an exact 908 | /// byte length. That is, it will not consider the length of the underlying 909 | /// resource, if any. For file-based mappings, this length must be 910 | /// available in the file. For anonymous mappings, this is the minimum size 911 | /// that will be allocated, however, the resulting map will be sized 912 | /// exactly to this size. 913 | /// 914 | /// A `usize` may be used as an [`Extent::Exact`] through the `usize` 915 | /// implementation of [`Into`]. 916 | /// 917 | /// ``` 918 | /// use vmap::Map; 919 | /// use std::path::PathBuf; 920 | /// use std::str::from_utf8; 921 | /// use std::fs; 922 | /// 923 | /// # fn main() -> vmap::Result<()> { 924 | /// # let tmp = tempdir::TempDir::new("vmap")?; 925 | /// let path: PathBuf = /* path to file */ 926 | /// # tmp.path().join("example"); 927 | /// fs::write(&path, b"this is a test")?; 928 | /// 929 | /// let (map, file) = Map::with_options() 930 | /// .write() 931 | /// .resize(7) // or .resize(Extent::Exact(7)) 932 | /// .open(&path)?; 933 | /// assert_eq!(7, map.len()); 934 | /// assert_eq!(Ok("this is"), from_utf8(&map[..])); 935 | /// # Ok(()) 936 | /// # } 937 | /// ``` 938 | /// 939 | /// # Resize with `Extent::Min` 940 | /// 941 | /// The minimum extent strategy resizes the file to be at least the 942 | /// desired byte length, but may be larger. If the file is already equal 943 | /// to or larger than the extent, no resize is performed. 
944 | /// 945 | /// ``` 946 | /// use vmap::{Extent, Map}; 947 | /// use std::path::PathBuf; 948 | /// use std::str::from_utf8; 949 | /// use std::fs; 950 | /// 951 | /// # fn main() -> vmap::Result<()> { 952 | /// # let tmp = tempdir::TempDir::new("vmap")?; 953 | /// let path: PathBuf = /* path to file */ 954 | /// # tmp.path().join("example"); 955 | /// 956 | /// fs::write(&path, b"this")?; 957 | /// 958 | /// { 959 | /// let (map, file) = Map::with_options() 960 | /// .write() 961 | /// .resize(Extent::Min(7)) 962 | /// .open(&path)?; 963 | /// assert_eq!(7, map.len()); 964 | /// assert_eq!(Ok("this\0\0\0"), from_utf8(&map[..])); 965 | /// } 966 | /// 967 | /// fs::write(&path, b"this is a test")?; 968 | /// 969 | /// let (map, file) = Map::with_options() 970 | /// .write() 971 | /// .resize(Extent::Min(7)) 972 | /// .open(&path)?; 973 | /// assert_eq!(14, map.len()); 974 | /// assert_eq!(Ok("this is a test"), from_utf8(&map[..])); 975 | /// # Ok(()) 976 | /// # } 977 | /// ``` 978 | /// 979 | /// # Resize with `Extent::Max` 980 | /// 981 | /// The maximum extent strategy resizes the file to be no larger than the 982 | /// desired byte length, but may be smaller. If the file is already equal 983 | /// to or smaller than the extent, no resize is performed. 
984 | /// 985 | /// ``` 986 | /// use vmap::{Extent, Map}; 987 | /// use std::path::PathBuf; 988 | /// use std::str::from_utf8; 989 | /// use std::fs; 990 | /// 991 | /// # fn main() -> vmap::Result<()> { 992 | /// # let tmp = tempdir::TempDir::new("vmap")?; 993 | /// let path: PathBuf = /* path to file */ 994 | /// # tmp.path().join("example"); 995 | /// fs::write(&path, b"this")?; 996 | /// 997 | /// { 998 | /// let (map, file) = Map::with_options() 999 | /// .write() 1000 | /// .resize(Extent::Max(7)) 1001 | /// .open(&path)?; 1002 | /// assert_eq!(4, map.len()); 1003 | /// assert_eq!(Ok("this"), from_utf8(&map[..])); 1004 | /// } 1005 | /// 1006 | /// fs::write(&path, b"this is a test")?; 1007 | /// 1008 | /// let (map, file) = Map::with_options() 1009 | /// .write() 1010 | /// .resize(Extent::Max(7)) 1011 | /// .open(&path)?; 1012 | /// assert_eq!(7, map.len()); 1013 | /// assert_eq!(Ok("this is"), from_utf8(&map[..])); 1014 | /// # Ok(()) 1015 | /// # } 1016 | /// ``` 1017 | /// 1018 | /// [`.open()`]: #method.open 1019 | /// [`.open_if()`]: #method.open_if 1020 | /// [`.map()`]: #method.map 1021 | /// [`.map_if()`]: #method.map_if 1022 | /// [`.set_len()`]: https://doc.rust-lang.org/std/fs/struct.File.html#method.set_len 1023 | /// [`File`]: https://doc.rust-lang.org/std/fs/struct.File.html 1024 | /// [`.write()`]: #method.write 1025 | /// [`Into`]: enum.Extent.html#impl-From 1026 | /// [`Extent::End`]: enum.Extent.html#variant.End 1027 | /// [`Extent::Exact`]: enum.Extent.html#variant.Exact 1028 | /// [`Extent::Min`]: enum.Extent.html#variant.Min 1029 | /// [`Extent::Max`]: enum.Extent.html#variant.Max 1030 | pub fn resize>(&mut self, value: E) -> &mut Self { 1031 | self.resize = value.into(); 1032 | self 1033 | } 1034 | 1035 | /// Opens and maps a file using the current options specified by `self`. 1036 | /// 1037 | /// Unlike [`.open_if()`], when the requested offset or length lies outside of 1038 | /// the underlying file, an error is returned. 
1039 | /// 1040 | /// The returned [`File`] can be discarded if no longer needed to [`.flush()`] 1041 | /// or [`.map()`] other regions. This does not need to be kept open in order to 1042 | /// use the mapped value. 1043 | /// 1044 | /// # Examples 1045 | /// 1046 | /// ``` 1047 | /// use vmap::Map; 1048 | /// use std::path::PathBuf; 1049 | /// use std::fs; 1050 | /// 1051 | /// # fn main() -> std::io::Result<()> { 1052 | /// # let tmp = tempdir::TempDir::new("vmap")?; 1053 | /// let path: PathBuf = /* path to file */ 1054 | /// # tmp.path().join("example"); 1055 | /// fs::write(&path, b"this is a test")?; 1056 | /// 1057 | /// assert!(Map::with_options().len(4).open(&path).is_ok()); 1058 | /// assert!(Map::with_options().len(25).open(&path).is_err()); 1059 | /// # Ok(()) 1060 | /// # } 1061 | /// ``` 1062 | /// 1063 | /// [`.open_if()`]: #method.open_if 1064 | /// [`.map()`]: #method.map 1065 | /// [`.flush()`]: struct.MapMut.html#method.flush 1066 | /// [`File`]: https://doc.rust-lang.org/std/fs/struct.File.html 1067 | pub fn open>(&self, path: P) -> Result<(T, File)> { 1068 | let f = self.open_options.open(path).map_err(map_file_err)?; 1069 | Ok((self.map(&f)?, f)) 1070 | } 1071 | 1072 | /// Opens and maps a file with the options specified by `self` if the 1073 | /// provided byte range is valid. 1074 | /// 1075 | /// Unlike [`.open()`], when the requested offset or length lies outside of 1076 | /// the underlying file, `Ok(None)` will be returned rather than an error. 1077 | /// 1078 | /// The returned [`File`] can be discarded if no longer needed to [`.flush()`] 1079 | /// or [`.map()`] other regions. This does not need to be kept open in order to 1080 | /// use the mapped value. 
1081 | /// 1082 | /// # Examples 1083 | /// 1084 | /// ``` 1085 | /// use vmap::Map; 1086 | /// use std::path::PathBuf; 1087 | /// use std::fs; 1088 | /// 1089 | /// # fn main() -> std::io::Result<()> { 1090 | /// # let tmp = tempdir::TempDir::new("vmap")?; 1091 | /// let path: PathBuf = /* path to file */ 1092 | /// # tmp.path().join("example"); 1093 | /// fs::write(&path, b"this is a test")?; 1094 | /// 1095 | /// assert!(Map::with_options().len(4).open_if(&path).is_ok()); 1096 | /// 1097 | /// let result = Map::with_options().len(25).open_if(&path); 1098 | /// assert!(result.is_ok()); 1099 | /// assert!(result.unwrap().0.is_none()); 1100 | /// # Ok(()) 1101 | /// # } 1102 | /// ``` 1103 | /// 1104 | /// [`.open()`]: #method.open 1105 | /// [`.map()`]: #method.map 1106 | /// [`.flush()`]: struct.MapMut.html#method.flush 1107 | /// [`File`]: https://doc.rust-lang.org/std/fs/struct.File.html 1108 | pub fn open_if>(&self, path: P) -> Result<(Option, File)> { 1109 | let f = self.open_options.open(path).map_err(map_file_err)?; 1110 | Ok((self.map_if(&f)?, f)) 1111 | } 1112 | 1113 | /// Maps an open `File` using the current options specified by `self`. 1114 | /// 1115 | /// Unlike [`.map_if()`], when the requested offset or length lies outside of 1116 | /// the underlying file, an error is returned. 
    ///
    /// # Examples
    ///
    /// ```
    /// use vmap::Map;
    /// use std::path::PathBuf;
    /// use std::fs::OpenOptions;
    ///
    /// # fn main() -> std::io::Result<()> {
    /// # let tmp = tempdir::TempDir::new("vmap")?;
    /// let path: PathBuf = /* path to file */
    /// # tmp.path().join("example");
    /// let f = OpenOptions::new()
    ///     .read(true)
    ///     .write(true)
    ///     .create(true)
    ///     .open(path)?;
    /// f.set_len(8)?;
    ///
    /// assert!(Map::with_options().len(4).map(&f).is_ok());
    /// assert!(Map::with_options().len(25).map(&f).is_err());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`.map_if()`]: #method.map_if
    pub fn map(&self, f: &File) -> Result<T> {
        // Delegate to the fallible-range variant and convert its "range not
        // available" outcome (`Ok(None)`) into an `InvalidRange` input error.
        self.map_if(f)?
            .ok_or_else(|| Error::input(Operation::MapFile, Input::InvalidRange))
    }

    /// Maps an open `File` with the options specified by `self` if the provided
    /// byte range is valid.
    ///
    /// Unlike [`.map()`], when the requested offset or length lies outside of
    /// the underlying file, `Ok(None)` will be returned rather than an error.
    ///
    /// # Examples
    ///
    /// ```
    /// use vmap::Map;
    /// use std::path::PathBuf;
    /// use std::fs::OpenOptions;
    ///
    /// # fn main() -> std::io::Result<()> {
    /// # let tmp = tempdir::TempDir::new("vmap")?;
    /// let path: PathBuf = /* path to file */
    /// # tmp.path().join("example");
    /// let f = OpenOptions::new()
    ///     .read(true)
    ///     .write(true)
    ///     .create(true)
    ///     .open(path)?;
    /// f.set_len(8)?;
    ///
    /// assert!(Map::with_options().len(4).map_if(&f).is_ok());
    ///
    /// let result = Map::with_options().len(25).map_if(&f);
    /// assert!(result.is_ok());
    /// assert!(result.unwrap().is_none());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`.map()`]: #method.map
    pub fn map_if(&self, f: &File) -> Result<Option<T>> {
        let off = self.offset;
        // Current file size in bytes — the baseline that the truncate/resize
        // policies below adjust before the range check.
        let mut flen = f.metadata().map_err(map_file_err)?.len() as usize;

        // Grows or shrinks the file and reports the size it was set to.
        let resize = |sz: usize| f.set_len(sz as u64).map(|_| sz).map_err(map_file_err);

        if self.truncate && flen > 0 {
            flen = resize(0)?;
        }

        // Apply the requested resize extent relative to the (possibly just
        // truncated) length: Exact always resizes, Min only grows, Max only
        // shrinks.
        flen = match self.resize {
            Extent::Exact(sz) => resize(sz)?,
            Extent::Min(sz) if sz > flen => resize(sz)?,
            Extent::Max(sz) if sz < flen => resize(sz)?,
            _ => flen,
        };

        // The mapping must begin inside the file.
        if flen < off {
            return Ok(None);
        }

        // Bytes available from `off` to the end of the file.
        let max = flen - off;
        let len = match self.len {
            // A minimum or exact length that exceeds what the file holds
            // cannot be satisfied, so report the range as unavailable.
            Extent::Min(l) | Extent::Exact(l) if l > max => return Ok(None),
            Extent::Min(_) | Extent::End => max,
            Extent::Max(l) => cmp::min(l, max),
            Extent::Exact(l) => l,
        };

        // Round the file offset down to the allocation granularity, map the
        // extra leading bytes too, then hand the caller a pointer advanced
        // back to the originally requested (possibly unaligned) offset.
        let mapoff = Size::alloc().truncate(off);
        let maplen = len + (off - mapoff);
        let ptr = map_file(f, mapoff, maplen, self.protect)?;
        unsafe { Ok(Some(T::from_ptr(ptr.add(off - mapoff), len))) }
    }

    /// Creates an anonymous allocation using the options specified by `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use vmap::{Extent, MapMut};
    ///
    /// # fn main() -> vmap::Result<()> {
    /// let map = MapMut::with_options().len(Extent::Min(500)).alloc()?;
    /// assert!(map.len() >= 500);
    /// # Ok(())
    /// # }
    /// ```
    pub fn alloc(&self) -> Result<T> {
        // Keep the sub-page portion of the requested offset; the allocation
        // itself always starts on a page boundary.
        let off = Size::page().offset(self.offset);
        let len = match self.len {
            // End/Min round the total (offset + length) up to the allocation
            // granularity; Max and Exact use the requested length as-is.
            Extent::End => Size::alloc().round(off + 1) - off,
            Extent::Min(l) => Size::alloc().round(off + l) - off,
            Extent::Max(l) | Extent::Exact(l) => l,
        };

        let ptr = map_anon(off + len, self.protect)?;
        unsafe { Ok(T::from_ptr(ptr.add(off), len)) }
    }
}

// NOTE(review): generic parameters below are reconstructed — the dump this was
// recovered from stripped `<...>` sequences; confirm against upstream vmap.
impl<T: FromPtr> Default for Options<T> {
    fn default() -> Self {
        Self::new()
    }
}

// Wraps an I/O error from a file operation into the crate error type,
// attributing it to the MapFile operation.
fn map_file_err(e: io::Error) -> Error {
    Error::io(Operation::MapFile, e)
}
--------------------------------------------------------------------------------
/src/os/mod.rs:
--------------------------------------------------------------------------------
//!
//! Low-level cross-platform virtual memory functions.

#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub use self::unix::*;

#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub use self::windows::*;
--------------------------------------------------------------------------------
/src/os/unix/mach.rs:
--------------------------------------------------------------------------------
#![allow(non_camel_case_types)]

use std::os::raw::{c_int, c_uint};

use libc::uintptr_t;

use crate::{Error, Operation, Result};

use self::Operation::*;

// Minimal hand-rolled Mach kernel type aliases; only what the functions
// declared below need.
type kern_return_t = c_int;
type vm_offset_t = uintptr_t;
type vm_size_t = uintptr_t;
type mach_port_t = c_uint;
type vm_map_t = mach_port_t;
type vm_address_t = vm_offset_t;
type vm_prot_t = c_int;
type mem_entry_name_port_t = mach_port_t;
type vm_inherit_t = c_uint;
// NOTE(review): Darwin declares boolean_t as a C unsigned int, but Rust `bool`
// is one byte. Passing it by value appears to work with the Darwin calling
// convention used here — confirm against the target ABI before porting.
type boolean_t = bool;

const KERN_SUCCESS: kern_return_t = 0;

const VM_PROT_READ: vm_prot_t = 0x01;
const VM_PROT_WRITE: vm_prot_t = 0x02;
const VM_PROT_DEFAULT: vm_prot_t = VM_PROT_READ | VM_PROT_WRITE;

const VM_FLAGS_FIXED: c_int = 0x0000;
const VM_FLAGS_ANYWHERE: c_int = 0x0001;
const VM_FLAGS_OVERWRITE: c_int = 0x4000;

const VM_INHERIT_NONE: vm_inherit_t = 2;

extern "C" {
    fn mach_task_self() -> mach_port_t;

    fn vm_allocate(
        target_task: vm_map_t,
        address: *mut vm_address_t,
        size: vm_size_t,
        flags: c_int,
    ) -> kern_return_t;

    fn vm_deallocate(
        target_task: vm_map_t,
        address: vm_address_t,
        size: vm_size_t,
    ) -> kern_return_t;

    fn vm_map(
        target_task: vm_map_t,
        address: *mut vm_address_t,
        size: vm_size_t,
        mask: vm_address_t,
        flags: c_int,
        object: mem_entry_name_port_t,
        offset: vm_offset_t,
        copy: boolean_t,
        cur_protection: vm_prot_t,
        max_protection: vm_prot_t,
        inheritance: vm_inherit_t,
    ) -> kern_return_t;

    fn mach_make_memory_entry(
        target_task: vm_map_t,
        size: *mut vm_size_t,
        offset: vm_offset_t,
        permission: vm_prot_t,
        object_handle: *mut mem_entry_name_port_t,
        parent_entry: mem_entry_name_port_t,
    ) -> kern_return_t;
}

/// Creates an anonymous circular allocation.
///
/// The length is the size of the sequential range, and the offset of
/// `len+1` refers to the same memory location at offset `0`. The circle
/// continues up through the offset of `2*len - 1`.
///
/// Strategy: reserve `2*len` of address space, re-allocate the first half,
/// create a named memory entry for it, then `vm_map` that same entry over
/// the second half so both halves alias the same physical pages.
pub fn map_ring(len: usize) -> Result<*mut u8> {
    let port = unsafe { mach_task_self() };
    let mut addr: vm_address_t = 0;
    let mut len = len as vm_size_t;
    let mut map_port: mem_entry_name_port_t = 0;

    // Step 1: reserve a contiguous 2*len region anywhere in the address space.
    let ret = unsafe { vm_allocate(port, &mut addr, 2 * len, VM_FLAGS_ANYWHERE) };
    if ret != KERN_SUCCESS {
        return Err(Error::kernel(RingAllocate, ret));
    }

    // Step 2: re-allocate the first half in place so it can back the entry.
    let ret = unsafe { vm_allocate(port, &mut addr, len, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE) };
    if ret != KERN_SUCCESS {
        unsafe {
            vm_deallocate(port, addr, 2 * len);
        }
        return Err(Error::kernel(RingPrimary, ret));
    }

    // Step 3: obtain a named memory entry for the first half. Note that the
    // kernel may update `len` through the pointer.
    let ret =
        unsafe { mach_make_memory_entry(port, &mut len, addr, VM_PROT_DEFAULT, &mut map_port, 0) };
    if ret != KERN_SUCCESS {
        unsafe {
            vm_deallocate(port, addr, 2 * len);
        }
        return Err(Error::kernel(RingEntry, ret));
    }

    // Step 4: map the same entry at the second half, creating the mirror.
    let mut half = addr + len;
    let ret = unsafe {
        vm_map(
            port,
            &mut half,
            len,
            0, // mask
            VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
            map_port,
            0,     // offset
            false, // copy
            VM_PROT_DEFAULT,
            VM_PROT_DEFAULT,
            VM_INHERIT_NONE,
        )
    };
    if ret != KERN_SUCCESS {
        unsafe {
            vm_deallocate(port, addr, 2 * len);
        }
        return Err(Error::kernel(RingSecondary, ret));
    }

    Ok(addr as *mut u8)
}

/// Unmaps a ring mapping created by `map_ring`.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn unmap_ring(pg: *mut u8, len: usize) -> Result<()> {
    let port = mach_task_self();
    // Both mirrored halves were carved out of one 2*len reservation, so a
    // single deallocate over the full span releases everything.
    let ret = vm_deallocate(port, pg as vm_address_t, 2 * len);
    if ret != KERN_SUCCESS {
        Err(Error::kernel(RingDeallocate, ret))
    } else {
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/os/unix/mod.rs:
--------------------------------------------------------------------------------
use crate::{Advise, Flush, Protect};

use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::ptr;

use libc::{
    c_void, madvise, mlock, mmap, mprotect, msync, munlock, munmap, off_t, sysconf, MADV_DONTNEED,
    MADV_NORMAL, MADV_RANDOM, MADV_SEQUENTIAL, MADV_WILLNEED, MAP_ANON, MAP_FAILED, MAP_PRIVATE,
    MAP_SHARED, MS_ASYNC, MS_SYNC, PROT_EXEC, PROT_READ, PROT_WRITE, _SC_PAGESIZE,
};

use crate::{Error, Operation, Result};

use self::Operation::*;

// For macOS and iOS we use the mach vm system for rings. The posix module
// does work correctly on these targets, but it necessitates an otherwise
// unneeded file descriptor.
20 | #[cfg(all(feature = "io", any(target_os = "macos", target_os = "ios")))] 21 | mod mach; 22 | #[cfg(all(feature = "io", any(target_os = "macos", target_os = "ios")))] 23 | pub use self::mach::{map_ring, unmap_ring}; 24 | 25 | // For non-mach targets load the POSIX version of the ring mapping functions. 26 | #[cfg(all(feature = "io", not(any(target_os = "macos", target_os = "ios"))))] 27 | mod posix; 28 | #[cfg(all(feature = "io", not(any(target_os = "macos", target_os = "ios"))))] 29 | pub use self::posix::{map_ring, unmap_ring}; 30 | 31 | /// Requests the page size and allocation granularity from the system. 32 | pub fn system_info() -> (u32, u32) { 33 | let size = unsafe { sysconf(_SC_PAGESIZE) as u32 }; 34 | (size, size) 35 | } 36 | 37 | fn result(op: Operation, pg: *mut c_void) -> Result<*mut u8> { 38 | if pg == MAP_FAILED { 39 | Err(Error::last_os_error(op)) 40 | } else { 41 | Ok(pg as *mut u8) 42 | } 43 | } 44 | 45 | /// Memory maps a given range of a file. 46 | pub fn map_file(file: &File, off: usize, len: usize, prot: Protect) -> Result<*mut u8> { 47 | let (prot, flags) = match prot { 48 | Protect::ReadOnly => (PROT_READ, MAP_SHARED), 49 | Protect::ReadWrite => (PROT_READ | PROT_WRITE, MAP_SHARED), 50 | Protect::ReadCopy => (PROT_READ | PROT_WRITE, MAP_PRIVATE), 51 | Protect::ReadExec => (PROT_READ | PROT_EXEC, MAP_PRIVATE), 52 | }; 53 | unsafe { 54 | result( 55 | MapFile, 56 | mmap( 57 | ptr::null_mut(), 58 | len, 59 | prot, 60 | flags, 61 | file.as_raw_fd(), 62 | off as off_t, 63 | ), 64 | ) 65 | } 66 | } 67 | 68 | /// Creates an anonymous allocation. 
69 | pub fn map_anon(len: usize, prot: Protect) -> Result<*mut u8> { 70 | let (prot, flags) = match prot { 71 | Protect::ReadOnly => (PROT_READ, MAP_SHARED), 72 | Protect::ReadWrite => (PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED), 73 | Protect::ReadCopy => (PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE), 74 | Protect::ReadExec => (PROT_READ | PROT_EXEC, MAP_ANON | MAP_PRIVATE), 75 | }; 76 | unsafe { result(MapAnonymous, mmap(ptr::null_mut(), len, prot, flags, -1, 0)) } 77 | } 78 | 79 | /// Unmaps a page range from a previos mapping. 80 | /// 81 | /// # Safety 82 | /// 83 | /// This does not know or care if `pg` or `len` are valid. That is, 84 | /// it may be null, not at a proper page boundary, point to a size 85 | /// different from `len`, or worse yet, point to a properly mapped 86 | /// pointer from some other allocation system. 87 | /// 88 | /// Generally don't use this unless you are entirely sure you are 89 | /// doing so correctly. 90 | pub unsafe fn unmap(pg: *mut u8, len: usize) -> Result<()> { 91 | if munmap(pg as *mut c_void, len) < 0 { 92 | Err(Error::last_os_error(Unmap)) 93 | } else { 94 | Ok(()) 95 | } 96 | } 97 | 98 | /// Changes the protection for a page range. 99 | /// 100 | /// # Safety 101 | /// 102 | /// This does not know or care if `pg` or `len` are valid. That is, 103 | /// it may be null, not at a proper page boundary, point to a size 104 | /// different from `len`, or worse yet, point to a properly mapped 105 | /// pointer from some other allocation system. 106 | /// 107 | /// Generally don't use this unless you are entirely sure you are 108 | /// doing so correctly. 
109 | pub unsafe fn protect(pg: *mut u8, len: usize, prot: Protect) -> Result<()> { 110 | let prot = match prot { 111 | Protect::ReadOnly => PROT_READ, 112 | Protect::ReadWrite => PROT_READ | PROT_WRITE, 113 | Protect::ReadCopy => PROT_READ | PROT_WRITE, 114 | Protect::ReadExec => PROT_READ | PROT_EXEC, 115 | }; 116 | if mprotect(pg as *mut c_void, len, prot) != 0 { 117 | Err(Error::last_os_error(Protect)) 118 | } else { 119 | Ok(()) 120 | } 121 | } 122 | 123 | /// Writes modified whole pages back to the filesystem. 124 | /// 125 | /// # Safety 126 | /// 127 | /// This does not know or care if `pg` or `len` are valid. That is, 128 | /// it may be null, not at a proper page boundary, point to a size 129 | /// different from `len`, or worse yet, point to a properly mapped 130 | /// pointer from some other allocation system. 131 | /// 132 | /// Generally don't use this unless you are entirely sure you are 133 | /// doing so correctly. 134 | pub unsafe fn flush(pg: *mut u8, _file: &File, len: usize, mode: Flush) -> Result<()> { 135 | let flags = match mode { 136 | Flush::Sync => MS_SYNC, 137 | Flush::Async => MS_ASYNC, 138 | }; 139 | if msync(pg as *mut c_void, len, flags) < 0 { 140 | Err(Error::last_os_error(Flush)) 141 | } else { 142 | Ok(()) 143 | } 144 | } 145 | 146 | /// Updates the advise for the page range. 147 | /// 148 | /// # Safety 149 | /// 150 | /// This does not know or care if `pg` or `len` are valid. That is, 151 | /// it may be null, not at a proper page boundary, point to a size 152 | /// different from `len`, or worse yet, point to a properly mapped 153 | /// pointer from some other allocation system. 154 | /// 155 | /// Generally don't use this unless you are entirely sure you are 156 | /// doing so correctly. 
157 | pub unsafe fn advise(pg: *mut u8, len: usize, adv: Advise) -> Result<()> { 158 | let adv = match adv { 159 | Advise::Normal => MADV_NORMAL, 160 | Advise::Sequential => MADV_SEQUENTIAL, 161 | Advise::Random => MADV_RANDOM, 162 | Advise::WillNeed => MADV_WILLNEED, 163 | Advise::WillNotNeed => MADV_DONTNEED, 164 | }; 165 | 166 | if madvise(pg as *mut c_void, len, adv) < 0 { 167 | Err(Error::last_os_error(Advise)) 168 | } else { 169 | Ok(()) 170 | } 171 | } 172 | 173 | /// Locks physical pages into memory. 174 | /// 175 | /// # Safety 176 | /// 177 | /// This does not know or care if `pg` or `len` are valid. That is, 178 | /// it may be null, not at a proper page boundary, point to a size 179 | /// different from `len`, or worse yet, point to a properly mapped 180 | /// pointer from some other allocation system. 181 | /// 182 | /// Generally don't use this unless you are entirely sure you are 183 | /// doing so correctly. 184 | pub unsafe fn lock(pg: *mut u8, len: usize) -> Result<()> { 185 | if mlock(pg as *mut c_void, len) < 0 { 186 | Err(Error::last_os_error(Lock)) 187 | } else { 188 | Ok(()) 189 | } 190 | } 191 | 192 | /// Unlocks physical pages from memory. 193 | /// 194 | /// # Safety 195 | /// 196 | /// This does not know or care if `pg` or `len` are valid. That is, 197 | /// it may be null, not at a proper page boundary, point to a size 198 | /// different from `len`, or worse yet, point to a properly mapped 199 | /// pointer from some other allocation system. 200 | /// 201 | /// Generally don't use this unless you are entirely sure you are 202 | /// doing so correctly. 
pub unsafe fn unlock(pg: *mut u8, len: usize) -> Result<()> {
    if munlock(pg as *mut c_void, len) < 0 {
        Err(Error::last_os_error(Unlock))
    } else {
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/os/unix/posix/memfd.rs:
--------------------------------------------------------------------------------
use std::os::raw::c_int;

use crate::{Error, Operation, Result};

// Opens an anonymous, unlinked memory-backed file descriptor suitable for
// shared mappings. Linux/Android use the memfd_create syscall directly
// (bypassing libc so older libc versions without the wrapper still work).
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn memfd_open() -> Result<c_int> {
    use std::os::raw::c_char;
    const NAME: &[u8] = b"vmap\0";
    let fd = unsafe {
        libc::syscall(
            libc::SYS_memfd_create,
            NAME.as_ptr() as *const c_char,
            libc::MFD_CLOEXEC,
        )
    };
    if fd < 0 {
        Err(Error::last_os_error(Operation::MemoryFd))
    } else {
        Ok(fd as c_int)
    }
}

// FreeBSD provides SHM_ANON, yielding an anonymous shared-memory fd with no
// name to race on or unlink.
#[cfg(target_os = "freebsd")]
pub fn memfd_open() -> Result<c_int> {
    let fd = unsafe { libc::shm_open(libc::SHM_ANON, libc::O_RDWR, 0o600) };
    if fd < 0 {
        Err(Error::last_os_error(Operation::MemoryFd))
    } else {
        Ok(fd as c_int)
    }
}

// Generic POSIX fallback: exclusively create a pseudo-randomly named shm
// object, then immediately unlink it so only the fd keeps it alive. Retries
// on EEXIST name collisions.
#[cfg(not(any(target_os = "linux", target_os = "android", target_os = "freebsd")))]
pub fn memfd_open() -> Result<c_int> {
    const OFLAGS: c_int = libc::O_RDWR | libc::O_CREAT | libc::O_EXCL | libc::O_CLOEXEC;
    // 7 'X' placeholders plus trailing NUL; the digits overwrite bytes 6..13.
    let mut path_bytes: [u8; 14] = *b"/vmap-XXXXXXX\0";

    loop {
        let path = {
            use std::io::Write;
            // Sub-second nanoseconds as a cheap name source; O_EXCL plus the
            // retry loop handles any collision, so this need not be random.
            let pseudorandom = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .subsec_nanos()
                % 10000000;
            write!(&mut path_bytes[6..], "{:0>7}", pseudorandom).unwrap();
            std::ffi::CStr::from_bytes_with_nul(&path_bytes).unwrap()
        };

        let fd = unsafe { libc::shm_open(path.as_ptr(), OFLAGS, 0o600) };
        if fd < 0 {
            let err = Error::last_os_error(Operation::MemoryFd);
            if err.raw_os_error() != Some(libc::EEXIST) {
                return Err(err);
            }
            // EEXIST: another process grabbed the name — loop and try again.
        } else {
            // Unlink immediately; the descriptor keeps the object alive.
            unsafe { libc::shm_unlink(path.as_ptr()) };
            return Ok(fd);
        }
    }
}
--------------------------------------------------------------------------------
/src/os/unix/posix/mod.rs:
--------------------------------------------------------------------------------
mod memfd;
use self::memfd::memfd_open;

use std::os::raw::c_int;
use std::ptr;

use libc::{
    c_void, close, ftruncate, mmap, off_t, MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE,
    MAP_SHARED, PROT_READ, PROT_WRITE,
};

use super::unmap;
use crate::{Error, Operation, Result};

use self::Operation::*;

/// Creates an anonymous circular allocation.
///
/// The length is the size of the sequential range, and the offset of
/// `len+1` refers to the same memory location at offset `0`. The circle
/// continues up through the offset of `2*len - 1`.
pub fn map_ring(len: usize) -> Result<*mut u8> {
    // Create a temporary file descriptor truncated to the ring size.
    let fd = tmp_open(len)?;
    let ret = wrap_fd(len, fd);
    // The mappings keep the underlying object alive; the fd is no longer
    // needed regardless of success.
    unsafe {
        close(fd);
    }
    ret
}

fn wrap_fd(len: usize, fd: c_int) -> Result<*mut u8> {
    // Map anonymous memory at an initial address that will cover the
    // duplicate address range, reserving 2*len of contiguous space.
    let pg = map(
        RingAllocate,
        ptr::null_mut(),
        len * 2,
        MAP_PRIVATE | MAP_ANON,
        -1,
    )?;
    match wrap_ptr(pg, len, fd) {
        Err(err) => unsafe {
            // Best-effort cleanup of the reservation before reporting.
            unmap_ring(pg, len).unwrap_or_default();
            Err(err)
        },
        Ok(pg) => Ok(pg),
    }
}

fn wrap_ptr(pg: *mut u8, len: usize, fd: c_int) -> Result<*mut u8> {
    // Map the two halves of the buffer into adjacent addresses that use the
    // same file descriptor offset, so both halves alias the same pages.
    map(RingPrimary, pg, len, MAP_SHARED | MAP_FIXED, fd)?;
    map(
        RingSecondary,
        unsafe { pg.add(len) },
        len,
        MAP_SHARED | MAP_FIXED,
        fd,
    )?;
    Ok(pg)
}

// Thin mmap wrapper that always requests read/write pages and tags any
// failure with `op`.
fn map(op: Operation, pg: *mut u8, len: usize, flags: c_int, fd: c_int) -> Result<*mut u8> {
    unsafe {
        let pg = mmap(pg as *mut c_void, len, PROT_READ | PROT_WRITE, flags, fd, 0);
        if pg == MAP_FAILED {
            Err(Error::last_os_error(op))
        } else {
            Ok(pg as *mut u8)
        }
    }
}

/// Unmaps a ring mapping created by `map_ring`.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn unmap_ring(pg: *mut u8, len: usize) -> Result<()> {
    // Both halves are contiguous, so one munmap over 2*len releases them.
    unmap(pg, 2 * len)
}

// Opens the backing memory fd and sizes it to `size` bytes, closing the fd
// again if the truncate fails.
fn tmp_open(size: usize) -> Result<c_int> {
    let fd = memfd_open()?;
    if unsafe { ftruncate(fd, size as off_t) } < 0 {
        let err = Error::last_os_error(Operation::MemoryFd);
        unsafe {
            close(fd);
        }
        Err(err)
    } else {
        Ok(fd)
    }
}
--------------------------------------------------------------------------------
/src/os/windows.rs:
--------------------------------------------------------------------------------
use crate::{Advise, Flush, Protect};
use std::os::windows::raw::HANDLE;

use std::fs::File;
use std::os::raw::c_void;
use std::os::windows::io::AsRawHandle;
use std::{mem, ptr};

use winapi::shared::basetsd::SIZE_T;
use winapi::shared::minwindef::DWORD;
use winapi::um::fileapi::FlushFileBuffers;
use winapi::um::handleapi::{CloseHandle, INVALID_HANDLE_VALUE};
use
winapi::um::memoryapi::{ 14 | CreateFileMappingW, FlushViewOfFile, MapViewOfFileEx, UnmapViewOfFile, VirtualAlloc, 15 | VirtualFree, VirtualLock, VirtualProtect, VirtualUnlock, FILE_MAP_COPY, FILE_MAP_EXECUTE, 16 | FILE_MAP_READ, FILE_MAP_WRITE, 17 | }; 18 | use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO}; 19 | use winapi::um::winnt::{ 20 | MEM_RELEASE, MEM_RESERVE, PAGE_EXECUTE_READ, PAGE_NOACCESS, PAGE_READONLY, PAGE_READWRITE, 21 | PAGE_WRITECOPY, 22 | }; 23 | 24 | use crate::{Error, Operation, Result}; 25 | 26 | use self::Operation::*; 27 | 28 | struct MapHandle { 29 | map: HANDLE, 30 | } 31 | 32 | impl MapHandle { 33 | pub unsafe fn new(op: Operation, file: HANDLE, prot: DWORD, len: usize) -> Result { 34 | let map = CreateFileMappingW( 35 | file, 36 | ptr::null_mut(), 37 | prot, 38 | (len >> 16 >> 16) as DWORD, 39 | (len & 0xffffffff) as DWORD, 40 | ptr::null(), 41 | ); 42 | if map.is_null() { 43 | Err(Error::last_os_error(op)) 44 | } else { 45 | Ok(Self { map }) 46 | } 47 | } 48 | 49 | pub unsafe fn view_ptr( 50 | &self, 51 | access: DWORD, 52 | off: usize, 53 | len: usize, 54 | at: *mut c_void, 55 | ) -> *mut c_void { 56 | MapViewOfFileEx( 57 | self.map, 58 | access as DWORD, 59 | (off >> 16 >> 16) as DWORD, 60 | (off & 0xffffffff) as DWORD, 61 | len as SIZE_T, 62 | at, 63 | ) 64 | } 65 | 66 | pub unsafe fn view( 67 | &self, 68 | op: Operation, 69 | access: DWORD, 70 | off: usize, 71 | len: usize, 72 | at: *mut c_void, 73 | ) -> Result<*mut u8> { 74 | let pg = self.view_ptr(access, off, len, at); 75 | if pg.is_null() { 76 | Err(Error::last_os_error(op)) 77 | } else { 78 | Ok(pg as *mut u8) 79 | } 80 | } 81 | } 82 | 83 | impl Drop for MapHandle { 84 | fn drop(&mut self) { 85 | unsafe { 86 | CloseHandle(self.map); 87 | } 88 | } 89 | } 90 | 91 | /// Requests the page size and allocation granularity from the system. 
92 | pub fn system_info() -> (u32, u32) { 93 | let mut info = mem::MaybeUninit::::uninit(); 94 | let info = unsafe { 95 | GetSystemInfo(info.as_mut_ptr() as LPSYSTEM_INFO); 96 | info.assume_init() 97 | }; 98 | (info.dwPageSize, info.dwAllocationGranularity) 99 | } 100 | 101 | /// Memory maps a given range of a file. 102 | pub fn map_file(file: &File, off: usize, len: usize, prot: Protect) -> Result<*mut u8> { 103 | let (prot, access) = match prot { 104 | Protect::ReadOnly => (PAGE_READONLY, FILE_MAP_READ), 105 | Protect::ReadWrite => (PAGE_READWRITE, FILE_MAP_READ | FILE_MAP_WRITE), 106 | Protect::ReadCopy => (PAGE_WRITECOPY, FILE_MAP_COPY), 107 | Protect::ReadExec => (PAGE_EXECUTE_READ, FILE_MAP_READ | FILE_MAP_EXECUTE), 108 | }; 109 | 110 | unsafe { 111 | let map = MapHandle::new(MapFileHandle, file.as_raw_handle(), prot, 0)?; 112 | map.view(MapFileView, access, off, len, ptr::null_mut()) 113 | } 114 | } 115 | 116 | /// Creates an anonymous allocation. 117 | pub fn map_anon(len: usize, prot: Protect) -> Result<*mut u8> { 118 | let (prot, access) = match prot { 119 | Protect::ReadOnly => (PAGE_READONLY, FILE_MAP_READ), 120 | Protect::ReadWrite => (PAGE_READWRITE, FILE_MAP_READ | FILE_MAP_WRITE), 121 | Protect::ReadCopy => (PAGE_WRITECOPY, FILE_MAP_COPY), 122 | Protect::ReadExec => (PAGE_EXECUTE_READ, FILE_MAP_READ | FILE_MAP_EXECUTE), 123 | }; 124 | 125 | unsafe { 126 | let map = MapHandle::new(MapAnonymousHandle, INVALID_HANDLE_VALUE, prot, len)?; 127 | map.view(MapAnonymousView, access, 0, len, ptr::null_mut()) 128 | } 129 | } 130 | 131 | unsafe fn reserve(len: usize) -> Result<*mut c_void> { 132 | let pg = VirtualAlloc(ptr::null_mut(), len as SIZE_T, MEM_RESERVE, PAGE_NOACCESS); 133 | if pg.is_null() { 134 | Err(Error::last_os_error(RingAllocate)) 135 | } else { 136 | VirtualFree(pg, 0, MEM_RELEASE); 137 | Ok(pg) 138 | } 139 | } 140 | 141 | #[cfg(feature = "io")] 142 | unsafe fn map_ring_handle(map: &MapHandle, len: usize, pg: *mut c_void) -> Result<*mut u8> { 
143 | let a = map.view(RingPrimary, FILE_MAP_READ | FILE_MAP_WRITE, 0, len, pg)?; 144 | let b = map.view( 145 | RingSecondary, 146 | FILE_MAP_READ | FILE_MAP_WRITE, 147 | 0, 148 | len, 149 | pg.add(len), 150 | ); 151 | if b.is_err() { 152 | UnmapViewOfFile(a as *mut c_void); 153 | b 154 | } else { 155 | Ok(a as *mut u8) 156 | } 157 | } 158 | 159 | /// Creates an anonymous circular allocation. 160 | /// 161 | /// The length is the size of the sequential range, and the offset of 162 | /// `len+1` refers to the same memory location at offset `0`. The circle 163 | /// continues to up through the offset of `2*len - 1`. 164 | #[cfg(feature = "io")] 165 | pub fn map_ring(len: usize) -> Result<*mut u8> { 166 | let full = 2 * len; 167 | let map = unsafe { MapHandle::new(RingAllocate, INVALID_HANDLE_VALUE, PAGE_READWRITE, full)? }; 168 | 169 | let mut n = 0; 170 | loop { 171 | let pg = unsafe { reserve(full)? }; 172 | let rc = unsafe { map_ring_handle(&map, len, pg) }; 173 | if rc.is_ok() || n == 5 { 174 | return rc; 175 | } 176 | n += 1; 177 | } 178 | } 179 | 180 | /// Unmaps a page range from a previos mapping. 181 | /// 182 | /// # Safety 183 | /// 184 | /// This does not know or care if `pg` or `len` are valid. That is, 185 | /// it may be null, not at a proper page boundary, point to a size 186 | /// different from `len`, or worse yet, point to a properly mapped 187 | /// pointer from some other allocation system. 188 | /// 189 | /// Generally don't use this unless you are entirely sure you are 190 | /// doing so correctly. 191 | pub unsafe fn unmap(pg: *mut u8, _len: usize) -> Result<()> { 192 | if UnmapViewOfFile(pg as *mut c_void) != 0 { 193 | Err(Error::last_os_error(Unmap)) 194 | } else { 195 | Ok(()) 196 | } 197 | } 198 | 199 | /// Unmaps a ring mapping created by `map_ring`. 200 | /// 201 | /// # Safety 202 | /// 203 | /// This does not know or care if `pg` or `len` are valid. 
/// That is, it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
#[cfg(feature = "io")]
pub unsafe fn unmap_ring(pg: *mut u8, len: usize) -> Result<()> {
    // The ring is two adjacent views of the same section; each must be
    // unmapped separately. UnmapViewOfFile returns zero on failure.
    if UnmapViewOfFile(pg.offset(len as isize) as *mut c_void) == 0 {
        Err(Error::last_os_error(RingDeallocate))
    } else {
        // NOTE(review): the result of unmapping the first half is ignored —
        // presumably deliberate best-effort cleanup once the second half is
        // gone; confirm before tightening.
        UnmapViewOfFile(pg as *mut c_void);
        Ok(())
    }
}

/// Changes the protection for a page range.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn protect(pg: *mut u8, len: usize, prot: Protect) -> Result<()> {
    let prot = match prot {
        Protect::ReadOnly => PAGE_READONLY,
        Protect::ReadWrite => PAGE_READWRITE,
        // VirtualProtect has no write-copy notion for an existing view;
        // ReadCopy degrades to read+write.
        Protect::ReadCopy => PAGE_READWRITE,
        Protect::ReadExec => PAGE_EXECUTE_READ,
    };
    let mut old = 0;
    if VirtualProtect(pg as *mut c_void, len, prot, &mut old) == 0 {
        Err(Error::last_os_error(Protect))
    } else {
        Ok(())
    }
}

/// Writes modified whole pages back to the filesystem.
///
/// `FlushViewOfFile` only initiates the write-back; for `Flush::Sync` the
/// file buffers are additionally flushed so the data is on disk on return.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn flush(pg: *mut u8, file: &File, len: usize, mode: Flush) -> Result<()> {
    if FlushViewOfFile(pg as *mut c_void, len as SIZE_T) == 0 {
        Err(Error::last_os_error(Flush))
    } else {
        match mode {
            Flush::Sync => {
                if FlushFileBuffers(file.as_raw_handle()) == 0 {
                    Err(Error::last_os_error(Flush))
                } else {
                    Ok(())
                }
            }
            Flush::Async => Ok(()),
        }
    }
}
/// Updates the advise for the page range.
///
/// This is a no-op on Windows; the parameters are accepted for signature
/// parity with the POSIX backend.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn advise(_pg: *mut u8, _len: usize, _adv: Advise) -> Result<()> {
    Ok(())
}

/// Locks physical pages into memory.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn lock(pg: *mut u8, len: usize) -> Result<()> {
    if VirtualLock(pg as *mut c_void, len) == 0 {
        Err(Error::last_os_error(Lock))
    } else {
        Ok(())
    }
}

/// Unlocks physical pages from memory.
///
/// # Safety
///
/// This does not know or care if `pg` or `len` are valid. That is,
/// it may be null, not at a proper page boundary, point to a size
/// different from `len`, or worse yet, point to a properly mapped
/// pointer from some other allocation system.
///
/// Generally don't use this unless you are entirely sure you are
/// doing so correctly.
pub unsafe fn unlock(pg: *mut u8, len: usize) -> Result<()> {
    if VirtualUnlock(pg as *mut c_void, len) == 0 {
        Err(Error::last_os_error(Unlock))
    } else {
        Ok(())
    }
}
--------------------------------------------------------------------------------