├── .appveyor.yml ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── ci ├── install.sh └── script.sh ├── justfile ├── rustfmt.toml ├── src └── lib.rs └── tests ├── bug_cases.rs └── test.rs /.appveyor.yml: -------------------------------------------------------------------------------- 1 | # Based on the "trust" template v0.1.2 2 | # https://github.com/japaric/trust/tree/v0.1.2 3 | 4 | environment: 5 | matrix: 6 | # MinGW 7 | - TARGET: i686-pc-windows-gnu 8 | - TARGET: x86_64-pc-windows-gnu 9 | 10 | # MSVC 11 | - TARGET: i686-pc-windows-msvc 12 | - TARGET: x86_64-pc-windows-msvc 13 | 14 | install: 15 | - ps: >- 16 | If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') { 17 | $Env:PATH += ';C:\msys64\mingw64\bin' 18 | } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') { 19 | $Env:PATH += ';C:\msys64\mingw32\bin' 20 | } 21 | - curl -sSf -o rustup-init.exe https://win.rustup.rs/ 22 | - rustup-init.exe -y --default-host %TARGET% --default-toolchain stable 23 | - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin 24 | - rustc -Vv 25 | - cargo -V 26 | 27 | build_script: 28 | - cargo build --target %TARGET% && 29 | cargo build --target %TARGET% --release 30 | 31 | test_script: 32 | - cargo test --target %TARGET% && 33 | cargo test --target %TARGET% --release 34 | 35 | cache: 36 | - C:\Users\appveyor\.cargo\registry 37 | - target 38 | 39 | branches: 40 | only: 41 | - master 42 | 43 | notifications: 44 | - provider: Email 45 | on_build_success: false 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | /target 3 | *.qf 4 | **/*.rs.bk 5 | Cargo.lock 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Based on the "trust" template v0.1.2 2 | # https://github.com/japaric/trust/tree/v0.1.2 3 | 4 | 
dist: trusty 5 | language: rust 6 | services: docker 7 | sudo: required 8 | 9 | env: 10 | global: 11 | - CRATE_NAME=queue-file 12 | - DISABLE_TESTS=1 13 | 14 | matrix: 15 | include: 16 | # Android 17 | - env: TARGET=aarch64-linux-android 18 | - env: TARGET=arm-linux-androideabi 19 | - env: TARGET=armv7-linux-androideabi 20 | - env: TARGET=i686-linux-android 21 | - env: TARGET=x86_64-linux-android 22 | 23 | # iOS 24 | - env: TARGET=aarch64-apple-ios 25 | os: osx 26 | - env: TARGET=x86_64-apple-ios 27 | os: osx 28 | 29 | # Linux 30 | - env: TARGET=aarch64-unknown-linux-gnu 31 | - env: TARGET=arm-unknown-linux-gnueabi 32 | - env: TARGET=armv7-unknown-linux-gnueabihf 33 | - env: TARGET=i686-unknown-linux-gnu 34 | - env: TARGET=i686-unknown-linux-musl 35 | - env: TARGET=mips-unknown-linux-gnu 36 | - env: TARGET=mips64-unknown-linux-gnuabi64 37 | - env: TARGET=mips64el-unknown-linux-gnuabi64 38 | - env: TARGET=mipsel-unknown-linux-gnu 39 | - env: TARGET=powerpc-unknown-linux-gnu 40 | - env: TARGET=powerpc64-unknown-linux-gnu 41 | - env: TARGET=powerpc64le-unknown-linux-gnu 42 | - env: TARGET=s390x-unknown-linux-gnu 43 | - env: TARGET=x86_64-unknown-linux-gnu 44 | - env: TARGET=x86_64-unknown-linux-musl 45 | 46 | # OSX 47 | - env: TARGET=i686-apple-darwin 48 | os: osx 49 | - env: TARGET=x86_64-apple-darwin 50 | os: osx 51 | 52 | # *BSD 53 | - env: TARGET=i686-unknown-freebsd 54 | - env: TARGET=x86_64-unknown-freebsd 55 | - env: TARGET=x86_64-unknown-netbsd 56 | 57 | # rustc minimum version. 
58 | - env: TARGET=x86_64-unknown-linux-gnu 59 | rust: 1.32.0 60 | 61 | before_install: 62 | - set -e 63 | - rustup self update 64 | 65 | install: 66 | - sh ci/install.sh 67 | - source ~/.cargo/env || true 68 | 69 | script: 70 | - bash ci/script.sh 71 | 72 | after_script: set +e 73 | 74 | cache: cargo 75 | before_cache: 76 | # Travis can't cache files that are not readable by "others" 77 | - chmod -R a+r $HOME/.cargo 78 | 79 | branches: 80 | only: 81 | - master 82 | 83 | notifications: 84 | email: 85 | on_success: never 86 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "queue-file" 3 | version = "1.4.10" 4 | license = "Apache-2.0" 5 | authors = ["ING Systems "] 6 | description = "queue-file is a lightning-fast, transactional, file-based FIFO" 7 | edition = "2021" 8 | homepage = "https://github.com/ing-systems/queue-file" 9 | repository = "https://github.com/ing-systems/queue-file" 10 | readme = "README.md" 11 | keywords = ["io", "fifo", "queue"] 12 | categories = ["data-structures"] 13 | rust-version = "1.58.1" 14 | 15 | [dependencies] 16 | bytes = "1.0" 17 | snafu = "0.7" 18 | 19 | [dev-dependencies] 20 | auto-delete-path = "0.2.0" 21 | quickcheck = "1.0.3" 22 | quickcheck_macros = "1.0.0" 23 | test-case = "2.1.0" 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 
12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # queue-file 2 | 3 | [![Crate](https://img.shields.io/crates/v/queue-file.svg)](https://crates.io/crates/queue-file) 4 | [![API](https://docs.rs/queue-file/badge.svg)](https://docs.rs/queue-file) 5 | [![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](LICENSE) 6 | [![Windows Build Status](https://ci.appveyor.com/api/projects/status/loj512o2qo6q0rwg?svg=true)](https://ci.appveyor.com/project/khrs/queue-file) 7 | 8 | queue-file is a lightning-fast, transactional, file-based FIFO. 9 | 10 | Addition and removal of an element from the queue is an O(1) operation and is atomic. 11 | Writes are synchronous by default; data will be written to disk before an operation returns. 12 | 13 | queue-file crate is a feature complete and binary compatible port of `QueueFile` class from 14 | Tape2 by Square, Inc. Check the original project [here](https://github.com/square/tape). 
15 | 16 | [Documentation](https://docs.rs/queue-file) 17 | 18 | ## Usage 19 | 20 | To use `queue-file`, first add this to your `Cargo.toml`: 21 | 22 | ```toml 23 | [dependencies] 24 | queue-file = "1" 25 | ``` 26 | 27 | ## Example 28 | 29 | ```rust 30 | use queue_file::QueueFile; 31 | 32 | fn main() { 33 | let mut qf = QueueFile::open("example.qf") 34 | .expect("cannot open queue file"); 35 | 36 | qf.add("ELEMENT #1".as_bytes()).expect("add failed"); 37 | qf.add("ELEMENT #2".as_bytes()).expect("add failed"); 38 | qf.add("ELEMENT #3".as_bytes()).expect("add failed"); 39 | 40 | qf.remove().expect("remove failed"); 41 | 42 | for (index, elem) in qf.iter().enumerate() { 43 | println!( 44 | "{}: {} bytes -> {}", 45 | index, 46 | elem.len(), 47 | std::str::from_utf8(&elem).unwrap_or("") 48 | ); 49 | } 50 | 51 | qf.clear().expect("clear failed"); 52 | } 53 | ``` 54 | 55 | ## MSRV 56 | 57 | Current MSRV is 1.58.1 58 | 59 | ## License 60 | 61 | This project is licensed under the [Apache 2.0 license](LICENSE). 62 | -------------------------------------------------------------------------------- /ci/install.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | main() { 4 | local target= 5 | if [ $TRAVIS_OS_NAME = linux ]; then 6 | target=x86_64-unknown-linux-musl 7 | sort=sort 8 | else 9 | target=x86_64-apple-darwin 10 | sort=gsort # for `sort --sort-version`, from brew's coreutils. 11 | fi 12 | 13 | # Builds for iOS are done on OSX, but require the specific target to be 14 | # installed. 
15 | case $TARGET in 16 | aarch64-apple-ios) 17 | rustup target install aarch64-apple-ios 18 | ;; 19 | x86_64-apple-ios) 20 | rustup target install x86_64-apple-ios 21 | ;; 22 | esac 23 | 24 | # This fetches latest stable release 25 | local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \ 26 | | cut -d/ -f3 \ 27 | | grep -E '^v[0.1.0-9.]+$' \ 28 | | $sort --version-sort \ 29 | | tail -n1) 30 | curl -LSfs https://japaric.github.io/trust/install.sh | \ 31 | sh -s -- \ 32 | --force \ 33 | --git japaric/cross \ 34 | --tag $tag \ 35 | --target $target 36 | } 37 | 38 | main 39 | -------------------------------------------------------------------------------- /ci/script.sh: -------------------------------------------------------------------------------- 1 | # This script takes care of testing your crate 2 | 3 | set -ex 4 | 5 | main() { 6 | cross build --target $TARGET 7 | cross build --target $TARGET --release 8 | 9 | if [ ! -z $DISABLE_TESTS ]; then 10 | return 11 | fi 12 | 13 | cross test --target $TARGET 14 | cross test --target $TARGET --release 15 | } 16 | 17 | # we don't run the "test phase" when doing deploys 18 | if [ -z $TRAVIS_TAG ]; then 19 | main 20 | fi 21 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | default: 2 | @just --list --unsorted --color=always | rg -v " default" 3 | 4 | # Format source code 5 | fmt: 6 | cargo +nightly fmt 7 | 8 | clippy: 9 | # rustup component add clippy --toolchain nightly 10 | cargo +nightly clippy --workspace 11 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | version = "Two" 2 | edition = "2021" 3 | 4 | max_width = 100 5 | error_on_line_overflow = false 6 | error_on_unformatted = true 7 | 8 | normalize_comments = true 9 | 10 | 
use_small_heuristics = "Max" 11 | fn_params_layout = "Compressed" 12 | overflow_delimited_expr = true 13 | reorder_impl_items = true 14 | 15 | group_imports = "StdExternalCrate" 16 | imports_granularity = "Module" 17 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Java version: Copyright (C) 2010 Square, Inc. 2 | // Rust version: Copyright (C) 2019 ING Systems 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | //! `queue-file` crate is a feature complete and binary compatible port of `QueueFile` class from 17 | //! Tape2 by Square, Inc. Check the original project [here](https://github.com/square/tape). 
18 | 19 | #![forbid(non_ascii_idents)] 20 | #![deny( 21 | macro_use_extern_crate, 22 | missing_copy_implementations, 23 | missing_debug_implementations, 24 | rust_2018_idioms, 25 | rust_2021_compatibility, 26 | trivial_casts, 27 | trivial_numeric_casts, 28 | unused_extern_crates, 29 | unused_import_braces, 30 | unused_qualifications 31 | )] 32 | #![warn( 33 | clippy::nursery, 34 | clippy::pedantic, 35 | clippy::mutex_atomic, 36 | clippy::rc_buffer, 37 | clippy::rc_mutex, 38 | // clippy::expect_used, 39 | // clippy::unwrap_used, 40 | )] 41 | #![allow( 42 | clippy::cast_possible_truncation, 43 | clippy::cast_possible_wrap, 44 | clippy::cast_precision_loss, 45 | clippy::cast_sign_loss, 46 | clippy::missing_errors_doc, 47 | clippy::missing_panics_doc, 48 | clippy::must_use_candidate 49 | )] 50 | 51 | use std::cmp::min; 52 | use std::collections::VecDeque; 53 | use std::fs::{rename, File, OpenOptions}; 54 | use std::io; 55 | use std::io::{Read, Seek, SeekFrom, Write}; 56 | use std::mem::ManuallyDrop; 57 | use std::path::Path; 58 | 59 | use bytes::{Buf, BufMut, BytesMut}; 60 | use snafu::{ensure, Snafu}; 61 | 62 | #[derive(Debug, Snafu)] 63 | pub enum Error { 64 | #[snafu(context(false))] 65 | Io { source: std::io::Error }, 66 | #[snafu(display("too many elements"))] 67 | TooManyElements {}, 68 | #[snafu(display("element too big"))] 69 | ElementTooBig {}, 70 | #[snafu(display("corrupted file: {}", msg))] 71 | CorruptedFile { msg: String }, 72 | #[snafu(display( 73 | "unsupported version {}. supported versions is {} and legacy", 74 | detected, 75 | supported 76 | ))] 77 | UnsupportedVersion { detected: u32, supported: u32 }, 78 | } 79 | 80 | type Result = std::result::Result; 81 | 82 | /// `QueueFile` is a lightning-fast, transactional, file-based FIFO. 83 | /// 84 | /// Addition and removal from an instance is an O(1) operation and is atomic. 85 | /// Writes are synchronous by default; data will be written to disk before an operation returns. 
86 | /// 87 | /// The underlying file. Uses a ring buffer to store entries. Designed so that a modification 88 | /// isn't committed or visible until we write the header. The header is much smaller than a 89 | /// segment. So long as the underlying file system supports atomic segment writes, changes to the 90 | /// queue are atomic. Storing the file length ensures we can recover from a failed expansion 91 | /// (i.e. if setting the file length succeeds but the process dies before the data can be copied). 92 | /// 93 | /// # Example 94 | /// ``` 95 | /// use queue_file::QueueFile; 96 | /// 97 | /// # let path = auto_delete_path::AutoDeletePath::temp(); 98 | /// let mut qf = QueueFile::open(path) 99 | /// .expect("cannot open queue file"); 100 | /// let data = "Welcome to QueueFile!".as_bytes(); 101 | /// 102 | /// qf.add(&data).expect("add failed"); 103 | /// 104 | /// if let Ok(Some(bytes)) = qf.peek() { 105 | /// assert_eq!(data, bytes.as_ref()); 106 | /// } 107 | /// 108 | /// qf.remove().expect("remove failed"); 109 | /// ``` 110 | /// # File format 111 | /// 112 | /// ```text 113 | /// 16-32 bytes Header 114 | /// ... Data 115 | /// ``` 116 | /// This implementation supports two versions of the header format. 117 | /// ```text 118 | /// Versioned Header (32 bytes): 119 | /// 1 bit Versioned indicator [0 = legacy, 1 = versioned] 120 | /// 31 bits Version, always 1 121 | /// 8 bytes File length 122 | /// 4 bytes Element count 123 | /// 8 bytes Head element position 124 | /// 8 bytes Tail element position 125 | /// 126 | /// Legacy Header (16 bytes): 127 | /// 1 bit Legacy indicator, always 0 128 | /// 31 bits File length 129 | /// 4 bytes Element count 130 | /// 4 bytes Head element position 131 | /// 4 bytes Tail element position 132 | /// ``` 133 | /// Each element stored is represented by: 134 | /// ```text 135 | /// Element: 136 | /// 4 bytes Data length 137 | /// ... 
Data 138 | /// ``` 139 | #[derive(Debug)] 140 | pub struct QueueFile { 141 | inner: QueueFileInner, 142 | /// True when using the versioned header format. Otherwise use the legacy format. 143 | versioned: bool, 144 | /// The header length in bytes: 16 or 32. 145 | header_len: u64, 146 | /// Number of elements. 147 | elem_cnt: usize, 148 | /// Pointer to first (or eldest) element. 149 | first: Element, 150 | /// Pointer to last (or newest) element. 151 | last: Element, 152 | /// Minimum number of bytes the file shrinks to. 153 | capacity: u64, 154 | /// When true, removing an element will also overwrite data with zero bytes. 155 | /// It's true by default. 156 | overwrite_on_remove: bool, 157 | /// When true, skips header update upon adding. 158 | /// It's false by default. 159 | skip_write_header_on_add: bool, 160 | /// Write buffering. 161 | write_buf: Vec, 162 | /// Offset cache idx->Element. Sorted in ascending order, always unique. 163 | /// Indices form perfect squares though may skew after removal. 164 | cached_offsets: VecDeque<(usize, Element)>, 165 | /// Offset caching policy. 166 | offset_cache_kind: Option, 167 | } 168 | 169 | /// Policy for offset caching if enabled. 170 | /// Notice that offsets frequency might be skewed due after series of adding/removal. 171 | /// This shall not affect functional properties, only performance one. 172 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 173 | pub enum OffsetCacheKind { 174 | /// Linear offseting. 175 | /// 176 | /// Next offset would be cached after `offset` additions. 177 | Linear { offset: usize }, 178 | /// Quadratic offseting. 179 | /// 180 | /// Cached offsets form a sequence of perfect squares (e.g. cached 1st, 4th, 9th, .. offsets). 181 | Quadratic, 182 | } 183 | 184 | #[derive(Debug)] 185 | struct QueueFileInner { 186 | file: ManuallyDrop, 187 | /// Cached file length. Always a power of 2. 188 | file_len: u64, 189 | /// Intention seek offset. 
190 | expected_seek: u64, 191 | /// Real last seek offset. 192 | last_seek: Option, 193 | /// Offset for the next read from buffer. 194 | read_buffer_offset: Option, 195 | /// Buffer for reads. 196 | read_buffer: Vec, 197 | /// Buffer used by `transfer` function. 198 | transfer_buf: Option>, 199 | /// When true, every write to file will be followed by `sync_data()` call. 200 | /// It's true by default. 201 | sync_writes: bool, 202 | } 203 | 204 | impl Drop for QueueFile { 205 | fn drop(&mut self) { 206 | if self.skip_write_header_on_add { 207 | let _ = self.sync_header(); 208 | } 209 | 210 | unsafe { 211 | ManuallyDrop::drop(&mut self.inner.file); 212 | } 213 | } 214 | } 215 | 216 | impl QueueFile { 217 | const BLOCK_LENGTH: u64 = 4096; 218 | const INITIAL_LENGTH: u64 = 4096; 219 | const READ_BUFFER_SIZE: usize = 4096; 220 | const VERSIONED_HEADER: u32 = 0x8000_0001; 221 | const ZEROES: [u8; 4096] = [0; 4096]; 222 | 223 | fn init(path: &Path, force_legacy: bool, capacity: u64) -> Result<()> { 224 | let tmp_path = path.with_extension(".tmp"); 225 | 226 | // Use a temp file so we don't leave a partially-initialized file. 227 | { 228 | let mut file = 229 | OpenOptions::new().read(true).write(true).create(true).open(&tmp_path)?; 230 | 231 | file.set_len(capacity)?; 232 | 233 | let mut buf = BytesMut::with_capacity(16); 234 | 235 | if force_legacy { 236 | buf.put_u32(capacity as u32); 237 | } else { 238 | buf.put_u32(Self::VERSIONED_HEADER); 239 | buf.put_u64(capacity); 240 | } 241 | 242 | file.write_all(buf.as_ref())?; 243 | } 244 | 245 | // A rename is atomic. 246 | rename(tmp_path, path)?; 247 | 248 | Ok(()) 249 | } 250 | 251 | /// Open or create [`QueueFile`] at `path` with specified minimal file size. 
    ///
    /// # Example
    ///
    /// ```
    /// # use queue_file::QueueFile;
    /// # let path = auto_delete_path::AutoDeletePath::temp();
    /// let qf = QueueFile::with_capacity(path, 120).expect("failed to open queue");
    /// ```
    pub fn with_capacity<P: AsRef<Path>>(path: P, capacity: u64) -> Result<Self> {
        Self::open_internal(path, true, false, capacity)
    }

    /// Open or create [`QueueFile`] at `path`.
    ///
    /// # Example
    ///
    /// ```
    /// # use queue_file::QueueFile;
    /// # let path = auto_delete_path::AutoDeletePath::temp();
    /// let qf = QueueFile::open(path).expect("failed to open queue");
    /// ```
    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
        Self::with_capacity(path, Self::INITIAL_LENGTH)
    }

    /// Open or create [`QueueFile`] at `path` forcing legacy format.
    ///
    /// # Example
    ///
    /// ```
    /// # use queue_file::QueueFile;
    /// # let path = auto_delete_path::AutoDeletePath::temp();
    /// let qf = QueueFile::open_legacy(path).expect("failed to open queue");
    /// ```
    pub fn open_legacy<P: AsRef<Path>>(path: P) -> Result<Self> {
        Self::open_internal(path, true, true, Self::INITIAL_LENGTH)
    }

    /// Shared open/create path: initializes the file when absent, then parses
    /// and validates the on-disk header (legacy 16-byte or versioned 32-byte
    /// layout) before building the in-memory state.
    fn open_internal<P: AsRef<Path>>(
        path: P, overwrite_on_remove: bool, force_legacy: bool, capacity: u64,
    ) -> Result<Self> {
        if !path.as_ref().exists() {
            Self::init(path.as_ref(), force_legacy, capacity)?;
        }

        let mut file = OpenOptions::new().read(true).write(true).open(path)?;

        // Both header layouts fit within 32 bytes; read the maximum up front.
        let mut buf = [0u8; 32];

        // NOTE(review): a single `read` may legally return fewer than 32 bytes
        // even when more data is available; `read_exact` would be more robust
        // here — confirm whether a short read can occur in practice.
        let bytes_read = file.read(&mut buf)?;

        ensure!(bytes_read >= 32, CorruptedFileSnafu { msg: "file too short" });

        // The versioned format sets the top bit of the first header byte.
        let versioned = !force_legacy && (buf[0] & 0x80) != 0;

        let header_len: u64;
        let file_len: u64;
        let elem_cnt: usize;
        let first_pos: u64;
        let last_pos: u64;

        let mut buf = BytesMut::from(&buf[..]);

        if versioned {
            header_len = 32;

            // Mask off the version flag bit to get the actual version number.
            let version = buf.get_u32() & 0x7FFF_FFFF;

            ensure!(version == 1, UnsupportedVersionSnafu { detected: version, supported: 1u32 });

            file_len = buf.get_u64();
            elem_cnt = buf.get_u32() as usize;
            first_pos = buf.get_u64();
            last_pos = buf.get_u64();

            // Reject values the Java implementation (signed types) cannot read.
            ensure!(i64::try_from(file_len).is_ok(), CorruptedFileSnafu {
                msg: "file length in header is greater than i64::MAX"
            });
            ensure!(i32::try_from(elem_cnt).is_ok(), CorruptedFileSnafu {
                msg: "element count in header is greater than i32::MAX"
            });
            ensure!(i64::try_from(first_pos).is_ok(), CorruptedFileSnafu {
                msg: "first element position in header is greater than i64::MAX"
            });
            ensure!(i64::try_from(last_pos).is_ok(), CorruptedFileSnafu {
                msg: "last element position in header is greater than i64::MAX"
            });
        } else {
            header_len = 16;

            // Legacy header: four big-endian u32 fields.
            file_len = u64::from(buf.get_u32());
            elem_cnt = buf.get_u32() as usize;
            first_pos = u64::from(buf.get_u32());
            last_pos = u64::from(buf.get_u32());

            ensure!(i32::try_from(file_len).is_ok(), CorruptedFileSnafu {
                msg: "file length in header is greater than i32::MAX"
            });
            ensure!(i32::try_from(elem_cnt).is_ok(), CorruptedFileSnafu {
                msg: "element count in header is greater than i32::MAX"
            });
            ensure!(i32::try_from(first_pos).is_ok(), CorruptedFileSnafu {
                msg: "first element position in header is greater than i32::MAX"
            });
            ensure!(i32::try_from(last_pos).is_ok(), CorruptedFileSnafu {
                msg: "last element position in header is greater than i32::MAX"
            });
        }

        // Cross-check the header against the real file size.
        let real_file_len = file.metadata()?.len();

        ensure!(file_len <= real_file_len, CorruptedFileSnafu {
            msg: format!(
                "file is truncated. expected length was {file_len} but actual length is {real_file_len}"
            )
        });
        ensure!(file_len >= header_len, CorruptedFileSnafu {
            msg: format!("length stored in header ({file_len}) is invalid")
        });
        ensure!(first_pos <= file_len, CorruptedFileSnafu {
            msg: format!("position of the first element ({first_pos}) is beyond the file")
        });
        ensure!(last_pos <= file_len, CorruptedFileSnafu {
            msg: format!("position of the last element ({last_pos}) is beyond the file")
        });

        let mut queue_file = Self {
            inner: QueueFileInner {
                file: ManuallyDrop::new(file),
                file_len,
                expected_seek: 0,
                // The 32-byte header read above left the OS cursor at offset 32.
                last_seek: Some(32),
                read_buffer_offset: None,
                read_buffer: vec![0; Self::READ_BUFFER_SIZE],
                transfer_buf: Some(
                    vec![0u8; QueueFileInner::TRANSFER_BUFFER_SIZE].into_boxed_slice(),
                ),
                // Tests disable syncing for speed; production defaults to durable writes.
                sync_writes: cfg!(not(test)),
            },
            versioned,
            header_len,
            elem_cnt,
            first: Element::EMPTY,
            last: Element::EMPTY,
            capacity,
            overwrite_on_remove,
            skip_write_header_on_add: false,
            write_buf: Vec::new(),
            cached_offsets: VecDeque::new(),
            offset_cache_kind: None,
        };

        // Grow an existing-but-smaller file up to the requested capacity.
        if file_len < capacity {
            queue_file.inner.sync_set_len(queue_file.capacity)?;
        }

        queue_file.first = queue_file.read_element(first_pos)?;
        queue_file.last = queue_file.read_element(last_pos)?;

        Ok(queue_file)
    }

    /// Returns true if removing an element will also overwrite data with zero bytes.
    #[inline]
    pub const fn overwrite_on_remove(&self) -> bool {
        self.overwrite_on_remove
    }

    #[deprecated(since = "1.4.7", note = "Use `overwrite_on_remove` instead.")]
    pub const fn get_overwrite_on_remove(&self) -> bool {
        self.overwrite_on_remove()
    }

    /// If set to true removing an element will also overwrite data with zero bytes.
    #[inline]
    pub fn set_overwrite_on_remove(&mut self, value: bool) {
        self.overwrite_on_remove = value;
    }

    /// Returns true if every write to file will be followed by `sync_data()` call.
    #[inline]
    pub const fn sync_writes(&self) -> bool {
        self.inner.sync_writes
    }

    #[deprecated(since = "1.4.7", note = "Use `sync_writes` instead.")]
    pub const fn get_sync_writes(&self) -> bool {
        self.sync_writes()
    }

    /// If set to true every write to file will be followed by `sync_data()` call.
    #[inline]
    pub fn set_sync_writes(&mut self, value: bool) {
        self.inner.sync_writes = value;
    }

    /// Returns true if skips header update upon adding enabled.
    #[inline]
    pub const fn skip_write_header_on_add(&self) -> bool {
        self.skip_write_header_on_add
    }

    #[deprecated(since = "1.4.7", note = "Use `skip_write_header_on_add` instead.")]
    pub const fn get_skip_write_header_on_add(&self) -> bool {
        self.skip_write_header_on_add()
    }

    /// If set to true skips header update upon adding.
    #[inline]
    pub fn set_skip_write_header_on_add(&mut self, value: bool) {
        self.skip_write_header_on_add = value;
    }

    /// Changes buffer size used for data reading.
    pub fn set_read_buffer_size(&mut self, size: usize) {
        // Growing appends zero bytes that were never read from the file, so
        // the cached window must be invalidated before resizing.
        if self.inner.read_buffer.len() < size {
            self.inner.read_buffer_offset = None;
        }
        self.inner.read_buffer.resize(size, 0);
    }

    /// Returns the active element-offset caching policy, if any.
    #[inline]
    pub const fn cache_offset_policy(&self) -> Option<OffsetCacheKind> {
        self.offset_cache_kind
    }

    #[deprecated(since = "1.4.7", note = "Use `cache_offset_policy` instead.")]
    pub const fn get_cache_offset_policy(&self) -> Option<OffsetCacheKind> {
        self.cache_offset_policy()
    }

    /// Sets (or clears, when given `None`) the element-offset caching policy.
    #[inline]
    pub fn set_cache_offset_policy(&mut self, kind: impl Into<Option<OffsetCacheKind>>) {
        self.offset_cache_kind = kind.into();

        // Cached entries are meaningless once caching is disabled.
        if self.offset_cache_kind.is_none() {
            self.cached_offsets.clear();
        }
    }

    /// Returns true if this queue contains no entries.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.elem_cnt == 0
    }

    /// Returns the number of elements in this queue.
    #[inline]
    pub const fn size(&self) -> usize {
        self.elem_cnt
    }

    /// Synchronizes the underlying file, look at [`File::sync_all`] doc for more info.
    pub fn sync_all(&mut self) -> Result<()> {
        // When header writes are skipped on add, the on-disk header may be
        // stale; flush it before syncing file contents.
        if self.skip_write_header_on_add {
            self.sync_header()?;
        }

        Ok(self.inner.file.sync_all()?)
    }

    /// Caches the offset of the current last element, if the policy wants it.
    fn cache_last_offset_if_needed(&mut self, affected_items: usize) {
        if self.elem_cnt == 0 {
            return;
        }

        self.cache_elem_if_needed(self.elem_cnt - 1, self.last, affected_items);
    }

    /// Records `(index, elem)` in the offset cache when the active policy says
    /// so. `affected_items` is how many items the caller just touched, ending
    /// at `index`.
    fn cache_elem_if_needed(&mut self, index: usize, elem: Element, affected_items: usize) {
        debug_assert!(index <= self.elem_cnt);
        debug_assert!(index + 1 >= affected_items);

        let need_to_cache = self.offset_cache_kind.map_or(false, |kind| match kind {
            OffsetCacheKind::Linear { offset } => {
                // Cache every `offset`-th element past the last cached index.
                let last_cached_index = self.cached_offsets.back().map_or(0, |(idx, _)| *idx);
                index.saturating_sub(last_cached_index) >= offset
            }
            OffsetCacheKind::Quadratic => {
                // Cache perfect squares; check whether one falls inside the
                // just-touched range (index + 1 - affected_items ..= index).
                let x = (index as f64).sqrt() as usize;
                x > 1 && (index + 1 - affected_items..=index).contains(&(x * x))
            }
        });

        if need_to_cache {
            // Keep the cache strictly increasing by index; never insert a
            // duplicate or out-of-order entry.
            if let Some((last_cached_index, last_cached_elem)) = self.cached_offsets.back() {
                if *last_cached_index >= index {
                    if *last_cached_index == index {
                        debug_assert_eq!(last_cached_elem.pos, elem.pos);
                        debug_assert_eq!(last_cached_elem.len, elem.len);
                    }

                    return;
                }
            }

            self.cached_offsets.push_back((index, elem));
        }
    }

    /// Returns the position in `cached_offsets` of the last entry whose
    /// element index is <= `i`, if any.
    #[inline]
    fn cached_index_up_to(&self, i: usize) -> Option<usize> {
        self.cached_offsets
            .binary_search_by(|(idx, _)| idx.cmp(&i))
            .map_or_else(|i| i.checked_sub(1), Some)
    }

    /// Adds several elements to the end of the queue in a single ring write.
    pub fn add_n(
        &mut self, elems: impl IntoIterator<Item = impl AsRef<[u8]>> + Clone,
    ) -> Result<()> {
        // First pass over the (cloned) iterator: count elements and the total
        // number of bytes (payload + per-element header) to be written.
        let (count, total_len) = elems
            .clone()
            .into_iter()
            .fold((0, 0), |(c, l), elem| (c + 1, l + Element::HEADER_LENGTH + elem.as_ref().len()));

        if count == 0 {
            return Ok(());
        }

        ensure!(self.elem_cnt + count < i32::max_value() as usize, TooManyElementsSnafu {});

        self.expand_if_necessary(total_len as u64)?;

        let was_empty = self.is_empty();
        // First write position: right after the header when empty, otherwise
        // just past the current last element (wrapping if needed).
        let mut pos = if was_empty {
            self.header_len
        } else {
            self.wrap_pos(self.last.pos + Element::HEADER_LENGTH as u64 + self.last.len as u64)
        };

        let mut first_added = None;
        let mut last_added = None;

        self.write_buf.clear();

        // Second pass: serialize each element (big-endian u32 length prefix
        // followed by the payload) into the reusable write buffer.
        for elem in elems {
            let elem = elem.as_ref();
            let len = elem.len();

            if first_added.is_none() {
                first_added = Some(Element::new(pos, len)?);
            }
            last_added = Some(Element::new(pos, len)?);

            self.write_buf.extend(&(len as u32).to_be_bytes());
            self.write_buf.extend(elem);

            pos = self.wrap_pos(pos + Element::HEADER_LENGTH as u64 + len as u64);
        }

        let first_added = first_added.unwrap();
        self.ring_write_buf(first_added.pos)?;

        if was_empty {
            self.first = first_added;
        }
        self.last = last_added.unwrap();

        // Commit the new state to the on-disk header before updating counters.
        self.write_header(self.file_len(), self.elem_cnt + count, self.first.pos, self.last.pos)?;
        self.elem_cnt += count;

        self.cache_last_offset_if_needed(count);

        Ok(())
    }

    /// Adds an element to the end of the queue.
    #[inline]
    pub fn add(&mut self, buf: &[u8]) -> Result<()> {
        self.add_n(std::iter::once(buf))
    }

    /// Reads the eldest element. Returns `Ok(None)` if the queue is empty.
    pub fn peek(&mut self) -> Result<Option<Box<[u8]>>> {
        if self.is_empty() {
            Ok(None)
        } else {
            let len = self.first.len;
            let mut data = vec![0; len].into_boxed_slice();

            // Skip the 4-byte length header and read the payload.
            self.ring_read(self.first.pos + Element::HEADER_LENGTH as u64, &mut data)?;

            Ok(Some(data))
        }
    }

    /// Removes the eldest element.
    #[inline]
    pub fn remove(&mut self) -> Result<()> {
        self.remove_n(1)
    }

    /// Removes the eldest `n` elements.
    pub fn remove_n(&mut self, n: usize) -> Result<()> {
        if n == 0 || self.is_empty() {
            return Ok(());
        }

        // Removing everything degenerates to a cheap truncating clear.
        if n >= self.elem_cnt {
            return self.clear();
        }

        // The offset cache must be strictly increasing by element index.
        debug_assert!(
            self.cached_offsets
                .iter()
                .zip(self.cached_offsets.iter().skip(1))
                .all(|(a, b)| a.0 < b.0),
            "{:?}",
            self.cached_offsets
        );

        let erase_start_pos = self.first.pos;
        let mut erase_total_len = 0usize;

        // Read the position and length of the new first element.
        let mut new_first_pos = self.first.pos;
        let mut new_first_len = self.first.len;

        // Use the offset cache to jump as close to element `n - 1` as we can,
        // instead of walking every header from the current head.
        let cached_index = self.cached_index_up_to(n - 1);
        let to_remove = if let Some(i) = cached_index {
            let (index, elem) = self.cached_offsets[i];

            if let Some(index) = index.checked_sub(1) {
                // NOTE(review): this size estimate mixes a header count with a
                // position delta; confirm it matches the bytes actually being
                // skipped (it only feeds the optional zero-fill below).
                erase_total_len += Element::HEADER_LENGTH * index;
                erase_total_len += (elem.pos
                    + if self.first.pos < elem.pos {
                        0
                    } else {
                        self.file_len() - self.first.pos - self.header_len
                    }) as usize;
            }

            new_first_pos = elem.pos;
            new_first_len = elem.len;
            n - index
        } else {
            n
        };

        // Walk the remaining element headers to locate the new head.
        for _ in 0..to_remove {
            erase_total_len += Element::HEADER_LENGTH + new_first_len;
            new_first_pos =
                self.wrap_pos(new_first_pos + Element::HEADER_LENGTH as u64 + new_first_len as u64);

            let mut buf: [u8; 4] = [0; 4];
            self.ring_read(new_first_pos, &mut buf)?;
            new_first_len = u32::from_be_bytes(buf) as usize;
        }

        // Commit the header.
        self.write_header(self.file_len(), self.elem_cnt - n, new_first_pos, self.last.pos)?;
        self.elem_cnt -= n;
        self.first = Element::new(new_first_pos, new_first_len)?;

        // Drop cache entries for removed elements and shift the rest by `n`.
        if let Some(cached_index) = cached_index {
            self.cached_offsets.drain(..=cached_index);
        }
        self.cached_offsets.iter_mut().for_each(|(i, _)| *i -= n);

        if self.overwrite_on_remove {
            self.ring_erase(erase_start_pos, erase_total_len)?;
        }

        Ok(())
    }

    /// Clears this queue. Truncates the file to the initial size.
    pub fn clear(&mut self) -> Result<()> {
        // Commit the header.
        self.write_header(self.capacity, 0, 0, 0)?;

        if self.overwrite_on_remove {
            // Zero the whole data region in BLOCK_LENGTH-sized chunks.
            self.inner.seek(self.header_len);
            let first_block = self.capacity.min(Self::BLOCK_LENGTH) - self.header_len;
            self.inner.write(&Self::ZEROES[..first_block as usize])?;

            if let Some(left) = self.capacity.checked_sub(Self::BLOCK_LENGTH) {
                for _ in 0..left / Self::BLOCK_LENGTH {
                    self.inner.write(&Self::ZEROES)?;
                }

                let tail = left % Self::BLOCK_LENGTH;

                if tail != 0 {
                    self.inner.write(&Self::ZEROES[..tail as usize])?;
                }
            }
        }

        self.cached_offsets.clear();

        self.elem_cnt = 0;
        self.first = Element::EMPTY;
        self.last = Element::EMPTY;

        // Shrink the file back down if it had grown past the configured capacity.
        if self.file_len() > self.capacity {
            self.inner.sync_set_len(self.capacity)?;
        }

        Ok(())
    }

    /// Returns an iterator over elements in this queue.
755 | /// 756 | /// # Example 757 | /// 758 | /// ``` 759 | /// # use queue_file::QueueFile; 760 | /// # let path = auto_delete_path::AutoDeletePath::temp(); 761 | /// let mut qf = QueueFile::open(path).expect("failed to open queue"); 762 | /// let items = vec![vec![1, 2], vec![], vec![3]]; 763 | /// qf.add_n(&items).expect("failed to add elements to queue"); 764 | /// 765 | /// let stored = qf.iter().map(Vec::from).collect::>(); 766 | /// assert_eq!(items, stored); 767 | /// ``` 768 | pub fn iter(&mut self) -> Iter<'_> { 769 | let pos = self.first.pos; 770 | 771 | Iter { 772 | // We are using write buffer for reducing number of allocations. 773 | // BorrowedIter doesn't modify any data and will return it back on drop. 774 | buffer: std::mem::take(&mut self.write_buf), 775 | queue_file: self, 776 | next_elem_index: 0, 777 | next_elem_pos: pos, 778 | } 779 | } 780 | 781 | /// Returns the amount of bytes used by the backed file. 782 | /// Always >= [`Self::used_bytes`]. 783 | #[inline] 784 | pub const fn file_len(&self) -> u64 { 785 | self.inner.file_len 786 | } 787 | 788 | /// Returns the amount of bytes used by the queue. 789 | #[inline] 790 | pub const fn used_bytes(&self) -> u64 { 791 | if self.elem_cnt == 0 { 792 | self.header_len 793 | } else if self.last.pos >= self.first.pos { 794 | // Contiguous queue. 795 | (self.last.pos - self.first.pos) 796 | + Element::HEADER_LENGTH as u64 797 | + self.last.len as u64 798 | + self.header_len 799 | } else { 800 | // tail < head. The queue wraps. 801 | self.last.pos + Element::HEADER_LENGTH as u64 + self.last.len as u64 + self.file_len() 802 | - self.first.pos 803 | } 804 | } 805 | 806 | /// Returns underlying file of the queue. 
    pub fn into_inner_file(mut self) -> File {
        // Flush a possibly-stale header before handing the file away.
        if self.skip_write_header_on_add {
            let _ = self.sync_header();
        }

        // SAFETY: the `ManuallyDrop` field is taken exactly once here and the
        // queue is immediately forgotten, so Drop never observes the hole.
        let file = unsafe { ManuallyDrop::take(&mut self.inner.file) };
        std::mem::forget(self);

        file
    }

    /// Bytes still available before the file would have to grow.
    #[inline]
    const fn remaining_bytes(&self) -> u64 {
        self.file_len() - self.used_bytes()
    }

    /// Rewrites the on-disk header from the current in-memory state.
    fn sync_header(&mut self) -> Result<()> {
        self.write_header(self.file_len(), self.size(), self.first.pos, self.last.pos)
    }

    /// Writes header atomically. The arguments contain the updated values. The struct member fields
    /// should not have changed yet. This only updates the state in the file. It's up to the caller
    /// to update the class member variables *after* this call succeeds. Assumes segment writes are
    /// atomic in the underlying file system.
    fn write_header(
        &mut self, file_len: u64, elem_cnt: usize, first_pos: u64, last_pos: u64,
    ) -> Result<()> {
        let mut header = [0; 32];
        let mut header_buf = &mut header[..];

        // Never allow write values that will render file unreadable by Java library.
        if self.versioned {
            ensure!(i64::try_from(file_len).is_ok(), CorruptedFileSnafu {
                msg: "file length in header will exceed i64::MAX"
            });
            ensure!(i32::try_from(elem_cnt).is_ok(), CorruptedFileSnafu {
                msg: "element count in header will exceed i32::MAX"
            });
            ensure!(i64::try_from(first_pos).is_ok(), CorruptedFileSnafu {
                msg: "first element position in header will exceed i64::MAX"
            });
            ensure!(i64::try_from(last_pos).is_ok(), CorruptedFileSnafu {
                msg: "last element position in header will exceed i64::MAX"
            });

            // Versioned (32-byte) layout: flag+version, then u64/u32 fields.
            header_buf.put_u32(Self::VERSIONED_HEADER);
            header_buf.put_u64(file_len);
            header_buf.put_i32(elem_cnt as i32);
            header_buf.put_u64(first_pos);
            header_buf.put_u64(last_pos);
        } else {
            ensure!(i32::try_from(file_len).is_ok(), CorruptedFileSnafu {
                msg: "file length in header will exceed i32::MAX"
            });
            ensure!(i32::try_from(elem_cnt).is_ok(), CorruptedFileSnafu {
                msg: "element count in header will exceed i32::MAX"
            });
            ensure!(i32::try_from(first_pos).is_ok(), CorruptedFileSnafu {
                msg: "first element position in header will exceed i32::MAX"
            });
            ensure!(i32::try_from(last_pos).is_ok(), CorruptedFileSnafu {
                msg: "last element position in header will exceed i32::MAX"
            });

            // Legacy (16-byte) layout: four signed 32-bit big-endian fields.
            header_buf.put_i32(file_len as i32);
            header_buf.put_i32(elem_cnt as i32);
            header_buf.put_i32(first_pos as i32);
            header_buf.put_i32(last_pos as i32);
        }

        self.inner.seek(0);
        self.inner.write(&header.as_ref()[..self.header_len as usize])
    }

    /// Reads the element header (big-endian u32 payload length) stored at
    /// `pos`; position 0 is the "no element" sentinel.
    fn read_element(&mut self, pos: u64) -> Result<Element> {
        if pos == 0 {
            Ok(Element::EMPTY)
        } else {
            let mut buf: [u8; 4] = [0; Element::HEADER_LENGTH];
            self.ring_read(pos, &mut buf)?;

            Element::new(pos, u32::from_be_bytes(buf) as usize)
        }
    }

    /// Wraps the position if it exceeds the end of the file.
    #[inline]
    const fn wrap_pos(&self, pos: u64) -> u64 {
        // Data wraps to just past the header, never back into it.
        if pos < self.file_len() { pos } else { self.header_len + pos - self.file_len() }
    }

    /// Writes `n` bytes from buffer to position in file. Automatically wraps write if position is
    /// past the end of the file or if buffer overlaps it.
    fn ring_write_buf(&mut self, pos: u64) -> Result<()> {
        let pos = self.wrap_pos(pos);

        if pos + self.write_buf.len() as u64 <= self.file_len() {
            self.inner.seek(pos);
            self.inner.write(&self.write_buf)
        } else {
            // Split write: fill to end-of-file, then wrap past the header.
            let before_eof = (self.file_len() - pos) as usize;

            self.inner.seek(pos);
            self.inner.write(&self.write_buf[..before_eof])?;
            self.inner.seek(self.header_len);
            self.inner.write(&self.write_buf[before_eof..])
        }
    }

    /// Zeroes out `n` bytes starting at `pos`, wrapping as needed.
    fn ring_erase(&mut self, pos: u64, n: usize) -> Result<()> {
        let mut pos = pos;
        let mut len = n;

        // Reuse the write buffer as a chunk of zeroes.
        self.write_buf.clear();
        self.write_buf.extend(Self::ZEROES);

        while len > 0 {
            let chunk_len = min(len, Self::ZEROES.len());
            self.write_buf.truncate(chunk_len);

            self.ring_write_buf(pos)?;

            len -= chunk_len;
            pos += chunk_len as u64;
        }

        Ok(())
    }

    /// Reads `n` bytes into buffer from file. Wraps if necessary.
    fn ring_read(&mut self, pos: u64, buf: &mut [u8]) -> io::Result<()> {
        let pos = self.wrap_pos(pos);

        if pos + buf.len() as u64 <= self.file_len() {
            self.inner.seek(pos);
            self.inner.read(buf)
        } else {
            // Split read mirroring the split write above.
            let before_eof = (self.file_len() - pos) as usize;

            self.inner.seek(pos);
            self.inner.read(&mut buf[..before_eof])?;
            self.inner.seek(self.header_len);
            self.inner.read(&mut buf[before_eof..])
        }
    }

    /// If necessary, expands the file to accommodate an additional element of the given length.
    fn expand_if_necessary(&mut self, data_len: u64) -> Result<()> {
        let mut rem_bytes = self.remaining_bytes();

        if rem_bytes >= data_len {
            return Ok(());
        }

        let orig_file_len = self.file_len();
        let mut prev_len = orig_file_len;
        let mut new_len = prev_len;

        // Keep doubling the file length until the incoming data fits.
        while rem_bytes < data_len {
            rem_bytes += prev_len;
            new_len = prev_len << 1;
            prev_len = new_len;
        }

        let bytes_used_before = self.used_bytes();

        // Calculate the position of the tail end of the data in the ring buffer
        let end_of_last_elem =
            self.wrap_pos(self.last.pos + Element::HEADER_LENGTH as u64 + self.last.len as u64);
        self.inner.sync_set_len(new_len)?;

        let mut count = 0u64;

        // If the buffer is split, we need to make it contiguous
        if end_of_last_elem <= self.first.pos {
            // Move the wrapped prefix (header..end_of_last_elem) to the old EOF.
            count = end_of_last_elem - self.header_len;

            self.inner.transfer(self.header_len, orig_file_len, count)?;
        }

        // Commit the expansion.
        if self.last.pos < self.first.pos {
            // The last element moved with the transferred region.
            let new_last_pos = orig_file_len + self.last.pos - self.header_len;
            self.last = Element::new(new_last_pos, self.last.len)?;
        }

        // TODO: cached offsets might be recalculated after transfer
        self.cached_offsets.clear();

        if self.overwrite_on_remove {
            self.ring_erase(self.header_len, count as usize)?;
        }

        // Expansion must not change the logical queue size.
        let bytes_used_after = self.used_bytes();
        debug_assert_eq!(bytes_used_before, bytes_used_after);

        Ok(())
    }
}

// I/O Helpers
impl QueueFileInner {
    const TRANSFER_BUFFER_SIZE: usize = 128 * 1024;

    /// Records where the next read/write should happen; the actual `seek(2)`
    /// is deferred to `real_seek` so redundant syscalls are skipped.
    #[inline]
    fn seek(&mut self, pos: u64) -> u64 {
        self.expected_seek = pos;

        pos
    }

    /// Performs the pending seek, skipping the syscall when the OS cursor is
    /// already at the expected position.
    fn real_seek(&mut self) -> io::Result<u64> {
        if Some(self.expected_seek) == self.last_seek {
            return Ok(self.expected_seek);
        }

        let res = self.file.seek(SeekFrom::Start(self.expected_seek));
        self.last_seek = res.as_ref().ok().copied();

        res
    }

    /// Fills `buf` from `expected_seek`, serving the request from the internal
    /// read cache when the range is already buffered.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<()> {
        if buf.is_empty() {
            return Ok(());
        }

        let size = buf.len();

        // The cache can satisfy the request only if `expected_seek..+size`
        // lies inside `read_buffer_offset..+read_buffer.len()`.
        let not_enough_data = if let Some(left) = self.read_buffer.len().checked_sub(size) {
            self.read_buffer_offset
                .and_then(|o| self.expected_seek.checked_sub(o))
                .and_then(|skip| left.checked_sub(skip as usize))
                .is_none()
        } else {
            // Request is larger than the cache: grow the cache to fit.
            self.read_buffer.resize(size, 0);

            true
        };

        if not_enough_data {
            use std::io::{Error, ErrorKind};

            self.real_seek()?;

            let mut read = 0;
            let mut res = Ok(());

            // NOTE(review): `buf` is never consumed here, so this condition is
            // always true after the empty-buffer early return; the loop exits
            // via `Ok(0)` (EOF or cache full) or an error.
            while !buf.is_empty() {
                match self.file.read(&mut self.read_buffer[read..]) {
                    Ok(0) => break,
                    Ok(n) => read += n,
                    // Interrupted reads are transient; just retry.
                    Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
                    Err(e) => {
                        res = Err(e);
                        break;
                    }
                }
            }

            if res.is_ok() && read < size {
                res = Err(Error::new(ErrorKind::UnexpectedEof, "failed to fill whole buffer"));
            }

            if let Err(err) = res {
                // Cache contents and cursor position are unknown after a failure.
                self.read_buffer_offset = None;
                self.last_seek = None;

                return Err(err);
            }

            self.read_buffer_offset = Some(self.expected_seek);

            if let Some(seek) = &mut self.last_seek {
                *seek += read as u64;
            }
        }

        let start = (self.expected_seek - self.read_buffer_offset.unwrap()) as usize;

        buf.copy_from_slice(&self.read_buffer[start..start + size]);

        Ok(())
    }

    /// Writes `buf` at `expected_seek` and keeps the read cache coherent with
    /// whatever portion of it the write overlaps.
    fn write(&mut self, buf: &[u8]) -> Result<()> {
        self.real_seek()?;

        self.file.write_all(buf)?;

        if let Some(seek) = &mut self.last_seek {
            *seek += buf.len() as u64;
        }

        if let Some(read_buffer_offset) = self.read_buffer_offset {
            let write_size_u64 = buf.len() as u64;
            let read_buffer_end_offset = read_buffer_offset + self.read_buffer.len() as u64;
            let read_buffered = read_buffer_offset..read_buffer_end_offset;

            let has_start = read_buffered.contains(&self.expected_seek);
            let buf_end = self.expected_seek + write_size_u64;
            let has_end = read_buffered.contains(&buf_end);

            match (has_start, has_end) {
                // rd_buf_offset .. exp_seek .. exp_seek+buf.len .. rd_buf_end
                // need to copy whole write buffer
                (true, true) => {
                    let start = (self.expected_seek - read_buffer_offset) as usize;

                    self.read_buffer[start..start + buf.len()].copy_from_slice(buf);
                }
                // exp_seek .. rd_buf_offset .. exp_seek+buf.len .. rd_buf_end
                // need to copy only a tail of write buffer
                (false, true) => {
                    let need_to_skip = (read_buffer_offset - self.expected_seek) as usize;
                    let need_to_copy = buf.len() - need_to_skip;

                    self.read_buffer[..need_to_copy].copy_from_slice(&buf[need_to_skip..]);
                }
                // rd_buf_offset .. exp_seek .. rd_buf_end .. exp_seek+buf.len
                // need to copy only a head of write buffer
                (true, false) => {
                    let need_to_skip = (self.expected_seek - read_buffer_offset) as usize;
                    let need_to_copy = self.read_buffer.len() - need_to_skip;

                    self.read_buffer[need_to_skip..need_to_skip + need_to_copy]
                        .copy_from_slice(&buf[..need_to_copy]);
                }
                // exp_seek .. rd_buf_offset .. rd_buf_end .. exp_seek+buf.len
                // read buffer is inside writing range, need to rewrite it completely
                (false, false)
                    if (self.expected_seek + 1..buf_end).contains(&read_buffer_offset) =>
                {
                    let need_to_skip = (read_buffer_offset - self.expected_seek) as usize;
                    let need_to_copy = self.read_buffer.len();

                    self.read_buffer[..]
                        .copy_from_slice(&buf[need_to_skip..need_to_skip + need_to_copy]);
                }
                // nothing to do, read & write buffers do not overlap
                (false, false) => {}
            }
        }

        if self.sync_writes {
            self.file.sync_data()?;
        }

        Ok(())
    }

    /// Chunked copy loop backing `transfer`; `buf` is caller-provided scratch.
    fn transfer_inner(
        &mut self, buf: &mut [u8], mut read_pos: u64, mut write_pos: u64, count: u64,
    ) -> Result<()> {
        debug_assert!(read_pos < self.file_len);
        debug_assert!(write_pos <= self.file_len);
        debug_assert!(count < self.file_len);
        debug_assert!(i64::try_from(count).is_ok());

        let mut bytes_left = count as i64;

        while bytes_left > 0 {
            self.seek(read_pos);
            let bytes_to_read = min(bytes_left as usize, Self::TRANSFER_BUFFER_SIZE);
            self.read(&mut buf[..bytes_to_read])?;

            self.seek(write_pos);
            self.write(&buf[..bytes_to_read])?;

            read_pos += bytes_to_read as u64;
            write_pos += bytes_to_read as u64;
            bytes_left -= bytes_to_read as i64;
        }

        // Should we `sync_data()` in internal loop instead?
        if self.sync_writes {
            self.file.sync_data()?;
        }

        Ok(())
    }

    /// Transfer `count` bytes starting from `read_pos` to `write_pos`.
1190 | fn transfer(&mut self, read_pos: u64, write_pos: u64, count: u64) -> Result<()> { 1191 | let mut buf = self.transfer_buf.take().unwrap(); 1192 | let res = self.transfer_inner(&mut buf, read_pos, write_pos, count); 1193 | self.transfer_buf = Some(buf); 1194 | 1195 | res 1196 | } 1197 | 1198 | fn sync_set_len(&mut self, new_len: u64) -> io::Result<()> { 1199 | self.file.set_len(new_len)?; 1200 | self.file_len = new_len; 1201 | self.file.sync_all() 1202 | } 1203 | } 1204 | 1205 | #[derive(Copy, Clone, Debug)] 1206 | struct Element { 1207 | pos: u64, 1208 | len: usize, 1209 | } 1210 | 1211 | impl Element { 1212 | const EMPTY: Self = Self { pos: 0, len: 0 }; 1213 | const HEADER_LENGTH: usize = 4; 1214 | 1215 | #[inline] 1216 | fn new(pos: u64, len: usize) -> Result { 1217 | ensure!(i64::try_from(pos).is_ok(), CorruptedFileSnafu { 1218 | msg: "element position must be less or equal to i64::MAX" 1219 | }); 1220 | ensure!(i32::try_from(len).is_ok(), ElementTooBigSnafu); 1221 | 1222 | Ok(Self { pos, len }) 1223 | } 1224 | } 1225 | 1226 | /// Iterator over items in the queue. 
#[derive(Debug)]
pub struct Iter<'a> {
    queue_file: &'a mut QueueFile,
    // Scratch space borrowed from `QueueFile::write_buf`; returned on drop.
    buffer: Vec<u8>,
    next_elem_index: usize,
    next_elem_pos: u64,
}

impl<'a> Iterator for Iter<'a> {
    type Item = Box<[u8]>;

    fn next(&mut self) -> Option<Self::Item> {
        // Copy out of the shared scratch buffer into an owned allocation.
        let buffer = self.borrowed_next()?;

        Some(buffer.to_vec().into_boxed_slice())
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: the number of elements not yet yielded.
        let elems_left = self.queue_file.elem_cnt - self.next_elem_index;

        (elems_left, Some(elems_left))
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        if self.queue_file.elem_cnt - self.next_elem_index < n {
            self.next_elem_index = self.queue_file.elem_cnt;

            return None;
        }

        // Jump ahead via the offset cache when possible.
        // NOTE(review): `n` here is relative to the current position, while
        // the cache stores absolute element indices — confirm this is intended
        // once the iterator has already advanced (`next_elem_index > 0`).
        let left = if let Some(i) = self.queue_file.cached_index_up_to(n) {
            let (index, elem) = self.queue_file.cached_offsets[i];
            if index > self.next_elem_index {
                self.next_elem_index = index;
                self.next_elem_pos = elem.pos;
            }

            n - self.next_elem_index
        } else {
            n
        };

        // Skip the remaining elements one header at a time.
        for _ in 0..left {
            self.borrowed_next();
        }

        self.next()
    }
}

impl Iter<'_> {
    /// Returns the next element in the queue.
    /// Similar to `Iter::next` but returned value bounded to internal buffer,
    /// i.e not allocated at each call.
1281 | pub fn borrowed_next(&mut self) -> Option<&[u8]> { 1282 | if self.next_elem_index >= self.queue_file.elem_cnt { 1283 | return None; 1284 | } 1285 | 1286 | let current = self.queue_file.read_element(self.next_elem_pos).ok()?; 1287 | self.next_elem_pos = self.queue_file.wrap_pos(current.pos + Element::HEADER_LENGTH as u64); 1288 | 1289 | if current.len > self.buffer.len() { 1290 | self.buffer.resize(current.len, 0); 1291 | } 1292 | self.queue_file.ring_read(self.next_elem_pos, &mut self.buffer[..current.len]).ok()?; 1293 | 1294 | self.next_elem_pos = self 1295 | .queue_file 1296 | .wrap_pos(current.pos + Element::HEADER_LENGTH as u64 + current.len as u64); 1297 | 1298 | self.queue_file.cache_elem_if_needed(self.next_elem_index, current, 1); 1299 | self.next_elem_index += 1; 1300 | 1301 | Some(&self.buffer[..current.len]) 1302 | } 1303 | } 1304 | 1305 | impl Drop for Iter<'_> { 1306 | fn drop(&mut self) { 1307 | self.queue_file.write_buf = std::mem::take(&mut self.buffer); 1308 | } 1309 | } 1310 | -------------------------------------------------------------------------------- /tests/bug_cases.rs: -------------------------------------------------------------------------------- 1 | use queue_file::QueueFile; 2 | 3 | #[test] 4 | fn reopen_bigger_capacity_wrong_file_len() { 5 | let path = auto_delete_path::AutoDeletePath::temp(); 6 | 7 | { 8 | let qf = QueueFile::with_capacity(&path, 1024 * 5).unwrap(); 9 | assert_eq!(std::fs::metadata(&path).unwrap().len(), qf.file_len()); 10 | } 11 | 12 | let qf = QueueFile::with_capacity(&path, 1024 * 6).unwrap(); 13 | assert_eq!(std::fs::metadata(&path).unwrap().len(), qf.file_len()); 14 | } 15 | 16 | #[test] 17 | fn bigger_write_buffer_overwrites_read_buffer() { 18 | let path = auto_delete_path::AutoDeletePath::temp(); 19 | let mut qf = QueueFile::with_capacity(path, 32 + 4 * 2 + 2 + 4).unwrap(); 20 | qf.set_overwrite_on_remove(false); 21 | qf.set_read_buffer_size(7); 22 | 23 | qf.add_n(&[&[1, 2, 3], &[4, 5, 6]]).unwrap(); 24 
| qf.remove().unwrap(); 25 | qf.remove().unwrap(); 26 | 27 | qf.add_n(&[&[7, 8, 9, 10][..], &[0, 0]]).unwrap(); 28 | qf.remove().unwrap(); 29 | 30 | qf.add(&[99]).unwrap(); 31 | qf.remove().unwrap(); 32 | } 33 | 34 | #[test] 35 | fn transfer_expand_invalid_file_len() { 36 | let path = auto_delete_path::AutoDeletePath::temp(); 37 | let mut qf = QueueFile::with_capacity(path, 32 + (4 + 1) * 3).unwrap(); 38 | qf.set_read_buffer_size(7); 39 | 40 | qf.add_n(&[&[1], &[2], &[3]]).unwrap(); 41 | qf.remove_n(2).unwrap(); 42 | qf.add_n(&[&[1]]).unwrap(); 43 | 44 | qf.add_n(&[&[2], &[4]]).unwrap(); 45 | 46 | assert_eq!(qf.iter().map(|v| v[0]).collect::>(), vec![3, 1, 2, 4]); 47 | } 48 | -------------------------------------------------------------------------------- /tests/test.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | 3 | use queue_file::{OffsetCacheKind, QueueFile}; 4 | use quickcheck_macros::quickcheck; 5 | use test_case::test_case; 6 | 7 | #[test_case(true; "with overwrite")] 8 | #[test_case(false; "with no overwrite")] 9 | fn queue_capacity_preserved(is_overwrite: bool) { 10 | let initial_size = 517; 11 | let p = auto_delete_path::AutoDeletePath::temp(); 12 | let mut qf = QueueFile::with_capacity(&p, initial_size).unwrap(); 13 | qf.set_overwrite_on_remove(is_overwrite); 14 | 15 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size); 16 | 17 | for i in 0u32..40 { 18 | qf.add(&i.to_be_bytes()).unwrap(); 19 | } 20 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size); 21 | 22 | for i in 0u32..40 { 23 | qf.add(&i.to_be_bytes()).unwrap(); 24 | } 25 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size * 2); 26 | 27 | qf.clear().unwrap(); 28 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size); 29 | } 30 | 31 | #[test_case(true; "with overwrite")] 32 | #[test_case(false; "with no overwrite")] 33 | fn existing_queue_extended_on_new_capacity(is_overwrite: 
bool) {
34 | let initial_size = 200;
35 | let p = auto_delete_path::AutoDeletePath::temp();
36 |
37 | {
38 | let mut qf = QueueFile::with_capacity(&p, initial_size).unwrap();
39 | qf.set_overwrite_on_remove(is_overwrite);
40 |
41 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size);
42 | }
43 |
44 | let initial_size2 = 350;
45 | let _qf = QueueFile::with_capacity(&p, initial_size2).unwrap();
46 | assert_eq!(std::fs::metadata(&p).unwrap().len(), initial_size2);
47 | }
48 |
// NOTE(review): restored generic parameters stripped by text extraction
// (`Vec<u8>`, `Box<dyn Iterator<Item = Self>>`, `Vec<Vec<u8>>`, `VecDeque<Vec<u8>>`).
49 | #[derive(Debug, Clone)]
50 | enum Action {
51 | Add(Vec<u8>),
52 | Read { skip: usize, take: usize },
53 | Remove(usize),
54 | }
55 |
56 | impl quickcheck::Arbitrary for Action {
57 | fn arbitrary(g: &mut quickcheck::Gen) -> Self {
58 | let kind = u32::arbitrary(g);
59 |
60 | match kind % 3 {
61 | 0 => Self::Add(Vec::arbitrary(g)),
62 | 1 => Self::Remove(usize::arbitrary(g)),
63 | 2 => Self::Read { skip: usize::arbitrary(g), take: usize::arbitrary(g) },
64 | _ => unreachable!(),
65 | }
66 | }
67 |
68 | fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
69 | match self {
70 | Self::Add(v) => Box::new(v.shrink().map(Self::Add)),
71 | Self::Remove(n) => Box::new(n.shrink().map(Self::Remove)),
72 | Self::Read { skip, take } => Box::new(
73 | take.shrink().zip(skip.shrink()).map(|(take, skip)| Self::Read { take, skip }),
74 | ),
75 | }
76 | }
77 | }
78 |
79 | #[track_caller]
80 | fn collect_queue_items(qf: &mut QueueFile) -> Vec<Vec<u8>> {
81 | collect_queue_items_partial(qf, 0, qf.size() + 1)
82 | }
83 |
84 | #[track_caller]
85 | fn collect_queue_items_partial(qf: &mut QueueFile, skip: usize, take: usize) -> Vec<Vec<u8>> {
86 | qf.iter().skip(skip).take(take).map(Vec::from).collect::<Vec<_>>()
87 | }
88 |
89 | #[track_caller]
90 | fn compare_with_vecdeque(qf: &mut QueueFile, vd: &VecDeque<Vec<u8>>) {
91 | compare_with_vecdeque_partial(qf, vd, 0, vd.len() + 1);
92 | }
93 |
94 | #[track_caller]
95 | fn compare_with_vecdeque_partial(
96 | qf: &mut QueueFile, vd: &VecDeque<Vec<u8>>, skip: usize, take: usize,
97 | ) {
98 | let left =
collect_queue_items_partial(qf, skip, take);
99 | let right = vd.iter().skip(skip).take(take).cloned().collect::<Vec<_>>();
100 | assert_eq!(left, right);
101 | }
102 |
// NOTE(review): restored generic parameters stripped by text extraction (`Vec<Action>`, `Vec<_>`).
103 | #[quickcheck]
104 | fn legacy_queue_is_vecdeque(actions: Vec<Action>) {
105 | let path = auto_delete_path::AutoDeletePath::temp();
106 | let mut qf = QueueFile::open_legacy(&path).unwrap();
107 | let mut vd = VecDeque::new();
108 |
109 | for action in actions {
110 | match action {
111 | Action::Add(v) => {
112 | qf.add(&v).unwrap();
113 | vd.push_back(v);
114 | }
115 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
116 | Action::Remove(n) => {
117 | vd.drain(..n.min(vd.len()));
118 | qf.remove_n(n).unwrap();
119 | }
120 | }
121 |
122 | compare_with_vecdeque(&mut qf, &vd);
123 | }
124 | }
125 |
126 | #[quickcheck]
127 | fn queue_with_skip_header_update_is_vecdeque(actions: Vec<Action>) {
128 | let path = auto_delete_path::AutoDeletePath::temp();
129 | let mut qf = QueueFile::open(&path).unwrap();
130 | qf.set_skip_write_header_on_add(true);
131 | let mut vd = VecDeque::new();
132 |
133 | for action in actions {
134 | match action {
135 | Action::Add(v) => {
136 | qf.add(&v).unwrap();
137 | vd.push_back(v);
138 | }
139 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
140 | Action::Remove(n) => {
141 | vd.drain(..n.min(vd.len()));
142 | qf.remove_n(n).unwrap();
143 | }
144 | }
145 |
146 | compare_with_vecdeque(&mut qf, &vd);
147 | }
148 |
149 | let stored = collect_queue_items(&mut qf);
150 | drop(qf);
151 |
152 | let mut qf = QueueFile::open(&path).unwrap();
153 | let restored = collect_queue_items(&mut qf);
154 | assert_eq!(stored, restored);
155 | }
156 |
157 | #[quickcheck]
158 | fn queue_is_vecdeque(actions: Vec<Action>) {
159 | let path = auto_delete_path::AutoDeletePath::temp();
160 | let mut qf = QueueFile::open(&path).unwrap();
161 | let mut vd = VecDeque::new();
162 |
163 | for action in actions {
164 | match action {
165 |
Action::Add(v) => {
166 | qf.add(&v).unwrap();
167 | vd.push_back(v);
168 | }
169 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
170 | Action::Remove(n) => {
171 | vd.drain(..n.min(vd.len()));
172 | qf.remove_n(n).unwrap();
173 | }
174 | }
175 |
176 | compare_with_vecdeque(&mut qf, &vd);
177 | }
178 | }
179 |
// NOTE(review): restored generic parameters stripped by text extraction (`Vec<Action>`).
180 | #[quickcheck]
181 | fn queue_is_vecdeque_no_intermediate_comparisons(actions: Vec<Action>) {
182 | let path = auto_delete_path::AutoDeletePath::temp();
183 | let mut qf = QueueFile::open(&path).unwrap();
184 | let mut vd = VecDeque::new();
185 |
186 | for action in actions {
187 | match action {
188 | Action::Add(v) => {
189 | qf.add(&v).unwrap();
190 | vd.push_back(v);
191 | }
192 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
193 | Action::Remove(n) => {
194 | vd.drain(..n.min(vd.len()));
195 | qf.remove_n(n).unwrap();
196 | }
197 | }
198 | }
199 |
200 | compare_with_vecdeque(&mut qf, &vd);
201 | }
202 |
203 | #[quickcheck]
204 | fn small_queue_is_vecdeque(actions: Vec<Action>) {
205 | let path = auto_delete_path::AutoDeletePath::temp();
206 | let mut qf = QueueFile::with_capacity(&path, 32 + 32).unwrap();
207 | qf.set_overwrite_on_remove(false);
208 | qf.set_read_buffer_size(7);
209 | let mut vd = VecDeque::new();
210 |
211 | for action in actions {
212 | match action {
213 | Action::Add(v) => {
214 | qf.add(&v).unwrap();
215 | vd.push_back(v);
216 | }
217 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
218 | Action::Remove(n) => {
219 | vd.drain(..n.min(vd.len()));
220 | qf.remove_n(n).unwrap();
221 | }
222 | }
223 |
224 | compare_with_vecdeque(&mut qf, &vd);
225 | }
226 | }
227 |
228 | #[quickcheck]
229 | fn small_queue_is_vecdeque_cached_offsets(actions: Vec<Action>) {
230 | let path = auto_delete_path::AutoDeletePath::temp();
231 | let mut qf = QueueFile::with_capacity(&path, 32 + 32).unwrap();
232 | qf.set_overwrite_on_remove(false);
233 |
qf.set_read_buffer_size(7);
234 | qf.set_cache_offset_policy(Some(OffsetCacheKind::Quadratic));
235 | let mut vd = VecDeque::new();
236 |
237 | for action in actions {
238 | match action {
239 | Action::Add(v) => {
240 | qf.add(&v).unwrap();
241 | vd.push_back(v);
242 | }
243 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
244 | Action::Remove(n) => {
245 | vd.drain(..n.min(vd.len()));
246 | qf.remove_n(n).unwrap();
247 | }
248 | }
249 |
250 | compare_with_vecdeque(&mut qf, &vd);
251 | }
252 | }
253 |
// NOTE(review): restored generic parameters stripped by text extraction (`Vec<Action>`).
254 | #[quickcheck]
255 | fn add_n_works(actions: Vec<Action>) {
256 | let path = auto_delete_path::AutoDeletePath::temp();
257 | let mut qf = QueueFile::open(&path).unwrap();
258 | let mut vd = VecDeque::new();
259 |
260 | let mut adds = vec![];
261 |
262 | macro_rules! add_n_check {
263 | () => {
264 | qf.add_n(adds.iter().cloned()).unwrap();
265 | vd.extend(adds.drain(..));
266 |
267 | compare_with_vecdeque(&mut qf, &vd);
268 | };
269 | }
270 |
271 | for action in actions {
272 | match action {
273 | Action::Add(v) => adds.push(v),
274 | Action::Read { take, skip } => compare_with_vecdeque_partial(&mut qf, &vd, skip, take),
275 | Action::Remove(n) => {
276 | add_n_check!();
277 |
278 | vd.drain(..n.min(vd.len()));
279 | qf.remove_n(n).unwrap();
280 | }
281 | }
282 |
283 | compare_with_vecdeque(&mut qf, &vd);
284 | }
285 |
286 | add_n_check!();
287 | }
288 |
289 | #[test]
290 | fn iter_nth() {
291 | let path = auto_delete_path::AutoDeletePath::temp();
292 | let mut qf = QueueFile::open(&path).unwrap();
293 |
294 | let a = vec![1];
295 | let b = vec![2, 3];
296 | let c = vec![4, 5, 6];
297 | qf.add_n(vec![a.clone(), b.clone(), c.clone()]).unwrap();
298 |
299 | assert_eq!(qf.iter().next(), Some(a.into_boxed_slice()));
300 | assert_eq!(qf.iter().nth(1), Some(b.clone().into_boxed_slice()));
301 | assert_eq!(qf.iter().nth(2), Some(c.clone().into_boxed_slice()));
302 | assert_eq!(qf.iter().skip(0).nth(1), Some(b.clone().into_boxed_slice()));
303 | assert_eq!(qf.iter().skip(0).nth(2), Some(c.clone().into_boxed_slice()));
304 | assert_eq!(qf.iter().nth(1), Some(b.into_boxed_slice()));
305 | assert_eq!(qf.iter().skip(1).nth(1), Some(c.into_boxed_slice()));
306 | assert_eq!(qf.iter().nth(3), None);
307 | assert_eq!(qf.iter().nth(123), None);
308 | }
309 |
--------------------------------------------------------------------------------