├── .github └── workflows │ └── main.yml ├── .gitignore ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches └── vector.rs ├── examples └── chunked.rs ├── fuzz ├── .gitignore ├── Cargo.toml └── fuzz_targets │ ├── cmd.rs │ ├── shared_vector.rs │ └── unique_vector.rs ├── images ├── chunked-vector.svg ├── header-vector.svg ├── shared-vector.svg └── unique-vector.svg └── src ├── drain.rs ├── lib.rs ├── raw.rs ├── shared.rs ├── splice.rs └── vector.rs /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | stable: 11 | env: 12 | RUST_BACKTRACE: 1 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions-rs/toolchain@v1 17 | with: 18 | profile: minimal 19 | toolchain: stable 20 | override: true 21 | - uses: Swatinem/rust-cache@v1 22 | - name: Test 23 | uses: actions-rs/cargo@v1 24 | with: 25 | command: test 26 | args: --all --verbose 27 | 28 | nightly: 29 | env: 30 | RUST_BACKTRACE: 1 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: actions/checkout@v2 34 | - uses: actions-rs/toolchain@v1 35 | with: 36 | profile: minimal 37 | toolchain: nightly 38 | override: true 39 | - uses: Swatinem/rust-cache@v1 40 | - name: Test 41 | uses: actions-rs/cargo@v1 42 | with: 43 | command: test 44 | args: --all --verbose --features nightly 45 | 46 | miri: 47 | env: 48 | RUST_BACKTRACE: 1 49 | runs-on: ubuntu-latest 50 | steps: 51 | - uses: actions/checkout@v2 52 | - uses: actions-rs/toolchain@v1 53 | with: 54 | profile: minimal 55 | toolchain: nightly 56 | components: miri 57 | override: true 58 | - uses: Swatinem/rust-cache@v1 59 | - name: Test 60 | run: cargo +nightly miri test 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | .DS_Store 12 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This project welcomes contribution from everyone. Here are the guidelines if you are 4 | thinking of helping us: 5 | 6 | ## Contributions 7 | 8 | Contributions to this project should be made in the form of GitHub pull requests. 9 | Each pull request will be reviewed by a core contributor (someone with 10 | permission to land patches) and either landed in the main tree or 11 | given feedback for changes that would be required. 12 | All contributions should follow this format, even those from core contributors. 13 | 14 | Should you wish to work on an issue, please claim it first by commenting on 15 | the GitHub issue that you want to work on it. This is to prevent duplicated 16 | efforts from contributors on the same issue. 17 | 18 | ## Pull Request Checklist 19 | 20 | - Branch from the main branch and, if needed, rebase to the current main 21 | branch before submitting your pull request. If it doesn't merge cleanly with 22 | main you may be asked to rebase your changes. 23 | 24 | - Commits should be as small as possible, while ensuring that each commit is 25 | correct independently (i.e., each commit should compile and pass tests). 26 | 27 | - If your patch is not getting reviewed or you need a specific person to review 28 | it, you can @-reply a reviewer asking for a review in the pull request or a 29 | comment. 30 | 31 | - Whenever applicable, add tests relevant to the fixed bug or new feature. 32 | 33 | For specific git instructions, see [GitHub workflow 101](https://github.com/servo/servo/wiki/Github-workflow). 34 | 35 | ## Testing 36 | 37 | To run all tests, execute `cargo test` as well as `cargo +nightly miri test` from the root of the repository. 38 | 39 | ## Conduct 40 | 41 | In all related forums, we follow the [Rust Code of Conduct](http://www.rust-lang.org/conduct.html). 42 | For escalation or moderation issues, please contact [Nical](https://github.com/nical) instead of the Rust moderation team. 43 | 44 | ## License 45 | 46 | Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be licensed dual MIT/Apache 2,without any additional terms or conditions. 47 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shared_vector" 3 | version = "0.4.4" 4 | edition = "2021" 5 | authors = ["Nicolas Silva "] 6 | repository = "https://github.com/nical/shared_vector" 7 | documentation = "https://docs.rs/shared_vector/" 8 | keywords = ["vector", "immutable"] 9 | license = "MIT OR Apache-2.0" 10 | description = "Reference counted vector data structure." 11 | 12 | [dev-dependencies] 13 | criterion = "0.4" 14 | blink-alloc = "0.2.5" 15 | 16 | [[bench]] 17 | name = "vector" 18 | harness = false 19 | 20 | [profile.release] 21 | debug = true 22 | 23 | [dependencies] 24 | allocator-api2 = "0.2.4" 25 | 26 | [features] 27 | default = ["std"] 28 | nightly = ["allocator-api2/nightly", "blink-alloc/nightly"] 29 | std = ["allocator-api2/std"] 30 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright 2016 Nicolas Silva 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2013 Nicolas Silva 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Reference counted vectors. 2 | 3 | - [crate](https://crates.io/crates/shared_vector) 4 | - [doc](https://docs.rs/shared_vector) 5 | 6 | # Overview 7 | 8 | This crate provides the following two types: 9 | - `SharedVector`/`AtomicSharedVector`, an immutable reference counted vector (with an atomically 10 | reference counted variant). 11 | - `Vector`, an unique vector type with an API similar to `std::Vec`. 12 | 13 | Internally, shared vectors are a little different from the standard `Vec`. 14 | `SharedVector` and `AtomicSharedVector` hold a single pointer to a buffer containing: 15 | - A header storing the length, capacity and allocator, 16 | - the contiguous sequence of items of type `T`. 17 | 18 | 19 | 20 | `Vector`'s representation is closer to `Vec`: it stores the length and capacity information inline and only writes them into the header if/when converting into a shared vector. The allocated buffer does leave room for the header so that converting to and from `SharedVector` does not require reallocating. 21 | 22 | 23 | 24 | This allows very cheap conversion between the two: 25 | - shared to unique: a new allocation is made only if there are multiple handles to the same buffer (the reference count is greather than one). 26 | - unique to shared: always fast since unique buffers are guaranteed to be sole owners of their buffer. 27 | 28 | The generic parameter `A` is the allocator. This crate uses [allocator-api2](https://crates.io/crates/allocator-api2) to polyfill the unstable [allocator_api](https://doc.rust-lang.org/unstable-book/library-features/allocator-api.html) feature. This makes it possible to use custom allocators in stable rust while the feature is still nightly-only. 29 | 30 | # Use cases 31 | 32 | ## `Arc>` without the indirection. 33 | 34 | A vector can be be built using a Vec-style API, and then made immutable and reference counted for various use case (easy multi-threading or simply shared ownership). 35 | 36 | Using the standard library one might be tempted to first build a `Vec` and share it via `Arc>`. This is a fine approach at the cost of an extra pointer indirection that could be avoided in principle. Another approach is to share it as an `Arc<[T]>` which removes the indirection at the cost of the need to reallocate and copy the contents. 37 | 38 | Using this crate there is no extra indirection in the resulting shared vector nor any copy between the unique and shared versions. 39 | 40 | ``` 41 | use shared_vector::Vector; 42 | let mut builder = Vector::new(); 43 | builder.push(1u32); 44 | builder.push(2); 45 | builder.push(3); 46 | // Make it reference counted, no allocation. 47 | let mut shared = builder.into_shared(); 48 | // We can now create new references 49 | let shared_2 = shared.new_ref(); 50 | let shared_3 = shared.new_ref(); 51 | ``` 52 | 53 | ## Immutable data structures 54 | 55 | `SharedVector` and `AtomicSharedVector` behave like simple immutable data structures and 56 | are good building blocks for creating more complicated ones. 57 | 58 | ``` 59 | use shared_vector::{SharedVector, rc_vector}; 60 | let mut a = rc_vector![1u32, 2, 3]; 61 | 62 | // `new_ref` (you can also use `clone`) creates a second reference to the same buffer. 63 | // future mutations of `a` won't affect `b`. 64 | let mut b = a.new_ref(); 65 | // Because both a and b point to the same buffer, the next mutation allocates a new 66 | // copy under the hood. 67 | a.push(4); 68 | // Now that a and b are unique pointers to their own buffers, no allocation happens 69 | // when pushing to them. 70 | a.push(5); 71 | b.push(6); 72 | 73 | assert_eq!(a.as_slice(), &[1, 2, 3, 4, 5]); 74 | assert_eq!(b.as_slice(), &[1, 2, 3, 6]); 75 | ``` 76 | 77 | Note that `SharedVector` is *not* a RRB vector implementation. 78 | 79 | ### ChunkVector 80 | 81 | As a very light experiment towards making custom immutable data structures on top of shared vectors, there is a very simple chunked vector implementation in the examples folder. 82 | 83 | 84 | 85 | Just like the vector types, "chunk vector" comes in three flavors: `ChunkVector`, `SharedChunkVector`, `AtomicSharedChunkVector`. 86 | 87 | They are internally represented as a reference counted table of reference counted memory blocks (or "chunks"). In the illustration above, two chunked vectors point to the same table, while another points to a different table but still shares some of the storage chunks. In practice the chunked vector types are very little more than `SharedVector>` 88 | 89 | # Limitiations 90 | 91 | - These vector types can hold at most `u32::MAX` elements. 92 | 93 | # License 94 | 95 | Licensed under either of: 96 | 97 | - Apache License, Version 2.0 (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) 98 | - MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT) 99 | 100 | # Contributions 101 | 102 | See the [contribution guidelines](https://github.com/nical/shared_vector/blob/master/CONTRIBUTING.md). 103 | -------------------------------------------------------------------------------- /benches/vector.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId}; 2 | use shared_vector::{AtomicSharedVector, SharedVector, Vector}; 3 | 4 | criterion_group!(vector, vector_push); 5 | criterion_main!(vector); 6 | 7 | fn push_shared(n: u32, initial_cap: usize) { 8 | let mut v = SharedVector::with_capacity(initial_cap); 9 | for i in 0..n { 10 | v.push(i); 11 | } 12 | black_box(v); 13 | } 14 | 15 | fn push_atomic(n: u32, initial_cap: usize) { 16 | let mut v = AtomicSharedVector::with_capacity(initial_cap); 17 | for i in 0..n { 18 | v.push(i); 19 | } 20 | black_box(v); 21 | } 22 | 23 | fn push_unique(n: u32, initial_cap: usize) { 24 | let mut v = Vector::with_capacity(initial_cap); 25 | for i in 0..n { 26 | v.push(i); 27 | } 28 | black_box(v); 29 | } 30 | 31 | fn push_std(n: u32, initial_cap: usize) { 32 | let mut v = Vec::with_capacity(initial_cap); 33 | for i in 0..n { 34 | v.push(i); 35 | } 36 | black_box(v); 37 | } 38 | 39 | fn vector_push(c: &mut Criterion) { 40 | let mut g = c.benchmark_group("push"); 41 | 42 | for item_count in [1000, 10_000] { 43 | for initial_cap in [1024, 256, 32] { 44 | g.bench_with_input(BenchmarkId::new(&format!("shared({initial_cap})"), &item_count), &item_count, |b, item_count| b.iter (||push_shared(*item_count, black_box(initial_cap)))); 45 | g.bench_with_input(BenchmarkId::new(&format!("atomic({initial_cap})"), &item_count), &item_count, |b, item_count| b.iter (||push_atomic(*item_count, black_box(initial_cap)))); 46 | g.bench_with_input(BenchmarkId::new(&format!("unique({initial_cap})"), &item_count), &item_count, |b, item_count| b.iter(||push_unique(*item_count, black_box(initial_cap)))); 47 | g.bench_with_input(BenchmarkId::new(&format!("std({initial_cap})"), &item_count), &item_count, |b, item_count| b.iter(||push_std(*item_count, black_box(initial_cap)))); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /examples/chunked.rs: -------------------------------------------------------------------------------- 1 | use shared_vector::{ 2 | AtomicRefCount, BufferSize, DefaultRefCount, RefCount, RefCountedVector, SharedVector, Vector, 3 | }; 4 | 5 | pub type AtomicSharedChunkVector = RefCountedChunkVector; 6 | pub type SharedChunkVector = RefCountedChunkVector; 7 | 8 | /// A reference counted container split into multiple chunks of contiguous data. 9 | /// 10 | /// 11 | /// 12 | #[derive(Clone)] 13 | pub struct RefCountedChunkVector { 14 | chunks: RefCountedVector, R>, 15 | chunk_size: usize, 16 | len: usize, 17 | } 18 | 19 | impl RefCountedChunkVector { 20 | pub fn new(chunk_size: usize) -> Self { 21 | assert!(chunk_size < BufferSize::MAX as usize); 22 | RefCountedChunkVector { 23 | chunks: RefCountedVector::new(), 24 | chunk_size, 25 | len: 0, 26 | } 27 | } 28 | 29 | #[inline] 30 | pub fn len(&self) -> usize { 31 | self.len 32 | } 33 | 34 | #[inline] 35 | pub fn num_chunks(&self) -> usize { 36 | self.chunks.len() 37 | } 38 | 39 | #[inline] 40 | pub fn is_empty(&self) -> bool { 41 | self.len == 0 42 | } 43 | 44 | pub fn push(&mut self, val: T) 45 | where 46 | T: Clone, 47 | { 48 | if let Some(last) = self.chunks.last_mut() { 49 | if last.remaining_capacity() > 0 { 50 | last.push(val); 51 | self.len += 1; 52 | return; 53 | } 54 | } 55 | 56 | let mut new_chunk = RefCountedVector::with_capacity(self.chunk_size); 57 | new_chunk.push(val); 58 | self.chunks.push(new_chunk); 59 | self.len += 1; 60 | } 61 | 62 | pub fn pop(&mut self) -> Option 63 | where 64 | T: Clone, 65 | { 66 | let mut result = None; 67 | let mut pop_chunk = false; 68 | if let Some(chunk) = self.chunks.last_mut() { 69 | result = chunk.pop(); 70 | pop_chunk = chunk.is_empty(); 71 | if result.is_some() { 72 | self.len -= 1; 73 | } 74 | } 75 | 76 | if pop_chunk { 77 | self.chunks.pop(); 78 | } 79 | 80 | result 81 | } 82 | 83 | pub fn push_chunk(&mut self, chunk: RefCountedVector) { 84 | self.len += chunk.len(); 85 | self.chunks.push(chunk); 86 | } 87 | 88 | pub fn pop_chunk(&mut self) -> Option> { 89 | if let Some(chunk) = self.chunks.pop() { 90 | self.len -= chunk.len(); 91 | return Some(chunk); 92 | } 93 | 94 | None 95 | } 96 | 97 | pub fn clear(&mut self) { 98 | self.chunks.clear(); 99 | self.len = 0; 100 | } 101 | 102 | pub fn chunks(&self) -> impl Iterator { 103 | self.chunks.iter().map(RefCountedVector::as_slice) 104 | } 105 | 106 | pub fn chunks_mut(&mut self) -> impl Iterator 107 | where 108 | T: Clone, 109 | { 110 | self.chunks.iter_mut().map(RefCountedVector::as_mut_slice) 111 | } 112 | 113 | pub fn iter(&self) -> impl Iterator { 114 | self.chunks.iter().flat_map(|chunk| chunk.iter()) 115 | } 116 | 117 | pub fn iter_mut(&mut self) -> impl Iterator 118 | where 119 | T: Clone, 120 | { 121 | self.chunks.iter_mut().flat_map(|chunk| chunk.iter_mut()) 122 | } 123 | 124 | #[inline] 125 | pub fn new_ref(&self) -> Self 126 | where 127 | T: Clone, 128 | { 129 | Self { 130 | chunks: self.chunks.new_ref(), 131 | chunk_size: self.chunk_size, 132 | len: self.len, 133 | } 134 | } 135 | 136 | pub fn into_unique(mut self) -> ChunkVector 137 | where 138 | T: Clone, 139 | { 140 | for chunk in self.chunks.iter_mut() { 141 | chunk.ensure_unique(); 142 | } 143 | 144 | let last_full = self 145 | .chunks 146 | .last() 147 | .map(|chunk| chunk.remaining_capacity() == 0) 148 | .unwrap_or(true); 149 | let head = if last_full { 150 | Vector::new() 151 | } else { 152 | self.chunks.pop().unwrap().into_unique() 153 | }; 154 | 155 | ChunkVector { 156 | head, 157 | chunks: unsafe { std::mem::transmute(self.chunks.into_unique()) }, 158 | chunk_size: self.chunk_size, 159 | len: self.len, 160 | } 161 | } 162 | } 163 | 164 | pub struct ChunkVector { 165 | head: Vector, 166 | chunks: Vector>, 167 | chunk_size: usize, 168 | len: usize, 169 | } 170 | 171 | impl ChunkVector { 172 | pub fn new(chunk_size: usize) -> Self { 173 | assert!(chunk_size > 0); 174 | assert!(chunk_size < BufferSize::MAX as usize); 175 | ChunkVector { 176 | head: Vector::new(), 177 | chunks: Vector::new(), 178 | chunk_size, 179 | len: 0, 180 | } 181 | } 182 | 183 | #[inline] 184 | pub fn len(&self) -> usize { 185 | self.len 186 | } 187 | 188 | #[inline] 189 | pub fn num_chunks(&self) -> usize { 190 | self.chunks.len() 191 | } 192 | 193 | #[inline] 194 | pub fn is_empty(&self) -> bool { 195 | self.len == 0 196 | } 197 | 198 | pub fn push(&mut self, val: T) 199 | where 200 | T: Clone, 201 | { 202 | if self.head.capacity() == 0 { 203 | self.head.reserve(self.chunk_size); 204 | } 205 | 206 | self.head.push(val); 207 | self.len += 1; 208 | 209 | if self.head.remaining_capacity() == 0 { 210 | let chunk = std::mem::replace(&mut self.head, Vector::new()); 211 | self.chunks.push(chunk.into_shared()); 212 | } 213 | } 214 | 215 | pub fn pop(&mut self) -> Option 216 | where 217 | T: Clone, 218 | { 219 | if self.head.is_empty() { 220 | if let Some(chunk) = self.chunks.pop() { 221 | self.head = chunk.into_unique(); 222 | } 223 | } 224 | 225 | let result = self.head.pop(); 226 | 227 | if result.is_some() { 228 | self.len -= 1; 229 | } 230 | 231 | result 232 | } 233 | 234 | pub fn into_shared(mut self) -> SharedChunkVector { 235 | if !self.head.is_empty() { 236 | self.chunks.push(self.head.into_shared()); 237 | } 238 | 239 | SharedChunkVector { 240 | chunks: self.chunks.into_shared(), 241 | len: self.len, 242 | chunk_size: self.chunk_size, 243 | } 244 | } 245 | 246 | pub fn into_shared_atomic(self) -> AtomicSharedChunkVector { 247 | unsafe { std::mem::transmute(self.into_shared()) } 248 | } 249 | 250 | pub fn chunks(&self) -> impl Iterator { 251 | let head: Option<&[T]> = if self.head.is_empty() { 252 | None 253 | } else { 254 | Some(self.head.as_slice()) 255 | }; 256 | 257 | self.chunks 258 | .iter() 259 | .map(RefCountedVector::as_slice) 260 | .chain(head) 261 | } 262 | 263 | pub fn chunks_mut(&mut self) -> impl Iterator 264 | where 265 | T: Clone, 266 | { 267 | let head: Option<&mut [T]> = if self.head.is_empty() { 268 | None 269 | } else { 270 | Some(self.head.as_mut_slice()) 271 | }; 272 | 273 | self.chunks 274 | .iter_mut() 275 | .map(RefCountedVector::as_mut_slice) 276 | .chain(head) 277 | } 278 | 279 | pub fn iter(&self) -> impl Iterator { 280 | self.chunks 281 | .iter() 282 | .flat_map(|chunk| chunk.iter()) 283 | .chain(self.head.iter()) 284 | } 285 | 286 | pub fn iter_mut(&mut self) -> impl Iterator 287 | where 288 | T: Clone, 289 | { 290 | self.chunks 291 | .iter_mut() 292 | .flat_map(|chunk| chunk.iter_mut()) 293 | .chain(self.head.iter_mut()) 294 | } 295 | } 296 | 297 | fn chunks_basic() { 298 | chunks_basic_impl::(); 299 | chunks_basic_impl::(); 300 | 301 | fn chunks_basic_impl() { 302 | let mut v: RefCountedChunkVector = RefCountedChunkVector::new(16); 303 | for i in 0..40 { 304 | v.push(i); 305 | assert_eq!(v.len(), i as usize + 1); 306 | } 307 | let mut v2 = v.new_ref(); 308 | for i in 40..80 { 309 | v.push(i); 310 | assert_eq!(v.len(), i as usize + 1); 311 | } 312 | 313 | let items: Vec = v.iter().cloned().collect(); 314 | assert_eq!(items.len(), 80); 315 | for i in 0..80 { 316 | assert_eq!(items[i], i as u32); 317 | } 318 | 319 | let items: Vec = v2.iter().cloned().collect(); 320 | assert_eq!(items.len(), 40); 321 | for i in 0..40 { 322 | assert_eq!(items[i], i as u32); 323 | } 324 | 325 | for i in 0..80 { 326 | let idx = 79 - i; 327 | assert_eq!(v.pop(), Some(idx)); 328 | assert_eq!(v.len(), idx as usize); 329 | } 330 | 331 | v2.clear(); 332 | 333 | assert!(v.pop().is_none()); 334 | assert!(v2.pop().is_none()); 335 | } 336 | } 337 | 338 | fn unique_chunks() { 339 | let mut v = ChunkVector::new(100); 340 | 341 | for i in 0..512u32 { 342 | v.push(i); 343 | assert_eq!(v.len(), i as usize + 1); 344 | } 345 | 346 | assert_eq!(v.len(), 512); 347 | 348 | for i in 0..50 { 349 | let idx = 511 - i; 350 | assert_eq!(v.pop(), Some(idx)); 351 | assert_eq!(v.len(), idx as usize); 352 | } 353 | 354 | assert_eq!(v.len(), 462); 355 | 356 | let items: Vec = v.iter().cloned().collect(); 357 | assert_eq!(items.len(), 462); 358 | for i in 0..462 { 359 | assert_eq!(items[i], i as u32); 360 | } 361 | 362 | let shared = v.into_shared(); 363 | 364 | assert_eq!(shared.len(), 462); 365 | 366 | let items: Vec = shared.iter().cloned().collect(); 367 | assert_eq!(items.len(), 462); 368 | for i in 0..462 { 369 | assert_eq!(items[i], i as u32); 370 | } 371 | } 372 | 373 | fn main() { 374 | chunks_basic(); 375 | unique_chunks(); 376 | } 377 | -------------------------------------------------------------------------------- /fuzz/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | corpus 3 | artifacts 4 | coverage 5 | -------------------------------------------------------------------------------- /fuzz/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shared_vector-fuzz" 3 | version = "0.0.0" 4 | publish = false 5 | edition = "2021" 6 | 7 | [package.metadata] 8 | cargo-fuzz = true 9 | 10 | [dependencies] 11 | libfuzzer-sys = "0.4" 12 | arbitrary = { version = "1.2.3", features = ["derive"] } 13 | 14 | [dependencies.shared_vector] 15 | path = ".." 16 | 17 | # Prevent this from interfering with workspaces 18 | [workspace] 19 | members = ["."] 20 | 21 | [profile.release] 22 | debug = 1 23 | 24 | [[bin]] 25 | name = "shared_vector" 26 | path = "fuzz_targets/shared_vector.rs" 27 | test = false 28 | doc = false 29 | 30 | [[bin]] 31 | name = "unique_vector" 32 | path = "fuzz_targets/unique_vector.rs" 33 | test = false 34 | doc = false 35 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/cmd.rs: -------------------------------------------------------------------------------- 1 | use arbitrary::Arbitrary; 2 | 3 | pub fn slot(idx: usize) -> usize { idx % 4 } 4 | pub fn reserve_max(len: usize, additional: usize) -> usize { 5 | (additional % 1024).min(2048 - len.min(2048)) 6 | } 7 | 8 | #[derive(Arbitrary, Copy, Clone, Debug)] 9 | pub enum Cmd { 10 | AddRef { src_idx: usize, dst_idx: usize }, 11 | DropVec { idx: usize }, 12 | Clear { idx: usize }, 13 | Push { idx: usize, val: u32 }, 14 | PushWithinCapacity { idx: usize, val: u32 }, 15 | ExtendFromSlice { idx: usize }, 16 | Pop { idx: usize }, 17 | CloneBuffer { src_idx: usize, dst_idx: usize }, 18 | Iter { idx: usize }, 19 | IterMut { idx: usize }, 20 | EnsureUnique { idx: usize }, 21 | Append { src_idx: usize, dst_idx: usize }, 22 | WithCapacity { idx: usize, cap: usize }, 23 | FromSlice { src_idx: usize, dst_idx: usize }, 24 | AsMutSlice { idx: usize }, 25 | First { idx: usize }, 26 | Last { idx: usize, }, 27 | FirstMut { idx: usize }, 28 | LastMut { idx: usize, }, 29 | Reserve { idx: usize, val: usize }, 30 | Convert { idx: usize }, 31 | Swap { idx: usize, offsets: (usize, usize) }, 32 | Remove { idx: usize, offset: usize }, 33 | SwapRemove { idx: usize, offset: usize }, 34 | Insert { idx: usize, offset: usize, val: u32 }, 35 | Retain { idx: usize, bits: u32 }, 36 | ShrinkTo { idx: usize, cap: usize }, 37 | ShrinkToFit { idx: usize }, 38 | Drain { idx: usize, start: usize, count: usize }, 39 | Splice { idx: usize, start: usize, rem_count: usize, val: u32, add_count: usize }, 40 | } 41 | 42 | fn cmd_to_string(vec_type: &str, cmd: Cmd) -> String { 43 | match cmd { 44 | Cmd::AddRef { src_idx, dst_idx } => { 45 | if vec_type == "UniqueVector" { 46 | return String::new(); 47 | } 48 | let src_idx = slot(src_idx); 49 | let dst_idx = slot(dst_idx); 50 | format!("vectors[{dst_idx}] = vectors[{src_idx}].new_ref();") 51 | } 52 | Cmd::DropVec { idx } => { 53 | format!("vectors[{}] = {vec_type}::new();", slot(idx)) 54 | } 55 | Cmd::Clear { idx } => { 56 | format!("vectors[{}].clear();", slot(idx)) 57 | } 58 | Cmd::Push { idx, val } => { 59 | format!("vectors[{}].push(Box::new({val}));", slot(idx)) 60 | } 61 | Cmd::PushWithinCapacity { idx, val } => { 62 | format!("vectors[{}].push_within_capacity(Box::new({val}));", slot(idx)) 63 | } 64 | Cmd::ExtendFromSlice { idx } => { 65 | format!("vectors[{}].extend_from_slice(&[Box::new(1), Box::new(2), Box::new(3)]);", slot(idx)) 66 | } 67 | Cmd::Pop { idx } => { 68 | format!("vectors[{}].pop();", slot(idx)) 69 | } 70 | Cmd::CloneBuffer { src_idx, dst_idx } => { 71 | let src_idx = slot(src_idx); 72 | let dst_idx = slot(dst_idx); 73 | format!("vectors[{dst_idx}] = vectors[{src_idx}].clone_buffer();") 74 | } 75 | Cmd::Iter { idx } => { 76 | format!("let _: u32 = vectors[{}].iter().fold(0, |a, b| a.wrapping_add(**b));", slot(idx)) 77 | } 78 | Cmd::IterMut { idx } => { 79 | format!("for elt in vectors[{}].iter_mut() {{ *elt = Box::new(1337); }}", slot(idx)) 80 | } 81 | Cmd::EnsureUnique { idx } => { 82 | if vec_type == "UniqueVector" { 83 | return String::new(); 84 | } 85 | format!("vectors[{}].ensure_unique();", slot(idx)) 86 | } 87 | Cmd::Append { src_idx, dst_idx } => { 88 | let a = format!("let mut v = take(&mut vectors[{}]);", slot(src_idx)); 89 | let b = format!("vectors[{}].append(&mut v);", slot(dst_idx)); 90 | format!("{a}\n {b}") 91 | } 92 | Cmd::WithCapacity { idx, cap } => { 93 | format!("vectors[{}] = {vec_type}::with_capacity({});", slot(idx), cap % 1024) 94 | } 95 | Cmd::FromSlice { src_idx, dst_idx } => { 96 | format!("vectors[{}] = {vec_type}::from_slice(vectors[{}].as_slice());", slot(dst_idx), slot(src_idx)) 97 | } 98 | Cmd::AsMutSlice { idx } => { 99 | format!("for v in vectors[{}].as_mut_slice() {{ *v = Box::new(42); }};", slot(idx)) 100 | } 101 | Cmd::First { idx } => { 102 | format!("let _ = vectors[{}].first();", slot(idx)) 103 | } 104 | Cmd::Last { idx } => { 105 | format!("let _ = vectors[{}].last();", slot(idx)) 106 | } 107 | Cmd::FirstMut { idx } => { 108 | format!("if let Some(elt) = vectors[{}].first_mut() {{ *elt = Box::new(1); }}", slot(idx)) 109 | } 110 | Cmd::LastMut { idx } => { 111 | format!("if let Some(elt) = vectors[{}].last_mut() {{ *elt = Box::new(2); }} ", slot(idx)) 112 | } 113 | Cmd::Reserve { idx, val } => { 114 | let val = val % 1024; 115 | let idx = slot(idx); 116 | format!("vectors[{idx}].reserve(reserve_max(vectors[{idx}].len(), {val}));") 117 | } 118 | Cmd::Convert { idx } => { 119 | let conv = match vec_type { 120 | "SharedVector" => "into_unique", 121 | "AtomicSharedVector" => "into_unique", 122 | "UniqueVector" => "into_shared", 123 | _ => panic!("unknwon type {vec_type}"), 124 | }; 125 | let inv = match vec_type { 126 | "SharedVector" => "into_shared", 127 | "AtomicSharedVector" => "into_shared", 128 | "UniqueVector" => "into_unique", 129 | _ => panic!("unknwon type {vec_type}"), 130 | }; 131 | let idx = slot(idx); 132 | let a = format!("let a = take(&mut vectors[{idx}]);"); 133 | let b = format!("vectors[{idx}] = a.{conv}().{inv}();"); 134 | format!("{a}\n {b}") 135 | } 136 | Cmd::Swap { idx, offsets } => { 137 | let a = format!("let vec = &mut vectors[{}];", slot(idx)); 138 | let b = format!("let len = vec.len();"); 139 | let c = format!("if !vec.is_empty() {{ vec.swap({} % len, {} % len) }}", offsets.0, offsets.1); 140 | format!("{a}\n {b}\n {c}") 141 | } 142 | Cmd::Remove { idx, offset } => { 143 | let a = format!("let vec = &mut vectors[{}];", slot(idx)); 144 | let b = format!("if !vec.is_empty() {{ vec.remove({offset} % vec.len()); }}"); 145 | format!("{a}\n {b}") 146 | } 147 | Cmd::SwapRemove { idx, offset } => { 148 | let a = format!("let vec = &mut vectors[{}];", slot(idx)); 149 | let b = format!("if !vec.is_empty() {{ vec.swap_remove({offset} % vec.len()); }}"); 150 | format!("{a}\n {b}") 151 | } 152 | Cmd::Insert { idx, offset, val } => { 153 | let a = format!("let len = vectors[{}].len();", slot(idx)); 154 | let b = format!("vectors[{}].insert({offset} % len.max(1), Box::new({val}));", slot(idx)); 155 | format!("{a}\n {b}") 156 | } 157 | Cmd::ShrinkTo { idx, cap } => { 158 | format!("vectors[{}].shrink_to({cap});", slot(idx)) 159 | } 160 | Cmd::ShrinkToFit { idx } => { 161 | format!("vectors[{}].shrink_to_fit();", slot(idx)) 162 | } 163 | Cmd::Drain { idx, start, count } => { 164 | let s1 = format!("let vec = &mut vectors[{}];", slot(idx)); 165 | let s2 = format!("let len = vec.len();"); 166 | let s3 = format!("let start = if len > 0 {{ {start} % len }} else {{ 0 }};"); 167 | let s4 = format!("let end = {}.min(len);", (start + (count % 5))); 168 | let s5 = format!("vectors[{}].drain(start..end);", slot(idx)); 169 | format!("{s1}\n {s2}\n {s3}\n {s4}\n {s5}") 170 | } 171 | Cmd::Splice { idx, start, rem_count, val, add_count } => { 172 | let s1 = format!("let vec = &mut vectors[{}];", slot(idx)); 173 | let s2 = format!("let len = vec.len();"); 174 | let s3 = format!("let start = if len > 0 {{ {start} % len }} else {{ 0 }};"); 175 | let s4 = format!("let end = (start + {}).min(len);", rem_count % 5); 176 | let s5 = format!("let items = vec![Box::new({val}); {}];", add_count % 10); 177 | let s6 = format!("vectors[{}].splice(start..end, items.into_iter());", slot(idx)); 178 | format!("{s1}\n {s2}\n {s3}\n {s4}\n {s5}\n {s6}") 179 | } 180 | Cmd::Retain { idx, bits } => { 181 | format!("let mut i = 0;\n vectors[{}].retain(&mut |val: &Box| {{ i += 1; {bits} & (1 << i.min(31)) != 0 }});", slot(idx)) 182 | } 183 | } 184 | } 185 | 186 | #[allow(unused)] 187 | pub fn cmds_to_src(vec_type: &str, cmds: &[Cmd]) { 188 | println!("// -------"); 189 | println!(" fn reserve_max(len: usize, additional: usize) -> usize {{"); 190 | println!(" additional.min(2048 - len.min(2048))"); 191 | println!(" }}"); 192 | println!(" fn take(place: &mut {vec_type}) -> {vec_type} {{"); 193 | println!(" std::mem::replace(place, {vec_type}::new())"); 194 | println!(" }}"); 195 | println!(" let mut vectors: [{vec_type}>; 4] = ["); 196 | println!(" {vec_type}::new(),"); 197 | println!(" {vec_type}::new(),"); 198 | println!(" {vec_type}::new(),"); 199 | println!(" {vec_type}::new(),"); 200 | println!(" ];"); 201 | for cmd in cmds { 202 | println!(" {}", cmd_to_string(vec_type, *cmd)); 203 | } 204 | println!("// -------"); 205 | } 206 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/shared_vector.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | 3 | use libfuzzer_sys::fuzz_target; 4 | use shared_vector::SharedVector; 5 | 6 | mod cmd; 7 | use cmd::*; 8 | 9 | fuzz_target!(|cmds: Vec| { 10 | 11 | let mut vectors: [SharedVector>; 4] = [ 12 | SharedVector::new(), 13 | SharedVector::new(), 14 | SharedVector::new(), 15 | SharedVector::new(), 16 | ]; 17 | 18 | for cmd in cmds { 19 | match cmd { 20 | Cmd::AddRef { src_idx, dst_idx } => { 21 | vectors[slot(dst_idx)] = vectors[slot(src_idx)].new_ref(); 22 | } 23 | Cmd::DropVec { idx } => { 24 | vectors[slot(idx)] = SharedVector::new(); 25 | } 26 | Cmd::Clear { idx } => { 27 | vectors[slot(idx)].clear(); 28 | } 29 | Cmd::Push { idx, val } => { 30 | vectors[slot(idx)].push(Box::new(val)); 31 | } 32 | Cmd::PushWithinCapacity { idx, val } => { 33 | let _ = vectors[slot(idx)].push_within_capacity(Box::new(val)); 34 | } 35 | Cmd::Pop { idx } => { 36 | vectors[slot(idx)].pop(); 37 | } 38 | Cmd::ExtendFromSlice { idx } => { 39 | vectors[slot(idx)].extend_from_slice(&[Box::new(1), Box::new(2), Box::new(3)]); 40 | } 41 | Cmd::CloneBuffer { src_idx, dst_idx } => { 42 | vectors[slot(dst_idx)] = vectors[slot(src_idx)].clone_buffer(); 43 | } 44 | Cmd::Iter { idx } => { 45 | let _: u32 = vectors[slot(idx)] 46 | .iter() 47 | .fold(0, |a, b| a.wrapping_add(**b)); 48 | } 49 | Cmd::IterMut { idx } => { 50 | for elt in vectors[slot(idx)].iter_mut() { *elt = Box::new(1337); }; 51 | } 52 | Cmd::EnsureUnique { idx } => { 53 | vectors[slot(idx)].ensure_unique(); 54 | } 55 | Cmd::Append { src_idx, dst_idx } => { 56 | let mut v = std::mem::replace(&mut vectors[slot(src_idx)], SharedVector::new()); 57 | vectors[slot(dst_idx)].append(&mut v); 58 | } 59 | Cmd::WithCapacity { idx, cap } => { 60 | vectors[slot(idx)] = SharedVector::with_capacity(cap % 1024); 61 | } 62 | Cmd::FromSlice { src_idx, dst_idx } => { 63 | vectors[slot(dst_idx)] = SharedVector::from_slice(vectors[slot(src_idx)].as_slice()); 64 | } 65 | Cmd::AsMutSlice { idx } => { 66 | for v in vectors[slot(idx)].as_mut_slice() { *v = Box::new(42); }; 67 | } 68 | Cmd::First { idx } => { 69 | let _ = vectors[slot(idx)].first(); 70 | } 71 | Cmd::Last { idx } => { 72 | let _ = vectors[slot(idx)].last(); 73 | } 74 | Cmd::FirstMut { idx } => { 75 | if let Some(elt) = vectors[slot(idx)].first_mut() { *elt = Box::new(1); } 76 | } 77 | Cmd::LastMut { idx } => { 78 | if let Some(elt) = vectors[slot(idx)].last_mut() { *elt = Box::new(2); } 79 | } 80 | Cmd::Reserve { idx, val } => { 81 | vectors[slot(idx)].reserve(reserve_max(vectors[slot(idx)].len(), val)); 82 | } 83 | Cmd::Convert { idx } => { 84 | let a = std::mem::replace(&mut vectors[slot(idx)], SharedVector::new()); 85 | vectors[slot(idx)] = a.into_unique().into_shared(); 86 | } 87 | Cmd::Swap { idx, offsets } => { 88 | let vec = &mut vectors[slot(idx)]; 89 | let len = vec.len(); 90 | if !vec.is_empty() { 91 | vec.swap(offsets.0 % len, offsets.1 % len) 92 | } 93 | } 94 | Cmd::Remove { .. } => { 95 | // TODO 96 | } 97 | Cmd::SwapRemove { idx, offset } => { 98 | let vec = &mut vectors[slot(idx)]; 99 | if vec.is_empty() { 100 | return; 101 | } 102 | vec.swap_remove(offset % vec.len()); 103 | } 104 | Cmd::Insert { idx, offset, val } => { 105 | // TODO 106 | } 107 | Cmd::ShrinkTo { idx, cap } => { 108 | vectors[slot(idx)].shrink_to(cap); 109 | } 110 | Cmd::ShrinkToFit { idx } => { 111 | vectors[slot(idx)].shrink_to_fit(); 112 | } 113 | Cmd::Drain { .. } => { 114 | // TODO 115 | } 116 | Cmd::Splice { .. } => { 117 | // TODO 118 | } 119 | Cmd::Retain { .. } => { 120 | // let mut i = 0; 121 | // vectors[slot(idx)].retain(&mut |_: &Box| { i += 1; bits & (1 << i.min(31)) != 0 }); 122 | } 123 | } 124 | } 125 | }); 126 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/unique_vector.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | 3 | use libfuzzer_sys::fuzz_target; 4 | use shared_vector::Vector; 5 | 6 | mod cmd; 7 | use cmd::*; 8 | 9 | fuzz_target!(|cmds: Vec| { 10 | 11 | //fn print() { 12 | // use Cmd::*; 13 | // cmds_to_src("Vector", &[]); 14 | //} 15 | //print(); 16 | 17 | let mut vectors: [Vector>; 4] = [ 18 | Vector::new(), 19 | Vector::new(), 20 | Vector::new(), 21 | Vector::new(), 22 | ]; 23 | 24 | for cmd in cmds { 25 | match cmd { 26 | Cmd::AddRef { .. } => { 27 | //vectors[slot(dst_idx)] = vectors[slot(src_idx)].new_ref(); 28 | } 29 | Cmd::DropVec { idx } => { 30 | vectors[slot(idx)] = Vector::new(); 31 | } 32 | Cmd::Clear { idx } => { 33 | vectors[slot(idx)].clear(); 34 | } 35 | Cmd::Push { idx, val } => { 36 | vectors[slot(idx)].push(Box::new(val)); 37 | } 38 | Cmd::PushWithinCapacity { idx, val } => { 39 | let _ = vectors[slot(idx)].push_within_capacity(Box::new(val)); 40 | } 41 | Cmd::Pop { idx } => { 42 | vectors[slot(idx)].pop(); 43 | } 44 | Cmd::ExtendFromSlice { idx } => { 45 | vectors[slot(idx)].extend_from_slice(&[Box::new(1), Box::new(2), Box::new(3)]); 46 | } 47 | Cmd::CloneBuffer { src_idx, dst_idx } => { 48 | vectors[slot(dst_idx)] = vectors[slot(src_idx)].clone_buffer(); 49 | } 50 | Cmd::Iter { idx } => { 51 | let _: u32 = vectors[slot(idx)] 52 | .iter() 53 | .fold(0, |a, b| a.wrapping_add(**b)); 54 | } 55 | Cmd::IterMut { idx } => { 56 | for elt in vectors[slot(idx)].iter_mut() { *elt = Box::new(1337); }; 57 | } 58 | Cmd::EnsureUnique { .. } => {} 59 | Cmd::Append { src_idx, dst_idx } => { 60 | let mut v = std::mem::replace(&mut vectors[slot(src_idx)], Vector::new()); 61 | vectors[slot(dst_idx)].append(&mut v); 62 | } 63 | Cmd::WithCapacity { idx, cap } => { 64 | vectors[slot(idx)] = Vector::with_capacity(cap % 1024); 65 | } 66 | Cmd::FromSlice { src_idx, dst_idx } => { 67 | vectors[slot(dst_idx)] = Vector::from_slice(vectors[slot(src_idx)].as_slice()); 68 | } 69 | Cmd::AsMutSlice { idx } => { 70 | for v in vectors[slot(idx)].as_mut_slice() { *v = Box::new(42); }; 71 | } 72 | Cmd::First { idx } => { 73 | let _ = vectors[slot(idx)].first(); 74 | } 75 | Cmd::Last { idx } => { 76 | let _ = vectors[slot(idx)].last(); 77 | } 78 | Cmd::FirstMut { idx } => { 79 | if let Some(elt) = vectors[slot(idx)].first_mut() { *elt = Box::new(1); } 80 | } 81 | Cmd::LastMut { idx } => { 82 | if let Some(elt) = vectors[slot(idx)].last_mut() { *elt = Box::new(2); } 83 | } 84 | Cmd::Reserve { idx, val } => { 85 | vectors[slot(idx)].reserve(reserve_max(vectors[slot(idx)].len(), val)); 86 | } 87 | Cmd::Convert { idx } => { 88 | let a = std::mem::replace(&mut vectors[slot(idx)], Vector::new()); 89 | vectors[slot(idx)] = a.into_shared().into_unique(); 90 | } 91 | Cmd::Swap { idx, offsets } => { 92 | let vec = &mut vectors[slot(idx)]; 93 | let len = vec.len(); 94 | if !vec.is_empty() { 95 | vec.swap(offsets.0 % len, offsets.1 % len) 96 | } 97 | } 98 | Cmd::Remove { idx, offset } => { 99 | let vec = &mut vectors[slot(idx)]; 100 | if vec.is_empty() { 101 | return; 102 | } 103 | vec.remove(offset % vec.len()); 104 | } 105 | Cmd::SwapRemove { idx, offset } => { 106 | let vec = &mut vectors[slot(idx)]; 107 | if vec.is_empty() { 108 | return; 109 | } 110 | vec.swap_remove(offset % vec.len()); 111 | } 112 | Cmd::Insert { idx, offset, val } => { 113 | let len = vectors[slot(idx)].len(); 114 | vectors[slot(idx)].insert(offset % len.max(1), Box::new(val)); 115 | } 116 | Cmd::ShrinkTo { idx, cap } => { 117 | vectors[slot(idx)].shrink_to(cap); 118 | } 119 | Cmd::ShrinkToFit { idx } => { 120 | vectors[slot(idx)].shrink_to_fit(); 121 | } 122 | Cmd::Drain { idx, start, count } => { 123 | let vec = &mut vectors[slot(idx)]; 124 | let len = vec.len(); 125 | let start = if len > 0 { start % len} else { 0 }; 126 | let end = (start + (count % 5)).min(len); 127 | vectors[slot(idx)].drain(start..end); 128 | } 129 | Cmd::Splice { idx, start, rem_count, val, add_count } => { 130 | let vec = &mut vectors[slot(idx)]; 131 | let len = vec.len(); 132 | let start = if len > 0 { start % len } else { 0 }; 133 | let end = (start + (rem_count % 5)).min(len); 134 | let items = vec![Box::new(val); add_count % 10]; 135 | vectors[slot(idx)].splice(start..end, items.into_iter()); 136 | } 137 | Cmd::Retain { idx, bits } => { 138 | let mut i = 0; 139 | vectors[slot(idx)].retain(&mut |_: &Box| { i += 1; bits & (1 << i.min(31)) != 0 }); 140 | } 141 | } 142 | } 143 | }); 144 | -------------------------------------------------------------------------------- /images/chunked-vector.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /images/header-vector.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /images/shared-vector.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /images/unique-vector.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/drain.rs: -------------------------------------------------------------------------------- 1 | // Most of the code in this file is copied from std::Vec's Drain implementation. 2 | 3 | use core::fmt; 4 | use core::iter::FusedIterator; 5 | use core::mem; 6 | use core::ptr::{self, NonNull}; 7 | use core::slice; 8 | 9 | use super::RawVector; 10 | 11 | /// A draining iterator for `Vector`. 12 | /// 13 | /// This `struct` is created by [`Vector::drain`]. 14 | /// See its documentation for more. 15 | pub struct Drain<'a, T: 'a> { 16 | /// Index of tail to preserve 17 | pub(super) tail_start: usize, 18 | /// Length of tail 19 | pub(super) tail_len: usize, 20 | /// Current remaining range to remove 21 | pub(super) iter: slice::Iter<'a, T>, 22 | pub(super) vec: NonNull>, 23 | } 24 | 25 | impl fmt::Debug for Drain<'_, T> { 26 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 27 | f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() 28 | } 29 | } 30 | 31 | impl<'a, T> Drain<'a, T> { 32 | /// Returns the remaining items of this iterator as a slice. 33 | /// 34 | /// # Examples 35 | /// 36 | /// ``` 37 | /// let mut vec = vec!['a', 'b', 'c']; 38 | /// let mut drain = vec.drain(..); 39 | /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']); 40 | /// let _ = drain.next().unwrap(); 41 | /// assert_eq!(drain.as_slice(), &['b', 'c']); 42 | /// ``` 43 | #[must_use] 44 | pub fn as_slice(&self) -> &[T] { 45 | self.iter.as_slice() 46 | } 47 | } 48 | 49 | impl<'a, T> AsRef<[T]> for Drain<'a, T> { 50 | fn as_ref(&self) -> &[T] { 51 | self.as_slice() 52 | } 53 | } 54 | 55 | unsafe impl Sync for Drain<'_, T> {} 56 | unsafe impl Send for Drain<'_, T> {} 57 | 58 | impl Iterator for Drain<'_, T> { 59 | type Item = T; 60 | 61 | #[inline] 62 | fn next(&mut self) -> Option { 63 | self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) }) 64 | } 65 | 66 | fn size_hint(&self) -> (usize, Option) { 67 | self.iter.size_hint() 68 | } 69 | } 70 | 71 | impl DoubleEndedIterator for Drain<'_, T> { 72 | #[inline] 73 | fn next_back(&mut self) -> Option { 74 | self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) }) 75 | } 76 | } 77 | 78 | impl Drop for Drain<'_, T> { 79 | fn drop(&mut self) { 80 | /// Moves back the un-`Drain`ed elements to restore the original `Vec`. 81 | struct DropGuard<'r, 'a, T>(&'r mut Drain<'a, T>); 82 | 83 | impl<'r, 'a, T> Drop for DropGuard<'r, 'a, T> { 84 | fn drop(&mut self) { 85 | if self.0.tail_len > 0 { 86 | unsafe { 87 | let source_vec = self.0.vec.as_mut(); 88 | // memmove back untouched tail, update to new length 89 | let start = source_vec.len(); 90 | let tail = self.0.tail_start; 91 | if tail != start { 92 | let src = source_vec.as_ptr().add(tail); 93 | let dst = source_vec.as_mut_ptr().add(start); 94 | ptr::copy(src, dst, self.0.tail_len); 95 | } 96 | source_vec.header.len = (start + self.0.tail_len) as u32; 97 | } 98 | } 99 | } 100 | } 101 | 102 | let iter = mem::replace(&mut self.iter, (&mut []).iter()); 103 | let drop_len = iter.len(); 104 | 105 | let mut vec = self.vec; 106 | 107 | // if T::IS_ZST { 108 | // // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount. 109 | // // this can be achieved by manipulating the Vec length instead of moving values out from `iter`. 110 | // unsafe { 111 | // let vec = vec.as_mut(); 112 | // let old_len = vec.len(); 113 | // vec.set_len(old_len + drop_len + self.tail_len); 114 | // vec.truncate(old_len + self.tail_len); 115 | // } 116 | // return; 117 | // } 118 | 119 | // ensure elements are moved back into their appropriate places, even when drop_in_place panics 120 | let _guard = DropGuard(self); 121 | 122 | if drop_len == 0 { 123 | return; 124 | } 125 | 126 | // as_slice() must only be called when iter.len() is > 0 because 127 | // it also gets touched by vec::Splice which may turn it into a dangling pointer 128 | // which would make it and the vec pointer point to different allocations which would 129 | // lead to invalid pointer arithmetic below. 130 | let drop_ptr = iter.as_slice().as_ptr(); 131 | 132 | unsafe { 133 | // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place 134 | // a pointer with mutable provenance is necessary. Therefore we must reconstruct 135 | // it from the original vec but also avoid creating a &mut to the front since that could 136 | // invalidate raw pointers to it which some unsafe code might rely on. 137 | let vec_ptr = vec.as_mut().as_mut_ptr(); 138 | let drop_offset = sub_ptr(drop_ptr, vec_ptr); 139 | let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len); 140 | ptr::drop_in_place(to_drop); 141 | } 142 | } 143 | } 144 | 145 | fn sub_ptr(a: *const T, b: *const T) -> usize { 146 | debug_assert!(a >= b); 147 | 148 | (a as usize - b as usize) / mem::size_of::() 149 | } 150 | 151 | impl FusedIterator for Drain<'_, T> {} 152 | 153 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(feature = "nightly", feature(allocator_api))] 2 | #![doc = include_str!("../README.md")] 3 | 4 | mod raw; 5 | mod shared; 6 | mod vector; 7 | mod drain; 8 | mod splice; 9 | 10 | pub use raw::{AtomicRefCount, BufferSize, DefaultRefCount, RefCount}; 11 | pub use shared::{AtomicSharedVector, RefCountedVector, SharedVector}; 12 | pub use vector::{Vector, RawVector}; 13 | 14 | pub mod alloc { 15 | pub use allocator_api2::alloc::{AllocError, Allocator, Global}; 16 | } 17 | 18 | pub(crate) fn grow_amortized(len: usize, additional: usize) -> usize { 19 | let required = len.saturating_add(additional); 20 | let cap = len.saturating_add(len).max(required).max(8); 21 | 22 | const MAX: usize = BufferSize::MAX as usize; 23 | 24 | if cap > MAX { 25 | if required <= MAX { 26 | return required; 27 | } 28 | 29 | panic!("Required allocation size is too large"); 30 | } 31 | 32 | cap 33 | } 34 | 35 | #[macro_export] 36 | macro_rules! vector { 37 | (@one@ $x:expr) => (1usize); 38 | ($elem:expr; $n:expr) => ({ 39 | $crate::Vector::from_elem($elem, $n) 40 | }); 41 | ($($x:expr),*$(,)*) => ({ 42 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 43 | let mut vec = $crate::Vector::with_capacity(count); 44 | $(vec.push($x);)* 45 | vec 46 | }); 47 | ([$($x:expr),*$(,)*] in $allocator:expr) => ({ 48 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 49 | let mut vec = $crate::Vector::try_with_capacity_in(count, $allocator).unwrap(); 50 | $(vec.push($x);)* 51 | vec 52 | }); 53 | ([$x:expr;$n:expr] in $allocator:expr) => ({ 54 | let mut vec = $crate::Vector::try_with_capacity_in($n, $allocator).unwrap(); 55 | for _ in 0..$n { vec.push($x.clone()); } 56 | vec 57 | }); 58 | } 59 | 60 | #[macro_export] 61 | macro_rules! rc_vector { 62 | ($elem:expr; $n:expr) => ({ 63 | let mut vec = $crate::SharedVector::with_capacity($n); 64 | for _ in 0..$n { vec.push($elem.clone()); } 65 | vec 66 | }); 67 | ($($x:expr),*$(,)*) => ({ 68 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 69 | let mut vec = $crate::SharedVector::with_capacity(count); 70 | $(vec.push($x);)* 71 | vec 72 | }); 73 | ([$($x:expr),*$(,)*] in $allocator:expr) => ({ 74 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 75 | let mut vec = $crate::SharedVector::try_with_capacity_in(count, $allocator).unwrap(); 76 | $(vec.push($x);)* 77 | vec 78 | }); 79 | ([$elem:expr;$n:expr] in $allocator:expr) => ({ 80 | let mut vec = $crate::SharedVector::try_with_capacity_in($n, $allocator).unwrap(); 81 | for _ in 0..$n { vec.push($elem.clone()); } 82 | vec 83 | }); 84 | } 85 | 86 | #[macro_export] 87 | macro_rules! arc_vector { 88 | ($elem:expr; $n:expr) => ({ 89 | let mut vec = $crate::AtomicSharedVector::with_capacity($n); 90 | for _ in 0..$n { vec.push($elem.clone()); } 91 | vec 92 | }); 93 | ($($x:expr),*$(,)*) => ({ 94 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 95 | let mut vec = $crate::AtomicSharedVector::with_capacity(count); 96 | $(vec.push($x);)* 97 | vec 98 | }); 99 | ([$($x:expr),*$(,)*] in $allocator:expr) => ({ 100 | let count = 0usize $(+ $crate::vector!(@one@ $x))*; 101 | let mut vec = $crate::AtomicSharedVector::try_with_capacity_in(count, $allocator).unwrap(); 102 | $(vec.push($x);)* 103 | vec 104 | }); 105 | ([$elem:expr;$n:expr] in $allocator:expr) => ({ 106 | let mut vec = $crate::AtomicSharedVector::try_with_capacity_in($n, $allocator).unwrap(); 107 | for _ in 0..$n { vec.push($elem.clone()); } 108 | vec 109 | }); 110 | } 111 | 112 | #[test] 113 | fn vector_macro() { 114 | pub use allocator_api2::alloc::{Allocator, Global}; 115 | 116 | let v1: Vector = vector![0, 1, 2, 3, 4, 5]; 117 | let v2: Vector = vector![2; 4]; 118 | let v3: Vector = vector!([6, 7] in Global); 119 | assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4, 5]); 120 | assert_eq!(v2.as_slice(), &[2, 2, 2, 2]); 121 | assert_eq!(v3.as_slice(), &[6, 7]); 122 | 123 | let v1: SharedVector = rc_vector![0, 1, 2, 3, 4, 5]; 124 | let v2: SharedVector = rc_vector![3; 5]; 125 | let v3: SharedVector = rc_vector!([4; 3] in Global); 126 | assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4, 5]); 127 | assert_eq!(v2.as_slice(), &[3, 3, 3, 3, 3]); 128 | assert_eq!(v3.as_slice(), &[4, 4, 4]); 129 | 130 | let v1: AtomicSharedVector = arc_vector![0, 1, 2, 3, 4, 5]; 131 | let v2: AtomicSharedVector = arc_vector![1; 4]; 132 | let v3: AtomicSharedVector = arc_vector![[3, 2, 1] in Global]; 133 | assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4, 5]); 134 | assert_eq!(v2.as_slice(), &[1, 1, 1, 1]); 135 | assert_eq!(v3.as_slice(), &[3, 2, 1]); 136 | } 137 | -------------------------------------------------------------------------------- /src/raw.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::Layout; 2 | use core::cell::UnsafeCell; 3 | use core::marker::PhantomData; 4 | use core::mem; 5 | use core::ptr::{self, NonNull}; 6 | use core::sync::atomic::{ 7 | AtomicI32, 8 | Ordering::{Relaxed, Release}, 9 | }; 10 | 11 | pub use crate::alloc::{AllocError, Allocator, Global}; 12 | 13 | pub type BufferSize = u32; 14 | 15 | pub trait RefCount { 16 | unsafe fn add_ref(&self); 17 | unsafe fn release_ref(&self) -> bool; 18 | fn new(count: i32) -> Self; 19 | fn get(&self) -> i32; 20 | } 21 | 22 | pub struct DefaultRefCount(UnsafeCell); 23 | pub struct AtomicRefCount(AtomicI32); 24 | 25 | #[repr(C)] 26 | #[derive(Clone)] 27 | pub struct VecHeader { 28 | pub cap: BufferSize, 29 | pub len: BufferSize, 30 | } 31 | 32 | impl VecHeader { 33 | fn remaining_capacity(&self) -> u32 { self.cap - self.len } 34 | } 35 | 36 | #[repr(C)] 37 | pub struct Header { 38 | pub(crate) vec: VecHeader, 39 | pub(crate) ref_count: R, 40 | pub(crate) allocator: A, 41 | } 42 | 43 | impl RefCount for AtomicRefCount { 44 | #[inline] 45 | unsafe fn add_ref(&self) { 46 | // Relaxed ordering is OK since the presence of the existing reference 47 | // prevents threads from deleting the buffer. 48 | self.0.fetch_add(1, Relaxed); 49 | } 50 | 51 | #[inline] 52 | unsafe fn release_ref(&self) -> bool { 53 | self.0.fetch_sub(1, Release) == 1 54 | } 55 | 56 | #[inline] 57 | fn new(val: i32) -> Self { 58 | AtomicRefCount(AtomicI32::new(val)) 59 | } 60 | 61 | #[inline] 62 | fn get(&self) -> i32 { 63 | self.0.load(Relaxed) 64 | } 65 | } 66 | 67 | impl RefCount for DefaultRefCount { 68 | #[inline] 69 | unsafe fn add_ref(&self) { 70 | *self.0.get() += 1; 71 | } 72 | 73 | #[inline] 74 | unsafe fn release_ref(&self) -> bool { 75 | let count = self.0.get(); 76 | *count -= 1; 77 | *count == 0 78 | } 79 | 80 | #[inline] 81 | fn new(val: i32) -> Self { 82 | DefaultRefCount(UnsafeCell::new(val)) 83 | } 84 | 85 | #[inline] 86 | fn get(&self) -> i32 { 87 | unsafe { *self.0.get() } 88 | } 89 | } 90 | 91 | #[inline] 92 | pub unsafe fn data_ptr(header: NonNull
) -> *mut T { 93 | (header.as_ptr() as *mut u8).add(header_size::()) as *mut T 94 | } 95 | 96 | pub(crate) const fn header_size() -> usize { 97 | let a = mem::align_of::(); 98 | let s = mem::size_of::
(); 99 | let size = if a > s { a } else { s }; 100 | 101 | // Favor L1 cache line alignment for large structs. 102 | let min = if mem::size_of::() < 64 { 16 } else { 64 }; 103 | if size < min { 104 | min 105 | } else { 106 | size 107 | } 108 | } 109 | 110 | pub fn buffer_layout(n: usize) -> Result { 111 | let size = mem::size_of::().checked_mul(n).ok_or(AllocError)?; 112 | let align = mem::align_of::
().max(mem::align_of::()); 113 | let align = if mem::size_of::() < 64 { 114 | align 115 | } else { 116 | align.max(64) 117 | }; 118 | let header_size = header_size::(); 119 | 120 | Layout::from_size_align(header_size + size, align).map_err(|_| AllocError) 121 | } 122 | 123 | pub unsafe fn drop_items(mut ptr: *mut T, count: u32) { 124 | for _ in 0..count { 125 | core::ptr::drop_in_place(ptr); 126 | ptr = ptr.add(1); 127 | } 128 | } 129 | 130 | pub unsafe fn dealloc(mut ptr: NonNull>, cap: BufferSize) { 131 | let layout = buffer_layout::, T>(cap as usize).unwrap(); 132 | let allocator = ptr::read(&ptr.as_mut().allocator); 133 | allocator.deallocate(ptr.cast::(), layout); 134 | } 135 | 136 | #[cold] 137 | pub fn alloc_error_cold() -> AllocError { 138 | AllocError 139 | } 140 | 141 | #[repr(transparent)] 142 | pub struct HeaderBuffer { 143 | pub header: NonNull>, 144 | _marker: PhantomData, 145 | } 146 | 147 | impl HeaderBuffer { 148 | pub unsafe fn from_raw(ptr: NonNull>) -> Self { 149 | HeaderBuffer { 150 | header: ptr, 151 | _marker: PhantomData, 152 | } 153 | } 154 | 155 | #[inline] 156 | pub unsafe fn as_mut(&mut self) -> &mut Header { 157 | self.header.as_mut() 158 | } 159 | 160 | #[inline] 161 | pub unsafe fn as_ref(&self) -> &Header { 162 | self.header.as_ref() 163 | } 164 | 165 | #[inline] 166 | pub unsafe fn as_ptr(&self) -> *mut Header { 167 | self.header.as_ptr() 168 | } 169 | 170 | #[inline] 171 | pub fn allocator(&self) -> &A { 172 | unsafe { &self.header.as_ref().allocator } 173 | } 174 | } 175 | 176 | pub unsafe fn move_data(src_data: *mut T, src_vec: &mut VecHeader, dst_data: *mut T, dst_vec: &mut VecHeader) { 177 | debug_assert!(dst_vec.cap - dst_vec.len >= src_vec.len); 178 | let len = src_vec.len; 179 | if len > 0 { 180 | unsafe { 181 | let dst = dst_data.add(dst_vec.len as usize); 182 | 183 | let inital_dst_len = dst_vec.len; 184 | dst_vec.len = inital_dst_len + len; 185 | src_vec.len = 0; 186 | 187 | ptr::copy_nonoverlapping(src_data, dst, len as usize); 188 | } 189 | } 190 | } 191 | 192 | pub unsafe fn extend_from_slice_assuming_capacity(data: *mut T, vec: &mut VecHeader, slice: &[T]) 193 | where 194 | T: Clone, 195 | { 196 | let len = slice.len() as u32; 197 | debug_assert!(len <= vec.remaining_capacity()); 198 | 199 | let inital_len = vec.len; 200 | 201 | let mut ptr = data.add(inital_len as usize); 202 | 203 | for item in slice { 204 | ptr::write(ptr, item.clone()); 205 | ptr = ptr.add(1) 206 | } 207 | 208 | vec.len += len; 209 | } 210 | 211 | // Returns true if the iterator was emptied. 212 | pub unsafe fn extend_within_capacity>(data: *mut T, vec: &mut VecHeader, iter: &mut I) -> bool { 213 | let inital_len = vec.len; 214 | 215 | let mut ptr = data.add(inital_len as usize); 216 | 217 | let mut count = 0; 218 | let max = vec.remaining_capacity(); 219 | let mut finished = false; 220 | loop { 221 | if count == max { 222 | break; 223 | } 224 | if let Some(item) = iter.next() { 225 | ptr::write(ptr, item); 226 | ptr = ptr.add(1); 227 | count += 1; 228 | } else { 229 | finished = true; 230 | break; 231 | } 232 | } 233 | 234 | vec.len += count; 235 | return finished; 236 | } 237 | 238 | #[inline] 239 | pub unsafe fn pop(data: *mut T, vec: &mut VecHeader) -> Option { 240 | if vec.len == 0 { 241 | return None; 242 | } 243 | 244 | vec.len -= 1; 245 | 246 | Some(ptr::read(data.add(vec.len as usize))) 247 | } 248 | 249 | #[inline(always)] 250 | pub unsafe fn push_assuming_capacity(data: *mut T, vec: &mut VecHeader, val: T) { 251 | let dst = data.add(vec.len as usize); 252 | ptr::write(dst, val); 253 | vec.len += 1; 254 | } 255 | 256 | pub unsafe fn clear(data: *mut T, vec: &mut VecHeader) { 257 | drop_items(data, vec.len); 258 | vec.len = 0; 259 | } 260 | 261 | pub fn assert_ref_count_layout() { 262 | assert_eq!(mem::size_of::(), mem::size_of::()); 263 | assert_eq!(mem::align_of::(), mem::align_of::()); 264 | } 265 | 266 | #[inline(never)] 267 | pub fn allocate_header_buffer( 268 | mut cap: usize, 269 | allocator: &A, 270 | ) -> Result<(NonNull, usize), AllocError> 271 | where 272 | A: Allocator, 273 | { 274 | if cap == 0 { 275 | cap = 16; 276 | } 277 | 278 | if cap > BufferSize::MAX as usize { 279 | return Err(alloc_error_cold()); 280 | } 281 | 282 | let layout = buffer_layout::, T>(cap)?; 283 | let allocation = allocator.allocate(layout)?; 284 | let items_size = allocation.len() - header_size::, T>(); 285 | let size_of = mem::size_of::(); 286 | let real_capacity = if size_of == 0 { 287 | cap 288 | } else { 289 | items_size / size_of 290 | }; 291 | 292 | Ok((allocation.cast(), real_capacity)) 293 | } 294 | 295 | pub unsafe fn header_from_data_ptr(data_ptr: NonNull) -> NonNull { 296 | NonNull::new_unchecked((data_ptr.as_ptr() as *mut u8).sub(header_size::()) as *mut H) 297 | } 298 | 299 | #[test] 300 | fn buffer_layout_alignemnt() { 301 | type B = Box; 302 | let layout = buffer_layout::, B>(2).unwrap(); 303 | assert_eq!(layout.align(), mem::size_of::()); 304 | 305 | let atomic_layout = buffer_layout::, B>(2).unwrap(); 306 | 307 | assert_eq!(layout, atomic_layout); 308 | } 309 | -------------------------------------------------------------------------------- /src/shared.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | use core::ops::{Deref, DerefMut, Index, IndexMut}; 3 | use core::ptr::NonNull; 4 | use core::{mem, ptr}; 5 | use core::sync::atomic::Ordering; 6 | 7 | use crate::raw; 8 | use crate::alloc::{AllocError, Allocator, Global}; 9 | use crate::raw::{BufferSize, HeaderBuffer}; 10 | use crate::vector::{Vector, RawVector}; 11 | use crate::{grow_amortized, AtomicRefCount, DefaultRefCount, RefCount}; 12 | 13 | /// A heap allocated, atomically reference counted, immutable contiguous buffer containing elements of type `T`. 14 | /// 15 | /// 16 | /// 17 | /// See [RefCountedVector]. 18 | pub type AtomicSharedVector = RefCountedVector; 19 | 20 | /// A heap allocated, reference counted, immutable contiguous buffer containing elements of type `T`. 21 | /// 22 | /// 23 | /// 24 | /// See [RefCountedVector]. 25 | pub type SharedVector = RefCountedVector; 26 | 27 | /// A heap allocated, reference counted, immutable contiguous buffer containing elements of type `T`. 28 | /// 29 | /// 30 | /// 31 | /// Similar in principle to `Arc<[T]>`. It can be converted into a `Vector` for 32 | /// free if there is only a single reference to the RefCountedVector alive. 33 | /// 34 | /// # Copy-on-write "Immutable" vectors 35 | /// 36 | /// This type contains mutable methods like `push` and `pop`. These internally allocate a new buffer 37 | /// if the buffer is not unique (there are more than one reference to it). When there is a single reference, 38 | /// these mutable operation simply update the existing buffer. 39 | /// 40 | /// In other words, this type behaves like an [immutable (or persistent) data structure](https://en.wikipedia.org/wiki/Persistent_data_structure) 41 | /// Actual mutability only happens under the hood as an optimization when a single reference exists. 42 | #[repr(transparent)] 43 | pub struct RefCountedVector { 44 | pub(crate) inner: HeaderBuffer, 45 | } 46 | 47 | impl RefCountedVector { 48 | /// Creates an empty shared buffer without allocating memory. 49 | #[inline] 50 | pub fn new() -> RefCountedVector { 51 | Self::try_with_capacity_in(0, Global).unwrap() 52 | } 53 | 54 | /// Constructs a new, empty vector with at least the specified capacity. 55 | #[inline] 56 | pub fn with_capacity(cap: usize) -> RefCountedVector { 57 | Self::try_with_capacity_in(cap, Global).unwrap() 58 | } 59 | 60 | /// Clones the contents of a slice into a new vector. 61 | #[inline] 62 | pub fn from_slice(slice: &[T]) -> RefCountedVector 63 | where 64 | T: Clone, 65 | { 66 | Self::try_from_slice_in(slice, Global).unwrap() 67 | } 68 | } 69 | 70 | impl RefCountedVector { 71 | /// Creates an empty vector without allocating memory. 72 | pub fn new_in(allocator: A) -> Self { 73 | Self::try_with_capacity_in(0, allocator).unwrap() 74 | } 75 | 76 | /// Creates an empty pre-allocated vector with a given storage capacity. 77 | pub fn with_capacity_in(cap: usize, allocator: A) -> Self { 78 | Self::try_with_capacity_in(cap, allocator).unwrap() 79 | } 80 | 81 | /// Tries to construct a new, empty vector with at least the specified capacity. 82 | #[inline] 83 | pub fn try_with_capacity_in(cap: usize, allocator: A) -> Result { 84 | raw::assert_ref_count_layout::(); 85 | unsafe { 86 | let (ptr, cap) = raw::allocate_header_buffer::(cap, &allocator)?; 87 | 88 | ptr::write( 89 | ptr.cast().as_ptr(), 90 | raw::Header { 91 | vec: raw::VecHeader { 92 | cap: cap as BufferSize, 93 | len: 0, 94 | }, 95 | ref_count: R::new(1), 96 | allocator, 97 | }, 98 | ); 99 | 100 | Ok(RefCountedVector { 101 | inner: HeaderBuffer::from_raw(ptr.cast()), 102 | }) 103 | } 104 | } 105 | 106 | pub fn try_from_slice_in(slice: &[T], allocator: A) -> Result where T: Clone { 107 | let mut v = Self::try_with_capacity_in(slice.len(), allocator)?; 108 | 109 | unsafe { 110 | raw::extend_from_slice_assuming_capacity(v.data_ptr(), v.vec_header_mut(), slice); 111 | } 112 | 113 | Ok(v) 114 | } 115 | 116 | /// Returns `true` if the vector contains no elements. 117 | #[inline] 118 | pub fn is_empty(&self) -> bool { 119 | self.vec_header().len == 0 120 | } 121 | 122 | /// Returns the number of elements in the vector, also referred to as its ‘length’. 123 | #[inline] 124 | pub fn len(&self) -> usize { 125 | self.vec_header().len as usize 126 | } 127 | 128 | /// Returns the total number of elements the vector can hold without reallocating. 129 | #[inline] 130 | pub fn capacity(&self) -> usize { 131 | self.vec_header().cap as usize 132 | } 133 | 134 | /// Returns number of elements that can be added without reallocating. 135 | #[inline] 136 | pub fn remaining_capacity(&self) -> usize { 137 | let h = self.vec_header(); 138 | (h.cap - h.len) as usize 139 | } 140 | 141 | /// Returns a reference to the underlying allocator. 142 | pub fn allocator(&self) -> &A { 143 | self.inner.allocator() 144 | } 145 | 146 | /// Creates a new reference without allocating. 147 | /// 148 | /// Equivalent to `Clone::clone`. 149 | #[inline] 150 | pub fn new_ref(&self) -> Self { 151 | unsafe { 152 | self.inner.as_ref().ref_count.add_ref(); 153 | RefCountedVector { 154 | inner: HeaderBuffer::from_raw(self.inner.header) 155 | } 156 | } 157 | } 158 | 159 | /// Extracts a slice containing the entire vector. 160 | #[inline] 161 | pub fn as_slice(&self) -> &[T] { 162 | unsafe { 163 | core::slice::from_raw_parts(self.data_ptr(), self.len()) 164 | } 165 | } 166 | 167 | /// Returns true if this is the only existing handle to the buffer. 168 | /// 169 | /// When this function returns true, mutable methods and converting to a `Vector` 170 | /// is very fast (does not involve additional memory allocations or copies). 171 | #[inline] 172 | pub fn is_unique(&self) -> bool { 173 | unsafe { self.inner.as_ref().ref_count.get() == 1 } 174 | } 175 | 176 | /// Clears the vector, removing all values. 177 | pub fn clear(&mut self) 178 | where 179 | A: Clone, 180 | { 181 | if self.is_unique() { 182 | unsafe { 183 | raw::clear(self.data_ptr(), self.vec_header_mut()); 184 | } 185 | return; 186 | } 187 | 188 | *self = 189 | Self::try_with_capacity_in(self.capacity(), self.inner.allocator().clone()).unwrap(); 190 | } 191 | 192 | /// Returns true if the two vectors share the same underlying storage. 193 | pub fn ptr_eq(&self, other: &Self) -> bool { 194 | self.inner.header == other.inner.header 195 | } 196 | 197 | /// Allocates a duplicate of this buffer (infallible). 198 | pub fn copy_buffer(&self) -> Self 199 | where 200 | T: Copy, 201 | A: Clone, 202 | { 203 | self.try_copy_buffer().unwrap() 204 | } 205 | 206 | /// Tries to allocate a duplicate of this buffer. 207 | pub fn try_copy_buffer(&self) -> Result 208 | where 209 | T: Copy, 210 | A: Clone, 211 | { 212 | unsafe { 213 | let header = self.inner.as_ref(); 214 | let len = header.vec.len; 215 | let cap = header.vec.cap; 216 | 217 | if len > cap { 218 | return Err(AllocError); 219 | } 220 | 221 | let allocator = header.allocator.clone(); 222 | let mut clone = Self::try_with_capacity_in(cap as usize, allocator)?; 223 | 224 | if len > 0 { 225 | core::ptr::copy_nonoverlapping(self.data_ptr(), clone.data_ptr(), len as usize); 226 | clone.vec_header_mut().len = len; 227 | } 228 | 229 | Ok(clone) 230 | } 231 | } 232 | 233 | #[inline] 234 | pub fn data_ptr(&self) -> *mut T { 235 | unsafe { (self.inner.as_ptr() as *mut u8).add(raw::header_size::, T>()) as *mut T } 236 | } 237 | 238 | // SAFETY: call this only if the vector is unique. 239 | pub(crate) unsafe fn vec_header_mut(&mut self) -> &mut raw::VecHeader { 240 | &mut self.inner.as_mut().vec 241 | } 242 | 243 | pub(crate) fn vec_header(&self) -> &raw::VecHeader { 244 | unsafe { &self.inner.as_ref().vec } 245 | } 246 | } 247 | 248 | /// Mutable methods that can cause the vector to be cloned and therefore require both the items and 249 | /// the allocator to be cloneable. 250 | impl RefCountedVector { 251 | /// Converts this RefCountedVector into an immutable one, allocating a new copy if there are other references. 252 | #[inline] 253 | pub fn into_unique(mut self) -> Vector { 254 | self.ensure_unique(); 255 | 256 | unsafe { 257 | let data = NonNull::new_unchecked(self.data_ptr()); 258 | let header = self.vec_header().clone(); 259 | let allocator = self.inner.as_ref().allocator.clone(); 260 | 261 | mem::forget(self); 262 | 263 | Vector { 264 | raw: RawVector { 265 | data, 266 | header, 267 | }, 268 | allocator, 269 | } 270 | } 271 | } 272 | 273 | /// Appends an element to the back of a collection. 274 | /// 275 | /// # Panics 276 | /// 277 | /// Panics if the new capacity exceeds `u32::MAX` bytes. 278 | pub fn push(&mut self, val: T) { 279 | self.reserve(1); 280 | unsafe { 281 | raw::push_assuming_capacity(self.data_ptr(), &mut self.vec_header_mut(), val); 282 | } 283 | } 284 | 285 | /// Removes the last element from the vector and returns it, or `None` if it is empty. 286 | pub fn pop(&mut self) -> Option { 287 | self.ensure_unique(); 288 | 289 | unsafe { 290 | raw::pop(self.data_ptr(), &mut self.vec_header_mut()) 291 | } 292 | } 293 | 294 | /// Removes an element from the vector and returns it. 295 | /// 296 | /// The removed element is replaced by the last element of the vector. 297 | /// 298 | /// # Panics 299 | /// 300 | /// Panics if index is out of bounds. 301 | #[inline] 302 | pub fn swap_remove(&mut self, idx: usize) -> T { 303 | self.ensure_unique(); 304 | 305 | let len = self.len(); 306 | assert!(idx < len); 307 | 308 | unsafe { 309 | let data_ptr = self.data_ptr(); 310 | let ptr = data_ptr.add(idx); 311 | let item = ptr::read(ptr); 312 | 313 | let last_idx = len - 1; 314 | if idx != last_idx { 315 | let last_ptr = data_ptr.add(last_idx); 316 | ptr::write(ptr, ptr::read(last_ptr)); 317 | } 318 | 319 | self.vec_header_mut().len = last_idx as BufferSize; 320 | 321 | item 322 | } 323 | } 324 | 325 | /// Appends an element if there is sufficient spare capacity, otherwise an error is returned 326 | /// with the element. 327 | /// 328 | /// Like other mutable operations, this method may reallocate if the vector is not unique. 329 | /// However it will not reallocate when there’s insufficient capacity. 330 | /// The caller should use reserve or try_reserve to ensure that there is enough capacity. 331 | pub fn push_within_capacity(&mut self, val: T) -> Result<(), T> { 332 | if self.remaining_capacity() == 0 { 333 | return Err(val); 334 | } 335 | 336 | self.ensure_unique(); 337 | unsafe { 338 | raw::push_assuming_capacity(self.data_ptr(), &mut self.vec_header_mut(), val); 339 | } 340 | 341 | Ok(()) 342 | } 343 | 344 | /// Clones and appends the contents of the slice to the back of a collection. 345 | pub fn extend_from_slice(&mut self, slice: &[T]) { 346 | self.reserve(slice.len()); 347 | unsafe { 348 | raw::extend_from_slice_assuming_capacity(self.data_ptr(), self.vec_header_mut(), slice); 349 | } 350 | } 351 | 352 | /// Appends the contents of an iterator to the back of a collection. 353 | pub fn extend(&mut self, data: impl IntoIterator) { 354 | let mut iter = data.into_iter(); 355 | 356 | let (min, max) = iter.size_hint(); 357 | self.reserve(max.unwrap_or(min)); 358 | 359 | unsafe { 360 | if raw::extend_within_capacity(self.data_ptr(), self.vec_header_mut(), &mut iter) { 361 | return; 362 | } 363 | } 364 | 365 | for item in iter { 366 | self.push(item); 367 | } 368 | } 369 | 370 | /// Ensures this shared vector uniquely owns its storage, allocating a new copy 371 | /// If there are other references to it. 372 | /// 373 | /// In principle this is mostly useful internally to provide safe mutable methods 374 | /// as it does not observaly affect most of the shared vector behavior, however 375 | /// it has a few niche use cases, for example to provoke copies earlier for more 376 | /// predictable performance or in some unsafe endeavors. 377 | #[inline] 378 | pub fn ensure_unique(&mut self) { 379 | if !self.is_unique() { 380 | *self = self.try_clone_buffer(None).unwrap(); 381 | } 382 | } 383 | 384 | /// Extracts a mutable slice containing the entire vector. 385 | /// 386 | /// Like other mutable methods, this will clone the vector's storage 387 | /// if it is not unique to ensure safe mutations. 388 | #[inline] 389 | pub fn as_mut_slice(&mut self) -> &mut [T] 390 | where 391 | T: Clone, 392 | A: Clone, 393 | { 394 | self.ensure_unique(); 395 | unsafe { 396 | core::slice::from_raw_parts_mut(self.data_ptr(), self.len()) 397 | } 398 | } 399 | 400 | /// Allocates a duplicate of this buffer (infallible). 401 | pub fn clone_buffer(&self) -> Self 402 | where 403 | T: Clone, 404 | A: Clone, 405 | { 406 | self.try_clone_buffer(None).unwrap() 407 | } 408 | 409 | fn try_clone_buffer(&self, new_cap: Option) -> Result 410 | where 411 | T: Clone, 412 | A: Clone, 413 | { 414 | unsafe { 415 | let header = self.inner.as_ref(); 416 | let len = header.vec.len; 417 | let cap = if let Some(cap) = new_cap { 418 | cap 419 | } else { 420 | header.vec.cap 421 | }; 422 | let allocator = header.allocator.clone(); 423 | 424 | if len > cap { 425 | return Err(AllocError); 426 | } 427 | 428 | let mut clone = Self::try_with_capacity_in(cap as usize, allocator)?; 429 | 430 | raw::extend_from_slice_assuming_capacity( 431 | clone.data_ptr(), 432 | clone.vec_header_mut(), 433 | self.as_slice() 434 | ); 435 | 436 | Ok(clone) 437 | } 438 | } 439 | 440 | /// Ensures the vector can be safely mutated and has enough extra capacity to 441 | /// add `additional` more items. 442 | /// 443 | /// This will allocate new storage for the vector if the vector is not unique or if 444 | /// the capacity is not sufficient to accomodate `self.len() + additional` items. 445 | /// The vector may reserve more space to speculatively avoid frequent reallocations. 446 | #[inline] 447 | pub fn reserve(&mut self, additional: usize) { 448 | let is_unique = self.is_unique(); 449 | let enough_capacity = self.remaining_capacity() >= additional; 450 | 451 | if !is_unique || !enough_capacity { 452 | // Hopefully the least common case. 453 | self.try_realloc_additional(is_unique, enough_capacity, additional) 454 | .unwrap(); 455 | } 456 | } 457 | 458 | /// Tries to reserve at least `additional` extra elements to be inserted in the given vector. 459 | /// 460 | /// The vector may reserve more space to speculatively avoid frequent reallocations. 461 | /// After calling try_reserve, capacity will be greater than or equal to `self.len() + additional` 462 | /// if it returns `Ok(())`. 463 | /// Does nothing if capacity is already sufficient. This method preserves the contents even if an 464 | /// error occurs. 465 | pub fn try_reserve(&mut self, additional: usize) -> Result<(), AllocError> { 466 | let is_unique = self.is_unique(); 467 | let enough_capacity = self.remaining_capacity() >= additional; 468 | 469 | if !is_unique || !enough_capacity { 470 | // Hopefully the least common case. 471 | self.try_realloc_additional(is_unique, enough_capacity, additional)?; 472 | } 473 | 474 | Ok(()) 475 | } 476 | 477 | /// Reserves the minimum capacity for at least `additional` elements to be inserted in the given vector. 478 | /// 479 | /// Unlike `reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations. 480 | /// After calling `try_reserve_exact`, capacity will be greater than or equal to `self.len() + additional` if 481 | /// it returns `Ok(())`. 482 | /// This will also allocate if the vector is not unique. 483 | /// Does nothing if the capacity is already sufficient and the vector is unique. 484 | /// 485 | /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not 486 | /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected. 487 | pub fn reserve_exact(&mut self, additional: usize) { 488 | self.try_reserve_exact(additional).unwrap(); 489 | } 490 | 491 | /// Tries to reserve the minimum capacity for at least `additional` elements to be inserted in the given vector. 492 | /// 493 | /// Unlike `try_reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations. 494 | /// After calling `reserve_exact`, capacity will be greater than or equal to `self.len() + additional`. 495 | /// This will also allocate if the vector is not unique. 496 | /// Does nothing if the capacity is already sufficient and the vector is unique. 497 | /// 498 | /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not 499 | /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected. 500 | pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), AllocError> { 501 | let is_unique = self.is_unique(); 502 | let enough_capacity = self.remaining_capacity() >= additional; 503 | 504 | if !is_unique || !enough_capacity { 505 | // Hopefully the least common case. 506 | self.try_realloc_with_capacity(is_unique, additional)?; 507 | } 508 | 509 | Ok(()) 510 | } 511 | 512 | /// Shrinks the capacity of the vector with a lower bound. 513 | /// 514 | /// The capacity will remain at least as large as both the length and the supplied value. 515 | /// If the current capacity is less than the lower limit, this is a no-op. 516 | pub fn shrink_to(&mut self, min_capacity: usize) { 517 | let min_capacity = min_capacity.max(self.len()); 518 | if self.capacity() <= min_capacity { 519 | return; 520 | } 521 | 522 | let is_unique = self.is_unique(); 523 | self.try_realloc_with_capacity(is_unique, min_capacity) 524 | .unwrap(); 525 | } 526 | 527 | /// Shrinks the capacity of the vector as much as possible. 528 | pub fn shrink_to_fit(&mut self) { 529 | self.shrink_to(self.len()) 530 | } 531 | 532 | /// Moves all the elements of `other` into `self`, leaving `other` empty. 533 | /// 534 | /// If `other is not unique, the elements are cloned instead of moved. 535 | pub fn append(&mut self, other: &mut Self) { 536 | self.reserve(other.len()); 537 | 538 | unsafe { 539 | if other.is_unique() { 540 | // Fast path: memcpy 541 | raw::move_data( 542 | other.data_ptr(), &mut other.inner.header.as_mut().vec, 543 | self.data_ptr(), &mut self.inner.as_mut().vec, 544 | ) 545 | } else { 546 | // Slow path, clone each item. 547 | raw::extend_from_slice_assuming_capacity(self.data_ptr(), self.vec_header_mut(), other.as_slice()); 548 | 549 | *other = 550 | Self::try_with_capacity_in(other.capacity(), self.inner.allocator().clone()) 551 | .unwrap(); 552 | } 553 | } 554 | } 555 | 556 | #[cold] 557 | fn try_realloc_additional( 558 | &mut self, 559 | is_unique: bool, 560 | enough_capacity: bool, 561 | additional: usize, 562 | ) -> Result<(), AllocError> { 563 | let new_cap = if enough_capacity { 564 | self.capacity() 565 | } else { 566 | grow_amortized(self.len(), additional) 567 | }; 568 | 569 | self.try_realloc_with_capacity(is_unique, new_cap) 570 | } 571 | 572 | #[cold] 573 | fn try_realloc_with_capacity( 574 | &mut self, 575 | is_unique: bool, 576 | new_cap: usize, 577 | ) -> Result<(), AllocError> { 578 | let allocator = self.inner.allocator().clone(); 579 | if is_unique && self.capacity() > 0 { 580 | // The buffer is not large enough, we'll have to create a new one, however we 581 | // know that we have the only reference to it so we'll move the data with 582 | // a simple memcpy instead of cloning it. 583 | 584 | unsafe { 585 | use crate::raw::{buffer_layout, Header}; 586 | let old_cap = self.capacity(); 587 | let old_header = self.inner.header; 588 | let old_layout = buffer_layout::, T>(old_cap).unwrap(); 589 | let new_layout = buffer_layout::, T>(new_cap).unwrap(); 590 | 591 | let new_alloc = if new_layout.size() >= old_layout.size() { 592 | allocator.grow(old_header.cast(), old_layout, new_layout) 593 | } else { 594 | allocator.shrink(old_header.cast(), old_layout, new_layout) 595 | }?; 596 | 597 | self.inner.header = new_alloc.cast(); 598 | self.inner.as_mut().vec.cap = new_cap as BufferSize; 599 | 600 | return Ok(()); 601 | } 602 | } 603 | 604 | // The slowest path, we pay for both the new allocation and the need to clone 605 | // each item one by one. 606 | let mut new_vec = Self::try_with_capacity_in(new_cap, allocator)?; 607 | new_vec.extend_from_slice(self.as_slice()); 608 | 609 | mem::swap(self, &mut new_vec); 610 | 611 | Ok(()) 612 | } 613 | 614 | 615 | // TODO: remove this one? 616 | /// Returns the concatenation of two vectors. 617 | pub fn concatenate(mut self, mut other: Self) -> Self 618 | where 619 | T: Clone, 620 | A: Clone, 621 | { 622 | self.append(&mut other); 623 | 624 | self 625 | } 626 | } 627 | 628 | impl Drop for RefCountedVector { 629 | fn drop(&mut self) { 630 | unsafe { 631 | if self.inner.as_ref().ref_count.release_ref() { 632 | let header = self.vec_header().clone(); 633 | // See the implementation of std Arc for the need to use this fence. Note that 634 | // we only need it for the atomic reference counted version but I don't expect 635 | // this to make a measurable difference. 636 | core::sync::atomic::fence(Ordering::Acquire); 637 | 638 | raw::drop_items(self.data_ptr(), header.len); 639 | raw::dealloc::(self.inner.header, header.cap); 640 | } 641 | } 642 | } 643 | } 644 | 645 | 646 | unsafe impl Send for AtomicSharedVector {} 647 | 648 | impl Clone for RefCountedVector { 649 | fn clone(&self) -> Self { 650 | self.new_ref() 651 | } 652 | } 653 | 654 | impl, R: RefCount, A: Allocator> PartialEq> 655 | for RefCountedVector 656 | { 657 | fn eq(&self, other: &Self) -> bool { 658 | self.ptr_eq(other) || self.as_slice() == other.as_slice() 659 | } 660 | } 661 | 662 | impl, R: RefCount, A: Allocator> PartialEq<&[T]> for RefCountedVector { 663 | fn eq(&self, other: &&[T]) -> bool { 664 | self.as_slice() == *other 665 | } 666 | } 667 | 668 | impl AsRef<[T]> for RefCountedVector { 669 | fn as_ref(&self) -> &[T] { 670 | self.as_slice() 671 | } 672 | } 673 | 674 | impl Default for RefCountedVector { 675 | fn default() -> Self { 676 | Self::new() 677 | } 678 | } 679 | 680 | impl<'a, T, R: RefCount, A: Allocator> IntoIterator for &'a RefCountedVector { 681 | type Item = &'a T; 682 | type IntoIter = core::slice::Iter<'a, T>; 683 | fn into_iter(self) -> core::slice::Iter<'a, T> { 684 | self.as_slice().iter() 685 | } 686 | } 687 | 688 | impl<'a, T: Clone, R: RefCount, A: Allocator + Clone> IntoIterator 689 | for &'a mut RefCountedVector 690 | { 691 | type Item = &'a mut T; 692 | type IntoIter = core::slice::IterMut<'a, T>; 693 | fn into_iter(self) -> core::slice::IterMut<'a, T> { 694 | self.as_mut_slice().iter_mut() 695 | } 696 | } 697 | 698 | impl Index for RefCountedVector 699 | where 700 | R: RefCount, 701 | A: Allocator, 702 | I: core::slice::SliceIndex<[T]>, 703 | { 704 | type Output = >::Output; 705 | fn index(&self, index: I) -> &Self::Output { 706 | self.as_slice().index(index) 707 | } 708 | } 709 | 710 | impl IndexMut for RefCountedVector 711 | where 712 | T: Clone, 713 | R: RefCount, 714 | A: Allocator + Clone, 715 | I: core::slice::SliceIndex<[T]>, 716 | { 717 | fn index_mut(&mut self, index: I) -> &mut Self::Output { 718 | self.as_mut_slice().index_mut(index) 719 | } 720 | } 721 | 722 | impl Deref for RefCountedVector { 723 | type Target = [T]; 724 | fn deref(&self) -> &[T] { 725 | self.as_slice() 726 | } 727 | } 728 | 729 | impl DerefMut for RefCountedVector { 730 | fn deref_mut(&mut self) -> &mut [T] { 731 | self.as_mut_slice() 732 | } 733 | } 734 | 735 | impl Debug for RefCountedVector { 736 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { 737 | self.as_slice().fmt(f) 738 | } 739 | } 740 | 741 | impl From> for SharedVector { 742 | fn from(vector: Vector) -> Self { 743 | vector.into_shared() 744 | } 745 | } 746 | 747 | impl From> for AtomicSharedVector { 748 | fn from(vector: Vector) -> Self { 749 | vector.into_shared_atomic() 750 | } 751 | } 752 | 753 | // In order to give us a chance to catch leaks and double-frees, test with values that implement drop. 754 | #[cfg(test)] 755 | fn num(val: u32) -> Box { 756 | Box::new(val) 757 | } 758 | 759 | #[test] 760 | fn basic_shared() { 761 | basic_shared_impl::(); 762 | basic_shared_impl::(); 763 | 764 | fn basic_shared_impl() { 765 | let mut a: RefCountedVector, R> = RefCountedVector::with_capacity(64); 766 | a.push(num(1)); 767 | a.push(num(2)); 768 | 769 | let mut b = a.new_ref(); 770 | b.push(num(4)); 771 | 772 | a.push(num(3)); 773 | 774 | assert_eq!(a.as_slice(), &[num(1), num(2), num(3)]); 775 | assert_eq!(b.as_slice(), &[num(1), num(2), num(4)]); 776 | 777 | let popped = a.pop(); 778 | assert_eq!(a.as_slice(), &[num(1), num(2)]); 779 | assert_eq!(popped, Some(num(3))); 780 | 781 | let mut b2 = b.new_ref(); 782 | let popped = b2.pop(); 783 | assert_eq!(b2.as_slice(), &[num(1), num(2)]); 784 | assert_eq!(popped, Some(num(4))); 785 | 786 | println!("concatenate"); 787 | let c = a.concatenate(b2); 788 | assert_eq!(c.as_slice(), &[num(1), num(2), num(1), num(2)]); 789 | } 790 | } 791 | 792 | #[test] 793 | fn empty_buffer() { 794 | let _: AtomicSharedVector = AtomicSharedVector::new(); 795 | let _: AtomicSharedVector = AtomicSharedVector::new(); 796 | 797 | let _: SharedVector<()> = SharedVector::new(); 798 | let _: SharedVector<()> = SharedVector::new(); 799 | 800 | let _: AtomicSharedVector<()> = AtomicSharedVector::new(); 801 | let _: AtomicSharedVector<()> = AtomicSharedVector::new(); 802 | 803 | let _: Vector<()> = Vector::new(); 804 | } 805 | 806 | #[test] 807 | #[rustfmt::skip] 808 | fn grow() { 809 | let mut a = Vector::with_capacity(0); 810 | 811 | a.push(num(1)); 812 | a.push(num(2)); 813 | a.push(num(3)); 814 | 815 | a.extend_from_slice(&[num(4), num(5), num(6), num(7), num(8), num(9), num(10), num(12), num(12), num(13), num(14), num(15), num(16), num(17), num(18)]); 816 | 817 | assert_eq!( 818 | a.as_slice(), 819 | &[num(1), num(2), num(3), num(4), num(5), num(6), num(7), num(8), num(9), num(10), num(12), num(12), num(13), num(14), num(15), num(16), num(17), num(18)] 820 | ); 821 | 822 | let mut b = SharedVector::new(); 823 | b.push(num(1)); 824 | b.push(num(2)); 825 | b.push(num(3)); 826 | 827 | assert_eq!(b.as_slice(), &[num(1), num(2), num(3)]); 828 | 829 | let mut b = AtomicSharedVector::new(); 830 | b.push(num(1)); 831 | b.push(num(2)); 832 | b.push(num(3)); 833 | 834 | assert_eq!(b.as_slice(), &[num(1), num(2), num(3)]); 835 | } 836 | 837 | #[test] 838 | fn ensure_unique_empty() { 839 | let mut v: SharedVector = SharedVector::new(); 840 | v.ensure_unique(); 841 | } 842 | 843 | 844 | #[test] 845 | fn shrink_to_zero() { 846 | let mut v: SharedVector = SharedVector::new(); 847 | v.shrink_to(0); 848 | } 849 | -------------------------------------------------------------------------------- /src/splice.rs: -------------------------------------------------------------------------------- 1 | use crate::alloc::{Allocator, Global}; 2 | use core::ptr::{self}; 3 | use core::slice::{self}; 4 | 5 | use crate::drain::Drain; 6 | 7 | /// A splicing iterator for `Vec`. 8 | /// 9 | /// This struct is created by [`Vec::splice()`]. 10 | /// See its documentation for more. 11 | /// 12 | /// # Example 13 | /// 14 | /// ``` 15 | /// let mut v = vec![0, 1, 2]; 16 | /// let new = [7, 8]; 17 | /// let iter: std::vec::Splice<_> = v.splice(1.., new); 18 | /// ``` 19 | #[derive(Debug)] 20 | pub struct Splice< 21 | 'a, 22 | I: Iterator + 'a, 23 | A: Allocator + 'a = Global, 24 | > { 25 | pub(crate) drain: Drain<'a, I::Item>, 26 | pub(crate) replace_with: I, 27 | pub(crate) allocator: &'a A, 28 | } 29 | 30 | impl Iterator for Splice<'_, I, A> { 31 | type Item = I::Item; 32 | 33 | fn next(&mut self) -> Option { 34 | self.drain.next() 35 | } 36 | 37 | fn size_hint(&self) -> (usize, Option) { 38 | self.drain.size_hint() 39 | } 40 | } 41 | 42 | impl DoubleEndedIterator for Splice<'_, I, A> { 43 | fn next_back(&mut self) -> Option { 44 | self.drain.next_back() 45 | } 46 | } 47 | 48 | impl ExactSizeIterator for Splice<'_, I, A> {} 49 | 50 | impl Drop for Splice<'_, I, A> { 51 | fn drop(&mut self) { 52 | self.drain.by_ref().for_each(drop); 53 | // At this point draining is done and the only remaining tasks are splicing 54 | // and moving things into the final place. 55 | // Which means we can replace the slice::Iter with pointers that won't point to deallocated 56 | // memory, so that Drain::drop is still allowed to call iter.len(), otherwise it would break 57 | // the ptr.sub_ptr contract. 58 | self.drain.iter = (&[]).iter(); 59 | 60 | unsafe { 61 | if self.drain.tail_len == 0 { 62 | self.drain.vec.as_mut().extend(self.allocator, self.replace_with.by_ref()); 63 | return; 64 | } 65 | 66 | // First fill the range left by drain(). 67 | if !self.drain.fill(&mut self.replace_with) { 68 | return; 69 | } 70 | 71 | // There may be more elements. Use the lower bound as an estimate. 72 | // FIXME: Is the upper bound a better guess? Or something else? 73 | let (lower_bound, _upper_bound) = self.replace_with.size_hint(); 74 | if lower_bound > 0 { 75 | self.drain.move_tail(self.allocator, lower_bound); 76 | if !self.drain.fill(&mut self.replace_with) { 77 | return; 78 | } 79 | } 80 | 81 | // Collect any remaining elements. 82 | // This is a zero-length vector which does not allocate if `lower_bound` was exact. 83 | let mut collected = self.replace_with.by_ref().collect::>().into_iter(); 84 | // Now we have an exact count. 85 | if collected.len() > 0 { 86 | self.drain.move_tail(self.allocator, collected.len()); 87 | let filled = self.drain.fill(&mut collected); 88 | debug_assert!(filled); 89 | debug_assert_eq!(collected.len(), 0); 90 | } 91 | } 92 | // Let `Drain::drop` move the tail back if necessary and restore `vec.len`. 93 | } 94 | } 95 | 96 | /// Private helper methods for `Splice::drop` 97 | impl Drain<'_, T> { 98 | /// The range from `self.vec.len` to `self.tail_start` contains elements 99 | /// that have been moved out. 100 | /// Fill that range as much as possible with new elements from the `replace_with` iterator. 101 | /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.) 102 | unsafe fn fill>(&mut self, replace_with: &mut I) -> bool { 103 | let vec = unsafe { self.vec.as_mut() }; 104 | let range_start = vec.header.len as usize; 105 | let range_end = self.tail_start as usize; 106 | let range_slice = unsafe { 107 | slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start) 108 | }; 109 | 110 | for place in range_slice { 111 | if let Some(new_item) = replace_with.next() { 112 | unsafe { ptr::write(place, new_item) }; 113 | vec.header.len += 1; 114 | } else { 115 | return false; 116 | } 117 | } 118 | true 119 | } 120 | 121 | /// Makes room for inserting more elements before the tail. 122 | unsafe fn move_tail(&mut self, allocator: &A, additional: usize) { 123 | let vec = unsafe { self.vec.as_mut() }; 124 | let len = self.tail_start + self.tail_len; 125 | unsafe { vec.try_reserve(allocator, len + additional).unwrap(); } 126 | 127 | let new_tail_start = self.tail_start + additional; 128 | unsafe { 129 | let src = vec.as_ptr().add(self.tail_start); 130 | let dst = vec.as_mut_ptr().add(new_tail_start); 131 | ptr::copy(src, dst, self.tail_len); 132 | } 133 | self.tail_start = new_tail_start; 134 | } 135 | } -------------------------------------------------------------------------------- /src/vector.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | use core::ops::{Deref, DerefMut, Index, IndexMut}; 3 | use core::ptr::NonNull; 4 | use core::{mem, ptr}; 5 | use core::ops::RangeBounds; 6 | 7 | use crate::alloc::{AllocError, Allocator, Global}; 8 | use crate::drain::Drain; 9 | use crate::raw::{ 10 | self, buffer_layout, AtomicRefCount, BufferSize, Header, HeaderBuffer, RefCount, VecHeader, move_data, 11 | }; 12 | use crate::shared::{AtomicSharedVector, SharedVector}; 13 | use crate::splice::Splice; 14 | use crate::{grow_amortized, DefaultRefCount}; 15 | 16 | /// A heap allocated, mutable contiguous buffer containing elements of type `T`, with manual deallocation. 17 | /// 18 | /// 19 | /// 20 | /// See also `Vector`. 21 | /// 22 | /// This container is similar to this crate's `Vector` data structure with two key difference: 23 | /// - It does store an allocator field. Instead, all methods that require interacting with an allocator are 24 | /// marked unsafe and take the allocator as parameter. 25 | /// - `RawVector`'s `Drop` implementation does not automatically deallocate the memory. Instead the memory 26 | /// must be manually deallocated via the `deallocate` method. Dropping a raw vector without deallocating it 27 | /// silently leaks the memory. 28 | /// 29 | /// `Vector` is implemented as a thin wrapper around this type. 30 | /// 31 | /// # Use cases 32 | /// 33 | /// In most cases, `Vector` is more appropriate. However in some situations it can be beneficial to not 34 | /// store the allocator in the container. In complex data structures that contain many vectors, for example, 35 | /// it may be preferable to store the allocator once at the root of the data structure than multiple times 36 | /// in each of the internally managed vectors. 37 | pub struct RawVector { 38 | pub(crate) data: NonNull, 39 | pub(crate) header: VecHeader, 40 | } 41 | 42 | impl RawVector { 43 | /// Creates an empty, unallocated raw vector. 44 | pub fn new() -> Self { 45 | RawVector { data: NonNull::dangling(), header: VecHeader { len: 0, cap: 0 } } 46 | } 47 | 48 | /// Creates an empty pre-allocated vector with a given storage capacity. 49 | /// 50 | /// Does not allocate memory if `cap` is zero. 51 | pub fn try_with_capacity(allocator: &A, cap: usize) -> Result, AllocError> { 52 | if cap == 0 { 53 | return Ok(RawVector::new()); 54 | } 55 | 56 | unsafe { 57 | let (base_ptr, cap) = raw::allocate_header_buffer::(cap, allocator)?; 58 | let data = NonNull::new_unchecked(raw::data_ptr::, T>( 59 | base_ptr.cast(), 60 | )); 61 | Ok(RawVector { 62 | data, 63 | header: VecHeader { cap: cap as BufferSize, len: 0 }, 64 | }) 65 | } 66 | } 67 | 68 | pub fn try_from_slice(allocator: &A, data: &[T]) -> Result 69 | where 70 | T: Clone, 71 | { 72 | let mut v = Self::try_with_capacity(allocator, data.len())?; 73 | unsafe { 74 | v.extend_from_slice(allocator, data); 75 | } 76 | 77 | Ok(v) 78 | } 79 | 80 | /// Creates a raw vector with `n` clones of `elem`. 81 | pub fn try_from_elem(allocator: &A, elem: T, n: usize) -> Result 82 | where 83 | T: Clone, 84 | { 85 | if n == 0 { 86 | return Ok(Self::new()); 87 | } 88 | 89 | let mut v = Self::try_with_capacity(allocator, n)?; 90 | unsafe { 91 | for _ in 0..(n - 1) { 92 | v.push(allocator, elem.clone()) 93 | } 94 | 95 | v.push(allocator, elem); 96 | } 97 | 98 | Ok(v) 99 | } 100 | 101 | /// Clears and deallocates this raw vector, leaving it in its unallocated state. 102 | /// 103 | /// It is safe (no-op) to call `deallocate` on a vector that is already in its unallocated state. 104 | /// 105 | /// # Safety 106 | /// 107 | /// The provided allocator must be the one this raw vector was created with. 108 | pub unsafe fn deallocate(&mut self, allocator: &A) { 109 | if self.header.cap == 0 { 110 | return; 111 | } 112 | 113 | self.clear(); 114 | 115 | self.deallocate_buffer(allocator); 116 | 117 | self.data = NonNull::dangling(); 118 | self.header.cap = 0; 119 | self.header.len = 0; 120 | } 121 | 122 | #[inline] 123 | /// Returns `true` if the vector contains no elements. 124 | pub fn is_empty(&self) -> bool { 125 | self.header.len == 0 126 | } 127 | 128 | #[inline] 129 | /// Returns the number of elements in the vector, also referred to as its ‘length’. 130 | pub fn len(&self) -> usize { 131 | self.header.len as usize 132 | } 133 | 134 | #[inline] 135 | /// Returns the total number of elements the vector can hold without reallocating. 136 | pub fn capacity(&self) -> usize { 137 | self.header.cap as usize 138 | } 139 | 140 | /// Returns number of elements that can be added without reallocating. 141 | #[inline] 142 | pub fn remaining_capacity(&self) -> usize { 143 | (self.header.cap - self.header.len) as usize 144 | } 145 | 146 | #[inline] 147 | fn data_ptr(&self) -> *mut T { 148 | self.data.as_ptr() 149 | } 150 | 151 | #[inline] 152 | pub fn as_slice(&self) -> &[T] { 153 | unsafe { core::slice::from_raw_parts(self.data_ptr(), self.len()) } 154 | } 155 | 156 | #[inline] 157 | pub fn as_mut_slice(&mut self) -> &mut [T] { 158 | unsafe { core::slice::from_raw_parts_mut(self.data_ptr(), self.len()) } 159 | } 160 | 161 | /// Clears the vector, removing all values. 162 | pub fn clear(&mut self) { 163 | unsafe { 164 | raw::clear(self.data_ptr(), &mut self.header) 165 | } 166 | } 167 | 168 | unsafe fn base_ptr(&self, _allocator: &A) -> NonNull { 169 | debug_assert!(self.header.cap > 0); 170 | raw::header_from_data_ptr::, T>(self.data).cast() 171 | } 172 | 173 | /// Appends an element to the back of a collection. 174 | /// 175 | /// # Safety 176 | /// 177 | /// The provided allocator must be the one this raw vector was created with. 178 | /// 179 | /// # Panics 180 | /// 181 | /// Panics if the new capacity exceeds `u32::MAX` bytes. 182 | #[inline] 183 | pub unsafe fn push(&mut self, allocator: &A, val: T) { 184 | if self.header.len == self.header.cap { 185 | self.try_realloc_additional(allocator, 1).unwrap(); 186 | } 187 | 188 | raw::push_assuming_capacity(self.data_ptr(), &mut self.header, val); 189 | } 190 | 191 | /// Appends an element if there is sufficient spare capacity, otherwise an error is returned 192 | /// with the element. 193 | /// 194 | /// Unlike push this method will not reallocate when there’s insufficient capacity. 195 | /// The caller should use reserve or try_reserve to ensure that there is enough capacity. 196 | #[inline] 197 | pub fn push_within_capacity(&mut self, val: T) -> Result<(), T> { 198 | if self.header.len == self.header.cap { 199 | return Err(val); 200 | } 201 | 202 | unsafe { 203 | let dst = self.data_ptr().add(self.header.len as usize); 204 | self.header.len += 1; 205 | ptr::write(dst, val); 206 | } 207 | 208 | Ok(()) 209 | } 210 | 211 | /// Removes the last element from the vector and returns it, or `None` if it is empty. 212 | #[inline] 213 | pub fn pop(&mut self) -> Option { 214 | unsafe { 215 | raw::pop(self.data_ptr(), &mut self.header) 216 | } 217 | } 218 | 219 | /// Removes and returns the element at position `index` within the vector, 220 | /// shifting all elements after it to the left. 221 | /// 222 | /// # Panics 223 | /// 224 | /// Panics if `index` is out of bounds. 225 | /// 226 | pub fn remove(&mut self, index: usize) -> T { 227 | #[cold] 228 | #[inline(never)] 229 | #[track_caller] 230 | fn assert_failed(index: usize, len: usize) -> ! { 231 | panic!("removal index (is {index}) should be < len (is {len})"); 232 | } 233 | 234 | let len = self.len(); 235 | if index >= len { 236 | assert_failed(index, len); 237 | } 238 | unsafe { 239 | // infallible 240 | let ret; 241 | { 242 | // the place we are taking from. 243 | let ptr = self.as_mut_ptr().add(index); 244 | // copy it out, unsafely having a copy of the value on 245 | // the stack and in the vector at the same time. 246 | ret = ptr::read(ptr); 247 | 248 | // Shift everything down to fill in that spot. 249 | ptr::copy(ptr.add(1), ptr, len - index - 1); 250 | } 251 | self.header.len = len as u32 - 1; 252 | ret 253 | } 254 | } 255 | 256 | /// Removes an element from the vector and returns it. 257 | /// 258 | /// The removed element is replaced by the last element of the vector. 259 | /// 260 | /// # Panics 261 | /// 262 | /// Panics if index is out of bounds. 263 | #[inline] 264 | pub fn swap_remove(&mut self, idx: usize) -> T { 265 | let len = self.len(); 266 | assert!(idx < len); 267 | 268 | unsafe { 269 | let ptr = self.data_ptr().add(idx); 270 | let item = ptr::read(ptr); 271 | 272 | let last_idx = len - 1; 273 | if idx != last_idx { 274 | let last_ptr = self.data_ptr().add(last_idx); 275 | ptr::write(ptr, ptr::read(last_ptr)); 276 | } 277 | 278 | self.header.len -= 1; 279 | 280 | item 281 | } 282 | } 283 | 284 | /// Inserts an element at position `index` within the vector, shifting all 285 | /// elements after it to the right. 286 | /// 287 | /// # Panics 288 | /// 289 | /// Panics if `index > len`. 290 | pub unsafe fn insert(&mut self, allocator: &A, index: usize, element: T) { 291 | #[cold] 292 | #[inline(never)] 293 | fn assert_failed(index: usize, len: usize) -> ! { 294 | panic!("insertion index (is {index}) should be <= len (is {len})"); 295 | } 296 | 297 | unsafe { 298 | // space for the new element 299 | if self.header.len == self.header.cap { 300 | self.try_reserve(allocator, 1).unwrap(); 301 | } 302 | 303 | let len = self.len(); 304 | 305 | // infallible 306 | // The spot to put the new value 307 | { 308 | let p = self.as_mut_ptr().add(index); 309 | if index < len { 310 | // Shift everything over to make space. (Duplicating the 311 | // `index`th element into two consecutive places.) 312 | ptr::copy(p, p.add(1), len - index); 313 | } else if index == len { 314 | // No elements need shifting. 315 | } else { 316 | assert_failed(index, len); 317 | } 318 | // Write it in, overwriting the first copy of the `index`th 319 | // element. 320 | ptr::write(p, element); 321 | } 322 | self.header.len += 1; 323 | } 324 | } 325 | 326 | /// Clones and appends the contents of the slice to the back of a collection. 327 | /// 328 | /// # Safety 329 | /// 330 | /// The provided allocator must be the one this raw vector was created with. 331 | pub unsafe fn extend_from_slice(&mut self, allocator: &A, slice: &[T]) 332 | where 333 | T: Clone, 334 | { 335 | self.try_reserve(allocator, slice.len()).unwrap(); 336 | unsafe { 337 | raw::extend_from_slice_assuming_capacity(self.data_ptr(), &mut self.header, slice); 338 | } 339 | } 340 | 341 | /// Moves all the elements of `other` into `self`, leaving `other` empty. 342 | /// 343 | /// # Safety 344 | /// 345 | /// The provided allocator must be the one this raw vector was created with. 346 | pub unsafe fn append(&mut self, allocator: &A, other: &mut Self) 347 | where 348 | T: Clone, 349 | { 350 | if other.is_empty() { 351 | return; 352 | } 353 | 354 | self.try_reserve(allocator, other.len()).unwrap(); 355 | 356 | unsafe { 357 | move_data(other.data_ptr(), &mut other.header, self.data_ptr(), &mut self.header); 358 | } 359 | } 360 | 361 | /// Appends the contents of an iterator to the back of a collection. 362 | /// 363 | /// # Safety 364 | /// 365 | /// The provided allocator must be the one this raw vector was created with. 366 | pub unsafe fn extend(&mut self, allocator: &A, data: impl IntoIterator) { 367 | let mut iter = data.into_iter(); 368 | let (min, max) = iter.size_hint(); 369 | self.try_reserve(allocator, max.unwrap_or(min)).unwrap(); 370 | unsafe { 371 | self.extend_within_capacity(&mut iter); 372 | 373 | for item in iter { 374 | self.push(allocator, item); 375 | } 376 | } 377 | } 378 | 379 | unsafe fn extend_within_capacity(&mut self, iter: &mut impl Iterator) { 380 | let n = self.remaining_capacity() as BufferSize; 381 | 382 | let mut ptr = self.data_ptr().add(self.len()); 383 | let mut count = 0; 384 | 385 | unsafe { 386 | for item in iter { 387 | if count == n { 388 | break; 389 | } 390 | ptr::write(ptr, item); 391 | ptr = ptr.add(1); 392 | count += 1; 393 | } 394 | self.header.len += count; 395 | } 396 | } 397 | 398 | /// Allocate a clone of this buffer. 399 | /// 400 | /// The provided allocator does not need to be the one this raw vector was created with. 401 | /// The returned raw vector is considered to be created with the provided allocator. 402 | pub fn clone_buffer(&self, allocator: &A) -> Self 403 | where 404 | T: Clone, 405 | { 406 | self.clone_buffer_with_capacity(allocator, self.len()) 407 | } 408 | 409 | /// Allocate a clone of this buffer with a different capacity 410 | /// 411 | /// The capacity must be at least as large as the buffer's length. 412 | pub fn clone_buffer_with_capacity(&self, allocator: &A, cap: usize) -> Self 413 | where 414 | T: Clone, 415 | { 416 | let mut clone = 417 | Self::try_with_capacity(allocator, cap.max(self.len())).unwrap(); 418 | 419 | unsafe { 420 | raw::extend_from_slice_assuming_capacity(clone.data_ptr(), &mut clone.header, self.as_slice()); 421 | } 422 | 423 | clone 424 | } 425 | 426 | // Note: Marking this #[inline(never)] is a pretty large regression in the push benchmark. 427 | #[cold] 428 | unsafe fn try_realloc_additional(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> { 429 | let new_cap = grow_amortized(self.len(), additional); 430 | if new_cap < self.len() { 431 | return Err(AllocError); 432 | } 433 | 434 | self.try_realloc_with_capacity(allocator, new_cap) 435 | } 436 | 437 | #[cold] 438 | unsafe fn try_realloc_with_capacity(&mut self, allocator: &A, new_cap: usize) -> Result<(), AllocError> { 439 | type R = DefaultRefCount; 440 | 441 | unsafe { 442 | if new_cap == 0 { 443 | self.deallocate_buffer(allocator); 444 | } 445 | 446 | let new_layout = buffer_layout::, T>(new_cap).unwrap(); 447 | 448 | let new_alloc = if self.header.cap == 0 { 449 | allocator.allocate(new_layout)? 450 | } else { 451 | let old_cap = self.capacity(); 452 | let old_ptr = self.base_ptr(allocator); 453 | let old_layout = buffer_layout::, T>(old_cap).unwrap(); 454 | let new_layout = buffer_layout::, T>(new_cap).unwrap(); 455 | 456 | if new_layout.size() >= old_layout.size() { 457 | allocator.grow(old_ptr, old_layout, new_layout) 458 | } else { 459 | allocator.shrink(old_ptr, old_layout, new_layout) 460 | }? 461 | }; 462 | 463 | let new_data_ptr = crate::raw::data_ptr::, T>(new_alloc.cast()); 464 | self.data = NonNull::new_unchecked(new_data_ptr); 465 | self.header.cap = new_cap as u32; 466 | } 467 | 468 | Ok(()) 469 | } 470 | 471 | // Deallocates the memory, does not drop the vector's content. 472 | unsafe fn deallocate_buffer(&mut self, allocator: &A) { 473 | let layout = buffer_layout::, T>(self.capacity()).unwrap(); 474 | let ptr = self.base_ptr(allocator); 475 | 476 | allocator.deallocate(ptr, layout); 477 | 478 | self.header.cap = 0; 479 | self.header.len = 0; 480 | self.data = NonNull::dangling(); 481 | } 482 | 483 | /// Tries to reserve at least enough space for `additional` extra items. 484 | /// 485 | /// # Safety 486 | /// 487 | /// The provided allocator must be the one this raw vector was created with. 488 | #[inline] 489 | pub unsafe fn try_reserve(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> { 490 | if self.remaining_capacity() < additional { 491 | self.try_realloc_additional(allocator, additional)?; 492 | } 493 | 494 | Ok(()) 495 | } 496 | 497 | /// Tries to reserve the minimum capacity for at least `additional` elements to be inserted in the given vector. 498 | /// 499 | /// Unlike `try_reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations. 500 | /// After calling `reserve_exact`, capacity will be greater than or equal to `self.len() + additional`. 501 | /// This will also allocate if the vector is not unique. 502 | /// Does nothing if the capacity is already sufficient and the vector is unique. 503 | /// 504 | /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not 505 | /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected. 506 | /// 507 | /// # Safety 508 | /// 509 | /// The provided allocator must be the one this raw vector was created with. 510 | pub unsafe fn try_reserve_exact(&mut self, allocator: &A, additional: usize) -> Result<(), AllocError> { 511 | if self.remaining_capacity() >= additional { 512 | return Ok(()); 513 | } 514 | 515 | self.try_realloc_with_capacity(allocator, self.len() + additional) 516 | } 517 | 518 | /// Shrinks the capacity of the vector with a lower bound. 519 | /// 520 | /// The capacity will remain at least as large as both the length and the supplied value. 521 | /// If the current capacity is less than the lower limit, this is a no-op. 522 | /// 523 | /// # Safety 524 | /// 525 | /// The provided allocator must be the one this raw vector was created with. 526 | pub unsafe fn shrink_to(&mut self, allocator: &A, min_capacity: usize) 527 | { 528 | let min_capacity = min_capacity.max(self.len()); 529 | if self.capacity() <= min_capacity { 530 | return; 531 | } 532 | 533 | self.try_realloc_with_capacity(allocator, min_capacity).unwrap(); 534 | } 535 | 536 | /// Shrinks the capacity of the vector as much as possible. 537 | /// 538 | /// # Safety 539 | /// 540 | /// The provided allocator must be the one this raw vector was created with. 541 | pub unsafe fn shrink_to_fit(&mut self, allocator: &A) 542 | { 543 | self.shrink_to(allocator, self.len()) 544 | } 545 | 546 | /// Removes the specified range from the vector in bulk, returning all 547 | /// removed elements as an iterator. If the iterator is dropped before 548 | /// being fully consumed, it drops the remaining removed elements. 549 | /// 550 | /// The returned iterator keeps a mutable borrow on the vector to optimize 551 | /// its implementation. 552 | /// 553 | /// # Panics 554 | /// 555 | /// Panics if the starting point is greater than the end point or if 556 | /// the end point is greater than the length of the vector. 557 | /// 558 | /// # Leaking 559 | /// 560 | /// If the returned iterator goes out of scope without being dropped (due to 561 | /// [`mem::forget`], for example), the vector may have lost and leaked 562 | /// elements arbitrarily, including elements outside the range. 563 | /// 564 | pub fn drain(&mut self, range: R) -> Drain<'_, T> 565 | where 566 | R: RangeBounds, 567 | { 568 | // Memory safety 569 | // 570 | // When the Drain is first created, it shortens the length of 571 | // the source vector to make sure no uninitialized or moved-from elements 572 | // are accessible at all if the Drain's destructor never gets to run. 573 | // 574 | // Drain will ptr::read out the values to remove. 575 | // When finished, remaining tail of the vec is copied back to cover 576 | // the hole, and the vector length is restored to the new length. 577 | // 578 | use core::ops::Bound::*; 579 | let len = self.len(); 580 | let end = match range.end_bound() { 581 | Included(n) => *n + 1, 582 | Excluded(n) => *n, 583 | Unbounded => len 584 | }; 585 | let start = match range.start_bound() { 586 | Included(n) => *n, 587 | Excluded(n) => *n+1, 588 | Unbounded => 0 589 | }; 590 | assert!(end <= len); 591 | assert!(start <= end); 592 | 593 | unsafe { 594 | // Set self.vec length's to start, to be safe in case Drain is leaked 595 | self.header.len = start as u32; 596 | let range_slice = core::slice::from_raw_parts(self.as_ptr().add(start), end - start); 597 | Drain { 598 | tail_start: end, 599 | tail_len: len - end, 600 | iter: range_slice.iter(), 601 | vec: NonNull::from(self), 602 | } 603 | } 604 | } 605 | 606 | /// Creates a splicing iterator that replaces the specified range in the vector 607 | /// with the given `replace_with` iterator and yields the removed items. 608 | /// `replace_with` does not need to be the same length as `range`. 609 | /// 610 | /// `range` is removed even if the iterator is not consumed until the end. 611 | /// 612 | /// It is unspecified how many elements are removed from the vector 613 | /// if the `Splice` value is leaked. 614 | /// 615 | /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped. 616 | /// 617 | /// This is optimal if: 618 | /// 619 | /// * The tail (elements in the vector after `range`) is empty, 620 | /// * or `replace_with` yields fewer or equal elements than `range`’s length 621 | /// * or the lower bound of its `size_hint()` is exact. 622 | /// 623 | /// Otherwise, a temporary vector is allocated and the tail is moved twice. 624 | /// 625 | /// # Panics 626 | /// 627 | /// Panics if the starting point is greater than the end point or if 628 | /// the end point is greater than the length of the vector. 629 | /// 630 | pub unsafe fn splice<'l, A, R, I>( 631 | &'l mut self, 632 | allocator: &'l A, 633 | range: R, 634 | replace_with: I 635 | ) -> Splice<'l, ::IntoIter, A> 636 | where 637 | A: Allocator, 638 | R: RangeBounds, 639 | I: IntoIterator, 640 | { 641 | Splice { 642 | drain: self.drain(range), 643 | replace_with: replace_with.into_iter(), 644 | allocator, 645 | } 646 | } 647 | 648 | /// Retains only the elements specified by the predicate. 649 | /// 650 | /// In other words, remove all elements `e` for which `f(&e)` returns `false`. 651 | /// This method operates in place, visiting each element exactly once in the 652 | /// original order, and preserves the order of the retained elements. 653 | pub fn retain(&mut self, mut f: F) 654 | where 655 | F: FnMut(&T) -> bool, 656 | { 657 | self.retain_mut(|elem| f(elem)); 658 | } 659 | 660 | /// Retains only the elements specified by the predicate, passing a mutable reference to it. 661 | /// 662 | /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`. 663 | /// This method operates in place, visiting each element exactly once in the 664 | /// original order, and preserves the order of the retained elements. 665 | pub fn retain_mut(&mut self, mut f: F) 666 | where 667 | F: FnMut(&mut T) -> bool, 668 | { 669 | let original_len = self.len(); 670 | // Avoid double drop if the drop guard is not executed, 671 | // since we may make some holes during the process. 672 | self.header.len = 0; 673 | 674 | // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked] 675 | // |<- processed len ->| ^- next to check 676 | // |<- deleted cnt ->| 677 | // |<- original_len ->| 678 | // Kept: Elements which predicate returns true on. 679 | // Hole: Moved or dropped element slot. 680 | // Unchecked: Unchecked valid elements. 681 | // 682 | // This drop guard will be invoked when predicate or `drop` of element panicked. 683 | // It shifts unchecked elements to cover holes and `set_len` to the correct length. 684 | // In cases when predicate and `drop` never panick, it will be optimized out. 685 | struct BackshiftOnDrop<'a, T> { 686 | v: &'a mut RawVector, 687 | processed_len: usize, 688 | deleted_cnt: usize, 689 | original_len: usize, 690 | } 691 | 692 | impl Drop for BackshiftOnDrop<'_, T> { 693 | fn drop(&mut self) { 694 | if self.deleted_cnt > 0 { 695 | // SAFETY: Trailing unchecked items must be valid since we never touch them. 696 | unsafe { 697 | ptr::copy( 698 | self.v.as_ptr().add(self.processed_len), 699 | self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt), 700 | self.original_len - self.processed_len, 701 | ); 702 | } 703 | } 704 | // SAFETY: After filling holes, all items are in contiguous memory. 705 | self.v.header.len = (self.original_len - self.deleted_cnt) as u32; 706 | } 707 | } 708 | 709 | let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len }; 710 | 711 | fn process_loop( 712 | original_len: usize, 713 | f: &mut F, 714 | g: &mut BackshiftOnDrop<'_, T>, 715 | ) where 716 | F: FnMut(&mut T) -> bool, 717 | { 718 | while g.processed_len != original_len { 719 | // SAFETY: Unchecked element must be valid. 720 | let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) }; 721 | if !f(cur) { 722 | // Advance early to avoid double drop if `drop_in_place` panicked. 723 | g.processed_len += 1; 724 | g.deleted_cnt += 1; 725 | // SAFETY: We never touch this element again after dropped. 726 | unsafe { ptr::drop_in_place(cur) }; 727 | // We already advanced the counter. 728 | if DELETED { 729 | continue; 730 | } else { 731 | break; 732 | } 733 | } 734 | if DELETED { 735 | // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element. 736 | // We use copy for move, and never touch this element again. 737 | unsafe { 738 | let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt); 739 | ptr::copy_nonoverlapping(cur, hole_slot, 1); 740 | } 741 | } 742 | g.processed_len += 1; 743 | } 744 | } 745 | 746 | // Stage 1: Nothing was deleted. 747 | process_loop::(original_len, &mut f, &mut g); 748 | 749 | // Stage 2: Some elements were deleted. 750 | process_loop::(original_len, &mut f, &mut g); 751 | 752 | // All item are processed. This can be optimized to `set_len` by LLVM. 753 | drop(g); 754 | } 755 | 756 | /// Transfers ownership of this raw vector's contents to the one that is returned, and leaves 757 | /// this one empty and unallocated. 758 | pub fn take(&mut self) -> Self { 759 | mem::replace(self, RawVector::new()) 760 | } 761 | } 762 | 763 | impl> PartialEq> for RawVector { 764 | fn eq(&self, other: &Self) -> bool { 765 | self.as_slice() == other.as_slice() 766 | } 767 | } 768 | 769 | impl> PartialEq<&[T]> for RawVector { 770 | fn eq(&self, other: &&[T]) -> bool { 771 | self.as_slice() == *other 772 | } 773 | } 774 | 775 | impl Eq for RawVector { 776 | 777 | } 778 | 779 | impl AsRef<[T]> for RawVector { 780 | fn as_ref(&self) -> &[T] { 781 | self.as_slice() 782 | } 783 | } 784 | 785 | impl AsMut<[T]> for RawVector { 786 | fn as_mut(&mut self) -> &mut [T] { 787 | self.as_mut_slice() 788 | } 789 | } 790 | 791 | impl Default for RawVector { 792 | fn default() -> Self { 793 | Self::new() 794 | } 795 | } 796 | 797 | impl<'a, T> IntoIterator for &'a RawVector { 798 | type Item = &'a T; 799 | type IntoIter = core::slice::Iter<'a, T>; 800 | fn into_iter(self) -> core::slice::Iter<'a, T> { 801 | self.as_slice().iter() 802 | } 803 | } 804 | 805 | impl<'a, T> IntoIterator for &'a mut RawVector { 806 | type Item = &'a mut T; 807 | type IntoIter = core::slice::IterMut<'a, T>; 808 | fn into_iter(self) -> core::slice::IterMut<'a, T> { 809 | self.as_mut_slice().iter_mut() 810 | } 811 | } 812 | 813 | impl Index for RawVector 814 | where 815 | I: core::slice::SliceIndex<[T]>, 816 | { 817 | type Output = >::Output; 818 | fn index(&self, index: I) -> &Self::Output { 819 | self.as_slice().index(index) 820 | } 821 | } 822 | 823 | impl IndexMut for RawVector 824 | where 825 | I: core::slice::SliceIndex<[T]>, 826 | { 827 | fn index_mut(&mut self, index: I) -> &mut Self::Output { 828 | self.as_mut_slice().index_mut(index) 829 | } 830 | } 831 | 832 | impl Deref for RawVector { 833 | type Target = [T]; 834 | fn deref(&self) -> &[T] { 835 | self.as_slice() 836 | } 837 | } 838 | 839 | impl DerefMut for RawVector { 840 | fn deref_mut(&mut self) -> &mut [T] { 841 | self.as_mut_slice() 842 | } 843 | } 844 | 845 | impl Debug for RawVector { 846 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { 847 | self.as_slice().fmt(f) 848 | } 849 | } 850 | 851 | impl core::hash::Hash for RawVector { 852 | fn hash(&self, state: &mut H) where H: core::hash::Hasher { 853 | self.as_slice().hash(state) 854 | } 855 | } 856 | 857 | 858 | /// A heap allocated, mutable contiguous buffer containing elements of type `T`. 859 | /// 860 | /// 861 | /// 862 | /// Similar in principle to a `Vec`. 863 | /// It can be converted for free into a reference counted `SharedVector` or `AtomicSharedVector`. 864 | /// 865 | /// Unique and shared vectors expose similar functionality. `Vector` takes advantage of 866 | /// the guaranteed uniqueness at the type level to provide overall faster operations than its 867 | /// shared counterparts, while its memory layout makes it very cheap to convert to a shared vector 868 | /// (involving no allocation or copy). 869 | /// 870 | /// # Internal representation 871 | /// 872 | /// `Vector` stores its length and capacity inline and points to the first element of the 873 | /// allocated buffer. Room for a header is left uninitialized before the elements so that the 874 | /// vector can be converted into a `SharedVector` or `AtomicSharedVector` without reallocating 875 | /// the storage. 876 | /// 877 | /// Internally, `Vector` is built on top of `RawVector`. 878 | pub struct Vector { 879 | pub(crate) raw: RawVector, 880 | pub(crate) allocator: A, 881 | } 882 | 883 | impl Vector { 884 | /// Creates an empty vector. 885 | /// 886 | /// This does not allocate memory. 887 | pub fn new() -> Vector { 888 | Vector { 889 | raw: RawVector::new(), 890 | allocator: Global, 891 | } 892 | } 893 | 894 | 895 | 896 | /// Creates an empty pre-allocated vector with a given storage capacity. 897 | /// 898 | /// Does not allocate memory if `cap` is zero. 899 | pub fn with_capacity(cap: usize) -> Vector { 900 | Self::try_with_capacity(cap).unwrap() 901 | } 902 | 903 | /// Creates an empty pre-allocated vector with a given storage capacity. 904 | /// 905 | /// Does not allocate memory if `cap` is zero. 906 | pub fn try_with_capacity(cap: usize) -> Result, AllocError> { 907 | Vector::try_with_capacity_in(cap, Global) 908 | } 909 | 910 | pub fn from_slice(data: &[T]) -> Self 911 | where 912 | T: Clone, 913 | { 914 | Vector { raw: RawVector::try_from_slice(&Global, data).unwrap(), allocator: Global } 915 | } 916 | 917 | /// Creates a vector with `n` clones of `elem`. 918 | pub fn from_elem(elem: T, n: usize) -> Vector 919 | where 920 | T: Clone, 921 | { 922 | Vector { raw: RawVector::try_from_elem(&Global, elem, n).unwrap(), allocator: Global } 923 | } 924 | } 925 | 926 | impl Vector { 927 | /// Creates an empty vector without allocating memory. 928 | pub fn new_in(allocator: A) -> Self { 929 | Self::try_with_capacity_in(0, allocator).unwrap() 930 | } 931 | 932 | /// Creates an empty pre-allocated vector with a given storage capacity. 933 | /// 934 | /// Does not allocate memory if `cap` is zero. 935 | pub fn with_capacity_in(cap: usize, allocator: A) -> Self { 936 | Self::try_with_capacity_in(cap, allocator).unwrap() 937 | } 938 | 939 | /// Creates an empty pre-allocated vector with a given storage capacity. 940 | /// 941 | /// Does not allocate memory if `cap` is zero. 942 | pub fn try_with_capacity_in(cap: usize, allocator: A) -> Result, AllocError> { 943 | let raw = RawVector::try_with_capacity(&allocator, cap)?; 944 | 945 | Ok(Vector { raw, allocator }) 946 | } 947 | 948 | 949 | #[inline(always)] 950 | /// Returns `true` if the vector contains no elements. 951 | pub fn is_empty(&self) -> bool { 952 | self.raw.is_empty() 953 | } 954 | 955 | #[inline(always)] 956 | /// Returns the number of elements in the vector, also referred to as its ‘length’. 957 | pub fn len(&self) -> usize { 958 | self.raw.len() 959 | } 960 | 961 | #[inline(always)] 962 | /// Returns the total number of elements the vector can hold without reallocating. 963 | pub fn capacity(&self) -> usize { 964 | self.raw.capacity() 965 | } 966 | 967 | /// Returns number of elements that can be added without reallocating. 968 | #[inline(always)] 969 | pub fn remaining_capacity(&self) -> usize { 970 | self.raw.remaining_capacity() 971 | } 972 | 973 | /// Returns a reference to the underlying allocator. 974 | #[inline(always)] 975 | pub fn allocator(&self) -> &A { 976 | &self.allocator 977 | } 978 | 979 | #[inline(always)] 980 | pub fn as_slice(&self) -> &[T] { 981 | self.raw.as_slice() 982 | } 983 | 984 | #[inline(always)] 985 | pub fn as_mut_slice(&mut self) -> &mut [T] { 986 | self.raw.as_mut_slice() 987 | } 988 | 989 | /// Clears the vector, removing all values. 990 | pub fn clear(&mut self) { 991 | unsafe { 992 | raw::clear(self.raw.data_ptr(), &mut self.raw.header) 993 | } 994 | } 995 | 996 | unsafe fn into_header_buffer(mut self) -> HeaderBuffer 997 | where 998 | R: RefCount, 999 | { 1000 | debug_assert!(self.raw.header.cap != 0); 1001 | unsafe { 1002 | let mut header = raw::header_from_data_ptr(self.raw.data); 1003 | 1004 | *header.as_mut() = raw::Header { 1005 | vec: VecHeader { 1006 | len: self.raw.header.len, 1007 | cap: self.raw.header.cap, 1008 | }, 1009 | ref_count: R::new(1), 1010 | allocator: ptr::read(&mut self.allocator), 1011 | }; 1012 | 1013 | mem::forget(self); 1014 | 1015 | HeaderBuffer::from_raw(header) 1016 | } 1017 | } 1018 | 1019 | /// Make this vector immutable. 1020 | /// 1021 | /// This operation is cheap, the underlying storage does not not need 1022 | /// to be reallocated. 1023 | #[inline] 1024 | pub fn into_shared(self) -> SharedVector 1025 | where 1026 | A: Allocator + Clone, 1027 | { 1028 | if self.raw.header.cap == 0 { 1029 | return SharedVector::try_with_capacity_in(0, self.allocator.clone()).unwrap(); 1030 | } 1031 | unsafe { 1032 | let inner = self.into_header_buffer::(); 1033 | SharedVector { inner } 1034 | } 1035 | } 1036 | 1037 | /// Make this vector immutable. 1038 | /// 1039 | /// This operation is cheap, the underlying storage does not not need 1040 | /// to be reallocated. 1041 | #[inline] 1042 | pub fn into_shared_atomic(self) -> AtomicSharedVector 1043 | where 1044 | A: Allocator + Clone, 1045 | { 1046 | if self.raw.header.cap == 0 { 1047 | return AtomicSharedVector::try_with_capacity_in(0, self.allocator.clone()).unwrap(); 1048 | } 1049 | unsafe { 1050 | let inner = self.into_header_buffer::(); 1051 | AtomicSharedVector { inner } 1052 | } 1053 | } 1054 | 1055 | /// Appends an element to the back of a collection. 1056 | /// 1057 | /// # Panics 1058 | /// 1059 | /// Panics if the new capacity exceeds `u32::MAX` bytes. 1060 | #[inline(always)] 1061 | pub fn push(&mut self, val: T) { 1062 | unsafe { 1063 | self.raw.push(&self.allocator, val); 1064 | } 1065 | } 1066 | 1067 | /// Appends an element if there is sufficient spare capacity, otherwise an error is returned 1068 | /// with the element. 1069 | /// 1070 | /// Unlike push this method will not reallocate when there’s insufficient capacity. 1071 | /// The caller should use reserve or try_reserve to ensure that there is enough capacity. 1072 | #[inline(always)] 1073 | pub fn push_within_capacity(&mut self, val: T) -> Result<(), T> { 1074 | self.raw.push_within_capacity(val) 1075 | } 1076 | 1077 | /// Removes the last element from the vector and returns it, or `None` if it is empty. 1078 | #[inline(always)] 1079 | pub fn pop(&mut self) -> Option { 1080 | self.raw.pop() 1081 | } 1082 | 1083 | /// Removes and returns the element at position `index` within the vector, 1084 | /// shifting all elements after it to the left. 1085 | /// 1086 | /// # Panics 1087 | /// 1088 | /// Panics if `index` is out of bounds. 1089 | /// 1090 | #[inline(always)] 1091 | pub fn remove(&mut self, index: usize) -> T { 1092 | self.raw.remove(index) 1093 | } 1094 | 1095 | /// Removes an element from the vector and returns it. 1096 | /// 1097 | /// The removed element is replaced by the last element of the vector. 1098 | /// 1099 | /// # Panics 1100 | /// 1101 | /// Panics if index is out of bounds. 1102 | #[inline(always)] 1103 | pub fn swap_remove(&mut self, idx: usize) -> T { 1104 | self.raw.swap_remove(idx) 1105 | } 1106 | 1107 | /// Inserts an element at position `index` within the vector, shifting all 1108 | /// elements after it to the right. 1109 | /// 1110 | /// # Panics 1111 | /// 1112 | /// Panics if `index > len`. 1113 | #[inline(always)] 1114 | pub fn insert(&mut self, index: usize, element: T) { 1115 | unsafe { self.raw.insert(&self.allocator, index, element) } 1116 | } 1117 | 1118 | /// Clones and appends the contents of the slice to the back of a collection. 1119 | #[inline(always)] 1120 | pub fn extend_from_slice(&mut self, data: &[T]) 1121 | where 1122 | T: Clone, 1123 | { 1124 | unsafe { 1125 | self.raw.extend_from_slice(&self.allocator, data) 1126 | } 1127 | } 1128 | 1129 | /// Moves all the elements of `other` into `self`, leaving `other` empty. 1130 | #[inline(always)] 1131 | pub fn append(&mut self, other: &mut Self) 1132 | where 1133 | T: Clone, 1134 | { 1135 | unsafe { 1136 | self.raw.append(&self.allocator, &mut other.raw) 1137 | } 1138 | } 1139 | 1140 | /// Appends the contents of an iterator to the back of a collection. 1141 | #[inline(always)] 1142 | pub fn extend(&mut self, data: impl IntoIterator) { 1143 | unsafe { 1144 | self.raw.extend(&self.allocator, data) 1145 | } 1146 | } 1147 | 1148 | /// Allocates a clone of this buffer. 1149 | #[inline(always)] 1150 | pub fn clone_buffer(&self) -> Self 1151 | where 1152 | T: Clone, 1153 | A: Clone, 1154 | { 1155 | Vector { 1156 | raw: self.raw.clone_buffer(&self.allocator), 1157 | allocator: self.allocator.clone(), 1158 | } 1159 | } 1160 | 1161 | /// Allocate a clone of this buffer with a different capacity 1162 | /// 1163 | /// The capacity must be at least as large as the buffer's length. 1164 | #[inline(always)] 1165 | pub fn clone_buffer_with_capacity(&self, cap: usize) -> Self 1166 | where 1167 | T: Clone, 1168 | A: Clone, 1169 | { 1170 | Vector { 1171 | raw: self.raw.clone_buffer_with_capacity(&self.allocator, cap), 1172 | allocator: self.allocator.clone(), 1173 | } 1174 | } 1175 | 1176 | #[inline(always)] 1177 | pub fn reserve(&mut self, additional: usize) { 1178 | unsafe { 1179 | self.raw.try_reserve(&self.allocator, additional).unwrap() 1180 | } 1181 | } 1182 | 1183 | #[inline(always)] 1184 | pub fn try_reserve(&mut self, additional: usize) -> Result<(), AllocError> { 1185 | unsafe { 1186 | self.raw.try_reserve(&self.allocator, additional) 1187 | } 1188 | } 1189 | 1190 | /// Reserves the minimum capacity for at least `additional` elements to be inserted in the given vector. 1191 | /// 1192 | /// Unlike `reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations. 1193 | /// After calling `try_reserve_exact`, capacity will be greater than or equal to `self.len() + additional` if 1194 | /// it returns `Ok(())`. 1195 | /// This will also allocate if the vector is not unique. 1196 | /// Does nothing if the capacity is already sufficient and the vector is unique. 1197 | /// 1198 | /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not 1199 | /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected. 1200 | pub fn reserve_exact(&mut self, additional: usize) 1201 | where 1202 | T: Clone, 1203 | { 1204 | self.try_reserve_exact(additional).unwrap(); 1205 | } 1206 | 1207 | /// Tries to reserve the minimum capacity for at least `additional` elements to be inserted in the given vector. 1208 | /// 1209 | /// Unlike `try_reserve`, this will not deliberately over-allocate to speculatively avoid frequent allocations. 1210 | /// After calling `reserve_exact`, capacity will be greater than or equal to `self.len() + additional`. 1211 | /// This will also allocate if the vector is not unique. 1212 | /// Does nothing if the capacity is already sufficient and the vector is unique. 1213 | /// 1214 | /// Note that the allocator may give the collection more space than it requests. Therefore, capacity can not 1215 | /// be relied upon to be precisely minimal. Prefer `try_reserve` if future insertions are expected. 1216 | pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), AllocError> { 1217 | unsafe { 1218 | self.raw.try_reserve_exact(&self.allocator, additional) 1219 | } 1220 | } 1221 | 1222 | /// Shrinks the capacity of the vector with a lower bound. 1223 | /// 1224 | /// The capacity will remain at least as large as both the length and the supplied value. 1225 | /// If the current capacity is less than the lower limit, this is a no-op. 1226 | #[inline(always)] 1227 | pub fn shrink_to(&mut self, min_capacity: usize) 1228 | where 1229 | T: Clone, 1230 | { 1231 | unsafe { 1232 | self.raw.shrink_to(&self.allocator, min_capacity) 1233 | } 1234 | } 1235 | 1236 | /// Shrinks the capacity of the vector as much as possible. 1237 | #[inline(always)] 1238 | pub fn shrink_to_fit(&mut self) 1239 | where 1240 | T: Clone, 1241 | { 1242 | unsafe { 1243 | self.raw.shrink_to_fit(&self.allocator) 1244 | } 1245 | } 1246 | 1247 | /// Removes the specified range from the vector in bulk, returning all 1248 | /// removed elements as an iterator. If the iterator is dropped before 1249 | /// being fully consumed, it drops the remaining removed elements. 1250 | /// 1251 | /// The returned iterator keeps a mutable borrow on the vector to optimize 1252 | /// its implementation. 1253 | /// 1254 | /// # Panics 1255 | /// 1256 | /// Panics if the starting point is greater than the end point or if 1257 | /// the end point is greater than the length of the vector. 1258 | /// 1259 | /// # Leaking 1260 | /// 1261 | /// If the returned iterator goes out of scope without being dropped (due to 1262 | /// [`mem::forget`], for example), the vector may have lost and leaked 1263 | /// elements arbitrarily, including elements outside the range. 1264 | /// 1265 | pub fn drain(&mut self, range: R) -> Drain<'_, T> 1266 | where 1267 | R: RangeBounds, 1268 | { 1269 | self.raw.drain(range) 1270 | } 1271 | 1272 | /// Creates a splicing iterator that replaces the specified range in the vector 1273 | /// with the given `replace_with` iterator and yields the removed items. 1274 | /// `replace_with` does not need to be the same length as `range`. 1275 | /// 1276 | /// `range` is removed even if the iterator is not consumed until the end. 1277 | /// 1278 | /// It is unspecified how many elements are removed from the vector 1279 | /// if the `Splice` value is leaked. 1280 | /// 1281 | /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped. 1282 | /// 1283 | /// This is optimal if: 1284 | /// 1285 | /// * The tail (elements in the vector after `range`) is empty, 1286 | /// * or `replace_with` yields fewer or equal elements than `range`’s length 1287 | /// * or the lower bound of its `size_hint()` is exact. 1288 | /// 1289 | /// Otherwise, a temporary vector is allocated and the tail is moved twice. 1290 | /// 1291 | /// # Panics 1292 | /// 1293 | /// Panics if the starting point is greater than the end point or if 1294 | /// the end point is greater than the length of the vector. 1295 | /// 1296 | pub fn splice( 1297 | &mut self, 1298 | range: R, 1299 | replace_with: I 1300 | ) -> Splice<'_, ::IntoIter, A> 1301 | where 1302 | R: RangeBounds, 1303 | I: IntoIterator, 1304 | { 1305 | unsafe { self.raw.splice(&self.allocator, range, replace_with) } 1306 | } 1307 | 1308 | /// Retains only the elements specified by the predicate. 1309 | /// 1310 | /// In other words, remove all elements `e` for which `f(&e)` returns `false`. 1311 | /// This method operates in place, visiting each element exactly once in the 1312 | /// original order, and preserves the order of the retained elements. 1313 | pub fn retain(&mut self, f: F) 1314 | where 1315 | F: FnMut(&T) -> bool, 1316 | { 1317 | self.raw.retain(f) 1318 | } 1319 | 1320 | /// Retains only the elements specified by the predicate, passing a mutable reference to it. 1321 | /// 1322 | /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`. 1323 | /// This method operates in place, visiting each element exactly once in the 1324 | /// original order, and preserves the order of the retained elements. 1325 | pub fn retain_mut(&mut self, f: F) 1326 | where 1327 | F: FnMut(&mut T) -> bool, 1328 | { 1329 | self.raw.retain_mut(f) 1330 | } 1331 | 1332 | #[inline(always)] 1333 | pub fn take(&mut self) -> Self 1334 | where 1335 | A: Clone, 1336 | { 1337 | let other = Vector { 1338 | raw: self.raw.take(), 1339 | allocator: self.allocator.clone(), 1340 | }; 1341 | 1342 | other 1343 | } 1344 | } 1345 | 1346 | impl Drop for Vector { 1347 | fn drop(&mut self) { 1348 | unsafe { 1349 | self.raw.deallocate(&self.allocator) 1350 | } 1351 | } 1352 | } 1353 | 1354 | impl Clone for Vector { 1355 | fn clone(&self) -> Self { 1356 | self.clone_buffer() 1357 | } 1358 | } 1359 | 1360 | impl, A: Allocator> PartialEq> for Vector { 1361 | fn eq(&self, other: &Self) -> bool { 1362 | self.as_slice() == other.as_slice() 1363 | } 1364 | } 1365 | 1366 | impl, A: Allocator> PartialEq<&[T]> for Vector { 1367 | fn eq(&self, other: &&[T]) -> bool { 1368 | self.as_slice() == *other 1369 | } 1370 | } 1371 | 1372 | impl AsRef<[T]> for Vector { 1373 | fn as_ref(&self) -> &[T] { 1374 | self.as_slice() 1375 | } 1376 | } 1377 | 1378 | impl AsMut<[T]> for Vector { 1379 | fn as_mut(&mut self) -> &mut [T] { 1380 | self.as_mut_slice() 1381 | } 1382 | } 1383 | 1384 | impl Default for Vector { 1385 | fn default() -> Self { 1386 | Self::new() 1387 | } 1388 | } 1389 | 1390 | impl<'a, T, A: Allocator> IntoIterator for &'a Vector { 1391 | type Item = &'a T; 1392 | type IntoIter = core::slice::Iter<'a, T>; 1393 | fn into_iter(self) -> core::slice::Iter<'a, T> { 1394 | self.as_slice().iter() 1395 | } 1396 | } 1397 | 1398 | impl<'a, T, A: Allocator> IntoIterator for &'a mut Vector { 1399 | type Item = &'a mut T; 1400 | type IntoIter = core::slice::IterMut<'a, T>; 1401 | fn into_iter(self) -> core::slice::IterMut<'a, T> { 1402 | self.as_mut_slice().iter_mut() 1403 | } 1404 | } 1405 | 1406 | impl Index for Vector 1407 | where 1408 | I: core::slice::SliceIndex<[T]>, 1409 | { 1410 | type Output = >::Output; 1411 | fn index(&self, index: I) -> &Self::Output { 1412 | self.as_slice().index(index) 1413 | } 1414 | } 1415 | 1416 | impl IndexMut for Vector 1417 | where 1418 | I: core::slice::SliceIndex<[T]>, 1419 | { 1420 | fn index_mut(&mut self, index: I) -> &mut Self::Output { 1421 | self.as_mut_slice().index_mut(index) 1422 | } 1423 | } 1424 | 1425 | impl Deref for Vector { 1426 | type Target = [T]; 1427 | fn deref(&self) -> &[T] { 1428 | self.as_slice() 1429 | } 1430 | } 1431 | 1432 | impl DerefMut for Vector { 1433 | fn deref_mut(&mut self) -> &mut [T] { 1434 | self.as_mut_slice() 1435 | } 1436 | } 1437 | 1438 | impl Debug for Vector { 1439 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { 1440 | self.as_slice().fmt(f) 1441 | } 1442 | } 1443 | 1444 | impl From> for Vector { 1445 | fn from(shared: SharedVector) -> Self { 1446 | shared.into_unique() 1447 | } 1448 | } 1449 | 1450 | impl From> for Vector { 1451 | fn from(shared: AtomicSharedVector) -> Self { 1452 | shared.into_unique() 1453 | } 1454 | } 1455 | 1456 | impl core::hash::Hash for Vector { 1457 | fn hash(&self, state: &mut H) where H: core::hash::Hasher { 1458 | self.as_slice().hash(state) 1459 | } 1460 | } 1461 | 1462 | #[test] 1463 | fn bump_alloc() { 1464 | use blink_alloc::BlinkAlloc; 1465 | 1466 | let allocator = BlinkAlloc::new(); 1467 | 1468 | { 1469 | let mut v1: Vector = Vector::try_with_capacity_in(4, &allocator).unwrap(); 1470 | v1.push(0); 1471 | v1.push(1); 1472 | v1.push(2); 1473 | assert_eq!(v1.capacity(), 4); 1474 | assert_eq!(v1.as_slice(), &[0, 1, 2]); 1475 | 1476 | // let mut v2 = crate::vector!(@ &allocator [10, 11]); 1477 | let mut v2 = crate::vector!([10, 11] in &allocator); 1478 | assert_eq!(v2.capacity(), 2); 1479 | 1480 | assert_eq!(v2.as_slice(), &[10, 11]); 1481 | 1482 | v1.push(3); 1483 | v1.push(4); 1484 | 1485 | assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4]); 1486 | 1487 | assert!(v1.capacity() > 4); 1488 | 1489 | v2.push(12); 1490 | v2.push(13); 1491 | v2.push(14); 1492 | 1493 | let v2 = v2.into_shared(); 1494 | 1495 | assert_eq!(v1.as_slice(), &[0, 1, 2, 3, 4]); 1496 | assert_eq!(v2.as_slice(), &[10, 11, 12, 13, 14]); 1497 | } 1498 | } 1499 | 1500 | #[test] 1501 | fn basic_unique() { 1502 | fn num(val: u32) -> Box { 1503 | Box::new(val) 1504 | } 1505 | 1506 | let mut a = Vector::with_capacity(256); 1507 | 1508 | a.push(num(0)); 1509 | a.push(num(1)); 1510 | a.push(num(2)); 1511 | 1512 | let a = a.into_shared(); 1513 | 1514 | assert_eq!(a.len(), 3); 1515 | 1516 | assert_eq!(a.as_slice(), &[num(0), num(1), num(2)]); 1517 | 1518 | assert!(a.is_unique()); 1519 | 1520 | let b = Vector::from_slice(&[num(0), num(1), num(2), num(3), num(4)]); 1521 | 1522 | assert_eq!(b.as_slice(), &[num(0), num(1), num(2), num(3), num(4)]); 1523 | 1524 | let c = a.clone_buffer(); 1525 | assert!(!c.ptr_eq(&a)); 1526 | 1527 | let a2 = a.new_ref(); 1528 | assert!(a2.ptr_eq(&a)); 1529 | assert!(!a.is_unique()); 1530 | assert!(!a2.is_unique()); 1531 | 1532 | mem::drop(a2); 1533 | 1534 | assert!(a.is_unique()); 1535 | 1536 | let _ = c.clone_buffer(); 1537 | let _ = b.clone_buffer(); 1538 | 1539 | let mut d = Vector::with_capacity(64); 1540 | d.extend_from_slice(&[num(0), num(1), num(2)]); 1541 | d.extend_from_slice(&[]); 1542 | d.extend_from_slice(&[num(3), num(4)]); 1543 | 1544 | assert_eq!(d.as_slice(), &[num(0), num(1), num(2), num(3), num(4)]); 1545 | } 1546 | 1547 | #[test] 1548 | fn shrink() { 1549 | let mut v: Vector = Vector::with_capacity(32); 1550 | v.shrink_to(8); 1551 | } 1552 | 1553 | #[test] 1554 | fn zst() { 1555 | let mut v = Vector::new(); 1556 | v.push(()); 1557 | v.push(()); 1558 | v.push(()); 1559 | v.push(()); 1560 | 1561 | assert_eq!(v.len(), 4); 1562 | } 1563 | 1564 | #[test] 1565 | fn dyn_allocator() { 1566 | let allocator: &dyn Allocator = &Global; 1567 | let mut v = crate::vector!([1u32, 2, 3] in allocator); 1568 | 1569 | v.push(4); 1570 | 1571 | assert_eq!(&v[..], &[1, 2, 3, 4]); 1572 | } 1573 | 1574 | #[test] 1575 | fn borrowd_dyn_alloc() { 1576 | struct DataStructure<'a> { 1577 | data: Vector, 1578 | } 1579 | 1580 | impl DataStructure<'static> { 1581 | fn new() -> DataStructure<'static> { 1582 | DataStructure { 1583 | data: Vector::new_in(&Global as &'static dyn Allocator) 1584 | } 1585 | } 1586 | } 1587 | 1588 | impl<'a> DataStructure<'a> { 1589 | fn new_in(allocator: &'a dyn Allocator) -> DataStructure<'a> { 1590 | DataStructure { data: Vector::new_in(allocator) } 1591 | } 1592 | 1593 | fn push(&mut self, val: u32) { 1594 | self.data.push(val); 1595 | } 1596 | } 1597 | 1598 | let mut ds1 = DataStructure::new(); 1599 | ds1.push(1); 1600 | 1601 | let alloc = Global; 1602 | let mut ds2 = DataStructure::new_in(&alloc); 1603 | ds2.push(2); 1604 | 1605 | } 1606 | 1607 | #[test] 1608 | fn splice1() { 1609 | let mut vec = Vector::new(); 1610 | vec.splice(0..0, vec![Box::new(1); 5].into_iter()); 1611 | vec.splice(0..0, vec![Box::new(2); 5].into_iter()); 1612 | } 1613 | 1614 | #[test] 1615 | fn drain1() { 1616 | let mut vectors: [Vector>; 4] = [ 1617 | Vector::new(), 1618 | Vector::new(), 1619 | Vector::new(), 1620 | Vector::new(), 1621 | ]; 1622 | vectors[0].shrink_to(3906369431118283232); 1623 | vectors[2].extend_from_slice(&[Box::new(1), Box::new(2), Box::new(3)]); 1624 | let vec = &mut vectors[2]; 1625 | let len = vec.len(); 1626 | let start = if len > 0 { 16059518370053021184 % len } else { 0 }; 1627 | let end = 16059518370053021185.min(len); 1628 | vectors[2].drain(start..end); 1629 | } 1630 | --------------------------------------------------------------------------------