├── .gitignore ├── .rustfmt.toml ├── test-nostd ├── Cargo.toml └── src │ └── lib.rs ├── test-sval ├── Cargo.toml └── src │ └── lib.rs ├── test-serde ├── Cargo.toml └── src │ └── lib.rs ├── tests ├── macros_full_path.rs ├── tests.rs └── equivalent_trait.rs ├── src ├── rayon │ ├── mod.rs │ └── map.rs ├── sval.rs ├── arbitrary.rs ├── util.rs ├── set │ ├── mutable.rs │ ├── slice.rs │ └── iter.rs ├── inner │ ├── extract.rs │ └── entry.rs ├── borsh.rs ├── map │ ├── serde_seq.rs │ ├── mutable.rs │ └── entry.rs ├── serde.rs ├── macros.rs └── lib.rs ├── LICENSE-MIT ├── Cargo.toml ├── README.md ├── benches ├── faststring.rs └── bench.rs ├── .github └── workflows │ └── ci.yml ├── LICENSE-APACHE └── RELEASES.md /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | -------------------------------------------------------------------------------- /test-nostd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-nostd" 3 | version = "0.1.0" 4 | publish = false 5 | edition = "2021" 6 | 7 | [dependencies.indexmap] 8 | path = ".." 9 | default-features = false 10 | features = ["serde"] 11 | 12 | [dev-dependencies] 13 | -------------------------------------------------------------------------------- /test-sval/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-sval" 3 | version = "0.1.0" 4 | publish = false 5 | edition = "2021" 6 | 7 | [dependencies] 8 | 9 | [dev-dependencies] 10 | fnv = "1.0" 11 | indexmap = { path = "..", features = ["sval"] } 12 | sval = { version = "2", features = ["derive"] } 13 | sval_test = "2" 14 | -------------------------------------------------------------------------------- /test-serde/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-serde" 3 | version = "0.1.0" 4 | publish = false 5 | edition = "2021" 6 | 7 | [dependencies] 8 | 9 | [dev-dependencies] 10 | fnv = "1.0" 11 | indexmap = { path = "..", features = ["serde"] } 12 | serde = { version = "1.0.99", features = ["derive"] } 13 | serde_test = "1.0.99" 14 | -------------------------------------------------------------------------------- /tests/macros_full_path.rs: -------------------------------------------------------------------------------- 1 | #[test] 2 | fn test_create_map() { 3 | let _m = indexmap::indexmap! { 4 | 1 => 2, 5 | 7 => 1, 6 | 2 => 2, 7 | 3 => 3, 8 | }; 9 | } 10 | 11 | #[test] 12 | fn test_create_set() { 13 | let _s = indexmap::indexset! { 14 | 1, 15 | 7, 16 | 2, 17 | 3, 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /src/rayon/mod.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "rayon")))] 2 | 3 | use rayon::prelude::*; 4 | 5 | use alloc::collections::LinkedList; 6 | use alloc::vec::Vec; 7 | 8 | pub mod map; 9 | pub mod set; 10 | 11 | // This form of intermediate collection is also how Rayon collects `HashMap`. 12 | // Note that the order will also be preserved! 
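// (How this preserves order: `collect_vec_list` has each parallel job fill its
// own `Vec`, and those `Vec`s are linked in the source iterator's order, so a
// later sequential flatten rebuilds the original order without one huge
// contiguous allocation.)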
13 | fn collect(iter: I) -> LinkedList> { 14 | iter.into_par_iter().collect_vec_list() 15 | } 16 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | use indexmap::{indexmap, indexset}; 2 | 3 | #[test] 4 | fn test_sort() { 5 | let m = indexmap! { 6 | 1 => 2, 7 | 7 => 1, 8 | 2 => 2, 9 | 3 => 3, 10 | }; 11 | 12 | itertools::assert_equal( 13 | m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)), 14 | vec![(7, 1), (1, 2), (2, 2), (3, 3)], 15 | ); 16 | } 17 | 18 | #[test] 19 | fn test_sort_set() { 20 | let s = indexset! { 21 | 1, 22 | 7, 23 | 2, 24 | 3, 25 | }; 26 | 27 | itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]); 28 | } 29 | -------------------------------------------------------------------------------- /test-nostd/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | use core::hash::BuildHasherDefault; 4 | use core::hash::Hasher; 5 | 6 | use indexmap::IndexMap; 7 | use indexmap::IndexSet; 8 | 9 | #[derive(Default)] 10 | struct BadHasher(u64); 11 | 12 | impl Hasher for BadHasher { 13 | fn finish(&self) -> u64 { 14 | self.0 15 | } 16 | fn write(&mut self, bytes: &[u8]) { 17 | for &byte in bytes { 18 | self.0 += byte as u64 19 | } 20 | } 21 | } 22 | 23 | type Map = IndexMap>; 24 | type Set = IndexSet>; 25 | 26 | pub fn test_compile() { 27 | let mut map = Map::default(); 28 | map.insert(1, 1); 29 | map.insert(2, 4); 30 | for (_, _) in map.iter() {} 31 | 32 | let _map2 = Map::from_iter(Some((1, 1))); 33 | 34 | let mut set = Set::default(); 35 | set.insert("a"); 36 | } 37 | -------------------------------------------------------------------------------- /src/sval.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "sval")))] 2 | 3 | use crate::{IndexMap, IndexSet}; 4 | use sval::{Stream, Value}; 5 | 6 | impl Value for IndexMap { 7 | fn stream<'sval, ST: Stream<'sval> + ?Sized>(&'sval self, stream: &mut ST) -> sval::Result { 8 | stream.map_begin(Some(self.len()))?; 9 | 10 | for (k, v) in self { 11 | stream.map_key_begin()?; 12 | stream.value(k)?; 13 | stream.map_key_end()?; 14 | 15 | stream.map_value_begin()?; 16 | stream.value(v)?; 17 | stream.map_value_end()?; 18 | } 19 | 20 | stream.map_end() 21 | } 22 | } 23 | 24 | impl Value for IndexSet { 25 | fn stream<'sval, ST: Stream<'sval> + ?Sized>(&'sval self, stream: &mut ST) -> sval::Result { 26 | stream.seq_begin(Some(self.len()))?; 27 | 28 | for value in self { 29 | stream.seq_value_begin()?; 30 | stream.value(value)?; 31 | stream.seq_value_end()?; 32 | } 33 | 34 | stream.seq_end() 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016--2017 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all 
copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /tests/equivalent_trait.rs: -------------------------------------------------------------------------------- 1 | use indexmap::indexmap; 2 | use indexmap::Equivalent; 3 | 4 | use std::hash::Hash; 5 | 6 | #[derive(Debug, Hash)] 7 | pub struct Pair(pub A, pub B); 8 | 9 | impl PartialEq<(A, B)> for Pair 10 | where 11 | C: PartialEq, 12 | D: PartialEq, 13 | { 14 | fn eq(&self, rhs: &(A, B)) -> bool { 15 | self.0 == rhs.0 && self.1 == rhs.1 16 | } 17 | } 18 | 19 | impl Equivalent for Pair 20 | where 21 | Pair: PartialEq, 22 | A: Hash + Eq, 23 | B: Hash + Eq, 24 | { 25 | fn equivalent(&self, other: &X) -> bool { 26 | *self == *other 27 | } 28 | } 29 | 30 | #[test] 31 | fn test_lookup() { 32 | let s = String::from; 33 | let map = indexmap! { 34 | (s("a"), s("b")) => 1, 35 | (s("a"), s("x")) => 2, 36 | }; 37 | 38 | assert!(map.contains_key(&Pair("a", "b"))); 39 | assert!(!map.contains_key(&Pair("b", "a"))); 40 | } 41 | 42 | #[test] 43 | fn test_string_str() { 44 | let s = String::from; 45 | let mut map = indexmap! { 46 | s("a") => 1, s("b") => 2, 47 | s("x") => 3, s("y") => 4, 48 | }; 49 | 50 | assert!(map.contains_key("a")); 51 | assert!(!map.contains_key("z")); 52 | assert_eq!(map.swap_remove("b"), Some(2)); 53 | } 54 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "indexmap" 3 | edition = "2021" 4 | version = "2.12.1" 5 | documentation = "https://docs.rs/indexmap/" 6 | repository = "https://github.com/indexmap-rs/indexmap" 7 | license = "Apache-2.0 OR MIT" 8 | description = "A hash table with consistent order and fast iteration." 9 | keywords = ["hashmap", "no_std"] 10 | categories = ["data-structures", "no-std"] 11 | rust-version = "1.82" 12 | 13 | [lib] 14 | bench = false 15 | 16 | [dependencies] 17 | equivalent = { version = "1.0", default-features = false } 18 | hashbrown = { version = "0.16.1", default-features = false } 19 | 20 | arbitrary = { version = "1.0", optional = true, default-features = false } 21 | quickcheck = { version = "1.0", optional = true, default-features = false } 22 | serde_core = { version = "1.0.220", optional = true, default-features = false } 23 | rayon = { version = "1.9", optional = true } 24 | sval = { version = "2", optional = true, default-features = false } 25 | 26 | # deprecated: use borsh's "indexmap" feature instead. 27 | borsh = { version = "1.2", optional = true, default-features = false } 28 | 29 | # serde v1.0.220 is the first version that released with `serde_core`. 30 | # This is required to avoid conflict with other `serde` users which may require an older version. 
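# (Note: `cfg(any())` matches no target at all, so the `serde` entry below is
# never actually compiled; it only pins a compatible version during dependency
# resolution, while the code itself goes through `serde_core`.)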
31 | [target.'cfg(any())'.dependencies] 32 | serde = { version = "1.0.220", default-features = false, optional = true } 33 | 34 | [dev-dependencies] 35 | itertools = "0.14" 36 | fastrand = { version = "2", default-features = false } 37 | quickcheck = { version = "1.0", default-features = false } 38 | fnv = "1.0" 39 | serde = { version = "1.0", default-features = false, features = ["derive"] } 40 | 41 | [features] 42 | default = ["std"] 43 | std = [] 44 | serde = ["dep:serde_core", "dep:serde"] 45 | 46 | # for testing only, of course 47 | test_debug = [] 48 | 49 | [profile.bench] 50 | debug = true 51 | 52 | [package.metadata.release] 53 | allow-branch = ["main"] 54 | sign-tag = true 55 | tag-name = "{{version}}" 56 | 57 | [package.metadata.docs.rs] 58 | features = ["arbitrary", "quickcheck", "serde", "borsh", "rayon", "sval"] 59 | rustdoc-args = ["--cfg", "docsrs"] 60 | 61 | [workspace] 62 | members = ["test-nostd", "test-serde", "test-sval"] 63 | 64 | [lints.rust] 65 | private-bounds = "deny" 66 | private-interfaces = "deny" 67 | unnameable-types = "deny" 68 | unreachable-pub = "deny" 69 | 70 | # We *mostly* avoid unsafe code, but there are a few fine-grained cases allowed 71 | unsafe-code = "deny" 72 | 73 | rust-2018-idioms = "warn" 74 | 75 | [lints.clippy] 76 | style = "allow" 77 | -------------------------------------------------------------------------------- /src/arbitrary.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "arbitrary")] 2 | #[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] 3 | mod impl_arbitrary { 4 | use crate::{IndexMap, IndexSet}; 5 | use arbitrary::{Arbitrary, Result, Unstructured}; 6 | use core::hash::{BuildHasher, Hash}; 7 | 8 | impl<'a, K, V, S> Arbitrary<'a> for IndexMap 9 | where 10 | K: Arbitrary<'a> + Hash + Eq, 11 | V: Arbitrary<'a>, 12 | S: BuildHasher + Default, 13 | { 14 | fn arbitrary(u: &mut Unstructured<'a>) -> Result { 15 | u.arbitrary_iter()?.collect() 16 | } 17 | 18 | fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { 19 | u.arbitrary_take_rest_iter()?.collect() 20 | } 21 | } 22 | 23 | impl<'a, T, S> Arbitrary<'a> for IndexSet 24 | where 25 | T: Arbitrary<'a> + Hash + Eq, 26 | S: BuildHasher + Default, 27 | { 28 | fn arbitrary(u: &mut Unstructured<'a>) -> Result { 29 | u.arbitrary_iter()?.collect() 30 | } 31 | 32 | fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { 33 | u.arbitrary_take_rest_iter()?.collect() 34 | } 35 | } 36 | } 37 | 38 | #[cfg(feature = "quickcheck")] 39 | #[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] 40 | mod impl_quickcheck { 41 | use crate::{IndexMap, IndexSet}; 42 | use alloc::boxed::Box; 43 | use alloc::vec::Vec; 44 | use core::hash::{BuildHasher, Hash}; 45 | use quickcheck::{Arbitrary, Gen}; 46 | 47 | impl Arbitrary for IndexMap 48 | where 49 | K: Arbitrary + Hash + Eq, 50 | V: Arbitrary, 51 | S: BuildHasher + Default + Clone + 'static, 52 | { 53 | fn arbitrary(g: &mut Gen) -> Self { 54 | Self::from_iter(Vec::arbitrary(g)) 55 | } 56 | 57 | fn shrink(&self) -> Box> { 58 | let vec = Vec::from_iter(self.clone()); 59 | Box::new(vec.shrink().map(Self::from_iter)) 60 | } 61 | } 62 | 63 | impl Arbitrary for IndexSet 64 | where 65 | T: Arbitrary + Hash + Eq, 66 | S: BuildHasher + Default + Clone + 'static, 67 | { 68 | fn arbitrary(g: &mut Gen) -> Self { 69 | Self::from_iter(Vec::arbitrary(g)) 70 | } 71 | 72 | fn shrink(&self) -> Box> { 73 | let vec = Vec::from_iter(self.clone()); 74 | Box::new(vec.shrink().map(Self::from_iter)) 75 | } 76 | } 77 | } 78 | 
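With either feature enabled, the impls above let `IndexMap` and `IndexSet` be generated directly as inputs for fuzzing or property tests. A minimal downstream sketch (the test name and property are illustrative, assuming a crate that enables indexmap's `quickcheck` feature):

```rust
use indexmap::IndexSet;
use quickcheck::quickcheck;

quickcheck! {
    // Rebuilding from an in-order Vec must preserve both membership and order.
    fn roundtrip_preserves_order(set: IndexSet<u16>) -> bool {
        let vec: Vec<u16> = set.iter().copied().collect();
        let rebuilt: IndexSet<u16> = vec.iter().copied().collect();
        rebuilt == set && rebuilt.iter().eq(vec.iter())
    }
}
```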
-------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Bound, Range, RangeBounds}; 2 | 3 | pub(crate) fn third(t: (A, B, C)) -> C { 4 | t.2 5 | } 6 | 7 | #[track_caller] 8 | pub(crate) fn simplify_range(range: R, len: usize) -> Range 9 | where 10 | R: RangeBounds, 11 | { 12 | let start = match range.start_bound() { 13 | Bound::Unbounded => 0, 14 | Bound::Included(&i) if i <= len => i, 15 | Bound::Excluded(&i) if i < len => i + 1, 16 | Bound::Included(i) | Bound::Excluded(i) => { 17 | panic!("range start index {i} out of range for slice of length {len}") 18 | } 19 | }; 20 | let end = match range.end_bound() { 21 | Bound::Unbounded => len, 22 | Bound::Excluded(&i) if i <= len => i, 23 | Bound::Included(&i) if i < len => i + 1, 24 | Bound::Included(i) | Bound::Excluded(i) => { 25 | panic!("range end index {i} out of range for slice of length {len}") 26 | } 27 | }; 28 | if start > end { 29 | panic!( 30 | "range start index {:?} should be <= range end index {:?}", 31 | range.start_bound(), 32 | range.end_bound() 33 | ); 34 | } 35 | start..end 36 | } 37 | 38 | pub(crate) fn try_simplify_range(range: R, len: usize) -> Option> 39 | where 40 | R: RangeBounds, 41 | { 42 | let start = match range.start_bound() { 43 | Bound::Unbounded => 0, 44 | Bound::Included(&i) if i <= len => i, 45 | Bound::Excluded(&i) if i < len => i + 1, 46 | _ => return None, 47 | }; 48 | let end = match range.end_bound() { 49 | Bound::Unbounded => len, 50 | Bound::Excluded(&i) if i <= len => i, 51 | Bound::Included(&i) if i < len => i + 1, 52 | _ => return None, 53 | }; 54 | if start > end { 55 | return None; 56 | } 57 | Some(start..end) 58 | } 59 | 60 | // Generic slice equality -- copied from the standard library but adding a custom comparator, 61 | // allowing for our `Bucket` wrapper on either or both sides. 62 | pub(crate) fn slice_eq(left: &[T], right: &[U], eq: impl Fn(&T, &U) -> bool) -> bool { 63 | if left.len() != right.len() { 64 | return false; 65 | } 66 | 67 | // Implemented as explicit indexing rather 68 | // than zipped iterators for performance reasons. 69 | // See PR https://github.com/rust-lang/rust/pull/116846 70 | for i in 0..left.len() { 71 | // bound checks are optimized away 72 | if !eq(&left[i], &right[i]) { 73 | return false; 74 | } 75 | } 76 | 77 | true 78 | } 79 | -------------------------------------------------------------------------------- /src/set/mutable.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | 3 | use super::{Equivalent, IndexSet}; 4 | use crate::map::MutableKeys; 5 | 6 | /// Opt-in mutable access to [`IndexSet`] values. 7 | /// 8 | /// These methods expose `&mut T`, mutable references to the value as it is stored 9 | /// in the set. 10 | /// You are allowed to modify the values in the set **if the modification 11 | /// does not change the value's hash and equality**. 12 | /// 13 | /// If values are modified erroneously, you can no longer look them up. 14 | /// This is sound (memory safe) but a logical error hazard (just like 15 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 16 | /// 17 | /// `use` this trait to enable its methods for `IndexSet`. 18 | /// 19 | /// This trait is sealed and cannot be implemented for types outside this crate. 
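/// A minimal sketch (the `Counted` type below is illustrative, not part of the
/// crate); only `name` feeds `Hash`/`Eq`, so bumping `hits` in place is safe:
///
/// ```
/// use core::hash::{Hash, Hasher};
/// use indexmap::set::MutableValues;
/// use indexmap::IndexSet;
///
/// struct Counted { name: &'static str, hits: u32 }
/// impl Hash for Counted {
///     fn hash<H: Hasher>(&self, h: &mut H) { self.name.hash(h) }
/// }
/// impl PartialEq for Counted {
///     fn eq(&self, other: &Self) -> bool { self.name == other.name }
/// }
/// impl Eq for Counted {}
///
/// let mut set = IndexSet::new();
/// set.insert(Counted { name: "a", hits: 0 });
/// // Look up by an equivalent probe value and mutate the stored entry.
/// if let Some((_, v)) = set.get_full_mut2(&Counted { name: "a", hits: 99 }) {
///     v.hits += 1; // hash and equality are untouched
/// }
/// assert_eq!(set.get_index(0).unwrap().hits, 1);
/// ```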
20 | #[expect(private_bounds)] 21 | pub trait MutableValues: Sealed { 22 | type Value; 23 | 24 | /// Return item index and mutable reference to the value 25 | /// 26 | /// Computes in **O(1)** time (average). 27 | fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut Self::Value)> 28 | where 29 | Q: ?Sized + Hash + Equivalent; 30 | 31 | /// Return mutable reference to the value at an index. 32 | /// 33 | /// Valid indices are `0 <= index < self.len()`. 34 | /// 35 | /// Computes in **O(1)** time. 36 | fn get_index_mut2(&mut self, index: usize) -> Option<&mut Self::Value>; 37 | 38 | /// Scan through each value in the set and keep those where the 39 | /// closure `keep` returns `true`. 40 | /// 41 | /// The values are visited in order, and remaining values keep their order. 42 | /// 43 | /// Computes in **O(n)** time (average). 44 | fn retain2(&mut self, keep: F) 45 | where 46 | F: FnMut(&mut Self::Value) -> bool; 47 | } 48 | 49 | /// Opt-in mutable access to [`IndexSet`] values. 50 | /// 51 | /// See [`MutableValues`] for more information. 52 | impl MutableValues for IndexSet 53 | where 54 | S: BuildHasher, 55 | { 56 | type Value = T; 57 | 58 | fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut T)> 59 | where 60 | Q: ?Sized + Hash + Equivalent, 61 | { 62 | match self.map.get_full_mut2(value) { 63 | Some((index, value, ())) => Some((index, value)), 64 | None => None, 65 | } 66 | } 67 | 68 | fn get_index_mut2(&mut self, index: usize) -> Option<&mut T> { 69 | match self.map.get_index_mut2(index) { 70 | Some((value, ())) => Some(value), 71 | None => None, 72 | } 73 | } 74 | 75 | fn retain2(&mut self, mut keep: F) 76 | where 77 | F: FnMut(&mut T) -> bool, 78 | { 79 | self.map.retain2(move |value, ()| keep(value)); 80 | } 81 | } 82 | 83 | trait Sealed {} 84 | 85 | impl Sealed for IndexSet {} 86 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # indexmap 2 | 3 | [![build status](https://github.com/indexmap-rs/indexmap/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/indexmap-rs/indexmap/actions) 4 | [![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap) 5 | [![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap) 6 | [![rustc](https://img.shields.io/badge/rust-1.82%2B-orange.svg)](https://img.shields.io/badge/rust-1.82%2B-orange.svg) 7 | 8 | A pure-Rust hash table which preserves (in a limited sense) insertion order. 9 | 10 | This crate implements compact map and set data-structures, 11 | where the iteration order of the keys is independent from their hash or 12 | value. It preserves insertion order (except after removals), and it 13 | allows lookup of entries by either hash table key or numerical index. 14 | 15 | Note: this crate was originally released under the name `ordermap`, 16 | but it was renamed to `indexmap` to better reflect its features. 17 | The [`ordermap`](https://crates.io/crates/ordermap) crate now exists 18 | as a wrapper over `indexmap` with stronger ordering properties. 19 | 20 | # Background 21 | 22 | This was inspired by Python 3.6's new dict implementation (which remembers 23 | the insertion order and is fast to iterate, and is compact in memory). 24 | 25 | Some of those features were translated to Rust, and some were not. 
The result 26 | was indexmap, a hash table that has following properties: 27 | 28 | - Order is **independent of hash function** and hash values of keys. 29 | - Fast to iterate. 30 | - Indexed in compact space. 31 | - Preserves insertion order **as long** as you don't call `.remove()`, 32 | `.swap_remove()`, or other methods that explicitly change order. 33 | The alternate `.shift_remove()` does preserve relative order. 34 | - Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does. 35 | 36 | ## Performance 37 | 38 | `IndexMap` derives a couple of performance facts directly from how it is constructed, 39 | which is roughly: 40 | 41 | > A raw hash table of key-value indices, and a vector of key-value pairs. 42 | 43 | - Iteration is very fast since it is on the dense key-values. 44 | - Removal is fast since it moves memory areas only in the table, 45 | and uses a single swap in the vector. 46 | - Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are 47 | densely stored. Lookup also is slow-ish since the actual key-value pairs are stored 48 | separately. (Visible when cpu caches size is limiting.) 49 | 50 | - In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and 51 | the performance was roughly on par across the whole workload. 52 | - If you want the properties of `IndexMap`, or its strongest performance points 53 | fits your workload, it might be the best hash table implementation. 54 | 55 | [PR45282]: https://github.com/rust-lang/rust/pull/45282 56 | 57 | # Recent Changes 58 | 59 | See [RELEASES.md](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md). 60 | -------------------------------------------------------------------------------- /test-sval/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg(test)] 2 | 3 | use fnv::FnvBuildHasher; 4 | use indexmap::{indexmap, indexset, IndexMap, IndexSet}; 5 | use sval_test::{assert_tokens, Token}; 6 | 7 | #[test] 8 | fn test_sval_map() { 9 | let map = indexmap! { 1 => 2, 3 => 4 }; 10 | assert_tokens( 11 | &map, 12 | &[ 13 | Token::MapBegin(Some(2)), 14 | Token::MapKeyBegin, 15 | Token::I32(1), 16 | Token::MapKeyEnd, 17 | Token::MapValueBegin, 18 | Token::I32(2), 19 | Token::MapValueEnd, 20 | Token::MapKeyBegin, 21 | Token::I32(3), 22 | Token::MapKeyEnd, 23 | Token::MapValueBegin, 24 | Token::I32(4), 25 | Token::MapValueEnd, 26 | Token::MapEnd, 27 | ], 28 | ); 29 | } 30 | 31 | #[test] 32 | fn test_sval_set() { 33 | let set = indexset! 
{ 1, 2, 3, 4 }; 34 | assert_tokens( 35 | &set, 36 | &[ 37 | Token::SeqBegin(Some(4)), 38 | Token::SeqValueBegin, 39 | Token::I32(1), 40 | Token::SeqValueEnd, 41 | Token::SeqValueBegin, 42 | Token::I32(2), 43 | Token::SeqValueEnd, 44 | Token::SeqValueBegin, 45 | Token::I32(3), 46 | Token::SeqValueEnd, 47 | Token::SeqValueBegin, 48 | Token::I32(4), 49 | Token::SeqValueEnd, 50 | Token::SeqEnd, 51 | ], 52 | ); 53 | } 54 | 55 | #[test] 56 | fn test_sval_map_fnv_hasher() { 57 | let mut map: IndexMap = Default::default(); 58 | map.insert(1, 2); 59 | map.insert(3, 4); 60 | assert_tokens( 61 | &map, 62 | &[ 63 | Token::MapBegin(Some(2)), 64 | Token::MapKeyBegin, 65 | Token::I32(1), 66 | Token::MapKeyEnd, 67 | Token::MapValueBegin, 68 | Token::I32(2), 69 | Token::MapValueEnd, 70 | Token::MapKeyBegin, 71 | Token::I32(3), 72 | Token::MapKeyEnd, 73 | Token::MapValueBegin, 74 | Token::I32(4), 75 | Token::MapValueEnd, 76 | Token::MapEnd, 77 | ], 78 | ); 79 | } 80 | 81 | #[test] 82 | fn test_sval_set_fnv_hasher() { 83 | let mut set: IndexSet = Default::default(); 84 | set.extend(1..5); 85 | assert_tokens( 86 | &set, 87 | &[ 88 | Token::SeqBegin(Some(4)), 89 | Token::SeqValueBegin, 90 | Token::I32(1), 91 | Token::SeqValueEnd, 92 | Token::SeqValueBegin, 93 | Token::I32(2), 94 | Token::SeqValueEnd, 95 | Token::SeqValueBegin, 96 | Token::I32(3), 97 | Token::SeqValueEnd, 98 | Token::SeqValueBegin, 99 | Token::I32(4), 100 | Token::SeqValueEnd, 101 | Token::SeqEnd, 102 | ], 103 | ); 104 | } 105 | -------------------------------------------------------------------------------- /test-serde/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg(test)] 2 | 3 | use fnv::FnvBuildHasher; 4 | use indexmap::{indexmap, indexset, IndexMap, IndexSet}; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_test::{assert_tokens, Token}; 7 | 8 | #[test] 9 | fn test_serde_map() { 10 | let map = indexmap! { 1 => 2, 3 => 4 }; 11 | assert_tokens( 12 | &map, 13 | &[ 14 | Token::Map { len: Some(2) }, 15 | Token::I32(1), 16 | Token::I32(2), 17 | Token::I32(3), 18 | Token::I32(4), 19 | Token::MapEnd, 20 | ], 21 | ); 22 | } 23 | 24 | #[test] 25 | fn test_serde_set() { 26 | let set = indexset! { 1, 2, 3, 4 }; 27 | assert_tokens( 28 | &set, 29 | &[ 30 | Token::Seq { len: Some(4) }, 31 | Token::I32(1), 32 | Token::I32(2), 33 | Token::I32(3), 34 | Token::I32(4), 35 | Token::SeqEnd, 36 | ], 37 | ); 38 | } 39 | 40 | #[test] 41 | fn test_serde_map_fnv_hasher() { 42 | let mut map: IndexMap = Default::default(); 43 | map.insert(1, 2); 44 | map.insert(3, 4); 45 | assert_tokens( 46 | &map, 47 | &[ 48 | Token::Map { len: Some(2) }, 49 | Token::I32(1), 50 | Token::I32(2), 51 | Token::I32(3), 52 | Token::I32(4), 53 | Token::MapEnd, 54 | ], 55 | ); 56 | } 57 | 58 | #[test] 59 | fn test_serde_set_fnv_hasher() { 60 | let mut set: IndexSet = Default::default(); 61 | set.extend(1..5); 62 | assert_tokens( 63 | &set, 64 | &[ 65 | Token::Seq { len: Some(4) }, 66 | Token::I32(1), 67 | Token::I32(2), 68 | Token::I32(3), 69 | Token::I32(4), 70 | Token::SeqEnd, 71 | ], 72 | ); 73 | } 74 | 75 | #[test] 76 | fn test_serde_seq_map() { 77 | #[derive(Debug, Deserialize, Serialize)] 78 | #[serde(transparent)] 79 | struct SeqIndexMap { 80 | #[serde(with = "indexmap::map::serde_seq")] 81 | map: IndexMap, 82 | } 83 | 84 | impl PartialEq for SeqIndexMap { 85 | fn eq(&self, other: &Self) -> bool { 86 | // explicitly compare items in order 87 | self.map.iter().eq(&other.map) 88 | } 89 | } 90 | 91 | let map = indexmap! 
{ 1 => 2, 3 => 4, -1 => -2, -3 => -4 }; 92 | assert_tokens( 93 | &SeqIndexMap { map }, 94 | &[ 95 | Token::Seq { len: Some(4) }, 96 | Token::Tuple { len: 2 }, 97 | Token::I32(1), 98 | Token::I32(2), 99 | Token::TupleEnd, 100 | Token::Tuple { len: 2 }, 101 | Token::I32(3), 102 | Token::I32(4), 103 | Token::TupleEnd, 104 | Token::Tuple { len: 2 }, 105 | Token::I32(-1), 106 | Token::I32(-2), 107 | Token::TupleEnd, 108 | Token::Tuple { len: 2 }, 109 | Token::I32(-3), 110 | Token::I32(-4), 111 | Token::TupleEnd, 112 | Token::SeqEnd, 113 | ], 114 | ); 115 | } 116 | -------------------------------------------------------------------------------- /src/inner/extract.rs: -------------------------------------------------------------------------------- 1 | #![allow(unsafe_code)] 2 | 3 | use super::{Bucket, Core}; 4 | use crate::util::simplify_range; 5 | 6 | use core::ops::RangeBounds; 7 | 8 | impl Core { 9 | #[track_caller] 10 | pub(crate) fn extract(&mut self, range: R) -> ExtractCore<'_, K, V> 11 | where 12 | R: RangeBounds, 13 | { 14 | let range = simplify_range(range, self.entries.len()); 15 | 16 | // SAFETY: We must have consistent lengths to start, so that's a hard assertion. 17 | // Then the worst `set_len` can do is leak items if `ExtractCore` doesn't drop. 18 | assert_eq!(self.entries.len(), self.indices.len()); 19 | unsafe { 20 | self.entries.set_len(range.start); 21 | } 22 | ExtractCore { 23 | map: self, 24 | new_len: range.start, 25 | current: range.start, 26 | end: range.end, 27 | } 28 | } 29 | } 30 | 31 | pub(crate) struct ExtractCore<'a, K, V> { 32 | map: &'a mut Core, 33 | new_len: usize, 34 | current: usize, 35 | end: usize, 36 | } 37 | 38 | impl Drop for ExtractCore<'_, K, V> { 39 | fn drop(&mut self) { 40 | let old_len = self.map.indices.len(); 41 | let mut new_len = self.new_len; 42 | 43 | debug_assert!(new_len <= self.current); 44 | debug_assert!(self.current <= self.end); 45 | debug_assert!(self.current <= old_len); 46 | debug_assert!(old_len <= self.map.entries.capacity()); 47 | 48 | // SAFETY: We assume `new_len` and `current` were correctly maintained by the iterator. 49 | // So `entries[new_len..current]` were extracted, but the rest before and after are valid. 50 | unsafe { 51 | if new_len == self.current { 52 | // Nothing was extracted, so any remaining items can be left in place. 53 | new_len = old_len; 54 | } else if self.current < old_len { 55 | // Need to shift the remaining items down. 56 | let tail_len = old_len - self.current; 57 | let base = self.map.entries.as_mut_ptr(); 58 | let src = base.add(self.current); 59 | let dest = base.add(new_len); 60 | src.copy_to(dest, tail_len); 61 | new_len += tail_len; 62 | } 63 | self.map.entries.set_len(new_len); 64 | } 65 | 66 | if new_len != old_len { 67 | // We don't keep track of *which* items were extracted, so reindex everything. 68 | self.map.rebuild_hash_table(); 69 | } 70 | } 71 | } 72 | 73 | impl ExtractCore<'_, K, V> { 74 | pub(crate) fn extract_if(&mut self, mut pred: F) -> Option> 75 | where 76 | F: FnMut(&mut Bucket) -> bool, 77 | { 78 | debug_assert!(self.end <= self.map.entries.capacity()); 79 | 80 | let base = self.map.entries.as_mut_ptr(); 81 | while self.current < self.end { 82 | // SAFETY: We're maintaining both indices within bounds of the original entries, so 83 | // 0..new_len and current..indices.len() are always valid items for our Drop to keep. 84 | unsafe { 85 | let item = base.add(self.current); 86 | if pred(&mut *item) { 87 | // Extract it! 
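// Advancing `current` before the `read()` keeps this slot inside the
// `new_len..current` gap, which `Drop` treats as already moved out, so
// the returned value cannot be dropped again if the caller stops early.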
88 | self.current += 1; 89 | return Some(item.read()); 90 | } else { 91 | // Keep it, shifting it down if needed. 92 | if self.new_len != self.current { 93 | debug_assert!(self.new_len < self.current); 94 | let dest = base.add(self.new_len); 95 | item.copy_to_nonoverlapping(dest, 1); 96 | } 97 | self.current += 1; 98 | self.new_len += 1; 99 | } 100 | } 101 | } 102 | None 103 | } 104 | 105 | pub(crate) fn remaining(&self) -> usize { 106 | self.end - self.current 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/borsh.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "borsh")))] 2 | 3 | use alloc::vec::Vec; 4 | use core::hash::BuildHasher; 5 | use core::hash::Hash; 6 | 7 | use borsh::error::ERROR_ZST_FORBIDDEN; 8 | use borsh::io::{Error, ErrorKind, Read, Result, Write}; 9 | use borsh::{BorshDeserialize, BorshSerialize}; 10 | 11 | use crate::map::IndexMap; 12 | use crate::set::IndexSet; 13 | 14 | // NOTE: the real `#[deprecated]` attribute doesn't work for trait implementations, 15 | // but we can get close by mimicking the message style for documentation. 16 | ///
👎Deprecated: use borsh's indexmap feature instead.
17 | impl<K, V, S> BorshSerialize for IndexMap<K, V, S> 18 | where 19 | K: BorshSerialize, 20 | V: BorshSerialize, 21 | { 22 | #[inline] 23 | fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> { 24 | check_zst::<K>()?; 25 | 26 | let iterator = self.iter(); 27 | 28 | u32::try_from(iterator.len()) 29 | .map_err(|_| ErrorKind::InvalidData)? 30 | .serialize(writer)?; 31 | 32 | for (key, value) in iterator { 33 | key.serialize(writer)?; 34 | value.serialize(writer)?; 35 | } 36 | 37 | Ok(()) 38 | } 39 | } 40 | 41 | ///
👎Deprecated: use borsh's indexmap feature instead.
42 | impl<K, V, S> BorshDeserialize for IndexMap<K, V, S> 43 | where 44 | K: BorshDeserialize + Eq + Hash, 45 | V: BorshDeserialize, 46 | S: BuildHasher + Default, 47 | { 48 | #[inline] 49 | fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> { 50 | check_zst::<K>()?; 51 | let vec = <Vec<(K, V)>>::deserialize_reader(reader)?; 52 | Ok(vec.into_iter().collect::<IndexMap<K, V, S>>()) 53 | } 54 | } 55 | 56 | ///
👎Deprecated: use borsh's indexmap feature instead.
57 | impl<T, S> BorshSerialize for IndexSet<T, S> 58 | where 59 | T: BorshSerialize, 60 | { 61 | #[inline] 62 | fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> { 63 | check_zst::<T>()?; 64 | 65 | let iterator = self.iter(); 66 | 67 | u32::try_from(iterator.len()) 68 | .map_err(|_| ErrorKind::InvalidData)? 69 | .serialize(writer)?; 70 | 71 | for item in iterator { 72 | item.serialize(writer)?; 73 | } 74 | 75 | Ok(()) 76 | } 77 | } 78 | 79 | ///
👎Deprecated: use borsh's indexmap feature instead.
80 | impl BorshDeserialize for IndexSet 81 | where 82 | T: BorshDeserialize + Eq + Hash, 83 | S: BuildHasher + Default, 84 | { 85 | #[inline] 86 | fn deserialize_reader(reader: &mut R) -> Result { 87 | check_zst::()?; 88 | let vec = >::deserialize_reader(reader)?; 89 | Ok(vec.into_iter().collect::>()) 90 | } 91 | } 92 | 93 | fn check_zst() -> Result<()> { 94 | if size_of::() == 0 { 95 | return Err(Error::new(ErrorKind::InvalidData, ERROR_ZST_FORBIDDEN)); 96 | } 97 | Ok(()) 98 | } 99 | 100 | #[cfg(test)] 101 | mod borsh_tests { 102 | use super::*; 103 | 104 | #[test] 105 | fn map_borsh_roundtrip() { 106 | let original_map: IndexMap = { 107 | let mut map = IndexMap::new(); 108 | map.insert(1, 2); 109 | map.insert(3, 4); 110 | map.insert(5, 6); 111 | map 112 | }; 113 | let serialized_map = borsh::to_vec(&original_map).unwrap(); 114 | let deserialized_map: IndexMap = 115 | BorshDeserialize::try_from_slice(&serialized_map).unwrap(); 116 | assert_eq!(original_map, deserialized_map); 117 | } 118 | 119 | #[test] 120 | fn set_borsh_roundtrip() { 121 | let original_map: IndexSet = [1, 2, 3, 4, 5, 6].into_iter().collect(); 122 | let serialized_map = borsh::to_vec(&original_map).unwrap(); 123 | let deserialized_map: IndexSet = 124 | BorshDeserialize::try_from_slice(&serialized_map).unwrap(); 125 | assert_eq!(original_map, deserialized_map); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/map/serde_seq.rs: -------------------------------------------------------------------------------- 1 | //! Functions to serialize and deserialize an [`IndexMap`] as an ordered sequence. 2 | //! 3 | //! The default `serde` implementation serializes `IndexMap` as a normal map, 4 | //! but there is no guarantee that serialization formats will preserve the order 5 | //! of the key-value pairs. This module serializes `IndexMap` as a sequence of 6 | //! `(key, value)` elements instead, in order. 7 | //! 8 | //! This module may be used in a field attribute for derived implementations: 9 | //! 10 | //! ``` 11 | //! # use indexmap::IndexMap; 12 | //! # use serde::{Deserialize, Serialize}; 13 | //! #[derive(Deserialize, Serialize)] 14 | //! struct Data { 15 | //! #[serde(with = "indexmap::map::serde_seq")] 16 | //! map: IndexMap, 17 | //! // ... 18 | //! } 19 | //! ``` 20 | 21 | use serde_core::de::{Deserialize, Deserializer, SeqAccess, Visitor}; 22 | use serde_core::ser::{Serialize, Serializer}; 23 | 24 | use core::fmt::{self, Formatter}; 25 | use core::hash::{BuildHasher, Hash}; 26 | use core::marker::PhantomData; 27 | 28 | use crate::map::Slice as MapSlice; 29 | use crate::serde::cautious_capacity; 30 | use crate::set::Slice as SetSlice; 31 | use crate::IndexMap; 32 | 33 | /// Serializes a [`map::Slice`][MapSlice] as an ordered sequence. 34 | /// 35 | /// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence 36 | /// of `(key, value)` pairs, rather than as a map that might not preserve order. 37 | impl Serialize for MapSlice 38 | where 39 | K: Serialize, 40 | V: Serialize, 41 | { 42 | fn serialize(&self, serializer: T) -> Result 43 | where 44 | T: Serializer, 45 | { 46 | serializer.collect_seq(self) 47 | } 48 | } 49 | 50 | /// Serializes a [`set::Slice`][SetSlice] as an ordered sequence. 
51 | impl Serialize for SetSlice 52 | where 53 | T: Serialize, 54 | { 55 | fn serialize(&self, serializer: Se) -> Result 56 | where 57 | Se: Serializer, 58 | { 59 | serializer.collect_seq(self) 60 | } 61 | } 62 | 63 | /// Serializes an [`IndexMap`] as an ordered sequence. 64 | /// 65 | /// This function may be used in a field attribute for deriving [`Serialize`]: 66 | /// 67 | /// ``` 68 | /// # use indexmap::IndexMap; 69 | /// # use serde::Serialize; 70 | /// #[derive(Serialize)] 71 | /// struct Data { 72 | /// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")] 73 | /// map: IndexMap, 74 | /// // ... 75 | /// } 76 | /// ``` 77 | pub fn serialize(map: &IndexMap, serializer: T) -> Result 78 | where 79 | K: Serialize, 80 | V: Serialize, 81 | T: Serializer, 82 | { 83 | serializer.collect_seq(map) 84 | } 85 | 86 | /// Visitor to deserialize a *sequenced* `IndexMap` 87 | struct SeqVisitor(PhantomData<(K, V, S)>); 88 | 89 | impl<'de, K, V, S> Visitor<'de> for SeqVisitor 90 | where 91 | K: Deserialize<'de> + Eq + Hash, 92 | V: Deserialize<'de>, 93 | S: Default + BuildHasher, 94 | { 95 | type Value = IndexMap; 96 | 97 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 98 | write!(formatter, "a sequenced map") 99 | } 100 | 101 | fn visit_seq
(self, mut seq: A) -> Result 102 | where 103 | A: SeqAccess<'de>, 104 | { 105 | let capacity = cautious_capacity::(seq.size_hint()); 106 | let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); 107 | 108 | while let Some((key, value)) = seq.next_element()? { 109 | map.insert(key, value); 110 | } 111 | 112 | Ok(map) 113 | } 114 | } 115 | 116 | /// Deserializes an [`IndexMap`] from an ordered sequence. 117 | /// 118 | /// This function may be used in a field attribute for deriving [`Deserialize`]: 119 | /// 120 | /// ``` 121 | /// # use indexmap::IndexMap; 122 | /// # use serde::Deserialize; 123 | /// #[derive(Deserialize)] 124 | /// struct Data { 125 | /// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")] 126 | /// map: IndexMap, 127 | /// // ... 128 | /// } 129 | /// ``` 130 | pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> 131 | where 132 | D: Deserializer<'de>, 133 | K: Deserialize<'de> + Eq + Hash, 134 | V: Deserialize<'de>, 135 | S: Default + BuildHasher, 136 | { 137 | deserializer.deserialize_seq(SeqVisitor(PhantomData)) 138 | } 139 | -------------------------------------------------------------------------------- /benches/faststring.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use test::Bencher; 6 | 7 | use indexmap::IndexMap; 8 | 9 | use std::collections::HashMap; 10 | 11 | use std::hash::{Hash, Hasher}; 12 | 13 | use std::borrow::Borrow; 14 | use std::ops::Deref; 15 | 16 | /// Use a consistently seeded Rng for benchmark stability 17 | fn small_rng() -> fastrand::Rng { 18 | let seed = u64::from_le_bytes(*b"indexmap"); 19 | fastrand::Rng::with_seed(seed) 20 | } 21 | 22 | #[derive(PartialEq, Eq, Copy, Clone)] 23 | #[repr(transparent)] 24 | pub struct OneShot(pub T); 25 | 26 | impl Hash for OneShot { 27 | fn hash(&self, h: &mut H) { 28 | h.write(self.0.as_bytes()) 29 | } 30 | } 31 | 32 | impl<'a, S> From<&'a S> for &'a OneShot 33 | where 34 | S: AsRef, 35 | { 36 | #[allow(unsafe_code)] 37 | fn from(s: &'a S) -> Self { 38 | let s: &str = s.as_ref(); 39 | // SAFETY: OneShot is a `repr(transparent)` wrapper 40 | unsafe { &*(s as *const str as *const OneShot) } 41 | } 42 | } 43 | 44 | impl Hash for OneShot { 45 | fn hash(&self, h: &mut H) { 46 | h.write(self.0.as_bytes()) 47 | } 48 | } 49 | 50 | impl Borrow> for OneShot { 51 | fn borrow(&self) -> &OneShot { 52 | <&OneShot>::from(&self.0) 53 | } 54 | } 55 | 56 | impl Deref for OneShot { 57 | type Target = T; 58 | fn deref(&self) -> &T { 59 | &self.0 60 | } 61 | } 62 | 63 | fn shuffled_keys(iter: I) -> Vec 64 | where 65 | I: IntoIterator, 66 | { 67 | let mut v = Vec::from_iter(iter); 68 | let mut rng = small_rng(); 69 | rng.shuffle(&mut v); 70 | v 71 | } 72 | 73 | #[bench] 74 | fn insert_hashmap_string_10_000(b: &mut Bencher) { 75 | let c = 10_000; 76 | b.iter(|| { 77 | let mut map = HashMap::with_capacity(c); 78 | for x in 0..c { 79 | map.insert(x.to_string(), ()); 80 | } 81 | map 82 | }); 83 | } 84 | 85 | #[bench] 86 | fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { 87 | let c = 10_000; 88 | b.iter(|| { 89 | let mut map = HashMap::with_capacity(c); 90 | for x in 0..c { 91 | map.insert(OneShot(x.to_string()), ()); 92 | } 93 | map 94 | }); 95 | } 96 | 97 | #[bench] 98 | fn insert_indexmap_string_10_000(b: &mut Bencher) { 99 | let c = 10_000; 100 | b.iter(|| { 101 | let mut map = IndexMap::with_capacity(c); 102 | for x in 0..c { 103 | map.insert(x.to_string(), ()); 104 | } 
105 | map 106 | }); 107 | } 108 | 109 | #[bench] 110 | fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { 111 | let c = 10_000; 112 | let mut map = HashMap::with_capacity(c); 113 | let keys = shuffled_keys(0..c); 114 | for &key in &keys { 115 | map.insert(key.to_string(), 1); 116 | } 117 | let lookups = (5000..c).map(|x| x.to_string()).collect::>(); 118 | b.iter(|| { 119 | let mut found = 0; 120 | for key in &lookups { 121 | found += map.get(key).is_some() as i32; 122 | } 123 | found 124 | }); 125 | } 126 | 127 | #[bench] 128 | fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { 129 | let c = 10_000; 130 | let mut map = HashMap::with_capacity(c); 131 | let keys = shuffled_keys(0..c); 132 | for &key in &keys { 133 | map.insert(OneShot(key.to_string()), 1); 134 | } 135 | let lookups = (5000..c) 136 | .map(|x| OneShot(x.to_string())) 137 | .collect::>(); 138 | b.iter(|| { 139 | let mut found = 0; 140 | for key in &lookups { 141 | found += map.get(key).is_some() as i32; 142 | } 143 | found 144 | }); 145 | } 146 | 147 | #[bench] 148 | fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) { 149 | let c = 10_000; 150 | let mut map = IndexMap::with_capacity(c); 151 | let keys = shuffled_keys(0..c); 152 | for &key in &keys { 153 | map.insert(key.to_string(), 1); 154 | } 155 | let lookups = (5000..c).map(|x| x.to_string()).collect::>(); 156 | b.iter(|| { 157 | let mut found = 0; 158 | for key in &lookups { 159 | found += map.get(key).is_some() as i32; 160 | } 161 | found 162 | }); 163 | } 164 | 165 | #[bench] 166 | fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) { 167 | let c = 10_000; 168 | let mut map = IndexMap::with_capacity(c); 169 | let keys = shuffled_keys(0..c); 170 | for &key in &keys { 171 | map.insert(OneShot(key.to_string()), 1); 172 | } 173 | let lookups = (5000..c) 174 | .map(|x| OneShot(x.to_string())) 175 | .collect::>(); 176 | b.iter(|| { 177 | let mut found = 0; 178 | for key in &lookups { 179 | found += map.get(key).is_some() as i32; 180 | } 181 | found 182 | }); 183 | } 184 | -------------------------------------------------------------------------------- /src/serde.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "serde")))] 2 | 3 | use serde_core::de::value::{MapDeserializer, SeqDeserializer}; 4 | use serde_core::de::{ 5 | Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor, 6 | }; 7 | use serde_core::ser::{Serialize, Serializer}; 8 | 9 | use core::fmt::{self, Formatter}; 10 | use core::hash::{BuildHasher, Hash}; 11 | use core::marker::PhantomData; 12 | 13 | use crate::{Bucket, IndexMap, IndexSet}; 14 | 15 | /// Limit our preallocated capacity from a deserializer `size_hint()`. 16 | /// 17 | /// We do account for the `Bucket` overhead from its saved `hash` field, but we don't count the 18 | /// `RawTable` allocation or the fact that its raw capacity will be rounded up to a power of two. 19 | /// The "max" is an arbitrary choice anyway, not something that needs precise adherence. 20 | /// 21 | /// This is based on the internal `serde::de::size_hint::cautious(hint)` function. 
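// For example: with `Bucket<u64, u64>` at 24 bytes on a 64-bit target (cached
// hash plus key plus value), an adversarial size hint is clamped to roughly
// 1 MiB / 24 ≈ 43_690 preallocated entries.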
22 | pub(crate) fn cautious_capacity(hint: Option) -> usize { 23 | const MAX_PREALLOC_BYTES: usize = 1024 * 1024; 24 | 25 | Ord::min( 26 | hint.unwrap_or(0), 27 | MAX_PREALLOC_BYTES / size_of::>(), 28 | ) 29 | } 30 | 31 | impl Serialize for IndexMap 32 | where 33 | K: Serialize, 34 | V: Serialize, 35 | { 36 | fn serialize(&self, serializer: T) -> Result 37 | where 38 | T: Serializer, 39 | { 40 | serializer.collect_map(self) 41 | } 42 | } 43 | 44 | struct IndexMapVisitor(PhantomData<(K, V, S)>); 45 | 46 | impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor 47 | where 48 | K: Deserialize<'de> + Eq + Hash, 49 | V: Deserialize<'de>, 50 | S: Default + BuildHasher, 51 | { 52 | type Value = IndexMap; 53 | 54 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 55 | write!(formatter, "a map") 56 | } 57 | 58 | fn visit_map(self, mut map: A) -> Result 59 | where 60 | A: MapAccess<'de>, 61 | { 62 | let capacity = cautious_capacity::(map.size_hint()); 63 | let mut values = IndexMap::with_capacity_and_hasher(capacity, S::default()); 64 | 65 | while let Some((key, value)) = map.next_entry()? { 66 | values.insert(key, value); 67 | } 68 | 69 | Ok(values) 70 | } 71 | } 72 | 73 | impl<'de, K, V, S> Deserialize<'de> for IndexMap 74 | where 75 | K: Deserialize<'de> + Eq + Hash, 76 | V: Deserialize<'de>, 77 | S: Default + BuildHasher, 78 | { 79 | fn deserialize(deserializer: D) -> Result 80 | where 81 | D: Deserializer<'de>, 82 | { 83 | deserializer.deserialize_map(IndexMapVisitor(PhantomData)) 84 | } 85 | } 86 | 87 | impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap 88 | where 89 | K: IntoDeserializer<'de, E> + Eq + Hash, 90 | V: IntoDeserializer<'de, E>, 91 | S: BuildHasher, 92 | E: Error, 93 | { 94 | type Deserializer = MapDeserializer<'de, ::IntoIter, E>; 95 | 96 | fn into_deserializer(self) -> Self::Deserializer { 97 | MapDeserializer::new(self.into_iter()) 98 | } 99 | } 100 | 101 | impl Serialize for IndexSet 102 | where 103 | T: Serialize, 104 | { 105 | fn serialize(&self, serializer: Se) -> Result 106 | where 107 | Se: Serializer, 108 | { 109 | serializer.collect_seq(self) 110 | } 111 | } 112 | 113 | struct IndexSetVisitor(PhantomData<(T, S)>); 114 | 115 | impl<'de, T, S> Visitor<'de> for IndexSetVisitor 116 | where 117 | T: Deserialize<'de> + Eq + Hash, 118 | S: Default + BuildHasher, 119 | { 120 | type Value = IndexSet; 121 | 122 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 123 | write!(formatter, "a set") 124 | } 125 | 126 | fn visit_seq(self, mut seq: A) -> Result 127 | where 128 | A: SeqAccess<'de>, 129 | { 130 | let capacity = cautious_capacity::(seq.size_hint()); 131 | let mut values = IndexSet::with_capacity_and_hasher(capacity, S::default()); 132 | 133 | while let Some(value) = seq.next_element()? 
{ 134 | values.insert(value); 135 | } 136 | 137 | Ok(values) 138 | } 139 | } 140 | 141 | impl<'de, T, S> Deserialize<'de> for IndexSet 142 | where 143 | T: Deserialize<'de> + Eq + Hash, 144 | S: Default + BuildHasher, 145 | { 146 | fn deserialize(deserializer: D) -> Result 147 | where 148 | D: Deserializer<'de>, 149 | { 150 | deserializer.deserialize_seq(IndexSetVisitor(PhantomData)) 151 | } 152 | } 153 | 154 | impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet 155 | where 156 | T: IntoDeserializer<'de, E> + Eq + Hash, 157 | S: BuildHasher, 158 | E: Error, 159 | { 160 | type Deserializer = SeqDeserializer<::IntoIter, E>; 161 | 162 | fn into_deserializer(self) -> Self::Deserializer { 163 | SeqDeserializer::new(self.into_iter()) 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | merge_group: 7 | 8 | name: CI 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | CARGO_INCREMENTAL: 0 13 | 14 | jobs: 15 | tests: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | include: 20 | - rust: 1.82.0 # MSRV 21 | features: 22 | - rust: stable 23 | features: arbitrary 24 | - rust: stable 25 | features: quickcheck 26 | - rust: stable 27 | features: rayon 28 | - rust: stable 29 | features: serde 30 | - rust: stable 31 | features: sval 32 | - rust: stable 33 | features: borsh 34 | - rust: stable 35 | features: std 36 | - rust: beta 37 | features: 38 | - rust: nightly 39 | bench: test build benchmarks 40 | 41 | steps: 42 | - uses: actions/checkout@v4 43 | - name: Lock MSRV-compatible dependencies 44 | if: matrix.rust == '1.82.0' 45 | env: 46 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 47 | # Note that this uses the runner's pre-installed stable cargo 48 | run: cargo generate-lockfile 49 | - uses: dtolnay/rust-toolchain@master 50 | with: 51 | toolchain: ${{ matrix.rust }} 52 | - name: Tests 53 | run: | 54 | cargo build --verbose --features "${{ matrix.features }}" 55 | cargo doc --verbose --features "${{ matrix.features }}" 56 | cargo test --verbose --features "${{ matrix.features }}" 57 | cargo test --release --verbose --features "${{ matrix.features }}" 58 | - name: Tests (serde) 59 | if: matrix.features == 'serde' 60 | run: | 61 | cargo test --verbose -p test-serde 62 | - name: Tests (sval) 63 | if: matrix.features == 'sval' 64 | run: | 65 | cargo test --verbose -p test-sval 66 | - name: Test run benchmarks 67 | if: matrix.bench != '' 68 | run: cargo test -v --benches 69 | 70 | nostd_build: 71 | runs-on: ubuntu-latest 72 | strategy: 73 | matrix: 74 | include: 75 | - rust: 1.82.0 76 | target: thumbv6m-none-eabi 77 | - rust: stable 78 | target: thumbv6m-none-eabi 79 | 80 | steps: 81 | - uses: actions/checkout@v4 82 | - name: Lock MSRV-compatible dependencies 83 | if: matrix.rust == '1.82.0' 84 | env: 85 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 86 | # Note that this uses the runner's pre-installed stable cargo 87 | run: cargo generate-lockfile 88 | - uses: dtolnay/rust-toolchain@master 89 | with: 90 | toolchain: ${{ matrix.rust }} 91 | target: ${{ matrix.target }} 92 | - name: Tests 93 | run: | 94 | cargo build -vv --target=${{ matrix.target }} --no-default-features 95 | cargo build -v -p test-nostd --target=${{ matrix.target }} 96 | 97 | clippy: 98 | runs-on: ubuntu-latest 99 | steps: 100 | - uses: actions/checkout@v4 101 | - uses: 
dtolnay/rust-toolchain@beta 102 | with: 103 | components: clippy 104 | - run: cargo clippy --all-features 105 | 106 | miri: 107 | runs-on: ubuntu-latest 108 | steps: 109 | - uses: actions/checkout@v4 110 | - uses: dtolnay/rust-toolchain@nightly 111 | with: 112 | components: miri, rust-src 113 | - uses: taiki-e/install-action@v2 114 | with: 115 | tool: cargo-nextest 116 | if: github.event_name == 'merge_group' 117 | - run: cargo miri nextest run 118 | if: github.event_name == 'merge_group' 119 | - run: cargo miri test --doc 120 | 121 | minimal-versions: 122 | name: Check MSRV and minimal-versions 123 | runs-on: ubuntu-latest 124 | steps: 125 | - uses: actions/checkout@v4 126 | - uses: dtolnay/rust-toolchain@nightly 127 | - uses: dtolnay/rust-toolchain@1.82.0 # MSRV 128 | - uses: taiki-e/install-action@v2 129 | with: 130 | tool: cargo-hack 131 | - name: Lock minimal direct dependencies 132 | run: cargo +nightly hack generate-lockfile --remove-dev-deps -Z direct-minimal-versions 133 | env: 134 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 135 | - name: Build (nightly) 136 | run: cargo +nightly build --verbose --all-features 137 | - name: Build (MSRV) 138 | run: cargo build --verbose --features arbitrary,quickcheck,serde,sval,rayon 139 | 140 | # One job that "summarizes" the success state of this pipeline. This can then be added to branch 141 | # protection, rather than having to add each job separately. 142 | success: 143 | name: Success 144 | runs-on: ubuntu-latest 145 | needs: [tests, nostd_build, clippy, miri, minimal-versions] 146 | # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency 147 | # failed" as success. So we have to do some contortions to ensure the job fails if any of its 148 | # dependencies fails. 149 | if: always() # make sure this is never "skipped" 150 | steps: 151 | # Manually check the status of all dependencies. `if: failure()` does not work. 152 | - name: check if any dependency failed 153 | run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' 154 | -------------------------------------------------------------------------------- /src/map/mutable.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | 3 | use super::{ 4 | Bucket, Entry, Equivalent, IndexMap, IndexedEntry, IterMut2, OccupiedEntry, VacantEntry, 5 | }; 6 | 7 | /// Opt-in mutable access to [`IndexMap`] keys. 8 | /// 9 | /// These methods expose `&mut K`, mutable references to the key as it is stored 10 | /// in the map. 11 | /// You are allowed to modify the keys in the map **if the modification 12 | /// does not change the key's hash and equality**. 13 | /// 14 | /// If keys are modified erroneously, you can no longer look them up. 15 | /// This is sound (memory safe) but a logical error hazard (just like 16 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 17 | /// 18 | /// `use` this trait to enable its methods for `IndexMap`. 19 | /// 20 | /// This trait is sealed and cannot be implemented for types outside this crate. 21 | #[expect(private_bounds)] 22 | pub trait MutableKeys: Sealed { 23 | type Key; 24 | type Value; 25 | 26 | /// Return item index, mutable reference to key and value 27 | /// 28 | /// Computes in **O(1)** time (average). 
29 | fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut Self::Key, &mut Self::Value)> 30 | where 31 | Q: ?Sized + Hash + Equivalent; 32 | 33 | /// Return mutable reference to key and value at an index. 34 | /// 35 | /// Valid indices are `0 <= index < self.len()`. 36 | /// 37 | /// Computes in **O(1)** time. 38 | fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>; 39 | 40 | /// Return an iterator over the key-value pairs of the map, in their order 41 | fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value>; 42 | 43 | /// Scan through each key-value pair in the map and keep those where the 44 | /// closure `keep` returns `true`. 45 | /// 46 | /// The elements are visited in order, and remaining elements keep their 47 | /// order. 48 | /// 49 | /// Computes in **O(n)** time (average). 50 | fn retain2(&mut self, keep: F) 51 | where 52 | F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; 53 | } 54 | 55 | /// Opt-in mutable access to [`IndexMap`] keys. 56 | /// 57 | /// See [`MutableKeys`] for more information. 58 | impl MutableKeys for IndexMap 59 | where 60 | S: BuildHasher, 61 | { 62 | type Key = K; 63 | type Value = V; 64 | 65 | fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> 66 | where 67 | Q: ?Sized + Hash + Equivalent, 68 | { 69 | if let Some(i) = self.get_index_of(key) { 70 | let entry = &mut self.as_entries_mut()[i]; 71 | Some((i, &mut entry.key, &mut entry.value)) 72 | } else { 73 | None 74 | } 75 | } 76 | 77 | fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> { 78 | self.as_entries_mut().get_mut(index).map(Bucket::muts) 79 | } 80 | 81 | fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value> { 82 | IterMut2::new(self.as_entries_mut()) 83 | } 84 | 85 | fn retain2(&mut self, keep: F) 86 | where 87 | F: FnMut(&mut K, &mut V) -> bool, 88 | { 89 | self.core.retain_in_order(keep); 90 | } 91 | } 92 | 93 | /// Opt-in mutable access to [`Entry`] keys. 94 | /// 95 | /// These methods expose `&mut K`, mutable references to the key as it is stored 96 | /// in the map. 97 | /// You are allowed to modify the keys in the map **if the modification 98 | /// does not change the key's hash and equality**. 99 | /// 100 | /// If keys are modified erroneously, you can no longer look them up. 101 | /// This is sound (memory safe) but a logical error hazard (just like 102 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 103 | /// 104 | /// `use` this trait to enable its methods for `Entry`. 105 | /// 106 | /// This trait is sealed and cannot be implemented for types outside this crate. 107 | #[expect(private_bounds)] 108 | pub trait MutableEntryKey: Sealed { 109 | type Key; 110 | 111 | /// Gets a mutable reference to the entry's key, either within the map if occupied, 112 | /// or else the new key that was used to find the entry. 113 | fn key_mut(&mut self) -> &mut Self::Key; 114 | } 115 | 116 | /// Opt-in mutable access to [`Entry`] keys. 117 | /// 118 | /// See [`MutableEntryKey`] for more information. 119 | impl MutableEntryKey for Entry<'_, K, V> { 120 | type Key = K; 121 | fn key_mut(&mut self) -> &mut Self::Key { 122 | match self { 123 | Entry::Occupied(e) => e.key_mut(), 124 | Entry::Vacant(e) => e.key_mut(), 125 | } 126 | } 127 | } 128 | 129 | /// Opt-in mutable access to [`OccupiedEntry`] keys. 130 | /// 131 | /// See [`MutableEntryKey`] for more information. 
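/// An illustrative sketch (not taken from the crate's own docs): the key may
/// be mutated as long as its hash and equality are unchanged, e.g. releasing
/// excess `String` capacity.
///
/// ```
/// use indexmap::map::{Entry, MutableEntryKey};
/// use indexmap::IndexMap;
///
/// let mut map: IndexMap<String, i32> = IndexMap::new();
/// map.insert("key".to_string(), 1);
/// if let Entry::Occupied(mut entry) = map.entry("key".to_string()) {
///     entry.key_mut().shrink_to_fit();
/// }
/// assert_eq!(map.get("key"), Some(&1));
/// ```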
132 | impl MutableEntryKey for OccupiedEntry<'_, K, V> { 133 | type Key = K; 134 | fn key_mut(&mut self) -> &mut Self::Key { 135 | &mut self.get_bucket_mut().key 136 | } 137 | } 138 | 139 | /// Opt-in mutable access to [`VacantEntry`] keys. 140 | /// 141 | /// See [`MutableEntryKey`] for more information. 142 | impl MutableEntryKey for VacantEntry<'_, K, V> { 143 | type Key = K; 144 | fn key_mut(&mut self) -> &mut Self::Key { 145 | self.key_mut() 146 | } 147 | } 148 | 149 | /// Opt-in mutable access to [`IndexedEntry`] keys. 150 | /// 151 | /// See [`MutableEntryKey`] for more information. 152 | impl MutableEntryKey for IndexedEntry<'_, K, V> { 153 | type Key = K; 154 | fn key_mut(&mut self) -> &mut Self::Key { 155 | self.key_mut() 156 | } 157 | } 158 | 159 | trait Sealed {} 160 | 161 | impl Sealed for IndexMap {} 162 | impl Sealed for Entry<'_, K, V> {} 163 | impl Sealed for OccupiedEntry<'_, K, V> {} 164 | impl Sealed for VacantEntry<'_, K, V> {} 165 | impl Sealed for IndexedEntry<'_, K, V> {} 166 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | /// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs 2 | /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. 3 | /// 4 | /// ## Example 5 | /// 6 | /// ``` 7 | /// use indexmap::indexmap_with_default; 8 | /// use fnv::FnvHasher; 9 | /// 10 | /// let map = indexmap_with_default!{ 11 | /// FnvHasher; 12 | /// "a" => 1, 13 | /// "b" => 2, 14 | /// }; 15 | /// assert_eq!(map["a"], 1); 16 | /// assert_eq!(map["b"], 2); 17 | /// assert_eq!(map.get("c"), None); 18 | /// 19 | /// // "a" is the first key 20 | /// assert_eq!(map.keys().next(), Some(&"a")); 21 | /// ``` 22 | #[macro_export] 23 | macro_rules! indexmap_with_default { 24 | ($H:ty; $($key:expr => $value:expr,)+) => { $crate::indexmap_with_default!($H; $($key => $value),+) }; 25 | ($H:ty; $($key:expr => $value:expr),*) => {{ 26 | let builder = ::core::hash::BuildHasherDefault::<$H>::default(); 27 | const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); 28 | #[allow(unused_mut)] 29 | // Specify your custom `H` (must implement Default + Hasher) as the hasher: 30 | let mut map = $crate::IndexMap::with_capacity_and_hasher(CAP, builder); 31 | $( 32 | map.insert($key, $value); 33 | )* 34 | map 35 | }}; 36 | } 37 | 38 | #[cfg(feature = "std")] 39 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 40 | #[macro_export] 41 | /// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs 42 | /// 43 | /// ## Example 44 | /// 45 | /// ``` 46 | /// use indexmap::indexmap; 47 | /// 48 | /// let map = indexmap!{ 49 | /// "a" => 1, 50 | /// "b" => 2, 51 | /// }; 52 | /// assert_eq!(map["a"], 1); 53 | /// assert_eq!(map["b"], 2); 54 | /// assert_eq!(map.get("c"), None); 55 | /// 56 | /// // "a" is the first key 57 | /// assert_eq!(map.keys().next(), Some(&"a")); 58 | /// ``` 59 | macro_rules! indexmap { 60 | ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) }; 61 | ($($key:expr => $value:expr),*) => { 62 | { 63 | // Note: `stringify!($key)` is just here to consume the repetition, 64 | // but we throw away that string literal during constant evaluation. 
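// For example, `indexmap!{"a" => 1, "b" => 2}` evaluates `CAP` below as the
// length of the two-element array `[(), ()]`, so the map is created with
// capacity for exactly two entries before any insertion happens.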
65 | const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); 66 | let mut map = $crate::IndexMap::with_capacity(CAP); 67 | $( 68 | map.insert($key, $value); 69 | )* 70 | map 71 | } 72 | }; 73 | } 74 | 75 | /// Create an [`IndexSet`][crate::IndexSet] from a list of values 76 | /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. 77 | /// 78 | /// ## Example 79 | /// 80 | /// ``` 81 | /// use indexmap::indexset_with_default; 82 | /// use fnv::FnvHasher; 83 | /// 84 | /// let set = indexset_with_default!{ 85 | /// FnvHasher; 86 | /// "a", 87 | /// "b", 88 | /// }; 89 | /// assert!(set.contains("a")); 90 | /// assert!(set.contains("b")); 91 | /// assert!(!set.contains("c")); 92 | /// 93 | /// // "a" is the first value 94 | /// assert_eq!(set.iter().next(), Some(&"a")); 95 | /// ``` 96 | #[macro_export] 97 | macro_rules! indexset_with_default { 98 | ($H:ty; $($value:expr,)+) => { $crate::indexset_with_default!($H; $($value),+) }; 99 | ($H:ty; $($value:expr),*) => {{ 100 | let builder = ::core::hash::BuildHasherDefault::<$H>::default(); 101 | const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); 102 | #[allow(unused_mut)] 103 | // Specify your custom `H` (must implement Default + Hash) as the hasher: 104 | let mut set = $crate::IndexSet::with_capacity_and_hasher(CAP, builder); 105 | $( 106 | set.insert($value); 107 | )* 108 | set 109 | }}; 110 | } 111 | 112 | #[cfg(feature = "std")] 113 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 114 | #[macro_export] 115 | /// Create an [`IndexSet`][crate::IndexSet] from a list of values 116 | /// 117 | /// ## Example 118 | /// 119 | /// ``` 120 | /// use indexmap::indexset; 121 | /// 122 | /// let set = indexset!{ 123 | /// "a", 124 | /// "b", 125 | /// }; 126 | /// assert!(set.contains("a")); 127 | /// assert!(set.contains("b")); 128 | /// assert!(!set.contains("c")); 129 | /// 130 | /// // "a" is the first value 131 | /// assert_eq!(set.iter().next(), Some(&"a")); 132 | /// ``` 133 | macro_rules! indexset { 134 | ($($value:expr,)+) => { $crate::indexset!($($value),+) }; 135 | ($($value:expr),*) => { 136 | { 137 | // Note: `stringify!($value)` is just here to consume the repetition, 138 | // but we throw away that string literal during constant evaluation. 139 | const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); 140 | let mut set = $crate::IndexSet::with_capacity(CAP); 141 | $( 142 | set.insert($value); 143 | )* 144 | set 145 | } 146 | }; 147 | } 148 | 149 | // generate all the Iterator methods by just forwarding to the underlying 150 | // self.iter and mapping its element. 151 | macro_rules! iterator_methods { 152 | // $map_elt is the mapping function from the underlying iterator's element 153 | // same mapping function for both options and iterators 154 | ($map_elt:expr) => { 155 | fn next(&mut self) -> Option { 156 | self.iter.next().map($map_elt) 157 | } 158 | 159 | fn size_hint(&self) -> (usize, Option) { 160 | self.iter.size_hint() 161 | } 162 | 163 | fn count(self) -> usize { 164 | self.iter.len() 165 | } 166 | 167 | fn nth(&mut self, n: usize) -> Option { 168 | self.iter.nth(n).map($map_elt) 169 | } 170 | 171 | fn last(mut self) -> Option { 172 | self.next_back() 173 | } 174 | 175 | fn collect(self) -> C 176 | where 177 | C: FromIterator, 178 | { 179 | // NB: forwarding this directly to standard iterators will 180 | // allow it to leverage unstable traits like `TrustedLen`. 181 | self.iter.map($map_elt).collect() 182 | } 183 | }; 184 | } 185 | 186 | macro_rules! 
double_ended_iterator_methods { 187 | // $map_elt is the mapping function from the underlying iterator's element 188 | // same mapping function for both options and iterators 189 | ($map_elt:expr) => { 190 | fn next_back(&mut self) -> Option { 191 | self.iter.next_back().map($map_elt) 192 | } 193 | 194 | fn nth_back(&mut self, n: usize) -> Option { 195 | self.iter.nth_back(n).map($map_elt) 196 | } 197 | }; 198 | } 199 | 200 | // generate `ParallelIterator` methods by just forwarding to the underlying 201 | // self.entries and mapping its elements. 202 | #[cfg(feature = "rayon")] 203 | macro_rules! parallel_iterator_methods { 204 | // $map_elt is the mapping function from the underlying iterator's element 205 | ($map_elt:expr) => { 206 | fn drive_unindexed(self, consumer: C) -> C::Result 207 | where 208 | C: UnindexedConsumer, 209 | { 210 | self.entries 211 | .into_par_iter() 212 | .map($map_elt) 213 | .drive_unindexed(consumer) 214 | } 215 | 216 | // NB: This allows indexed collection, e.g. directly into a `Vec`, but the 217 | // underlying iterator must really be indexed. We should remove this if we 218 | // start having tombstones that must be filtered out. 219 | fn opt_len(&self) -> Option { 220 | Some(self.entries.len()) 221 | } 222 | }; 223 | } 224 | 225 | // generate `IndexedParallelIterator` methods by just forwarding to the underlying 226 | // self.entries and mapping its elements. 227 | #[cfg(feature = "rayon")] 228 | macro_rules! indexed_parallel_iterator_methods { 229 | // $map_elt is the mapping function from the underlying iterator's element 230 | ($map_elt:expr) => { 231 | fn drive(self, consumer: C) -> C::Result 232 | where 233 | C: Consumer, 234 | { 235 | self.entries.into_par_iter().map($map_elt).drive(consumer) 236 | } 237 | 238 | fn len(&self) -> usize { 239 | self.entries.len() 240 | } 241 | 242 | fn with_producer(self, callback: CB) -> CB::Output 243 | where 244 | CB: ProducerCallback, 245 | { 246 | self.entries 247 | .into_par_iter() 248 | .map($map_elt) 249 | .with_producer(callback) 250 | } 251 | }; 252 | } 253 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | //! [`IndexMap`] is a hash table where the iteration order of the key-value 4 | //! pairs is independent of the hash values of the keys. 5 | //! 6 | //! [`IndexSet`] is a corresponding hash set using the same implementation and 7 | //! with similar properties. 8 | //! 9 | //! ### Highlights 10 | //! 11 | //! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap` 12 | //! and `HashSet`, but they also have some features of note: 13 | //! 14 | //! - The ordering semantics (see their documentation for details) 15 | //! - Sorting methods and the [`.pop()`][IndexMap::pop] methods. 16 | //! - The [`Equivalent`] trait, which offers more flexible equality definitions 17 | //! between borrowed and owned versions of keys. 18 | //! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable 19 | //! access to map keys, and [`MutableValues`][set::MutableValues] for sets. 20 | //! 21 | //! ### Feature Flags 22 | //! 23 | //! To reduce the amount of compiled code in the crate by default, certain 24 | //! features are gated behind [feature flags]. These allow you to opt in to (or 25 | //! out of) functionality. Below is a list of the features available in this 26 | //! crate. 27 | //! 28 | //! 
* `std`: Enables features which require the Rust standard library. For more 29 | //! information see the section on [`no_std`]. 30 | //! * `rayon`: Enables parallel iteration and other parallel methods. 31 | //! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`] 32 | //! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for 33 | //! (de)serializing [`IndexMap`] as an ordered sequence are available in the 34 | //! [`map::serde_seq`] module. 35 | //! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait 36 | //! to [`IndexMap`] and [`IndexSet`]. 37 | //! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait 38 | //! to [`IndexMap`] and [`IndexSet`]. 39 | //! * `borsh` (**deprecated**): Adds implementations for [`BorshSerialize`] and 40 | //! [`BorshDeserialize`] to [`IndexMap`] and [`IndexSet`]. Due to a cyclic 41 | //! dependency that arose between [`borsh`] and `indexmap`, `borsh v1.5.6` 42 | //! added an `indexmap` feature that should be used instead of enabling the 43 | //! feature here. 44 | //! 45 | //! _Note: only the `std` feature is enabled by default._ 46 | //! 47 | //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section 48 | //! [`no_std`]: #no-standard-library-targets 49 | //! [`Serialize`]: `::serde_core::Serialize` 50 | //! [`Deserialize`]: `::serde_core::Deserialize` 51 | //! [`BorshSerialize`]: `::borsh::BorshSerialize` 52 | //! [`BorshDeserialize`]: `::borsh::BorshDeserialize` 53 | //! [`borsh`]: `::borsh` 54 | //! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary` 55 | //! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary` 56 | //! 57 | //! ### Alternate Hashers 58 | //! 59 | //! [`IndexMap`] and [`IndexSet`] have a default hasher type 60 | //! [`S = RandomState`][std::hash::RandomState], 61 | //! just like the standard `HashMap` and `HashSet`, which is resistant to 62 | //! HashDoS attacks but not the most performant. Type aliases can make it easier 63 | //! to use alternate hashers: 64 | //! 65 | //! ``` 66 | //! use fnv::FnvBuildHasher; 67 | //! use indexmap::{IndexMap, IndexSet}; 68 | //! 69 | //! type FnvIndexMap = IndexMap; 70 | //! type FnvIndexSet = IndexSet; 71 | //! 72 | //! let std: IndexSet = (0..100).collect(); 73 | //! let fnv: FnvIndexSet = (0..100).collect(); 74 | //! assert_eq!(std, fnv); 75 | //! ``` 76 | //! 77 | //! ### Rust Version 78 | //! 79 | //! This version of indexmap requires Rust 1.82 or later. 80 | //! 81 | //! The indexmap 2.x release series will use a carefully considered version 82 | //! upgrade policy, where in a later 2.x version, we will raise the minimum 83 | //! required Rust version. 84 | //! 85 | //! ## No Standard Library Targets 86 | //! 87 | //! This crate supports being built without `std`, requiring `alloc` instead. 88 | //! This is chosen by disabling the default "std" cargo feature, by adding 89 | //! `default-features = false` to your dependency specification. 90 | //! 91 | //! - Creating maps and sets using [`new`][IndexMap::new] and 92 | //! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. 93 | //! Use methods [`IndexMap::default`], [`with_hasher`][IndexMap::with_hasher], 94 | //! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead. 95 | //! A no-std compatible hasher will be needed as well, for example 96 | //! from the crate `twox-hash`. 97 | //! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`. Use 98 | //! 
the macros [`indexmap_with_default!`] and [`indexset_with_default!`] instead. 99 | 100 | #![cfg_attr(docsrs, feature(doc_cfg))] 101 | 102 | extern crate alloc; 103 | 104 | #[cfg(feature = "std")] 105 | #[macro_use] 106 | extern crate std; 107 | 108 | mod arbitrary; 109 | mod inner; 110 | #[macro_use] 111 | mod macros; 112 | #[cfg(feature = "borsh")] 113 | mod borsh; 114 | #[cfg(feature = "serde")] 115 | mod serde; 116 | #[cfg(feature = "sval")] 117 | mod sval; 118 | mod util; 119 | 120 | pub mod map; 121 | pub mod set; 122 | 123 | // Placed after `map` and `set` so new `rayon` methods on the types 124 | // are documented after the "normal" methods. 125 | #[cfg(feature = "rayon")] 126 | mod rayon; 127 | 128 | pub use crate::map::IndexMap; 129 | pub use crate::set::IndexSet; 130 | pub use equivalent::Equivalent; 131 | 132 | // shared private items 133 | 134 | /// Hash value newtype. Not larger than usize, since anything larger 135 | /// isn't used for selecting position anyway. 136 | #[derive(Clone, Copy, Debug, PartialEq)] 137 | struct HashValue(usize); 138 | 139 | impl HashValue { 140 | #[inline(always)] 141 | fn get(self) -> u64 { 142 | self.0 as u64 143 | } 144 | } 145 | 146 | #[derive(Copy, Debug)] 147 | struct Bucket { 148 | hash: HashValue, 149 | key: K, 150 | value: V, 151 | } 152 | 153 | impl Clone for Bucket 154 | where 155 | K: Clone, 156 | V: Clone, 157 | { 158 | fn clone(&self) -> Self { 159 | Bucket { 160 | hash: self.hash, 161 | key: self.key.clone(), 162 | value: self.value.clone(), 163 | } 164 | } 165 | 166 | fn clone_from(&mut self, other: &Self) { 167 | self.hash = other.hash; 168 | self.key.clone_from(&other.key); 169 | self.value.clone_from(&other.value); 170 | } 171 | } 172 | 173 | impl Bucket { 174 | // field accessors -- used for `f` instead of closures in `.map(f)` 175 | fn key_ref(&self) -> &K { 176 | &self.key 177 | } 178 | fn value_ref(&self) -> &V { 179 | &self.value 180 | } 181 | fn value_mut(&mut self) -> &mut V { 182 | &mut self.value 183 | } 184 | fn key(self) -> K { 185 | self.key 186 | } 187 | fn value(self) -> V { 188 | self.value 189 | } 190 | fn key_value(self) -> (K, V) { 191 | (self.key, self.value) 192 | } 193 | fn refs(&self) -> (&K, &V) { 194 | (&self.key, &self.value) 195 | } 196 | fn ref_mut(&mut self) -> (&K, &mut V) { 197 | (&self.key, &mut self.value) 198 | } 199 | fn muts(&mut self) -> (&mut K, &mut V) { 200 | (&mut self.key, &mut self.value) 201 | } 202 | } 203 | 204 | /// The error type for [`try_reserve`][IndexMap::try_reserve] methods. 205 | #[derive(Clone, PartialEq, Eq, Debug)] 206 | pub struct TryReserveError { 207 | kind: TryReserveErrorKind, 208 | } 209 | 210 | #[derive(Clone, PartialEq, Eq, Debug)] 211 | enum TryReserveErrorKind { 212 | // The standard library's kind is currently opaque to us, otherwise we could unify this. 213 | Std(alloc::collections::TryReserveError), 214 | CapacityOverflow, 215 | AllocError { layout: alloc::alloc::Layout }, 216 | } 217 | 218 | // These are not `From` so we don't expose them in our public API. 
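//
// A hedged usage sketch of the error this type backs (illustrative only,
// not an item of the crate itself):
//
//     fn grow(map: &mut IndexMap<u64, u64>) -> Result<(), TryReserveError> {
//         map.try_reserve(1024)?; // reports the failure instead of aborting
//         Ok(())
//     }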
219 | impl TryReserveError { 220 | fn from_alloc(error: alloc::collections::TryReserveError) -> Self { 221 | Self { 222 | kind: TryReserveErrorKind::Std(error), 223 | } 224 | } 225 | 226 | fn from_hashbrown(error: hashbrown::TryReserveError) -> Self { 227 | Self { 228 | kind: match error { 229 | hashbrown::TryReserveError::CapacityOverflow => { 230 | TryReserveErrorKind::CapacityOverflow 231 | } 232 | hashbrown::TryReserveError::AllocError { layout } => { 233 | TryReserveErrorKind::AllocError { layout } 234 | } 235 | }, 236 | } 237 | } 238 | } 239 | 240 | impl core::fmt::Display for TryReserveError { 241 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 242 | let reason = match &self.kind { 243 | TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f), 244 | TryReserveErrorKind::CapacityOverflow => { 245 | " because the computed capacity exceeded the collection's maximum" 246 | } 247 | TryReserveErrorKind::AllocError { .. } => { 248 | " because the memory allocator returned an error" 249 | } 250 | }; 251 | f.write_str("memory allocation failed")?; 252 | f.write_str(reason) 253 | } 254 | } 255 | 256 | impl core::error::Error for TryReserveError {} 257 | 258 | // NOTE: This is copied from the slice module in the std lib. 259 | /// The error type returned by [`get_disjoint_indices_mut`][`IndexMap::get_disjoint_indices_mut`]. 260 | /// 261 | /// It indicates one of two possible errors: 262 | /// - An index is out-of-bounds. 263 | /// - The same index appeared multiple times in the array. 264 | // (or different but overlapping indices when ranges are provided) 265 | #[derive(Debug, Clone, PartialEq, Eq)] 266 | pub enum GetDisjointMutError { 267 | /// An index provided was out-of-bounds for the slice. 268 | IndexOutOfBounds, 269 | /// Two indices provided were overlapping. 270 | OverlappingIndices, 271 | } 272 | 273 | impl core::fmt::Display for GetDisjointMutError { 274 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 275 | let msg = match self { 276 | GetDisjointMutError::IndexOutOfBounds => "an index is out of bounds", 277 | GetDisjointMutError::OverlappingIndices => "there were overlapping indices", 278 | }; 279 | 280 | core::fmt::Display::fmt(msg, f) 281 | } 282 | } 283 | 284 | impl core::error::Error for GetDisjointMutError {} 285 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /src/map/entry.rs: -------------------------------------------------------------------------------- 1 | use crate::inner::{Core, OccupiedEntry, VacantEntry}; 2 | use crate::Bucket; 3 | use core::{fmt, mem}; 4 | 5 | /// Entry for an existing key-value pair in an [`IndexMap`][crate::IndexMap] 6 | /// or a vacant location to insert one. 7 | pub enum Entry<'a, K, V> { 8 | /// Existing slot with equivalent key. 9 | Occupied(OccupiedEntry<'a, K, V>), 10 | /// Vacant slot (no equivalent key in the map). 11 | Vacant(VacantEntry<'a, K, V>), 12 | } 13 | 14 | impl<'a, K, V> Entry<'a, K, V> { 15 | /// Return the index where the key-value pair exists or will be inserted. 16 | pub fn index(&self) -> usize { 17 | match self { 18 | Entry::Occupied(entry) => entry.index(), 19 | Entry::Vacant(entry) => entry.index(), 20 | } 21 | } 22 | 23 | /// Sets the value of the entry (after inserting if vacant), and returns an `OccupiedEntry`. 24 | /// 25 | /// Computes in **O(1)** time (amortized average). 26 | pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> { 27 | match self { 28 | Entry::Occupied(mut entry) => { 29 | entry.insert(value); 30 | entry 31 | } 32 | Entry::Vacant(entry) => entry.insert_entry(value), 33 | } 34 | } 35 | 36 | /// Inserts the given default value in the entry if it is vacant and returns a mutable 37 | /// reference to it. Otherwise a mutable reference to an already existent value is returned. 38 | /// 39 | /// Computes in **O(1)** time (amortized average). 40 | pub fn or_insert(self, default: V) -> &'a mut V { 41 | match self { 42 | Entry::Occupied(entry) => entry.into_mut(), 43 | Entry::Vacant(entry) => entry.insert(default), 44 | } 45 | } 46 | 47 | /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable 48 | /// reference to it. Otherwise a mutable reference to an already existent value is returned. 49 | /// 50 | /// Computes in **O(1)** time (amortized average). 51 | pub fn or_insert_with(self, call: F) -> &'a mut V 52 | where 53 | F: FnOnce() -> V, 54 | { 55 | match self { 56 | Entry::Occupied(entry) => entry.into_mut(), 57 | Entry::Vacant(entry) => entry.insert(call()), 58 | } 59 | } 60 | 61 | /// Inserts the result of the `call` function with a reference to the entry's key if it is 62 | /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to 63 | /// an already existent value is returned. 64 | /// 65 | /// Computes in **O(1)** time (amortized average). 66 | pub fn or_insert_with_key(self, call: F) -> &'a mut V 67 | where 68 | F: FnOnce(&K) -> V, 69 | { 70 | match self { 71 | Entry::Occupied(entry) => entry.into_mut(), 72 | Entry::Vacant(entry) => { 73 | let value = call(entry.key()); 74 | entry.insert(value) 75 | } 76 | } 77 | } 78 | 79 | /// Gets a reference to the entry's key, either within the map if occupied, 80 | /// or else the new key that was used to find the entry. 81 | pub fn key(&self) -> &K { 82 | match *self { 83 | Entry::Occupied(ref entry) => entry.key(), 84 | Entry::Vacant(ref entry) => entry.key(), 85 | } 86 | } 87 | 88 | /// Modifies the entry if it is occupied. 89 | pub fn and_modify(mut self, f: F) -> Self 90 | where 91 | F: FnOnce(&mut V), 92 | { 93 | if let Entry::Occupied(entry) = &mut self { 94 | f(entry.get_mut()); 95 | } 96 | self 97 | } 98 | 99 | /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable 100 | /// reference to it. 
Otherwise a mutable reference to an already existent value is returned. 101 | /// 102 | /// Computes in **O(1)** time (amortized average). 103 | pub fn or_default(self) -> &'a mut V 104 | where 105 | V: Default, 106 | { 107 | match self { 108 | Entry::Occupied(entry) => entry.into_mut(), 109 | Entry::Vacant(entry) => entry.insert(V::default()), 110 | } 111 | } 112 | } 113 | 114 | impl fmt::Debug for Entry<'_, K, V> { 115 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 116 | let mut tuple = f.debug_tuple("Entry"); 117 | match self { 118 | Entry::Vacant(v) => tuple.field(v), 119 | Entry::Occupied(o) => tuple.field(o), 120 | }; 121 | tuple.finish() 122 | } 123 | } 124 | 125 | impl fmt::Debug for OccupiedEntry<'_, K, V> { 126 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 127 | f.debug_struct("OccupiedEntry") 128 | .field("key", self.key()) 129 | .field("value", self.get()) 130 | .finish() 131 | } 132 | } 133 | 134 | impl fmt::Debug for VacantEntry<'_, K, V> { 135 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 136 | f.debug_tuple("VacantEntry").field(self.key()).finish() 137 | } 138 | } 139 | 140 | /// A view into an occupied entry in an [`IndexMap`][crate::IndexMap] obtained by index. 141 | /// 142 | /// This `struct` is created from the [`get_index_entry`][crate::IndexMap::get_index_entry] method. 143 | pub struct IndexedEntry<'a, K, V> { 144 | map: &'a mut Core, 145 | // We have a mutable reference to the map, which keeps the index 146 | // valid and pointing to the correct entry. 147 | index: usize, 148 | } 149 | 150 | impl<'a, K, V> IndexedEntry<'a, K, V> { 151 | pub(crate) fn new(map: &'a mut Core, index: usize) -> Option { 152 | if index < map.len() { 153 | Some(Self { map, index }) 154 | } else { 155 | None 156 | } 157 | } 158 | 159 | /// Return the index of the key-value pair 160 | #[inline] 161 | pub fn index(&self) -> usize { 162 | self.index 163 | } 164 | 165 | pub(crate) fn into_core(self) -> &'a mut Core { 166 | self.map 167 | } 168 | 169 | fn get_bucket(&self) -> &Bucket { 170 | &self.map.as_entries()[self.index] 171 | } 172 | 173 | fn get_bucket_mut(&mut self) -> &mut Bucket { 174 | &mut self.map.as_entries_mut()[self.index] 175 | } 176 | 177 | fn into_bucket(self) -> &'a mut Bucket { 178 | &mut self.map.as_entries_mut()[self.index] 179 | } 180 | 181 | /// Gets a reference to the entry's key in the map. 182 | pub fn key(&self) -> &K { 183 | &self.get_bucket().key 184 | } 185 | 186 | pub(super) fn key_mut(&mut self) -> &mut K { 187 | &mut self.get_bucket_mut().key 188 | } 189 | 190 | /// Gets a reference to the entry's value in the map. 191 | pub fn get(&self) -> &V { 192 | &self.get_bucket().value 193 | } 194 | 195 | /// Gets a mutable reference to the entry's value in the map. 196 | /// 197 | /// If you need a reference which may outlive the destruction of the 198 | /// `IndexedEntry` value, see [`into_mut`][Self::into_mut]. 199 | pub fn get_mut(&mut self) -> &mut V { 200 | &mut self.get_bucket_mut().value 201 | } 202 | 203 | /// Sets the value of the entry to `value`, and returns the entry's old value. 204 | pub fn insert(&mut self, value: V) -> V { 205 | mem::replace(self.get_mut(), value) 206 | } 207 | 208 | /// Converts into a mutable reference to the entry's value in the map, 209 | /// with a lifetime bound to the map itself. 
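///
/// For example (illustrative sketch):
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map = IndexMap::from([("a", 1), ("b", 2)]);
/// let value = map.get_index_entry(1).unwrap().into_mut();
/// *value += 10;
/// assert_eq!(map["b"], 12);
/// ```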
210 | pub fn into_mut(self) -> &'a mut V { 211 | &mut self.into_bucket().value 212 | } 213 | 214 | /// Remove and return the key, value pair stored in the map for this entry 215 | /// 216 | /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it 217 | /// with the last element of the map and popping it off. 218 | /// **This perturbs the position of what used to be the last element!** 219 | /// 220 | /// Computes in **O(1)** time (average). 221 | pub fn swap_remove_entry(self) -> (K, V) { 222 | self.map.swap_remove_index(self.index).unwrap() 223 | } 224 | 225 | /// Remove and return the key, value pair stored in the map for this entry 226 | /// 227 | /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the 228 | /// elements that follow it, preserving their relative order. 229 | /// **This perturbs the index of all of those elements!** 230 | /// 231 | /// Computes in **O(n)** time (average). 232 | pub fn shift_remove_entry(self) -> (K, V) { 233 | self.map.shift_remove_index(self.index).unwrap() 234 | } 235 | 236 | /// Remove the key, value pair stored in the map for this entry, and return the value. 237 | /// 238 | /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it 239 | /// with the last element of the map and popping it off. 240 | /// **This perturbs the position of what used to be the last element!** 241 | /// 242 | /// Computes in **O(1)** time (average). 243 | pub fn swap_remove(self) -> V { 244 | self.swap_remove_entry().1 245 | } 246 | 247 | /// Remove the key, value pair stored in the map for this entry, and return the value. 248 | /// 249 | /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the 250 | /// elements that follow it, preserving their relative order. 251 | /// **This perturbs the index of all of those elements!** 252 | /// 253 | /// Computes in **O(n)** time (average). 254 | pub fn shift_remove(self) -> V { 255 | self.shift_remove_entry().1 256 | } 257 | 258 | /// Moves the position of the entry to a new index 259 | /// by shifting all other entries in-between. 260 | /// 261 | /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] 262 | /// coming `from` the current [`.index()`][Self::index]. 263 | /// 264 | /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. 265 | /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. 266 | /// 267 | /// ***Panics*** if `to` is out of bounds. 268 | /// 269 | /// Computes in **O(n)** time (average). 270 | #[track_caller] 271 | pub fn move_index(self, to: usize) { 272 | self.map.move_index(self.index, to); 273 | } 274 | 275 | /// Swaps the position of entry with another. 276 | /// 277 | /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] 278 | /// with the current [`.index()`][Self::index] as one of the two being swapped. 279 | /// 280 | /// ***Panics*** if the `other` index is out of bounds. 281 | /// 282 | /// Computes in **O(1)** time (average). 
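///
/// For example (illustrative sketch):
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3)]);
/// map.get_index_entry(0).unwrap().swap_indices(2);
/// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["c", "b", "a"]);
/// ```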
283 | #[track_caller] 284 | pub fn swap_indices(self, other: usize) { 285 | self.map.swap_indices(self.index, other); 286 | } 287 | } 288 | 289 | impl fmt::Debug for IndexedEntry<'_, K, V> { 290 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 291 | f.debug_struct("IndexedEntry") 292 | .field("index", &self.index) 293 | .field("key", self.key()) 294 | .field("value", self.get()) 295 | .finish() 296 | } 297 | } 298 | 299 | impl<'a, K, V> From> for IndexedEntry<'a, K, V> { 300 | fn from(other: OccupiedEntry<'a, K, V>) -> Self { 301 | Self { 302 | index: other.index(), 303 | map: other.into_core(), 304 | } 305 | } 306 | } 307 | 308 | #[test] 309 | fn assert_send_sync() { 310 | fn assert_send_sync() {} 311 | assert_send_sync::>(); 312 | assert_send_sync::>(); 313 | } 314 | -------------------------------------------------------------------------------- /src/set/slice.rs: -------------------------------------------------------------------------------- 1 | use super::{Bucket, IndexSet, IntoIter, Iter}; 2 | use crate::util::{slice_eq, try_simplify_range}; 3 | 4 | use alloc::boxed::Box; 5 | use alloc::vec::Vec; 6 | use core::cmp::Ordering; 7 | use core::fmt; 8 | use core::hash::{Hash, Hasher}; 9 | use core::ops::{self, Bound, Index, RangeBounds}; 10 | 11 | /// A dynamically-sized slice of values in an [`IndexSet`]. 12 | /// 13 | /// This supports indexed operations much like a `[T]` slice, 14 | /// but not any hashed operations on the values. 15 | /// 16 | /// Unlike `IndexSet`, `Slice` does consider the order for [`PartialEq`] 17 | /// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`]. 18 | #[repr(transparent)] 19 | pub struct Slice { 20 | pub(crate) entries: [Bucket], 21 | } 22 | 23 | // SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, 24 | // and reference lifetimes are bound together in function signatures. 25 | #[allow(unsafe_code)] 26 | impl Slice { 27 | pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { 28 | unsafe { &*(entries as *const [Bucket] as *const Self) } 29 | } 30 | 31 | pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { 32 | unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } 33 | } 34 | 35 | fn into_boxed(self: Box) -> Box<[Bucket]> { 36 | unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } 37 | } 38 | } 39 | 40 | impl Slice { 41 | pub(crate) fn into_entries(self: Box) -> Vec> { 42 | self.into_boxed().into_vec() 43 | } 44 | 45 | /// Returns an empty slice. 46 | pub const fn new<'a>() -> &'a Self { 47 | Self::from_slice(&[]) 48 | } 49 | 50 | /// Return the number of elements in the set slice. 51 | pub const fn len(&self) -> usize { 52 | self.entries.len() 53 | } 54 | 55 | /// Returns true if the set slice contains no elements. 56 | pub const fn is_empty(&self) -> bool { 57 | self.entries.is_empty() 58 | } 59 | 60 | /// Get a value by index. 61 | /// 62 | /// Valid indices are `0 <= index < self.len()`. 63 | pub fn get_index(&self, index: usize) -> Option<&T> { 64 | self.entries.get(index).map(Bucket::key_ref) 65 | } 66 | 67 | /// Returns a slice of values in the given range of indices. 68 | /// 69 | /// Valid indices are `0 <= index < self.len()`. 70 | pub fn get_range>(&self, range: R) -> Option<&Self> { 71 | let range = try_simplify_range(range, self.entries.len())?; 72 | self.entries.get(range).map(Self::from_slice) 73 | } 74 | 75 | /// Get the first value. 76 | pub fn first(&self) -> Option<&T> { 77 | self.entries.first().map(Bucket::key_ref) 78 | } 79 | 80 | /// Get the last value. 
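///
/// A minimal sketch:
///
/// ```
/// use indexmap::IndexSet;
///
/// let set = IndexSet::from([10, 20, 30]);
/// assert_eq!(set.as_slice().last(), Some(&30));
/// ```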
81 | pub fn last(&self) -> Option<&T> { 82 | self.entries.last().map(Bucket::key_ref) 83 | } 84 | 85 | /// Divides one slice into two at an index. 86 | /// 87 | /// ***Panics*** if `index > len`. 88 | #[track_caller] 89 | pub fn split_at(&self, index: usize) -> (&Self, &Self) { 90 | let (first, second) = self.entries.split_at(index); 91 | (Self::from_slice(first), Self::from_slice(second)) 92 | } 93 | 94 | /// Returns the first value and the rest of the slice, 95 | /// or `None` if it is empty. 96 | pub fn split_first(&self) -> Option<(&T, &Self)> { 97 | if let [first, rest @ ..] = &self.entries { 98 | Some((&first.key, Self::from_slice(rest))) 99 | } else { 100 | None 101 | } 102 | } 103 | 104 | /// Returns the last value and the rest of the slice, 105 | /// or `None` if it is empty. 106 | pub fn split_last(&self) -> Option<(&T, &Self)> { 107 | if let [rest @ .., last] = &self.entries { 108 | Some((&last.key, Self::from_slice(rest))) 109 | } else { 110 | None 111 | } 112 | } 113 | 114 | /// Return an iterator over the values of the set slice. 115 | pub fn iter(&self) -> Iter<'_, T> { 116 | Iter::new(&self.entries) 117 | } 118 | 119 | /// Search over a sorted set for a value. 120 | /// 121 | /// Returns the position where that value is present, or the position where it can be inserted 122 | /// to maintain the sort. See [`slice::binary_search`] for more details. 123 | /// 124 | /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in 125 | /// the set this is a slice from using [`IndexSet::get_index_of`], but this can also position 126 | /// missing values. 127 | pub fn binary_search(&self, x: &T) -> Result 128 | where 129 | T: Ord, 130 | { 131 | self.binary_search_by(|p| p.cmp(x)) 132 | } 133 | 134 | /// Search over a sorted set with a comparator function. 135 | /// 136 | /// Returns the position where that value is present, or the position where it can be inserted 137 | /// to maintain the sort. See [`slice::binary_search_by`] for more details. 138 | /// 139 | /// Computes in **O(log(n))** time. 140 | #[inline] 141 | pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result 142 | where 143 | F: FnMut(&'a T) -> Ordering, 144 | { 145 | self.entries.binary_search_by(move |a| f(&a.key)) 146 | } 147 | 148 | /// Search over a sorted set with an extraction function. 149 | /// 150 | /// Returns the position where that value is present, or the position where it can be inserted 151 | /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. 152 | /// 153 | /// Computes in **O(log(n))** time. 154 | #[inline] 155 | pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result 156 | where 157 | F: FnMut(&'a T) -> B, 158 | B: Ord, 159 | { 160 | self.binary_search_by(|k| f(k).cmp(b)) 161 | } 162 | 163 | /// Checks if the values of this slice are sorted. 164 | #[inline] 165 | pub fn is_sorted(&self) -> bool 166 | where 167 | T: PartialOrd, 168 | { 169 | self.entries.is_sorted_by(|a, b| a.key <= b.key) 170 | } 171 | 172 | /// Checks if this slice is sorted using the given comparator function. 173 | #[inline] 174 | pub fn is_sorted_by<'a, F>(&'a self, mut cmp: F) -> bool 175 | where 176 | F: FnMut(&'a T, &'a T) -> bool, 177 | { 178 | self.entries.is_sorted_by(move |a, b| cmp(&a.key, &b.key)) 179 | } 180 | 181 | /// Checks if this slice is sorted using the given sort-key function. 
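///
/// A minimal sketch, checking sortedness by string length rather than by `Ord`:
///
/// ```
/// use indexmap::IndexSet;
///
/// let set = IndexSet::from(["a", "bb", "ccc"]);
/// assert!(set.as_slice().is_sorted_by_key(|s| s.len()));
/// ```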
182 | #[inline] 183 | pub fn is_sorted_by_key<'a, F, K>(&'a self, mut sort_key: F) -> bool 184 | where 185 | F: FnMut(&'a T) -> K, 186 | K: PartialOrd, 187 | { 188 | self.entries.is_sorted_by_key(move |a| sort_key(&a.key)) 189 | } 190 | 191 | /// Returns the index of the partition point of a sorted set according to the given predicate 192 | /// (the index of the first element of the second partition). 193 | /// 194 | /// See [`slice::partition_point`] for more details. 195 | /// 196 | /// Computes in **O(log(n))** time. 197 | #[must_use] 198 | pub fn partition_point
<P>
(&self, mut pred: P) -> usize 199 | where 200 | P: FnMut(&T) -> bool, 201 | { 202 | self.entries.partition_point(move |a| pred(&a.key)) 203 | } 204 | } 205 | 206 | impl<'a, T> IntoIterator for &'a Slice { 207 | type IntoIter = Iter<'a, T>; 208 | type Item = &'a T; 209 | 210 | fn into_iter(self) -> Self::IntoIter { 211 | self.iter() 212 | } 213 | } 214 | 215 | impl IntoIterator for Box> { 216 | type IntoIter = IntoIter; 217 | type Item = T; 218 | 219 | fn into_iter(self) -> Self::IntoIter { 220 | IntoIter::new(self.into_entries()) 221 | } 222 | } 223 | 224 | impl Default for &'_ Slice { 225 | fn default() -> Self { 226 | Slice::from_slice(&[]) 227 | } 228 | } 229 | 230 | impl Default for Box> { 231 | fn default() -> Self { 232 | Slice::from_boxed(Box::default()) 233 | } 234 | } 235 | 236 | impl Clone for Box> { 237 | fn clone(&self) -> Self { 238 | Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) 239 | } 240 | } 241 | 242 | impl From<&Slice> for Box> { 243 | fn from(slice: &Slice) -> Self { 244 | Slice::from_boxed(Box::from(&slice.entries)) 245 | } 246 | } 247 | 248 | impl fmt::Debug for Slice { 249 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 250 | f.debug_list().entries(self).finish() 251 | } 252 | } 253 | 254 | impl PartialEq> for Slice 255 | where 256 | T: PartialEq, 257 | { 258 | fn eq(&self, other: &Slice) -> bool { 259 | slice_eq(&self.entries, &other.entries, |b1, b2| b1.key == b2.key) 260 | } 261 | } 262 | 263 | impl PartialEq<[U]> for Slice 264 | where 265 | T: PartialEq, 266 | { 267 | fn eq(&self, other: &[U]) -> bool { 268 | slice_eq(&self.entries, other, |b, o| b.key == *o) 269 | } 270 | } 271 | 272 | impl PartialEq> for [T] 273 | where 274 | T: PartialEq, 275 | { 276 | fn eq(&self, other: &Slice) -> bool { 277 | slice_eq(self, &other.entries, |o, b| *o == b.key) 278 | } 279 | } 280 | 281 | impl PartialEq<[U; N]> for Slice 282 | where 283 | T: PartialEq, 284 | { 285 | fn eq(&self, other: &[U; N]) -> bool { 286 | >::eq(self, other) 287 | } 288 | } 289 | 290 | impl PartialEq> for [T; N] 291 | where 292 | T: PartialEq, 293 | { 294 | fn eq(&self, other: &Slice) -> bool { 295 | <[T] as PartialEq>>::eq(self, other) 296 | } 297 | } 298 | 299 | impl Eq for Slice {} 300 | 301 | impl PartialOrd for Slice { 302 | fn partial_cmp(&self, other: &Self) -> Option { 303 | self.iter().partial_cmp(other) 304 | } 305 | } 306 | 307 | impl Ord for Slice { 308 | fn cmp(&self, other: &Self) -> Ordering { 309 | self.iter().cmp(other) 310 | } 311 | } 312 | 313 | impl Hash for Slice { 314 | fn hash(&self, state: &mut H) { 315 | self.len().hash(state); 316 | for value in self { 317 | value.hash(state); 318 | } 319 | } 320 | } 321 | 322 | impl Index for Slice { 323 | type Output = T; 324 | 325 | fn index(&self, index: usize) -> &Self::Output { 326 | &self.entries[index].key 327 | } 328 | } 329 | 330 | // We can't have `impl> Index` because that conflicts with `Index`. 331 | // Instead, we repeat the implementations for all the core range types. 332 | macro_rules! 
impl_index { 333 | ($($range:ty),*) => {$( 334 | impl Index<$range> for IndexSet { 335 | type Output = Slice; 336 | 337 | fn index(&self, range: $range) -> &Self::Output { 338 | Slice::from_slice(&self.as_entries()[range]) 339 | } 340 | } 341 | 342 | impl Index<$range> for Slice { 343 | type Output = Self; 344 | 345 | fn index(&self, range: $range) -> &Self::Output { 346 | Slice::from_slice(&self.entries[range]) 347 | } 348 | } 349 | )*} 350 | } 351 | impl_index!( 352 | ops::Range, 353 | ops::RangeFrom, 354 | ops::RangeFull, 355 | ops::RangeInclusive, 356 | ops::RangeTo, 357 | ops::RangeToInclusive, 358 | (Bound, Bound) 359 | ); 360 | 361 | #[cfg(test)] 362 | mod tests { 363 | use super::*; 364 | 365 | #[test] 366 | fn slice_index() { 367 | fn check(vec_slice: &[i32], set_slice: &Slice, sub_slice: &Slice) { 368 | assert_eq!(set_slice as *const _, sub_slice as *const _); 369 | itertools::assert_equal(vec_slice, set_slice); 370 | } 371 | 372 | let vec: Vec = (0..10).map(|i| i * i).collect(); 373 | let set: IndexSet = vec.iter().cloned().collect(); 374 | let slice = set.as_slice(); 375 | 376 | // RangeFull 377 | check(&vec[..], &set[..], &slice[..]); 378 | 379 | for i in 0usize..10 { 380 | // Index 381 | assert_eq!(vec[i], set[i]); 382 | assert_eq!(vec[i], slice[i]); 383 | 384 | // RangeFrom 385 | check(&vec[i..], &set[i..], &slice[i..]); 386 | 387 | // RangeTo 388 | check(&vec[..i], &set[..i], &slice[..i]); 389 | 390 | // RangeToInclusive 391 | check(&vec[..=i], &set[..=i], &slice[..=i]); 392 | 393 | // (Bound, Bound) 394 | let bounds = (Bound::Excluded(i), Bound::Unbounded); 395 | check(&vec[i + 1..], &set[bounds], &slice[bounds]); 396 | 397 | for j in i..=10 { 398 | // Range 399 | check(&vec[i..j], &set[i..j], &slice[i..j]); 400 | } 401 | 402 | for j in i..10 { 403 | // RangeInclusive 404 | check(&vec[i..=j], &set[i..=j], &slice[i..=j]); 405 | } 406 | } 407 | } 408 | } 409 | -------------------------------------------------------------------------------- /src/inner/entry.rs: -------------------------------------------------------------------------------- 1 | use super::{equivalent, get_hash, Bucket, Core}; 2 | use crate::map::{Entry, IndexedEntry}; 3 | use crate::HashValue; 4 | use core::cmp::Ordering; 5 | use core::mem; 6 | 7 | impl<'a, K, V> Entry<'a, K, V> { 8 | pub(crate) fn new(map: &'a mut Core, hash: HashValue, key: K) -> Self 9 | where 10 | K: Eq, 11 | { 12 | let eq = equivalent(&key, &map.entries); 13 | match map.indices.find_entry(hash.get(), eq) { 14 | Ok(entry) => Entry::Occupied(OccupiedEntry { 15 | bucket: entry.bucket_index(), 16 | index: *entry.get(), 17 | map, 18 | }), 19 | Err(_) => Entry::Vacant(VacantEntry { map, hash, key }), 20 | } 21 | } 22 | } 23 | 24 | /// A view into an occupied entry in an [`IndexMap`][crate::IndexMap]. 25 | /// It is part of the [`Entry`] enum. 26 | pub struct OccupiedEntry<'a, K, V> { 27 | map: &'a mut Core, 28 | // We have a mutable reference to the map, which keeps these two 29 | // indices valid and pointing to the correct entry. 
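// (`index` is the entry's position in the ordered `entries` vector, while
// `bucket` is the position of that index inside the `indices` hash table.)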
30 | index: usize, 31 | bucket: usize, 32 | } 33 | 34 | impl<'a, K, V> OccupiedEntry<'a, K, V> { 35 | /// Constructor for `RawEntryMut::from_hash` 36 | pub(crate) fn from_hash( 37 | map: &'a mut Core, 38 | hash: HashValue, 39 | mut is_match: F, 40 | ) -> Result> 41 | where 42 | F: FnMut(&K) -> bool, 43 | { 44 | let entries = &*map.entries; 45 | let eq = move |&i: &usize| is_match(&entries[i].key); 46 | match map.indices.find_entry(hash.get(), eq) { 47 | Ok(entry) => Ok(OccupiedEntry { 48 | bucket: entry.bucket_index(), 49 | index: *entry.get(), 50 | map, 51 | }), 52 | Err(_) => Err(map), 53 | } 54 | } 55 | 56 | pub(crate) fn into_core(self) -> &'a mut Core { 57 | self.map 58 | } 59 | 60 | pub(crate) fn get_bucket(&self) -> &Bucket { 61 | &self.map.entries[self.index] 62 | } 63 | 64 | pub(crate) fn get_bucket_mut(&mut self) -> &mut Bucket { 65 | &mut self.map.entries[self.index] 66 | } 67 | 68 | pub(crate) fn into_bucket(self) -> &'a mut Bucket { 69 | &mut self.map.entries[self.index] 70 | } 71 | 72 | /// Return the index of the key-value pair 73 | #[inline] 74 | pub fn index(&self) -> usize { 75 | self.index 76 | } 77 | 78 | /// Gets a reference to the entry's key in the map. 79 | /// 80 | /// Note that this is not the key that was used to find the entry. There may be an observable 81 | /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like 82 | /// extra fields or the memory address of an allocation. 83 | pub fn key(&self) -> &K { 84 | &self.get_bucket().key 85 | } 86 | 87 | /// Gets a reference to the entry's value in the map. 88 | pub fn get(&self) -> &V { 89 | &self.get_bucket().value 90 | } 91 | 92 | /// Gets a mutable reference to the entry's value in the map. 93 | /// 94 | /// If you need a reference which may outlive the destruction of the 95 | /// [`Entry`] value, see [`into_mut`][Self::into_mut]. 96 | pub fn get_mut(&mut self) -> &mut V { 97 | &mut self.get_bucket_mut().value 98 | } 99 | 100 | /// Converts into a mutable reference to the entry's value in the map, 101 | /// with a lifetime bound to the map itself. 102 | pub fn into_mut(self) -> &'a mut V { 103 | &mut self.into_bucket().value 104 | } 105 | 106 | /// Sets the value of the entry to `value`, and returns the entry's old value. 107 | pub fn insert(&mut self, value: V) -> V { 108 | mem::replace(self.get_mut(), value) 109 | } 110 | 111 | /// Remove the key, value pair stored in the map for this entry, and return the value. 112 | /// 113 | /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this 114 | /// entry's position with the last element, and it is deprecated in favor of calling that 115 | /// explicitly. If you need to preserve the relative order of the keys in the map, use 116 | /// [`.shift_remove()`][Self::shift_remove] instead. 117 | #[deprecated(note = "`remove` disrupts the map order -- \ 118 | use `swap_remove` or `shift_remove` for explicit behavior.")] 119 | pub fn remove(self) -> V { 120 | self.swap_remove() 121 | } 122 | 123 | /// Remove the key, value pair stored in the map for this entry, and return the value. 124 | /// 125 | /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it 126 | /// with the last element of the map and popping it off. 127 | /// **This perturbs the position of what used to be the last element!** 128 | /// 129 | /// Computes in **O(1)** time (average). 
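///
/// For example (illustrative sketch):
///
/// ```
/// use indexmap::IndexMap;
/// use indexmap::map::Entry;
///
/// let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
/// if let Entry::Occupied(entry) = map.entry("b") {
///     assert_eq!(entry.swap_remove(), 2);
/// }
/// // "d", formerly the last element, now occupies "b"'s old position.
/// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "d", "c"]);
/// ```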
130 | pub fn swap_remove(self) -> V { 131 | self.swap_remove_entry().1 132 | } 133 | 134 | /// Remove the key, value pair stored in the map for this entry, and return the value. 135 | /// 136 | /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the 137 | /// elements that follow it, preserving their relative order. 138 | /// **This perturbs the index of all of those elements!** 139 | /// 140 | /// Computes in **O(n)** time (average). 141 | pub fn shift_remove(self) -> V { 142 | self.shift_remove_entry().1 143 | } 144 | 145 | /// Remove and return the key, value pair stored in the map for this entry 146 | /// 147 | /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry], 148 | /// replacing this entry's position with the last element, and it is deprecated in favor of 149 | /// calling that explicitly. If you need to preserve the relative order of the keys in the map, 150 | /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead. 151 | #[deprecated(note = "`remove_entry` disrupts the map order -- \ 152 | use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] 153 | pub fn remove_entry(self) -> (K, V) { 154 | self.swap_remove_entry() 155 | } 156 | 157 | /// Remove and return the key, value pair stored in the map for this entry 158 | /// 159 | /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it 160 | /// with the last element of the map and popping it off. 161 | /// **This perturbs the position of what used to be the last element!** 162 | /// 163 | /// Computes in **O(1)** time (average). 164 | pub fn swap_remove_entry(mut self) -> (K, V) { 165 | self.remove_index(); 166 | self.map.swap_remove_finish(self.index) 167 | } 168 | 169 | /// Remove and return the key, value pair stored in the map for this entry 170 | /// 171 | /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the 172 | /// elements that follow it, preserving their relative order. 173 | /// **This perturbs the index of all of those elements!** 174 | /// 175 | /// Computes in **O(n)** time (average). 176 | pub fn shift_remove_entry(mut self) -> (K, V) { 177 | self.remove_index(); 178 | self.map.shift_remove_finish(self.index) 179 | } 180 | 181 | fn remove_index(&mut self) { 182 | let entry = self.map.indices.get_bucket_entry(self.bucket).unwrap(); 183 | debug_assert_eq!(*entry.get(), self.index); 184 | entry.remove(); 185 | } 186 | 187 | /// Moves the position of the entry to a new index 188 | /// by shifting all other entries in-between. 189 | /// 190 | /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] 191 | /// coming `from` the current [`.index()`][Self::index]. 192 | /// 193 | /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. 194 | /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. 195 | /// 196 | /// ***Panics*** if `to` is out of bounds. 197 | /// 198 | /// Computes in **O(n)** time (average). 199 | #[track_caller] 200 | pub fn move_index(self, to: usize) { 201 | if self.index != to { 202 | let _ = self.map.entries[to]; // explicit bounds check 203 | self.map.move_index_inner(self.index, to); 204 | self.update_index(to); 205 | } 206 | } 207 | 208 | /// Swaps the position of entry with another. 
209 | /// 210 | /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] 211 | /// with the current [`.index()`][Self::index] as one of the two being swapped. 212 | /// 213 | /// ***Panics*** if the `other` index is out of bounds. 214 | /// 215 | /// Computes in **O(1)** time (average). 216 | #[track_caller] 217 | pub fn swap_indices(self, other: usize) { 218 | if self.index != other { 219 | // Since we already know where our bucket is, we only need to find the other. 220 | let hash = self.map.entries[other].hash; 221 | let other_mut = self.map.indices.find_mut(hash.get(), move |&i| i == other); 222 | *other_mut.expect("index not found") = self.index; 223 | 224 | self.map.entries.swap(self.index, other); 225 | self.update_index(other); 226 | } 227 | } 228 | 229 | fn update_index(self, to: usize) { 230 | let index = self.map.indices.get_bucket_mut(self.bucket).unwrap(); 231 | debug_assert_eq!(*index, self.index); 232 | *index = to; 233 | } 234 | } 235 | 236 | impl<'a, K, V> From> for OccupiedEntry<'a, K, V> { 237 | fn from(other: IndexedEntry<'a, K, V>) -> Self { 238 | let index = other.index(); 239 | let map = other.into_core(); 240 | let hash = map.entries[index].hash; 241 | let bucket = map 242 | .indices 243 | .find_bucket_index(hash.get(), move |&i| i == index) 244 | .expect("index not found"); 245 | Self { map, index, bucket } 246 | } 247 | } 248 | 249 | /// A view into a vacant entry in an [`IndexMap`][crate::IndexMap]. 250 | /// It is part of the [`Entry`] enum. 251 | pub struct VacantEntry<'a, K, V> { 252 | map: &'a mut Core, 253 | hash: HashValue, 254 | key: K, 255 | } 256 | 257 | impl<'a, K, V> VacantEntry<'a, K, V> { 258 | /// Return the index where a key-value pair may be inserted. 259 | pub fn index(&self) -> usize { 260 | self.map.indices.len() 261 | } 262 | 263 | /// Gets a reference to the key that was used to find the entry. 264 | pub fn key(&self) -> &K { 265 | &self.key 266 | } 267 | 268 | pub(crate) fn key_mut(&mut self) -> &mut K { 269 | &mut self.key 270 | } 271 | 272 | /// Takes ownership of the key, leaving the entry vacant. 273 | pub fn into_key(self) -> K { 274 | self.key 275 | } 276 | 277 | /// Inserts the entry's key and the given value into the map, and returns a mutable reference 278 | /// to the value. 279 | /// 280 | /// Computes in **O(1)** time (amortized average). 281 | pub fn insert(self, value: V) -> &'a mut V { 282 | let Self { map, hash, key } = self; 283 | map.insert_unique(hash, key, value).value_mut() 284 | } 285 | 286 | /// Inserts the entry's key and the given value into the map, and returns an `OccupiedEntry`. 287 | /// 288 | /// Computes in **O(1)** time (amortized average). 289 | pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> { 290 | let Self { map, hash, key } = self; 291 | let index = map.indices.len(); 292 | debug_assert_eq!(index, map.entries.len()); 293 | let bucket = map 294 | .indices 295 | .insert_unique(hash.get(), index, get_hash(&map.entries)) 296 | .bucket_index(); 297 | map.push_entry(hash, key, value); 298 | OccupiedEntry { map, index, bucket } 299 | } 300 | 301 | /// Inserts the entry's key and the given value into the map at its ordered 302 | /// position among sorted keys, and returns the new index and a mutable 303 | /// reference to the value. 304 | /// 305 | /// If the existing keys are **not** already sorted, then the insertion 306 | /// index is unspecified (like [`slice::binary_search`]), but the key-value 307 | /// pair is inserted at that position regardless. 
308 | /// 309 | /// Computes in **O(n)** time (average). 310 | pub fn insert_sorted(self, value: V) -> (usize, &'a mut V) 311 | where 312 | K: Ord, 313 | { 314 | let slice = crate::map::Slice::from_slice(&self.map.entries); 315 | let i = slice.binary_search_keys(&self.key).unwrap_err(); 316 | (i, self.shift_insert(i, value)) 317 | } 318 | 319 | /// Inserts the entry's key and the given value into the map at its ordered 320 | /// position among keys sorted by `cmp`, and returns the new index and a 321 | /// mutable reference to the value. 322 | /// 323 | /// If the existing keys are **not** already sorted, then the insertion 324 | /// index is unspecified (like [`slice::binary_search`]), but the key-value 325 | /// pair is inserted at that position regardless. 326 | /// 327 | /// Computes in **O(n)** time (average). 328 | pub fn insert_sorted_by(self, value: V, mut cmp: F) -> (usize, &'a mut V) 329 | where 330 | F: FnMut(&K, &V, &K, &V) -> Ordering, 331 | { 332 | let slice = crate::map::Slice::from_slice(&self.map.entries); 333 | let (Ok(i) | Err(i)) = slice.binary_search_by(|k, v| cmp(k, v, &self.key, &value)); 334 | (i, self.shift_insert(i, value)) 335 | } 336 | 337 | /// Inserts the entry's key and the given value into the map at its ordered 338 | /// position using a sort-key extraction function, and returns the new index 339 | /// and a mutable reference to the value. 340 | /// 341 | /// If the existing keys are **not** already sorted, then the insertion 342 | /// index is unspecified (like [`slice::binary_search`]), but the key-value 343 | /// pair is inserted at that position regardless. 344 | /// 345 | /// Computes in **O(n)** time (average). 346 | pub fn insert_sorted_by_key(self, value: V, mut sort_key: F) -> (usize, &'a mut V) 347 | where 348 | B: Ord, 349 | F: FnMut(&K, &V) -> B, 350 | { 351 | let search_key = sort_key(&self.key, &value); 352 | let slice = crate::map::Slice::from_slice(&self.map.entries); 353 | let (Ok(i) | Err(i)) = slice.binary_search_by_key(&search_key, sort_key); 354 | (i, self.shift_insert(i, value)) 355 | } 356 | 357 | /// Inserts the entry's key and the given value into the map at the given index, 358 | /// shifting others to the right, and returns a mutable reference to the value. 359 | /// 360 | /// ***Panics*** if `index` is out of bounds. 361 | /// 362 | /// Computes in **O(n)** time (average). 363 | #[track_caller] 364 | pub fn shift_insert(self, index: usize, value: V) -> &'a mut V { 365 | self.map 366 | .shift_insert_unique(index, self.hash, self.key, value) 367 | .value_mut() 368 | } 369 | 370 | /// Replaces the key at the given index with this entry's key, returning the 371 | /// old key and an `OccupiedEntry` for that index. 372 | /// 373 | /// ***Panics*** if `index` is out of bounds. 374 | /// 375 | /// Computes in **O(1)** time (average). 376 | #[track_caller] 377 | pub fn replace_index(self, index: usize) -> (K, OccupiedEntry<'a, K, V>) { 378 | let Self { map, hash, key } = self; 379 | 380 | // NB: This removal and insertion isn't "no grow" (with unreachable hasher) 381 | // because hashbrown's tombstones might force a resize anyway. 
382 | let old_hash = map.entries[index].hash; 383 | map.indices 384 | .find_entry(old_hash.get(), move |&i| i == index) 385 | .expect("index not found") 386 | .remove(); 387 | let bucket = map 388 | .indices 389 | .insert_unique(hash.get(), index, get_hash(&map.entries)) 390 | .bucket_index(); 391 | 392 | let entry = &mut map.entries[index]; 393 | entry.hash = hash; 394 | let old_key = mem::replace(&mut entry.key, key); 395 | 396 | (old_key, OccupiedEntry { map, index, bucket }) 397 | } 398 | } 399 | -------------------------------------------------------------------------------- /src/set/iter.rs: -------------------------------------------------------------------------------- 1 | use super::{Bucket, IndexSet, Slice}; 2 | use crate::inner::{Core, ExtractCore}; 3 | 4 | use alloc::vec::{self, Vec}; 5 | use core::fmt; 6 | use core::hash::{BuildHasher, Hash}; 7 | use core::iter::{Chain, FusedIterator}; 8 | use core::ops::RangeBounds; 9 | use core::slice::Iter as SliceIter; 10 | 11 | impl<'a, T, S> IntoIterator for &'a IndexSet { 12 | type Item = &'a T; 13 | type IntoIter = Iter<'a, T>; 14 | 15 | fn into_iter(self) -> Self::IntoIter { 16 | self.iter() 17 | } 18 | } 19 | 20 | impl IntoIterator for IndexSet { 21 | type Item = T; 22 | type IntoIter = IntoIter; 23 | 24 | fn into_iter(self) -> Self::IntoIter { 25 | IntoIter::new(self.into_entries()) 26 | } 27 | } 28 | 29 | /// An iterator over the items of an [`IndexSet`]. 30 | /// 31 | /// This `struct` is created by the [`IndexSet::iter`] method. 32 | /// See its documentation for more. 33 | pub struct Iter<'a, T> { 34 | iter: SliceIter<'a, Bucket>, 35 | } 36 | 37 | impl<'a, T> Iter<'a, T> { 38 | pub(super) fn new(entries: &'a [Bucket]) -> Self { 39 | Self { 40 | iter: entries.iter(), 41 | } 42 | } 43 | 44 | /// Returns a slice of the remaining entries in the iterator. 45 | pub fn as_slice(&self) -> &'a Slice { 46 | Slice::from_slice(self.iter.as_slice()) 47 | } 48 | } 49 | 50 | impl<'a, T> Iterator for Iter<'a, T> { 51 | type Item = &'a T; 52 | 53 | iterator_methods!(Bucket::key_ref); 54 | } 55 | 56 | impl DoubleEndedIterator for Iter<'_, T> { 57 | double_ended_iterator_methods!(Bucket::key_ref); 58 | } 59 | 60 | impl ExactSizeIterator for Iter<'_, T> { 61 | fn len(&self) -> usize { 62 | self.iter.len() 63 | } 64 | } 65 | 66 | impl FusedIterator for Iter<'_, T> {} 67 | 68 | impl Clone for Iter<'_, T> { 69 | fn clone(&self) -> Self { 70 | Iter { 71 | iter: self.iter.clone(), 72 | } 73 | } 74 | } 75 | 76 | impl fmt::Debug for Iter<'_, T> { 77 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 78 | f.debug_list().entries(self.clone()).finish() 79 | } 80 | } 81 | 82 | impl Default for Iter<'_, T> { 83 | fn default() -> Self { 84 | Self { iter: [].iter() } 85 | } 86 | } 87 | 88 | /// An owning iterator over the items of an [`IndexSet`]. 89 | /// 90 | /// This `struct` is created by the [`IndexSet::into_iter`] method 91 | /// (provided by the [`IntoIterator`] trait). See its documentation for more. 92 | #[derive(Clone)] 93 | pub struct IntoIter { 94 | iter: vec::IntoIter>, 95 | } 96 | 97 | impl IntoIter { 98 | pub(super) fn new(entries: Vec>) -> Self { 99 | Self { 100 | iter: entries.into_iter(), 101 | } 102 | } 103 | 104 | /// Returns a slice of the remaining entries in the iterator. 
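For readers, a small sketch of the `as_slice` helpers these set iterators provide (illustrative only, not part of the crate source):

use indexmap::IndexSet;

fn main() {
    let set: IndexSet<i32> = (1..=5).collect();

    // Iteration follows insertion order, and the iterator can hand back
    // the not-yet-yielded tail as a set slice at any point.
    let mut iter = set.iter();
    assert_eq!(iter.next(), Some(&1));
    assert_eq!(iter.next(), Some(&2));
    let rest = iter.as_slice();
    assert_eq!(rest.len(), 3);
    assert_eq!(rest[0], 3);
}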
105 | pub fn as_slice(&self) -> &Slice { 106 | Slice::from_slice(self.iter.as_slice()) 107 | } 108 | } 109 | 110 | impl Iterator for IntoIter { 111 | type Item = T; 112 | 113 | iterator_methods!(Bucket::key); 114 | } 115 | 116 | impl DoubleEndedIterator for IntoIter { 117 | double_ended_iterator_methods!(Bucket::key); 118 | } 119 | 120 | impl ExactSizeIterator for IntoIter { 121 | fn len(&self) -> usize { 122 | self.iter.len() 123 | } 124 | } 125 | 126 | impl FusedIterator for IntoIter {} 127 | 128 | impl fmt::Debug for IntoIter { 129 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 130 | let iter = self.iter.as_slice().iter().map(Bucket::key_ref); 131 | f.debug_list().entries(iter).finish() 132 | } 133 | } 134 | 135 | impl Default for IntoIter { 136 | fn default() -> Self { 137 | Self { 138 | iter: Vec::new().into_iter(), 139 | } 140 | } 141 | } 142 | 143 | /// A draining iterator over the items of an [`IndexSet`]. 144 | /// 145 | /// This `struct` is created by the [`IndexSet::drain`] method. 146 | /// See its documentation for more. 147 | pub struct Drain<'a, T> { 148 | iter: vec::Drain<'a, Bucket>, 149 | } 150 | 151 | impl<'a, T> Drain<'a, T> { 152 | pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { 153 | Self { iter } 154 | } 155 | 156 | /// Returns a slice of the remaining entries in the iterator. 157 | pub fn as_slice(&self) -> &Slice { 158 | Slice::from_slice(self.iter.as_slice()) 159 | } 160 | } 161 | 162 | impl Iterator for Drain<'_, T> { 163 | type Item = T; 164 | 165 | iterator_methods!(Bucket::key); 166 | } 167 | 168 | impl DoubleEndedIterator for Drain<'_, T> { 169 | double_ended_iterator_methods!(Bucket::key); 170 | } 171 | 172 | impl ExactSizeIterator for Drain<'_, T> { 173 | fn len(&self) -> usize { 174 | self.iter.len() 175 | } 176 | } 177 | 178 | impl FusedIterator for Drain<'_, T> {} 179 | 180 | impl fmt::Debug for Drain<'_, T> { 181 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 182 | let iter = self.iter.as_slice().iter().map(Bucket::key_ref); 183 | f.debug_list().entries(iter).finish() 184 | } 185 | } 186 | 187 | /// A lazy iterator producing elements in the difference of [`IndexSet`]s. 188 | /// 189 | /// This `struct` is created by the [`IndexSet::difference`] method. 190 | /// See its documentation for more. 
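A short usage sketch of the lazy difference and intersection iterators documented here (illustrative only; the ordering shown follows the iterator implementations below):

use indexmap::{indexset, IndexSet};

fn main() {
    let a: IndexSet<i32> = indexset! {1, 2, 3, 4};
    let b: IndexSet<i32> = indexset! {4, 2, 9};

    // `difference` walks `a` in its insertion order and skips anything in `b`.
    let diff: Vec<&i32> = a.difference(&b).collect();
    assert_eq!(diff, [&1, &3]);

    // `intersection` also follows `a`'s order, keeping only shared values.
    let both: Vec<&i32> = a.intersection(&b).collect();
    assert_eq!(both, [&2, &4]);
}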
191 | pub struct Difference<'a, T, S> { 192 | iter: Iter<'a, T>, 193 | other: &'a IndexSet, 194 | } 195 | 196 | impl<'a, T, S> Difference<'a, T, S> { 197 | pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { 198 | Self { 199 | iter: set.iter(), 200 | other, 201 | } 202 | } 203 | } 204 | 205 | impl<'a, T, S> Iterator for Difference<'a, T, S> 206 | where 207 | T: Eq + Hash, 208 | S: BuildHasher, 209 | { 210 | type Item = &'a T; 211 | 212 | fn next(&mut self) -> Option { 213 | while let Some(item) = self.iter.next() { 214 | if !self.other.contains(item) { 215 | return Some(item); 216 | } 217 | } 218 | None 219 | } 220 | 221 | fn size_hint(&self) -> (usize, Option) { 222 | (0, self.iter.size_hint().1) 223 | } 224 | } 225 | 226 | impl DoubleEndedIterator for Difference<'_, T, S> 227 | where 228 | T: Eq + Hash, 229 | S: BuildHasher, 230 | { 231 | fn next_back(&mut self) -> Option { 232 | while let Some(item) = self.iter.next_back() { 233 | if !self.other.contains(item) { 234 | return Some(item); 235 | } 236 | } 237 | None 238 | } 239 | } 240 | 241 | impl FusedIterator for Difference<'_, T, S> 242 | where 243 | T: Eq + Hash, 244 | S: BuildHasher, 245 | { 246 | } 247 | 248 | impl Clone for Difference<'_, T, S> { 249 | fn clone(&self) -> Self { 250 | Difference { 251 | iter: self.iter.clone(), 252 | ..*self 253 | } 254 | } 255 | } 256 | 257 | impl fmt::Debug for Difference<'_, T, S> 258 | where 259 | T: fmt::Debug + Eq + Hash, 260 | S: BuildHasher, 261 | { 262 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 263 | f.debug_list().entries(self.clone()).finish() 264 | } 265 | } 266 | 267 | /// A lazy iterator producing elements in the intersection of [`IndexSet`]s. 268 | /// 269 | /// This `struct` is created by the [`IndexSet::intersection`] method. 270 | /// See its documentation for more. 
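These adapters also back the `HashSet`-style operators that `IndexSet` implements on references (`-`, `|`, `&`, `^`); those operator impls live elsewhere in the crate, so treat this as a hedged sketch:

use indexmap::{indexset, IndexSet};

fn main() {
    let a: IndexSet<i32> = indexset! {1, 2, 3};
    let b: IndexSet<i32> = indexset! {3, 4};

    // `-` clones the difference and `|` clones the union into new sets,
    // keeping the same ordering as the lazy iterators.
    let diff: IndexSet<i32> = &a - &b;
    let combined: IndexSet<i32> = &a | &b;

    assert!(diff.iter().eq([1, 2].iter()));
    assert!(combined.iter().eq([1, 2, 3, 4].iter()));
}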
271 | pub struct Intersection<'a, T, S> { 272 | iter: Iter<'a, T>, 273 | other: &'a IndexSet, 274 | } 275 | 276 | impl<'a, T, S> Intersection<'a, T, S> { 277 | pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { 278 | Self { 279 | iter: set.iter(), 280 | other, 281 | } 282 | } 283 | } 284 | 285 | impl<'a, T, S> Iterator for Intersection<'a, T, S> 286 | where 287 | T: Eq + Hash, 288 | S: BuildHasher, 289 | { 290 | type Item = &'a T; 291 | 292 | fn next(&mut self) -> Option { 293 | while let Some(item) = self.iter.next() { 294 | if self.other.contains(item) { 295 | return Some(item); 296 | } 297 | } 298 | None 299 | } 300 | 301 | fn size_hint(&self) -> (usize, Option) { 302 | (0, self.iter.size_hint().1) 303 | } 304 | } 305 | 306 | impl DoubleEndedIterator for Intersection<'_, T, S> 307 | where 308 | T: Eq + Hash, 309 | S: BuildHasher, 310 | { 311 | fn next_back(&mut self) -> Option { 312 | while let Some(item) = self.iter.next_back() { 313 | if self.other.contains(item) { 314 | return Some(item); 315 | } 316 | } 317 | None 318 | } 319 | } 320 | 321 | impl FusedIterator for Intersection<'_, T, S> 322 | where 323 | T: Eq + Hash, 324 | S: BuildHasher, 325 | { 326 | } 327 | 328 | impl Clone for Intersection<'_, T, S> { 329 | fn clone(&self) -> Self { 330 | Intersection { 331 | iter: self.iter.clone(), 332 | ..*self 333 | } 334 | } 335 | } 336 | 337 | impl fmt::Debug for Intersection<'_, T, S> 338 | where 339 | T: fmt::Debug + Eq + Hash, 340 | S: BuildHasher, 341 | { 342 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 343 | f.debug_list().entries(self.clone()).finish() 344 | } 345 | } 346 | 347 | /// A lazy iterator producing elements in the symmetric difference of [`IndexSet`]s. 348 | /// 349 | /// This `struct` is created by the [`IndexSet::symmetric_difference`] method. 350 | /// See its documentation for more. 
351 | pub struct SymmetricDifference<'a, T, S1, S2> { 352 | iter: Chain, Difference<'a, T, S1>>, 353 | } 354 | 355 | impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2> 356 | where 357 | T: Eq + Hash, 358 | S1: BuildHasher, 359 | S2: BuildHasher, 360 | { 361 | pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self { 362 | let diff1 = set1.difference(set2); 363 | let diff2 = set2.difference(set1); 364 | Self { 365 | iter: diff1.chain(diff2), 366 | } 367 | } 368 | } 369 | 370 | impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> 371 | where 372 | T: Eq + Hash, 373 | S1: BuildHasher, 374 | S2: BuildHasher, 375 | { 376 | type Item = &'a T; 377 | 378 | fn next(&mut self) -> Option { 379 | self.iter.next() 380 | } 381 | 382 | fn size_hint(&self) -> (usize, Option) { 383 | self.iter.size_hint() 384 | } 385 | 386 | fn fold(self, init: B, f: F) -> B 387 | where 388 | F: FnMut(B, Self::Item) -> B, 389 | { 390 | self.iter.fold(init, f) 391 | } 392 | } 393 | 394 | impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> 395 | where 396 | T: Eq + Hash, 397 | S1: BuildHasher, 398 | S2: BuildHasher, 399 | { 400 | fn next_back(&mut self) -> Option { 401 | self.iter.next_back() 402 | } 403 | 404 | fn rfold(self, init: B, f: F) -> B 405 | where 406 | F: FnMut(B, Self::Item) -> B, 407 | { 408 | self.iter.rfold(init, f) 409 | } 410 | } 411 | 412 | impl FusedIterator for SymmetricDifference<'_, T, S1, S2> 413 | where 414 | T: Eq + Hash, 415 | S1: BuildHasher, 416 | S2: BuildHasher, 417 | { 418 | } 419 | 420 | impl Clone for SymmetricDifference<'_, T, S1, S2> { 421 | fn clone(&self) -> Self { 422 | SymmetricDifference { 423 | iter: self.iter.clone(), 424 | } 425 | } 426 | } 427 | 428 | impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> 429 | where 430 | T: fmt::Debug + Eq + Hash, 431 | S1: BuildHasher, 432 | S2: BuildHasher, 433 | { 434 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 435 | f.debug_list().entries(self.clone()).finish() 436 | } 437 | } 438 | 439 | /// A lazy iterator producing elements in the union of [`IndexSet`]s. 440 | /// 441 | /// This `struct` is created by the [`IndexSet::union`] method. 442 | /// See its documentation for more. 
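A sketch of the ordering that falls out of the chained `symmetric_difference` and `union` implementations (illustrative only, not part of the crate source):

use indexmap::{indexset, IndexSet};

fn main() {
    let a: IndexSet<i32> = indexset! {1, 2, 3};
    let b: IndexSet<i32> = indexset! {3, 4, 1};

    // `union` yields all of `a` in order, then the values unique to `b`.
    let union_order: Vec<&i32> = a.union(&b).collect();
    assert_eq!(union_order, [&1, &2, &3, &4]);

    // `symmetric_difference` chains `a - b` with `b - a`.
    let sym: Vec<&i32> = a.symmetric_difference(&b).collect();
    assert_eq!(sym, [&2, &4]);
}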
443 | pub struct Union<'a, T, S> { 444 | iter: Chain, Difference<'a, T, S>>, 445 | } 446 | 447 | impl<'a, T, S> Union<'a, T, S> 448 | where 449 | T: Eq + Hash, 450 | S: BuildHasher, 451 | { 452 | pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self 453 | where 454 | S2: BuildHasher, 455 | { 456 | Self { 457 | iter: set1.iter().chain(set2.difference(set1)), 458 | } 459 | } 460 | } 461 | 462 | impl<'a, T, S> Iterator for Union<'a, T, S> 463 | where 464 | T: Eq + Hash, 465 | S: BuildHasher, 466 | { 467 | type Item = &'a T; 468 | 469 | fn next(&mut self) -> Option { 470 | self.iter.next() 471 | } 472 | 473 | fn size_hint(&self) -> (usize, Option) { 474 | self.iter.size_hint() 475 | } 476 | 477 | fn fold(self, init: B, f: F) -> B 478 | where 479 | F: FnMut(B, Self::Item) -> B, 480 | { 481 | self.iter.fold(init, f) 482 | } 483 | } 484 | 485 | impl DoubleEndedIterator for Union<'_, T, S> 486 | where 487 | T: Eq + Hash, 488 | S: BuildHasher, 489 | { 490 | fn next_back(&mut self) -> Option { 491 | self.iter.next_back() 492 | } 493 | 494 | fn rfold(self, init: B, f: F) -> B 495 | where 496 | F: FnMut(B, Self::Item) -> B, 497 | { 498 | self.iter.rfold(init, f) 499 | } 500 | } 501 | 502 | impl FusedIterator for Union<'_, T, S> 503 | where 504 | T: Eq + Hash, 505 | S: BuildHasher, 506 | { 507 | } 508 | 509 | impl Clone for Union<'_, T, S> { 510 | fn clone(&self) -> Self { 511 | Union { 512 | iter: self.iter.clone(), 513 | } 514 | } 515 | } 516 | 517 | impl fmt::Debug for Union<'_, T, S> 518 | where 519 | T: fmt::Debug + Eq + Hash, 520 | S: BuildHasher, 521 | { 522 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 523 | f.debug_list().entries(self.clone()).finish() 524 | } 525 | } 526 | 527 | /// A splicing iterator for `IndexSet`. 528 | /// 529 | /// This `struct` is created by [`IndexSet::splice()`]. 530 | /// See its documentation for more. 
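A hedged usage sketch of `IndexSet::splice` as documented above, assuming the `(range, replace_with)` argument order used by the constructor below:

use indexmap::{indexset, IndexSet};

fn main() {
    let mut set: IndexSet<i32> = indexset! {0, 1, 2, 3, 4};

    // Replace positions 1..3 with new values; the returned iterator yields
    // the removed values, and the replacements take over that position.
    let removed: Vec<i32> = set.splice(1..3, [10, 11, 12]).collect();
    assert_eq!(removed, [1, 2]);
    assert!(set.iter().eq([0, 10, 11, 12, 3, 4].iter()));
}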
531 | pub struct Splice<'a, I, T, S> 532 | where 533 | I: Iterator, 534 | T: Hash + Eq, 535 | S: BuildHasher, 536 | { 537 | iter: crate::map::Splice<'a, UnitValue, T, (), S>, 538 | } 539 | 540 | impl<'a, I, T, S> Splice<'a, I, T, S> 541 | where 542 | I: Iterator, 543 | T: Hash + Eq, 544 | S: BuildHasher, 545 | { 546 | #[track_caller] 547 | pub(super) fn new(set: &'a mut IndexSet, range: R, replace_with: I) -> Self 548 | where 549 | R: RangeBounds, 550 | { 551 | Self { 552 | iter: set.map.splice(range, UnitValue(replace_with)), 553 | } 554 | } 555 | } 556 | 557 | impl Iterator for Splice<'_, I, T, S> 558 | where 559 | I: Iterator, 560 | T: Hash + Eq, 561 | S: BuildHasher, 562 | { 563 | type Item = T; 564 | 565 | fn next(&mut self) -> Option { 566 | Some(self.iter.next()?.0) 567 | } 568 | 569 | fn size_hint(&self) -> (usize, Option) { 570 | self.iter.size_hint() 571 | } 572 | } 573 | 574 | impl DoubleEndedIterator for Splice<'_, I, T, S> 575 | where 576 | I: Iterator, 577 | T: Hash + Eq, 578 | S: BuildHasher, 579 | { 580 | fn next_back(&mut self) -> Option { 581 | Some(self.iter.next_back()?.0) 582 | } 583 | } 584 | 585 | impl ExactSizeIterator for Splice<'_, I, T, S> 586 | where 587 | I: Iterator, 588 | T: Hash + Eq, 589 | S: BuildHasher, 590 | { 591 | fn len(&self) -> usize { 592 | self.iter.len() 593 | } 594 | } 595 | 596 | impl FusedIterator for Splice<'_, I, T, S> 597 | where 598 | I: Iterator, 599 | T: Hash + Eq, 600 | S: BuildHasher, 601 | { 602 | } 603 | 604 | struct UnitValue(I); 605 | 606 | impl Iterator for UnitValue { 607 | type Item = (I::Item, ()); 608 | 609 | fn next(&mut self) -> Option { 610 | self.0.next().map(|x| (x, ())) 611 | } 612 | } 613 | 614 | impl fmt::Debug for Splice<'_, I, T, S> 615 | where 616 | I: fmt::Debug + Iterator, 617 | T: fmt::Debug + Hash + Eq, 618 | S: BuildHasher, 619 | { 620 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 621 | fmt::Debug::fmt(&self.iter, f) 622 | } 623 | } 624 | 625 | impl fmt::Debug for UnitValue { 626 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 627 | fmt::Debug::fmt(&self.0, f) 628 | } 629 | } 630 | 631 | /// An extracting iterator for `IndexSet`. 632 | /// 633 | /// This `struct` is created by [`IndexSet::extract_if()`]. 634 | /// See its documentation for more. 
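A hedged usage sketch of `IndexSet::extract_if`; the `(range, predicate)` argument order is assumed from the constructor below:

use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<i32> = (0..10).collect();

    // Pull the even values out of the full range, keeping the rest in order.
    let evens: Vec<i32> = set.extract_if(.., |x| x % 2 == 0).collect();
    assert_eq!(evens, [0, 2, 4, 6, 8]);
    assert!(set.iter().eq([1, 3, 5, 7, 9].iter()));
}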
635 | pub struct ExtractIf<'a, T, F> { 636 | inner: ExtractCore<'a, T, ()>, 637 | pred: F, 638 | } 639 | 640 | impl ExtractIf<'_, T, F> { 641 | #[track_caller] 642 | pub(super) fn new(core: &mut Core, range: R, pred: F) -> ExtractIf<'_, T, F> 643 | where 644 | R: RangeBounds, 645 | F: FnMut(&T) -> bool, 646 | { 647 | ExtractIf { 648 | inner: core.extract(range), 649 | pred, 650 | } 651 | } 652 | } 653 | 654 | impl Iterator for ExtractIf<'_, T, F> 655 | where 656 | F: FnMut(&T) -> bool, 657 | { 658 | type Item = T; 659 | 660 | fn next(&mut self) -> Option { 661 | self.inner 662 | .extract_if(|bucket| (self.pred)(bucket.key_ref())) 663 | .map(Bucket::key) 664 | } 665 | 666 | fn size_hint(&self) -> (usize, Option) { 667 | (0, Some(self.inner.remaining())) 668 | } 669 | } 670 | 671 | impl FusedIterator for ExtractIf<'_, T, F> where F: FnMut(&T) -> bool {} 672 | 673 | impl fmt::Debug for ExtractIf<'_, T, F> 674 | where 675 | T: fmt::Debug, 676 | { 677 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 678 | f.debug_struct("ExtractIf").finish_non_exhaustive() 679 | } 680 | } 681 | -------------------------------------------------------------------------------- /benches/bench.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use fnv::FnvHasher; 6 | use std::hash::BuildHasherDefault; 7 | use std::hash::Hash; 8 | use std::hint::black_box; 9 | use std::sync::LazyLock; 10 | type FnvBuilder = BuildHasherDefault; 11 | 12 | use test::Bencher; 13 | 14 | use indexmap::IndexMap; 15 | 16 | use std::collections::HashMap; 17 | 18 | /// Use a consistently seeded Rng for benchmark stability 19 | fn small_rng() -> fastrand::Rng { 20 | let seed = u64::from_le_bytes(*b"indexmap"); 21 | fastrand::Rng::with_seed(seed) 22 | } 23 | 24 | #[bench] 25 | fn new_hashmap(b: &mut Bencher) { 26 | b.iter(|| HashMap::::new()); 27 | } 28 | 29 | #[bench] 30 | fn new_indexmap(b: &mut Bencher) { 31 | b.iter(|| IndexMap::::new()); 32 | } 33 | 34 | #[bench] 35 | fn with_capacity_10e5_hashmap(b: &mut Bencher) { 36 | b.iter(|| HashMap::::with_capacity(10_000)); 37 | } 38 | 39 | #[bench] 40 | fn with_capacity_10e5_indexmap(b: &mut Bencher) { 41 | b.iter(|| IndexMap::::with_capacity(10_000)); 42 | } 43 | 44 | #[bench] 45 | fn insert_hashmap_10_000(b: &mut Bencher) { 46 | let c = 10_000; 47 | b.iter(|| { 48 | let mut map = HashMap::with_capacity(c); 49 | for x in 0..c { 50 | map.insert(x, ()); 51 | } 52 | map 53 | }); 54 | } 55 | 56 | #[bench] 57 | fn insert_indexmap_10_000(b: &mut Bencher) { 58 | let c = 10_000; 59 | b.iter(|| { 60 | let mut map = IndexMap::with_capacity(c); 61 | for x in 0..c { 62 | map.insert(x, ()); 63 | } 64 | map 65 | }); 66 | } 67 | 68 | #[bench] 69 | fn insert_hashmap_string_10_000(b: &mut Bencher) { 70 | let c = 10_000; 71 | b.iter(|| { 72 | let mut map = HashMap::with_capacity(c); 73 | for x in 0..c { 74 | map.insert(x.to_string(), ()); 75 | } 76 | map 77 | }); 78 | } 79 | 80 | #[bench] 81 | fn insert_indexmap_string_10_000(b: &mut Bencher) { 82 | let c = 10_000; 83 | b.iter(|| { 84 | let mut map = IndexMap::with_capacity(c); 85 | for x in 0..c { 86 | map.insert(x.to_string(), ()); 87 | } 88 | map 89 | }); 90 | } 91 | 92 | #[bench] 93 | fn insert_hashmap_str_10_000(b: &mut Bencher) { 94 | let c = 10_000; 95 | let ss = Vec::from_iter((0..c).map(|x| x.to_string())); 96 | b.iter(|| { 97 | let mut map = HashMap::with_capacity(c); 98 | for key in &ss { 99 | map.insert(&key[..], ()); 100 | } 101 | map 102 | }); 103 | 
} 104 | 105 | #[bench] 106 | fn insert_indexmap_str_10_000(b: &mut Bencher) { 107 | let c = 10_000; 108 | let ss = Vec::from_iter((0..c).map(|x| x.to_string())); 109 | b.iter(|| { 110 | let mut map = IndexMap::with_capacity(c); 111 | for key in &ss { 112 | map.insert(&key[..], ()); 113 | } 114 | map 115 | }); 116 | } 117 | 118 | #[bench] 119 | fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { 120 | let c = 10_000; 121 | let value = [0u64; 10]; 122 | b.iter(|| { 123 | let mut map = HashMap::with_capacity(c); 124 | for i in 0..c { 125 | map.insert(i, value); 126 | } 127 | map 128 | }); 129 | } 130 | 131 | #[bench] 132 | fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) { 133 | let c = 10_000; 134 | let value = [0u64; 10]; 135 | b.iter(|| { 136 | let mut map = IndexMap::with_capacity(c); 137 | for i in 0..c { 138 | map.insert(i, value); 139 | } 140 | map 141 | }); 142 | } 143 | 144 | #[bench] 145 | fn insert_hashmap_100_000(b: &mut Bencher) { 146 | let c = 100_000; 147 | b.iter(|| { 148 | let mut map = HashMap::with_capacity(c); 149 | for x in 0..c { 150 | map.insert(x, ()); 151 | } 152 | map 153 | }); 154 | } 155 | 156 | #[bench] 157 | fn insert_indexmap_100_000(b: &mut Bencher) { 158 | let c = 100_000; 159 | b.iter(|| { 160 | let mut map = IndexMap::with_capacity(c); 161 | for x in 0..c { 162 | map.insert(x, ()); 163 | } 164 | map 165 | }); 166 | } 167 | 168 | #[bench] 169 | fn insert_hashmap_150(b: &mut Bencher) { 170 | let c = 150; 171 | b.iter(|| { 172 | let mut map = HashMap::with_capacity(c); 173 | for x in 0..c { 174 | map.insert(x, ()); 175 | } 176 | map 177 | }); 178 | } 179 | 180 | #[bench] 181 | fn insert_indexmap_150(b: &mut Bencher) { 182 | let c = 150; 183 | b.iter(|| { 184 | let mut map = IndexMap::with_capacity(c); 185 | for x in 0..c { 186 | map.insert(x, ()); 187 | } 188 | map 189 | }); 190 | } 191 | 192 | #[bench] 193 | fn entry_hashmap_150(b: &mut Bencher) { 194 | let c = 150; 195 | b.iter(|| { 196 | let mut map = HashMap::with_capacity(c); 197 | for x in 0..c { 198 | map.entry(x).or_insert(()); 199 | } 200 | map 201 | }); 202 | } 203 | 204 | #[bench] 205 | fn entry_indexmap_150(b: &mut Bencher) { 206 | let c = 150; 207 | b.iter(|| { 208 | let mut map = IndexMap::with_capacity(c); 209 | for x in 0..c { 210 | map.entry(x).or_insert(()); 211 | } 212 | map 213 | }); 214 | } 215 | 216 | #[bench] 217 | fn iter_sum_hashmap_10_000(b: &mut Bencher) { 218 | let c = 10_000; 219 | let mut map = HashMap::with_capacity(c); 220 | let len = c - c / 10; 221 | for x in 0..len { 222 | map.insert(x, ()); 223 | } 224 | assert_eq!(map.len(), len); 225 | b.iter(|| map.keys().sum::()); 226 | } 227 | 228 | #[bench] 229 | fn iter_sum_indexmap_10_000(b: &mut Bencher) { 230 | let c = 10_000; 231 | let mut map = IndexMap::with_capacity(c); 232 | let len = c - c / 10; 233 | for x in 0..len { 234 | map.insert(x, ()); 235 | } 236 | assert_eq!(map.len(), len); 237 | b.iter(|| map.keys().sum::()); 238 | } 239 | 240 | #[bench] 241 | fn iter_black_box_hashmap_10_000(b: &mut Bencher) { 242 | let c = 10_000; 243 | let mut map = HashMap::with_capacity(c); 244 | let len = c - c / 10; 245 | for x in 0..len { 246 | map.insert(x, ()); 247 | } 248 | assert_eq!(map.len(), len); 249 | b.iter(|| { 250 | for &key in map.keys() { 251 | black_box(key); 252 | } 253 | }); 254 | } 255 | 256 | #[bench] 257 | fn iter_black_box_indexmap_10_000(b: &mut Bencher) { 258 | let c = 10_000; 259 | let mut map = IndexMap::with_capacity(c); 260 | let len = c - c / 10; 261 | for x in 0..len { 262 | map.insert(x, ()); 263 | 
} 264 | assert_eq!(map.len(), len); 265 | b.iter(|| { 266 | for &key in map.keys() { 267 | black_box(key); 268 | } 269 | }); 270 | } 271 | 272 | fn shuffled_keys(iter: I) -> Vec 273 | where 274 | I: IntoIterator, 275 | { 276 | let mut v = Vec::from_iter(iter); 277 | let mut rng = small_rng(); 278 | rng.shuffle(&mut v); 279 | v 280 | } 281 | 282 | #[bench] 283 | fn lookup_hashmap_10_000_exist(b: &mut Bencher) { 284 | let c = 10_000; 285 | let mut map = HashMap::with_capacity(c); 286 | let keys = shuffled_keys(0..c); 287 | for &key in &keys { 288 | map.insert(key, 1); 289 | } 290 | b.iter(|| { 291 | let mut found = 0; 292 | for key in 5000..c { 293 | found += map.get(&key).is_some() as i32; 294 | } 295 | found 296 | }); 297 | } 298 | 299 | #[bench] 300 | fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { 301 | let c = 10_000; 302 | let mut map = HashMap::with_capacity(c); 303 | let keys = shuffled_keys(0..c); 304 | for &key in &keys { 305 | map.insert(key, 1); 306 | } 307 | b.iter(|| { 308 | let mut found = 0; 309 | for key in c..15000 { 310 | found += map.get(&key).is_some() as i32; 311 | } 312 | found 313 | }); 314 | } 315 | 316 | #[bench] 317 | fn lookup_indexmap_10_000_exist(b: &mut Bencher) { 318 | let c = 10_000; 319 | let mut map = IndexMap::with_capacity(c); 320 | let keys = shuffled_keys(0..c); 321 | for &key in &keys { 322 | map.insert(key, 1); 323 | } 324 | b.iter(|| { 325 | let mut found = 0; 326 | for key in 5000..c { 327 | found += map.get(&key).is_some() as i32; 328 | } 329 | found 330 | }); 331 | } 332 | 333 | #[bench] 334 | fn lookup_indexmap_10_000_noexist(b: &mut Bencher) { 335 | let c = 10_000; 336 | let mut map = IndexMap::with_capacity(c); 337 | let keys = shuffled_keys(0..c); 338 | for &key in &keys { 339 | map.insert(key, 1); 340 | } 341 | b.iter(|| { 342 | let mut found = 0; 343 | for key in c..15000 { 344 | found += map.get(&key).is_some() as i32; 345 | } 346 | found 347 | }); 348 | } 349 | 350 | // number of items to look up 351 | const LOOKUP_MAP_SIZE: u32 = 100_000_u32; 352 | const LOOKUP_SAMPLE_SIZE: u32 = 5000; 353 | const SORT_MAP_SIZE: usize = 10_000; 354 | 355 | // use (lazy) statics so that comparison benchmarks use the exact same inputs 356 | 357 | static KEYS: LazyLock> = LazyLock::new(|| shuffled_keys(0..LOOKUP_MAP_SIZE)); 358 | 359 | static HMAP_100K: LazyLock> = LazyLock::new(|| { 360 | let c = LOOKUP_MAP_SIZE; 361 | let mut map = HashMap::with_capacity(c as usize); 362 | let keys = &*KEYS; 363 | for &key in keys { 364 | map.insert(key, key); 365 | } 366 | map 367 | }); 368 | 369 | static IMAP_100K: LazyLock> = LazyLock::new(|| { 370 | let c = LOOKUP_MAP_SIZE; 371 | let mut map = IndexMap::with_capacity(c as usize); 372 | let keys = &*KEYS; 373 | for &key in keys { 374 | map.insert(key, key); 375 | } 376 | map 377 | }); 378 | 379 | static IMAP_SORT_U32: LazyLock> = LazyLock::new(|| { 380 | let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); 381 | for &key in &KEYS[..SORT_MAP_SIZE] { 382 | map.insert(key, key); 383 | } 384 | map 385 | }); 386 | 387 | static IMAP_SORT_S: LazyLock> = LazyLock::new(|| { 388 | let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); 389 | for &key in &KEYS[..SORT_MAP_SIZE] { 390 | map.insert(format!("{:^16x}", &key), String::new()); 391 | } 392 | map 393 | }); 394 | 395 | #[bench] 396 | fn lookup_hashmap_100_000_multi(b: &mut Bencher) { 397 | let map = &*HMAP_100K; 398 | b.iter(|| { 399 | let mut found = 0; 400 | for key in 0..LOOKUP_SAMPLE_SIZE { 401 | found += map.get(&key).is_some() as u32; 402 | } 403 | found 404 | }); 
405 | } 406 | 407 | #[bench] 408 | fn lookup_indexmap_100_000_multi(b: &mut Bencher) { 409 | let map = &*IMAP_100K; 410 | b.iter(|| { 411 | let mut found = 0; 412 | for key in 0..LOOKUP_SAMPLE_SIZE { 413 | found += map.get(&key).is_some() as u32; 414 | } 415 | found 416 | }); 417 | } 418 | 419 | // inorder: Test looking up keys in the same order as they were inserted 420 | #[bench] 421 | fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { 422 | let map = &*HMAP_100K; 423 | let keys = &*KEYS; 424 | b.iter(|| { 425 | let mut found = 0; 426 | for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { 427 | found += map.get(key).is_some() as u32; 428 | } 429 | found 430 | }); 431 | } 432 | 433 | #[bench] 434 | fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) { 435 | let map = &*IMAP_100K; 436 | let keys = &*KEYS; 437 | b.iter(|| { 438 | let mut found = 0; 439 | for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { 440 | found += map.get(key).is_some() as u32; 441 | } 442 | found 443 | }); 444 | } 445 | 446 | #[bench] 447 | fn lookup_hashmap_100_000_single(b: &mut Bencher) { 448 | let map = &*HMAP_100K; 449 | let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); 450 | b.iter(|| { 451 | let key = iter.next().unwrap(); 452 | map.get(&key).is_some() 453 | }); 454 | } 455 | 456 | #[bench] 457 | fn lookup_indexmap_100_000_single(b: &mut Bencher) { 458 | let map = &*IMAP_100K; 459 | let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); 460 | b.iter(|| { 461 | let key = iter.next().unwrap(); 462 | map.get(&key).is_some() 463 | }); 464 | } 465 | 466 | const GROW_SIZE: usize = 100_000; 467 | type GrowKey = u32; 468 | 469 | // Test grow/resize without preallocation 470 | #[bench] 471 | fn grow_fnv_hashmap_100_000(b: &mut Bencher) { 472 | b.iter(|| { 473 | let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); 474 | for x in 0..GROW_SIZE { 475 | map.insert(x as GrowKey, x as GrowKey); 476 | } 477 | map 478 | }); 479 | } 480 | 481 | #[bench] 482 | fn grow_fnv_indexmap_100_000(b: &mut Bencher) { 483 | b.iter(|| { 484 | let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); 485 | for x in 0..GROW_SIZE { 486 | map.insert(x as GrowKey, x as GrowKey); 487 | } 488 | map 489 | }); 490 | } 491 | 492 | const MERGE: u64 = 10_000; 493 | #[bench] 494 | fn hashmap_merge_simple(b: &mut Bencher) { 495 | let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); 496 | let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 497 | b.iter(|| { 498 | let mut merged = first_map.clone(); 499 | merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); 500 | merged 501 | }); 502 | } 503 | 504 | #[bench] 505 | fn hashmap_merge_shuffle(b: &mut Bencher) { 506 | let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); 507 | let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 508 | let mut v = Vec::new(); 509 | let mut rng = small_rng(); 510 | b.iter(|| { 511 | let mut merged = first_map.clone(); 512 | v.extend(second_map.iter().map(|(&k, &v)| (k, v))); 513 | rng.shuffle(&mut v); 514 | merged.extend(v.drain(..)); 515 | 516 | merged 517 | }); 518 | } 519 | 520 | #[bench] 521 | fn indexmap_merge_simple(b: &mut Bencher) { 522 | let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); 523 | let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 524 | b.iter(|| { 525 | let mut merged = first_map.clone(); 526 | merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); 527 | merged 528 | }); 529 | } 530 | 531 | #[bench] 532 | 
fn indexmap_merge_shuffle(b: &mut Bencher) { 533 | let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); 534 | let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 535 | let mut v = Vec::new(); 536 | let mut rng = small_rng(); 537 | b.iter(|| { 538 | let mut merged = first_map.clone(); 539 | v.extend(second_map.iter().map(|(&k, &v)| (k, v))); 540 | rng.shuffle(&mut v); 541 | merged.extend(v.drain(..)); 542 | 543 | merged 544 | }); 545 | } 546 | 547 | #[bench] 548 | fn swap_remove_indexmap_100_000(b: &mut Bencher) { 549 | let map = IMAP_100K.clone(); 550 | let mut keys = Vec::from_iter(map.keys().copied()); 551 | let mut rng = small_rng(); 552 | rng.shuffle(&mut keys); 553 | 554 | b.iter(|| { 555 | let mut map = map.clone(); 556 | for key in &keys { 557 | map.swap_remove(key); 558 | } 559 | assert_eq!(map.len(), 0); 560 | map 561 | }); 562 | } 563 | 564 | #[bench] 565 | fn shift_remove_indexmap_100_000_few(b: &mut Bencher) { 566 | let map = IMAP_100K.clone(); 567 | let mut keys = Vec::from_iter(map.keys().copied()); 568 | let mut rng = small_rng(); 569 | rng.shuffle(&mut keys); 570 | keys.truncate(50); 571 | 572 | b.iter(|| { 573 | let mut map = map.clone(); 574 | for key in &keys { 575 | map.shift_remove(key); 576 | } 577 | assert_eq!(map.len(), IMAP_100K.len() - keys.len()); 578 | map 579 | }); 580 | } 581 | 582 | #[bench] 583 | fn shift_remove_indexmap_2_000_full(b: &mut Bencher) { 584 | let mut keys = KEYS[..2_000].to_vec(); 585 | let mut map = IndexMap::with_capacity(keys.len()); 586 | for &key in &keys { 587 | map.insert(key, key); 588 | } 589 | let mut rng = small_rng(); 590 | rng.shuffle(&mut keys); 591 | 592 | b.iter(|| { 593 | let mut map = map.clone(); 594 | for key in &keys { 595 | map.shift_remove(key); 596 | } 597 | assert_eq!(map.len(), 0); 598 | map 599 | }); 600 | } 601 | 602 | #[bench] 603 | fn pop_indexmap_100_000(b: &mut Bencher) { 604 | let map = IMAP_100K.clone(); 605 | 606 | b.iter(|| { 607 | let mut map = map.clone(); 608 | while !map.is_empty() { 609 | map.pop(); 610 | } 611 | assert_eq!(map.len(), 0); 612 | map 613 | }); 614 | } 615 | 616 | #[bench] 617 | fn few_retain_indexmap_100_000(b: &mut Bencher) { 618 | let map = IMAP_100K.clone(); 619 | 620 | b.iter(|| { 621 | let mut map = map.clone(); 622 | map.retain(|k, _| *k % 7 == 0); 623 | map 624 | }); 625 | } 626 | 627 | #[bench] 628 | fn few_retain_hashmap_100_000(b: &mut Bencher) { 629 | let map = HMAP_100K.clone(); 630 | 631 | b.iter(|| { 632 | let mut map = map.clone(); 633 | map.retain(|k, _| *k % 7 == 0); 634 | map 635 | }); 636 | } 637 | 638 | #[bench] 639 | fn half_retain_indexmap_100_000(b: &mut Bencher) { 640 | let map = IMAP_100K.clone(); 641 | 642 | b.iter(|| { 643 | let mut map = map.clone(); 644 | map.retain(|k, _| *k % 2 == 0); 645 | map 646 | }); 647 | } 648 | 649 | #[bench] 650 | fn half_retain_hashmap_100_000(b: &mut Bencher) { 651 | let map = HMAP_100K.clone(); 652 | 653 | b.iter(|| { 654 | let mut map = map.clone(); 655 | map.retain(|k, _| *k % 2 == 0); 656 | map 657 | }); 658 | } 659 | 660 | #[bench] 661 | fn many_retain_indexmap_100_000(b: &mut Bencher) { 662 | let map = IMAP_100K.clone(); 663 | 664 | b.iter(|| { 665 | let mut map = map.clone(); 666 | map.retain(|k, _| *k % 100 != 0); 667 | map 668 | }); 669 | } 670 | 671 | #[bench] 672 | fn many_retain_hashmap_100_000(b: &mut Bencher) { 673 | let map = HMAP_100K.clone(); 674 | 675 | b.iter(|| { 676 | let mut map = map.clone(); 677 | map.retain(|k, _| *k % 100 != 0); 678 | map 679 | }); 680 | } 681 | 682 | // 
simple sort impl for comparison 683 | pub fn simple_sort(m: &mut IndexMap) { 684 | let mut ordered: Vec<_> = m.drain(..).collect(); 685 | ordered.sort_by(|left, right| left.0.cmp(&right.0)); 686 | m.extend(ordered); 687 | } 688 | 689 | #[bench] 690 | fn indexmap_sort_s(b: &mut Bencher) { 691 | let map = IMAP_SORT_S.clone(); 692 | 693 | // there's a map clone there, but it's still useful to profile this 694 | b.iter(|| { 695 | let mut map = map.clone(); 696 | map.sort_keys(); 697 | map 698 | }); 699 | } 700 | 701 | #[bench] 702 | fn indexmap_simple_sort_s(b: &mut Bencher) { 703 | let map = IMAP_SORT_S.clone(); 704 | 705 | // there's a map clone there, but it's still useful to profile this 706 | b.iter(|| { 707 | let mut map = map.clone(); 708 | simple_sort(&mut map); 709 | map 710 | }); 711 | } 712 | 713 | #[bench] 714 | fn indexmap_sort_u32(b: &mut Bencher) { 715 | let map = IMAP_SORT_U32.clone(); 716 | 717 | // there's a map clone there, but it's still useful to profile this 718 | b.iter(|| { 719 | let mut map = map.clone(); 720 | map.sort_keys(); 721 | map 722 | }); 723 | } 724 | 725 | #[bench] 726 | fn indexmap_simple_sort_u32(b: &mut Bencher) { 727 | let map = IMAP_SORT_U32.clone(); 728 | 729 | // there's a map clone there, but it's still useful to profile this 730 | b.iter(|| { 731 | let mut map = map.clone(); 732 | simple_sort(&mut map); 733 | map 734 | }); 735 | } 736 | 737 | // measure the fixed overhead of cloning in sort benchmarks 738 | #[bench] 739 | fn indexmap_clone_for_sort_s(b: &mut Bencher) { 740 | let map = IMAP_SORT_S.clone(); 741 | 742 | b.iter(|| map.clone()); 743 | } 744 | 745 | #[bench] 746 | fn indexmap_clone_for_sort_u32(b: &mut Bencher) { 747 | let map = IMAP_SORT_U32.clone(); 748 | 749 | b.iter(|| map.clone()); 750 | } 751 | -------------------------------------------------------------------------------- /src/rayon/map.rs: -------------------------------------------------------------------------------- 1 | //! Parallel iterator types for [`IndexMap`] with [`rayon`][::rayon]. 2 | //! 3 | //! You will rarely need to interact with this module directly unless you need to name one of the 4 | //! iterator types. 5 | 6 | use super::collect; 7 | use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; 8 | use rayon::prelude::*; 9 | 10 | use alloc::boxed::Box; 11 | use alloc::vec::Vec; 12 | use core::cmp::Ordering; 13 | use core::fmt; 14 | use core::hash::{BuildHasher, Hash}; 15 | use core::ops::RangeBounds; 16 | 17 | use crate::map::Slice; 18 | use crate::Bucket; 19 | use crate::IndexMap; 20 | 21 | impl IntoParallelIterator for IndexMap 22 | where 23 | K: Send, 24 | V: Send, 25 | { 26 | type Item = (K, V); 27 | type Iter = IntoParIter; 28 | 29 | fn into_par_iter(self) -> Self::Iter { 30 | IntoParIter { 31 | entries: self.into_entries(), 32 | } 33 | } 34 | } 35 | 36 | impl IntoParallelIterator for Box> 37 | where 38 | K: Send, 39 | V: Send, 40 | { 41 | type Item = (K, V); 42 | type Iter = IntoParIter; 43 | 44 | fn into_par_iter(self) -> Self::Iter { 45 | IntoParIter { 46 | entries: self.into_entries(), 47 | } 48 | } 49 | } 50 | 51 | /// A parallel owning iterator over the entries of an [`IndexMap`]. 52 | /// 53 | /// This `struct` is created by the [`IndexMap::into_par_iter`] method 54 | /// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. 
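A small sketch of the parallel iterators documented in this module (illustrative only; it assumes the `rayon` crate plus indexmap's `rayon` feature):

use indexmap::IndexMap;
use rayon::prelude::*;

fn main() {
    let map: IndexMap<i32, i32> = (0..100).map(|i| (i, i * i)).collect();

    // Parallel iteration may process items on many threads, but `collect`
    // still reassembles them in the map's insertion order.
    let keys: Vec<i32> = map.par_keys().copied().collect();
    assert!(keys.iter().copied().eq(0..100));

    // Owning parallel iteration works the same way via `into_par_iter`.
    let pairs: Vec<(i32, i32)> = map.into_par_iter().collect();
    assert_eq!(pairs[10], (10, 100));
}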
55 | pub struct IntoParIter { 56 | entries: Vec>, 57 | } 58 | 59 | impl fmt::Debug for IntoParIter { 60 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 61 | let iter = self.entries.iter().map(Bucket::refs); 62 | f.debug_list().entries(iter).finish() 63 | } 64 | } 65 | 66 | impl ParallelIterator for IntoParIter { 67 | type Item = (K, V); 68 | 69 | parallel_iterator_methods!(Bucket::key_value); 70 | } 71 | 72 | impl IndexedParallelIterator for IntoParIter { 73 | indexed_parallel_iterator_methods!(Bucket::key_value); 74 | } 75 | 76 | impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap 77 | where 78 | K: Sync, 79 | V: Sync, 80 | { 81 | type Item = (&'a K, &'a V); 82 | type Iter = ParIter<'a, K, V>; 83 | 84 | fn into_par_iter(self) -> Self::Iter { 85 | ParIter { 86 | entries: self.as_entries(), 87 | } 88 | } 89 | } 90 | 91 | impl<'a, K, V> IntoParallelIterator for &'a Slice 92 | where 93 | K: Sync, 94 | V: Sync, 95 | { 96 | type Item = (&'a K, &'a V); 97 | type Iter = ParIter<'a, K, V>; 98 | 99 | fn into_par_iter(self) -> Self::Iter { 100 | ParIter { 101 | entries: &self.entries, 102 | } 103 | } 104 | } 105 | 106 | /// A parallel iterator over the entries of an [`IndexMap`]. 107 | /// 108 | /// This `struct` is created by the [`IndexMap::par_iter`] method 109 | /// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. 110 | /// 111 | /// [`IndexMap::par_iter`]: ../struct.IndexMap.html#method.par_iter 112 | pub struct ParIter<'a, K, V> { 113 | entries: &'a [Bucket], 114 | } 115 | 116 | impl Clone for ParIter<'_, K, V> { 117 | fn clone(&self) -> Self { 118 | ParIter { ..*self } 119 | } 120 | } 121 | 122 | impl fmt::Debug for ParIter<'_, K, V> { 123 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 124 | let iter = self.entries.iter().map(Bucket::refs); 125 | f.debug_list().entries(iter).finish() 126 | } 127 | } 128 | 129 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { 130 | type Item = (&'a K, &'a V); 131 | 132 | parallel_iterator_methods!(Bucket::refs); 133 | } 134 | 135 | impl IndexedParallelIterator for ParIter<'_, K, V> { 136 | indexed_parallel_iterator_methods!(Bucket::refs); 137 | } 138 | 139 | impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap 140 | where 141 | K: Sync + Send, 142 | V: Send, 143 | { 144 | type Item = (&'a K, &'a mut V); 145 | type Iter = ParIterMut<'a, K, V>; 146 | 147 | fn into_par_iter(self) -> Self::Iter { 148 | ParIterMut { 149 | entries: self.as_entries_mut(), 150 | } 151 | } 152 | } 153 | 154 | impl<'a, K, V> IntoParallelIterator for &'a mut Slice 155 | where 156 | K: Sync + Send, 157 | V: Send, 158 | { 159 | type Item = (&'a K, &'a mut V); 160 | type Iter = ParIterMut<'a, K, V>; 161 | 162 | fn into_par_iter(self) -> Self::Iter { 163 | ParIterMut { 164 | entries: &mut self.entries, 165 | } 166 | } 167 | } 168 | 169 | /// A parallel mutable iterator over the entries of an [`IndexMap`]. 170 | /// 171 | /// This `struct` is created by the [`IndexMap::par_iter_mut`] method 172 | /// (provided by rayon's [`IntoParallelRefMutIterator`] trait). See its documentation for more. 
173 | /// 174 | /// [`IndexMap::par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut 175 | pub struct ParIterMut<'a, K, V> { 176 | entries: &'a mut [Bucket], 177 | } 178 | 179 | impl fmt::Debug for ParIterMut<'_, K, V> { 180 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 181 | let iter = self.entries.iter().map(Bucket::refs); 182 | f.debug_list().entries(iter).finish() 183 | } 184 | } 185 | 186 | impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { 187 | type Item = (&'a K, &'a mut V); 188 | 189 | parallel_iterator_methods!(Bucket::ref_mut); 190 | } 191 | 192 | impl IndexedParallelIterator for ParIterMut<'_, K, V> { 193 | indexed_parallel_iterator_methods!(Bucket::ref_mut); 194 | } 195 | 196 | impl<'a, K, V, S> ParallelDrainRange for &'a mut IndexMap 197 | where 198 | K: Send, 199 | V: Send, 200 | { 201 | type Item = (K, V); 202 | type Iter = ParDrain<'a, K, V>; 203 | 204 | fn par_drain>(self, range: R) -> Self::Iter { 205 | ParDrain { 206 | entries: self.core.par_drain(range), 207 | } 208 | } 209 | } 210 | 211 | /// A parallel draining iterator over the entries of an [`IndexMap`]. 212 | /// 213 | /// This `struct` is created by the [`IndexMap::par_drain`] method 214 | /// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. 215 | /// 216 | /// [`IndexMap::par_drain`]: ../struct.IndexMap.html#method.par_drain 217 | pub struct ParDrain<'a, K: Send, V: Send> { 218 | entries: rayon::vec::Drain<'a, Bucket>, 219 | } 220 | 221 | impl ParallelIterator for ParDrain<'_, K, V> { 222 | type Item = (K, V); 223 | 224 | parallel_iterator_methods!(Bucket::key_value); 225 | } 226 | 227 | impl IndexedParallelIterator for ParDrain<'_, K, V> { 228 | indexed_parallel_iterator_methods!(Bucket::key_value); 229 | } 230 | 231 | /// Parallel iterator methods and other parallel methods. 232 | /// 233 | /// The following methods **require crate feature `"rayon"`**. 234 | /// 235 | /// See also the `IntoParallelIterator` implementations. 236 | impl IndexMap 237 | where 238 | K: Sync, 239 | V: Sync, 240 | { 241 | /// Return a parallel iterator over the keys of the map. 242 | /// 243 | /// While parallel iterators can process items in any order, their relative order 244 | /// in the map is still preserved for operations like `reduce` and `collect`. 245 | pub fn par_keys(&self) -> ParKeys<'_, K, V> { 246 | ParKeys { 247 | entries: self.as_entries(), 248 | } 249 | } 250 | 251 | /// Return a parallel iterator over the values of the map. 252 | /// 253 | /// While parallel iterators can process items in any order, their relative order 254 | /// in the map is still preserved for operations like `reduce` and `collect`. 255 | pub fn par_values(&self) -> ParValues<'_, K, V> { 256 | ParValues { 257 | entries: self.as_entries(), 258 | } 259 | } 260 | } 261 | 262 | /// Parallel iterator methods and other parallel methods. 263 | /// 264 | /// The following methods **require crate feature `"rayon"`**. 265 | /// 266 | /// See also the `IntoParallelIterator` implementations. 267 | impl Slice 268 | where 269 | K: Sync, 270 | V: Sync, 271 | { 272 | /// Return a parallel iterator over the keys of the map slice. 273 | /// 274 | /// While parallel iterators can process items in any order, their relative order 275 | /// in the slice is still preserved for operations like `reduce` and `collect`. 
276 | pub fn par_keys(&self) -> ParKeys<'_, K, V> { 277 | ParKeys { 278 | entries: &self.entries, 279 | } 280 | } 281 | 282 | /// Return a parallel iterator over the values of the map slice. 283 | /// 284 | /// While parallel iterators can process items in any order, their relative order 285 | /// in the slice is still preserved for operations like `reduce` and `collect`. 286 | pub fn par_values(&self) -> ParValues<'_, K, V> { 287 | ParValues { 288 | entries: &self.entries, 289 | } 290 | } 291 | } 292 | 293 | impl IndexMap 294 | where 295 | K: Hash + Eq + Sync, 296 | V: Sync, 297 | S: BuildHasher, 298 | { 299 | /// Returns `true` if `self` contains all of the same key-value pairs as `other`, 300 | /// regardless of each map's indexed order, determined in parallel. 301 | pub fn par_eq(&self, other: &IndexMap) -> bool 302 | where 303 | V: PartialEq, 304 | V2: Sync, 305 | S2: BuildHasher + Sync, 306 | { 307 | self.len() == other.len() 308 | && self 309 | .par_iter() 310 | .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v)) 311 | } 312 | } 313 | 314 | /// A parallel iterator over the keys of an [`IndexMap`]. 315 | /// 316 | /// This `struct` is created by the [`IndexMap::par_keys`] method. 317 | /// See its documentation for more. 318 | pub struct ParKeys<'a, K, V> { 319 | entries: &'a [Bucket], 320 | } 321 | 322 | impl Clone for ParKeys<'_, K, V> { 323 | fn clone(&self) -> Self { 324 | ParKeys { ..*self } 325 | } 326 | } 327 | 328 | impl fmt::Debug for ParKeys<'_, K, V> { 329 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 330 | let iter = self.entries.iter().map(Bucket::key_ref); 331 | f.debug_list().entries(iter).finish() 332 | } 333 | } 334 | 335 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { 336 | type Item = &'a K; 337 | 338 | parallel_iterator_methods!(Bucket::key_ref); 339 | } 340 | 341 | impl IndexedParallelIterator for ParKeys<'_, K, V> { 342 | indexed_parallel_iterator_methods!(Bucket::key_ref); 343 | } 344 | 345 | /// A parallel iterator over the values of an [`IndexMap`]. 346 | /// 347 | /// This `struct` is created by the [`IndexMap::par_values`] method. 348 | /// See its documentation for more. 349 | pub struct ParValues<'a, K, V> { 350 | entries: &'a [Bucket], 351 | } 352 | 353 | impl Clone for ParValues<'_, K, V> { 354 | fn clone(&self) -> Self { 355 | ParValues { ..*self } 356 | } 357 | } 358 | 359 | impl fmt::Debug for ParValues<'_, K, V> { 360 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 361 | let iter = self.entries.iter().map(Bucket::value_ref); 362 | f.debug_list().entries(iter).finish() 363 | } 364 | } 365 | 366 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { 367 | type Item = &'a V; 368 | 369 | parallel_iterator_methods!(Bucket::value_ref); 370 | } 371 | 372 | impl IndexedParallelIterator for ParValues<'_, K, V> { 373 | indexed_parallel_iterator_methods!(Bucket::value_ref); 374 | } 375 | 376 | impl IndexMap 377 | where 378 | K: Send, 379 | V: Send, 380 | { 381 | /// Return a parallel iterator over mutable references to the values of the map 382 | /// 383 | /// While parallel iterators can process items in any order, their relative order 384 | /// in the map is still preserved for operations like `reduce` and `collect`. 
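A sketch of `par_values_mut` in use (illustrative only; assumes the `rayon` crate and indexmap's `rayon` feature):

use indexmap::IndexMap;
use rayon::prelude::*;

fn main() {
    let mut map: IndexMap<&str, i64> = IndexMap::new();
    map.insert("a", 1);
    map.insert("b", 2);
    map.insert("c", 3);

    // Mutate every value in parallel; the key order is untouched.
    map.par_values_mut().for_each(|v| *v *= 10);
    assert!(map.values().copied().eq([10, 20, 30]));
}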
385 | pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { 386 | ParValuesMut { 387 | entries: self.as_entries_mut(), 388 | } 389 | } 390 | } 391 | 392 | impl Slice 393 | where 394 | K: Send, 395 | V: Send, 396 | { 397 | /// Return a parallel iterator over mutable references to the the values of the map slice. 398 | /// 399 | /// While parallel iterators can process items in any order, their relative order 400 | /// in the slice is still preserved for operations like `reduce` and `collect`. 401 | pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { 402 | ParValuesMut { 403 | entries: &mut self.entries, 404 | } 405 | } 406 | } 407 | 408 | impl IndexMap 409 | where 410 | K: Send, 411 | V: Send, 412 | { 413 | /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. 414 | pub fn par_sort_keys(&mut self) 415 | where 416 | K: Ord, 417 | { 418 | self.with_entries(|entries| { 419 | entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key)); 420 | }); 421 | } 422 | 423 | /// Sort the map's key-value pairs in place and in parallel, using the comparison 424 | /// function `cmp`. 425 | /// 426 | /// The comparison function receives two key and value pairs to compare (you 427 | /// can sort by keys or values or their combination as needed). 428 | pub fn par_sort_by(&mut self, cmp: F) 429 | where 430 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 431 | { 432 | self.with_entries(|entries| { 433 | entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 434 | }); 435 | } 436 | 437 | /// Sort the key-value pairs of the map in parallel and return a by-value parallel 438 | /// iterator of the key-value pairs with the result. 439 | pub fn par_sorted_by(self, cmp: F) -> IntoParIter 440 | where 441 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 442 | { 443 | let mut entries = self.into_entries(); 444 | entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 445 | IntoParIter { entries } 446 | } 447 | 448 | /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction 449 | /// function. 450 | pub fn par_sort_by_key(&mut self, sort_key: F) 451 | where 452 | T: Ord, 453 | F: Fn(&K, &V) -> T + Sync, 454 | { 455 | self.with_entries(move |entries| { 456 | entries.par_sort_by_key(move |a| sort_key(&a.key, &a.value)); 457 | }); 458 | } 459 | 460 | /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. 461 | pub fn par_sort_unstable_keys(&mut self) 462 | where 463 | K: Ord, 464 | { 465 | self.with_entries(|entries| { 466 | entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key)); 467 | }); 468 | } 469 | 470 | /// Sort the map's key-value pairs in place and in parallel, using the comparison 471 | /// function `cmp`. 472 | /// 473 | /// The comparison function receives two key and value pairs to compare (you 474 | /// can sort by keys or values or their combination as needed). 475 | pub fn par_sort_unstable_by(&mut self, cmp: F) 476 | where 477 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 478 | { 479 | self.with_entries(|entries| { 480 | entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 481 | }); 482 | } 483 | 484 | /// Sort the key-value pairs of the map in parallel and return a by-value parallel 485 | /// iterator of the key-value pairs with the result. 
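A sketch of the parallel sorting methods above (illustrative only; assumes the `rayon` feature is enabled, which makes these inherent methods available):

use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<u32, &str> = IndexMap::new();
    map.insert(3, "c");
    map.insert(1, "a");
    map.insert(2, "b");

    // Reorder the pairs in place using a parallel sort over the keys.
    map.par_sort_keys();
    assert!(map.keys().copied().eq([1, 2, 3]));

    // Or sort by any combination of key and value.
    map.par_sort_by(|_k1, v1, _k2, v2| v2.cmp(v1));
    assert!(map.values().copied().eq(["c", "b", "a"]));
}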
486 |     pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<K, V>
487 |     where
488 |         F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
489 |     {
490 |         let mut entries = self.into_entries();
491 |         entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
492 |         IntoParIter { entries }
493 |     }
494 | 
495 |     /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction
496 |     /// function.
497 |     pub fn par_sort_unstable_by_key<T, F>(&mut self, sort_key: F)
498 |     where
499 |         T: Ord,
500 |         F: Fn(&K, &V) -> T + Sync,
501 |     {
502 |         self.with_entries(move |entries| {
503 |             entries.par_sort_unstable_by_key(move |a| sort_key(&a.key, &a.value));
504 |         });
505 |     }
506 | 
507 |     /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction
508 |     /// function.
509 |     pub fn par_sort_by_cached_key<T, F>(&mut self, sort_key: F)
510 |     where
511 |         T: Ord + Send,
512 |         F: Fn(&K, &V) -> T + Sync,
513 |     {
514 |         self.with_entries(move |entries| {
515 |             entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value));
516 |         });
517 |     }
518 | }
519 | 
520 | /// A parallel mutable iterator over the values of an [`IndexMap`].
521 | ///
522 | /// This `struct` is created by the [`IndexMap::par_values_mut`] method.
523 | /// See its documentation for more.
524 | pub struct ParValuesMut<'a, K, V> {
525 |     entries: &'a mut [Bucket<K, V>],
526 | }
527 | 
528 | impl<K, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
529 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
530 |         let iter = self.entries.iter().map(Bucket::value_ref);
531 |         f.debug_list().entries(iter).finish()
532 |     }
533 | }
534 | 
535 | impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
536 |     type Item = &'a mut V;
537 | 
538 |     parallel_iterator_methods!(Bucket::value_mut);
539 | }
540 | 
541 | impl<K: Send, V: Send> IndexedParallelIterator for ParValuesMut<'_, K, V> {
542 |     indexed_parallel_iterator_methods!(Bucket::value_mut);
543 | }
544 | 
545 | impl<K, V, S> FromParallelIterator<(K, V)> for IndexMap<K, V, S>
546 | where
547 |     K: Eq + Hash + Send,
548 |     V: Send,
549 |     S: BuildHasher + Default + Send,
550 | {
551 |     fn from_par_iter<I>(iter: I) -> Self
552 |     where
553 |         I: IntoParallelIterator<Item = (K, V)>,
554 |     {
555 |         let list = collect(iter);
556 |         let len = list.iter().map(Vec::len).sum();
557 |         let mut map = Self::with_capacity_and_hasher(len, S::default());
558 |         for vec in list {
559 |             map.extend(vec);
560 |         }
561 |         map
562 |     }
563 | }
564 | 
565 | impl<K, V, S> ParallelExtend<(K, V)> for IndexMap<K, V, S>
566 | where
567 |     K: Eq + Hash + Send,
568 |     V: Send,
569 |     S: BuildHasher + Send,
570 | {
571 |     fn par_extend<I>(&mut self, iter: I)
572 |     where
573 |         I: IntoParallelIterator<Item = (K, V)>,
574 |     {
575 |         for vec in collect(iter) {
576 |             self.extend(vec);
577 |         }
578 |     }
579 | }
580 | 
581 | impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap<K, V, S>
582 | where
583 |     K: Copy + Eq + Hash + Send + Sync,
584 |     V: Copy + Send + Sync,
585 |     S: BuildHasher + Send,
586 | {
587 |     fn par_extend<I>(&mut self, iter: I)
588 |     where
589 |         I: IntoParallelIterator<Item = (&'a K, &'a V)>,
590 |     {
591 |         for vec in collect(iter) {
592 |             self.extend(vec);
593 |         }
594 |     }
595 | }
596 | 
597 | #[cfg(test)]
598 | mod tests {
599 |     use super::*;
600 |     use std::string::String;
601 | 
602 |     #[test]
603 |     fn insert_order() {
604 |         let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
605 |         let mut map = IndexMap::new();
606 | 
607 |         for &elt in &insert {
608 |             map.insert(elt, ());
609 |         }
610 | 
611 |         assert_eq!(map.par_keys().count(), map.len());
612 |         assert_eq!(map.par_keys().count(), insert.len());
613 |         insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| {
614 |             assert_eq!(a, b);
615 |         });
616 |         (0..insert.len())
617 |             .into_par_iter()
618 |             .zip(map.par_keys())
619 |             .for_each(|(i, k)| {
620 |                 assert_eq!(map.get_index(i).unwrap().0, k);
621 |             });
622 |     }
623 | 
624 |     #[test]
625 |     fn partial_eq_and_eq() {
626 |         let mut map_a = IndexMap::new();
627 |         map_a.insert(1, "1");
628 |         map_a.insert(2, "2");
629 |         let mut map_b = map_a.clone();
630 |         assert!(map_a.par_eq(&map_b));
631 |         map_b.swap_remove(&1);
632 |         assert!(!map_a.par_eq(&map_b));
633 |         map_b.insert(3, "3");
634 |         assert!(!map_a.par_eq(&map_b));
635 | 
636 |         let map_c: IndexMap<_, String> =
637 |             map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect();
638 |         assert!(!map_a.par_eq(&map_c));
639 |         assert!(!map_c.par_eq(&map_a));
640 |     }
641 | 
642 |     #[test]
643 |     fn extend() {
644 |         let mut map = IndexMap::new();
645 |         map.par_extend(vec![(&1, &2), (&3, &4)]);
646 |         map.par_extend(vec![(5, 6)]);
647 |         assert_eq!(
648 |             map.into_par_iter().collect::<Vec<_>>(),
649 |             vec![(1, 2), (3, 4), (5, 6)]
650 |         );
651 |     }
652 | 
653 |     #[test]
654 |     fn keys() {
655 |         let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
656 |         let map: IndexMap<_, _> = vec.into_par_iter().collect();
657 |         let keys: Vec<_> = map.par_keys().copied().collect();
658 |         assert_eq!(keys.len(), 3);
659 |         assert!(keys.contains(&1));
660 |         assert!(keys.contains(&2));
661 |         assert!(keys.contains(&3));
662 |     }
663 | 
664 |     #[test]
665 |     fn values() {
666 |         let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
667 |         let map: IndexMap<_, _> = vec.into_par_iter().collect();
668 |         let values: Vec<_> = map.par_values().copied().collect();
669 |         assert_eq!(values.len(), 3);
670 |         assert!(values.contains(&'a'));
671 |         assert!(values.contains(&'b'));
672 |         assert!(values.contains(&'c'));
673 |     }
674 | 
675 |     #[test]
676 |     fn values_mut() {
677 |         let vec = vec![(1, 1), (2, 2), (3, 3)];
678 |         let mut map: IndexMap<_, _> = vec.into_par_iter().collect();
679 |         map.par_values_mut().for_each(|value| *value *= 2);
680 |         let values: Vec<_> = map.par_values().copied().collect();
681 |         assert_eq!(values.len(), 3);
682 |         assert!(values.contains(&2));
683 |         assert!(values.contains(&4));
684 |         assert!(values.contains(&6));
685 |     }
686 | }
687 | 
--------------------------------------------------------------------------------
/RELEASES.md:
--------------------------------------------------------------------------------
1 | # Releases
2 | 
3 | ## 2.12.1 (2025-11-20)
4 | 
5 | - Simplified a lot of internals using `hashbrown`'s new bucket API.
6 | 
7 | ## 2.12.0 (2025-10-17)
8 | 
9 | - **MSRV**: Rust 1.82.0 or later is now required.
10 | - Updated the `hashbrown` dependency to 0.16 alone.
11 | - Error types now implement `core::error::Error`.
12 | - Added `pop_if` methods to `IndexMap` and `IndexSet`, similar to the
13 |   method for `Vec` added in Rust 1.86.
14 | 
15 | ## 2.11.4 (2025-09-18)
16 | 
17 | - Updated the `hashbrown` dependency to a range allowing 0.15 or 0.16.
18 | 
19 | ## 2.11.3 (2025-09-15)
20 | 
21 | - Make the minimum `serde` version only apply when "serde" is enabled.
22 | 
23 | ## 2.11.2 (2025-09-15)
24 | 
25 | - Switched the "serde" feature to depend on `serde_core`, improving build
26 |   parallelism in cases where other dependents have enabled "serde/derive".
27 | 
28 | ## 2.11.1 (2025-09-08)
29 | 
30 | - Added a `get_key_value_mut` method to `IndexMap`.
31 | - Removed the unnecessary `Ord` bound on `insert_sorted_by` methods.
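
A quick illustrative sketch of the new accessor (not from the changelog itself; it assumes `get_key_value_mut` mirrors `get_key_value` and returns `Option<(&K, &mut V)>`):

```rust
use indexmap::IndexMap;

let mut map = IndexMap::new();
map.insert("a", 1);
// Assumed shape: the key comes back by shared reference, the value mutably.
if let Some((key, value)) = map.get_key_value_mut("a") {
    assert_eq!(*key, "a");
    *value += 1;
}
assert_eq!(map["a"], 2);
```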
32 | 33 | ## 2.11.0 (2025-08-22) 34 | 35 | - Added `insert_sorted_by` and `insert_sorted_by_key` methods to `IndexMap`, 36 | `IndexSet`, and `VacantEntry`, like customizable versions of `insert_sorted`. 37 | - Added `is_sorted`, `is_sorted_by`, and `is_sorted_by_key` methods to 38 | `IndexMap` and `IndexSet`, as well as their `Slice` counterparts. 39 | - Added `sort_by_key` and `sort_unstable_by_key` methods to `IndexMap` and 40 | `IndexSet`, as well as parallel counterparts. 41 | - Added `replace_index` methods to `IndexMap`, `IndexSet`, and `VacantEntry` 42 | to replace the key (or set value) at a given index. 43 | - Added optional `sval` serialization support. 44 | 45 | ## 2.10.0 (2025-06-26) 46 | 47 | - Added `extract_if` methods to `IndexMap` and `IndexSet`, similar to the 48 | methods for `HashMap` and `HashSet` with ranges like `Vec::extract_if`. 49 | - Added more `#[track_caller]` annotations to functions that may panic. 50 | 51 | ## 2.9.0 (2025-04-04) 52 | 53 | - Added a `get_disjoint_mut` method to `IndexMap`, matching Rust 1.86's 54 | `HashMap` method. 55 | - Added a `get_disjoint_indices_mut` method to `IndexMap` and `map::Slice`, 56 | matching Rust 1.86's `get_disjoint_mut` method on slices. 57 | - Deprecated the `borsh` feature in favor of their own `indexmap` feature, 58 | solving a cyclic dependency that occurred via `borsh-derive`. 59 | 60 | ## 2.8.0 (2025-03-10) 61 | 62 | - Added `indexmap_with_default!` and `indexset_with_default!` to be used with 63 | alternative hashers, especially when using the crate without `std`. 64 | - Implemented `PartialEq` between each `Slice` and `[]`/arrays. 65 | - Removed the internal `rustc-rayon` feature and dependency. 66 | 67 | ## 2.7.1 (2025-01-19) 68 | 69 | - Added `#[track_caller]` to functions that may panic. 70 | - Improved memory reservation for `insert_entry`. 71 | 72 | ## 2.7.0 (2024-11-30) 73 | 74 | - Added methods `Entry::insert_entry` and `VacantEntry::insert_entry`, returning 75 | an `OccupiedEntry` after insertion. 76 | 77 | ## 2.6.0 (2024-10-01) 78 | 79 | - Implemented `Clone` for `map::IntoIter` and `set::IntoIter`. 80 | - Updated the `hashbrown` dependency to version 0.15. 81 | 82 | ## 2.5.0 (2024-08-30) 83 | 84 | - Added an `insert_before` method to `IndexMap` and `IndexSet`, as an 85 | alternative to `shift_insert` with different behavior on existing entries. 86 | - Added `first_entry` and `last_entry` methods to `IndexMap`. 87 | - Added `From` implementations between `IndexedEntry` and `OccupiedEntry`. 88 | 89 | ## 2.4.0 (2024-08-13) 90 | 91 | - Added methods `IndexMap::append` and `IndexSet::append`, moving all items from 92 | one map or set into another, and leaving the original capacity for reuse. 93 | 94 | ## 2.3.0 (2024-07-31) 95 | 96 | - Added trait `MutableEntryKey` for opt-in mutable access to map entry keys. 97 | - Added method `MutableKeys::iter_mut2` for opt-in mutable iteration of map 98 | keys and values. 99 | 100 | ## 2.2.6 (2024-03-22) 101 | 102 | - Added trait `MutableValues` for opt-in mutable access to set values. 103 | 104 | ## 2.2.5 (2024-02-29) 105 | 106 | - Added optional `borsh` serialization support. 107 | 108 | ## 2.2.4 (2024-02-28) 109 | 110 | - Added an `insert_sorted` method on `IndexMap`, `IndexSet`, and `VacantEntry`. 111 | - Avoid hashing for lookups in single-entry maps. 112 | - Limit preallocated memory in `serde` deserializers. 
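
A rough sketch of the `insert_sorted` method from 2.2.4 above, under the assumption that it returns the insertion index and any replaced value (like `insert_full`) and that the existing keys are already sorted:

```rust
use indexmap::IndexMap;

let mut map = IndexMap::from([(1, "a"), (3, "c")]);
// Assumed return shape: (index, previous value if the key already existed).
let (index, old) = map.insert_sorted(2, "b");
assert_eq!((index, old), (1, None));
assert_eq!(map.keys().copied().collect::<Vec<_>>(), [1, 2, 3]);
```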
113 | 
114 | ## 2.2.3 (2024-02-11)
115 | 
116 | - Added `move_index` and `swap_indices` methods to `IndexedEntry`,
117 |   `OccupiedEntry`, and `RawOccupiedEntryMut`, functioning like the existing
118 |   methods on `IndexMap`.
119 | - Added `shift_insert` methods on `VacantEntry` and `RawVacantEntryMut`, as
120 |   well as `shift_insert_hashed_nocheck` on the latter, to insert the new entry
121 |   at a particular index.
122 | - Added `shift_insert` methods on `IndexMap` and `IndexSet` to insert a new
123 |   entry at a particular index, or else move an existing entry there.
124 | 
125 | ## 2.2.2 (2024-01-31)
126 | 
127 | - Added indexing methods to raw entries: `RawEntryBuilder::from_hash_full`,
128 |   `RawEntryBuilder::index_from_hash`, and `RawEntryMut::index`.
129 | 
130 | ## 2.2.1 (2024-01-28)
131 | 
132 | - Corrected the signature of `RawOccupiedEntryMut::into_key(self) -> &'a mut K`.
133 |   This is a breaking change from 2.2.0, but that version was published for less
134 |   than a day and has now been yanked.
135 | 
136 | ## 2.2.0 (2024-01-28)
137 | 
138 | - The new `IndexMap::get_index_entry` method finds an entry by its index for
139 |   in-place manipulation.
140 | 
141 | - The `Keys` iterator now implements `Index<usize>` for quick access to the
142 |   entry's key, compared to indexing the map to get the value.
143 | 
144 | - The new `IndexMap::splice` and `IndexSet::splice` methods will drain the
145 |   given range as an iterator, and then replace that range with entries from
146 |   an input iterator.
147 | 
148 | - The new trait `RawEntryApiV1` offers opt-in access to a raw entry API for
149 |   `IndexMap`, corresponding to the unstable API on `HashMap` as of Rust 1.75.
150 | 
151 | - Many `IndexMap` and `IndexSet` methods have relaxed their type constraints,
152 |   e.g. removing `K: Hash` on methods that don't actually need to hash.
153 | 
154 | - Removal methods `remove`, `remove_entry`, and `take` are now deprecated
155 |   in favor of their `shift_` or `swap_` prefixed variants, which are more
156 |   explicit about their effect on the index and order of remaining items.
157 |   The deprecated methods will remain to guide drop-in replacements from
158 |   `HashMap` and `HashSet` toward the prefixed methods.
159 | 
160 | ## 2.1.0 (2023-10-31)
161 | 
162 | - Empty slices can now be created with `map::Slice::{new, new_mut}` and
163 |   `set::Slice::new`. In addition, `Slice::new`, `len`, and `is_empty` are
164 |   now `const` functions on both types.
165 | 
166 | - `IndexMap`, `IndexSet`, and their respective `Slice`s all have binary
167 |   search methods for sorted data: map `binary_search_keys` and set
168 |   `binary_search` for plain comparison, `binary_search_by` for custom
169 |   comparators, `binary_search_by_key` for key extraction, and
170 |   `partition_point` for boolean conditions.
171 | 
172 | ## 2.0.2 (2023-09-29)
173 | 
174 | - The `hashbrown` dependency has been updated to version 0.14.1 to
175 |   complete the support for Rust 1.63.
176 | 
177 | ## 2.0.1 (2023-09-27)
178 | 
179 | - **MSRV**: Rust 1.63.0 is now supported as well, pending publication of
180 |   `hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`).
181 | 
182 | ## 2.0.0 (2023-06-23)
183 | 
184 | - **MSRV**: Rust 1.64.0 or later is now required.
185 | 
186 | - The `"std"` feature is no longer auto-detected. It is included in the
187 |   default feature set, or else can be enabled like any other Cargo feature.
188 | 
189 | - The `"serde-1"` feature has been removed, leaving just the optional
190 |   `"serde"` dependency to be enabled like a feature itself.
191 | 192 | - `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing 193 | the key part from `&mut K` to `&K`. There is also a new alternative 194 | `MutableKeys::get_index_mut2` to access the former behavior. 195 | 196 | - The new `map::Slice` and `set::Slice` offer a linear view of maps 197 | and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices. Notably, 198 | comparison traits like `Eq` only consider items in order, rather than hash 199 | lookups, and slices even implement `Hash`. 200 | 201 | - `IndexMap` and `IndexSet` now have `sort_by_cached_key` and 202 | `par_sort_by_cached_key` methods which perform stable sorts in place 203 | using a key extraction function. 204 | 205 | - `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and 206 | `try_reserve_exact` methods that correspond to the same methods on `Vec`. 207 | However, exactness only applies to the direct capacity for items, while the 208 | raw hash table still follows its own rules for capacity and load factor. 209 | 210 | - The `Equivalent` trait is now re-exported from the `equivalent` crate, 211 | intended as a common base to allow types to work with multiple map types. 212 | 213 | - The `hashbrown` dependency has been updated to version 0.14. 214 | 215 | - The `serde_seq` module has been moved from the crate root to below the 216 | `map` module. 217 | 218 | ## 1.9.3 (2023-03-24) 219 | 220 | - Bump the `rustc-rayon` dependency, for compiler use only. 221 | 222 | ## 1.9.2 (2022-11-17) 223 | 224 | - `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and 225 | `quickcheck::Arbitrary` if those optional dependency features are enabled. 226 | 227 | ## 1.9.1 (2022-06-21) 228 | 229 | - The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown` 230 | 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0 231 | until there is a later published version relaxing its requirement. 232 | 233 | ## 1.9.0 (2022-06-16) 234 | 235 | - **MSRV**: Rust 1.56.1 or later is now required. 236 | 237 | - The `hashbrown` dependency has been updated to version 0.12. 238 | 239 | - `IterMut` and `ValuesMut` now implement `Debug`. 240 | 241 | - The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink 242 | the capacity with a lower bound. 243 | 244 | - The new `IndexMap::move_index` and `IndexSet::move_index` methods change 245 | the position of an item from one index to another, shifting the items 246 | between to accommodate the move. 247 | 248 | ## 1.8.2 (2022-05-27) 249 | 250 | - Bump the `rustc-rayon` dependency, for compiler use only. 251 | 252 | ## 1.8.1 (2022-03-29) 253 | 254 | - The new `IndexSet::replace_full` will return the index of the item along 255 | with the replaced value, if any, by @zakcutner in PR [222]. 256 | 257 | [222]: https://github.com/indexmap-rs/indexmap/pull/222 258 | 259 | ## 1.8.0 (2022-01-07) 260 | 261 | - The new `IndexMap::into_keys` and `IndexMap::into_values` will consume 262 | the map into keys or values, respectively, matching Rust 1.54's `HashMap` 263 | methods, by @taiki-e in PR [195]. 264 | 265 | - More of the iterator types implement `Debug`, `ExactSizeIterator`, and 266 | `FusedIterator`, by @cuviper in PR [196]. 267 | 268 | - `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`, 269 | by @cuviper in PR [197]. 270 | 271 | - `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const` 272 | functions, allowing static maps and sets, by @mwillsey in PR [203]. 
273 | 274 | - `IndexMap` and `IndexSet` now implement `From` for arrays, matching 275 | Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205]. 276 | 277 | - `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`, 278 | `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents, 279 | which sort in-place without preserving the order of equal items, by 280 | @bhgomes in PR [211]. 281 | 282 | [195]: https://github.com/indexmap-rs/indexmap/pull/195 283 | [196]: https://github.com/indexmap-rs/indexmap/pull/196 284 | [197]: https://github.com/indexmap-rs/indexmap/pull/197 285 | [203]: https://github.com/indexmap-rs/indexmap/pull/203 286 | [205]: https://github.com/indexmap-rs/indexmap/pull/205 287 | [211]: https://github.com/indexmap-rs/indexmap/pull/211 288 | 289 | ## 1.7.0 (2021-06-29) 290 | 291 | - **MSRV**: Rust 1.49 or later is now required. 292 | 293 | - The `hashbrown` dependency has been updated to version 0.11. 294 | 295 | ## 1.6.2 (2021-03-05) 296 | 297 | - Fixed to match `std` behavior, `OccupiedEntry::key` now references the 298 | existing key in the map instead of the lookup key, by @cuviper in PR [170]. 299 | 300 | - The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method, 301 | passing `&K` to the callback to create a value, by @cuviper in PR [175]. 302 | 303 | [170]: https://github.com/indexmap-rs/indexmap/pull/170 304 | [175]: https://github.com/indexmap-rs/indexmap/pull/175 305 | 306 | ## 1.6.1 (2020-12-14) 307 | 308 | - The new `serde_seq` module implements `IndexMap` serialization as a 309 | sequence to ensure order is preserved, by @cuviper in PR [158]. 310 | 311 | - New methods on maps and sets work like the `Vec`/slice methods by the same name: 312 | `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and 313 | `swap_indices`, by @cuviper in PR [160]. 314 | 315 | [158]: https://github.com/indexmap-rs/indexmap/pull/158 316 | [160]: https://github.com/indexmap-rs/indexmap/pull/160 317 | 318 | ## 1.6.0 (2020-09-05) 319 | 320 | - **MSRV**: Rust 1.36 or later is now required. 321 | 322 | - The `hashbrown` dependency has been updated to version 0.9. 323 | 324 | ## 1.5.2 (2020-09-01) 325 | 326 | - The new "std" feature will force the use of `std` for users that explicitly 327 | want the default `S = RandomState`, bypassing the autodetection added in 1.3.0, 328 | by @cuviper in PR [145]. 329 | 330 | [145]: https://github.com/indexmap-rs/indexmap/pull/145 331 | 332 | ## 1.5.1 (2020-08-07) 333 | 334 | - Values can now be indexed by their `usize` position by @cuviper in PR [132]. 335 | 336 | - Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141]. 337 | 338 | - `drain` now accepts any `R: RangeBounds` by @cuviper in PR [142]. 339 | 340 | [132]: https://github.com/indexmap-rs/indexmap/pull/132 341 | [141]: https://github.com/indexmap-rs/indexmap/pull/141 342 | [142]: https://github.com/indexmap-rs/indexmap/pull/142 343 | 344 | ## 1.5.0 (2020-07-17) 345 | 346 | - **MSRV**: Rust 1.32 or later is now required. 347 | 348 | - The inner hash table is now based on `hashbrown` by @cuviper in PR [131]. 349 | This also completes the method `reserve` and adds `shrink_to_fit`. 
350 | 351 | - Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`, 352 | and `shift_remove_entry`, by @cuviper in PR [136] 353 | 354 | - `Clone::clone_from` reuses allocations by @cuviper in PR [125] 355 | 356 | - Add new method `reverse` by @linclelinkpart5 in PR [128] 357 | 358 | [125]: https://github.com/indexmap-rs/indexmap/pull/125 359 | [128]: https://github.com/indexmap-rs/indexmap/pull/128 360 | [131]: https://github.com/indexmap-rs/indexmap/pull/131 361 | [136]: https://github.com/indexmap-rs/indexmap/pull/136 362 | 363 | ## 1.4.0 (2020-06-01) 364 | 365 | - Add new method `get_index_of` by @Thermatrix in PR [115] and [120] 366 | 367 | - Fix build script rebuild-if-changed configuration to use "build.rs"; 368 | fixes issue [123]. Fix by @cuviper. 369 | 370 | - Dev-dependencies (rand and quickcheck) have been updated. The crate's tests 371 | now run using Rust 1.32 or later (MSRV for building the crate has not changed). 372 | by @kjeremy and @bluss 373 | 374 | [123]: https://github.com/indexmap-rs/indexmap/issues/123 375 | [115]: https://github.com/indexmap-rs/indexmap/pull/115 376 | [120]: https://github.com/indexmap-rs/indexmap/pull/120 377 | 378 | ## 1.3.2 (2020-02-05) 379 | 380 | - Maintenance update to regenerate the published `Cargo.toml`. 381 | 382 | ## 1.3.1 (2020-01-15) 383 | 384 | - Maintenance update for formatting and `autocfg` 1.0. 385 | 386 | ## 1.3.0 (2019-10-18) 387 | 388 | - The deprecation messages in the previous version have been removed. 389 | (The methods have not otherwise changed.) Docs for removal methods have been 390 | improved. 391 | - From Rust 1.36, this crate supports being built **without std**, requiring 392 | `alloc` instead. This is enabled automatically when it is detected that 393 | `std` is not available. There is no crate feature to enable/disable to 394 | trigger this. The new build-dep `autocfg` enables this. 395 | 396 | ## 1.2.0 (2019-09-08) 397 | 398 | - Plain `.remove()` now has a deprecation message, it informs the user 399 | about picking one of the removal functions `swap_remove` and `shift_remove` 400 | which have different performance and order semantics. 401 | Plain `.remove()` will not be removed, the warning message and method 402 | will remain until further. 403 | 404 | - Add new method `shift_remove` for order preserving removal on the map, 405 | and `shift_take` for the corresponding operation on the set. 406 | 407 | - Add methods `swap_remove`, `swap_remove_entry` to `Entry`. 408 | 409 | - Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()` 410 | 411 | - Internal improvements: fix warnings, deprecations and style lints 412 | 413 | ## 1.1.0 (2019-08-20) 414 | 415 | - Added optional feature `"rayon"` that adds parallel iterator support 416 | to `IndexMap` and `IndexSet` using Rayon. This includes all the regular 417 | iterators in parallel versions, and parallel sort. 418 | 419 | - Implemented `Clone` for `map::{Iter, Keys, Values}` and 420 | `set::{Difference, Intersection, Iter, SymmetricDifference, Union}` 421 | 422 | - Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and 423 | `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}` 424 | 425 | - Serde trait `IntoDeserializer` are implemented for `IndexMap` and `IndexSet`. 426 | 427 | - Minimum Rust version requirement increased to Rust 1.30 for development builds. 
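
As a rough illustration of the `"rayon"` feature described in 1.1.0 above (a sketch, assuming the feature is enabled):

```rust
use indexmap::indexmap;
use rayon::prelude::*;

let map = indexmap! { "a" => 1, "b" => 2, "c" => 3 };
// The regular iterators gain parallel counterparts, e.g. `par_iter` over (&K, &V).
let total: i32 = map.par_iter().map(|(_key, value)| *value).sum();
assert_eq!(total, 6);
```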
428 | 429 | ## 1.0.2 (2018-10-22) 430 | 431 | - The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are 432 | both like `insert` with the index included in the return value. 433 | 434 | - The new method `Entry::and_modify` can be used to modify occupied 435 | entries, matching the new methods of `std` maps in Rust 1.26. 436 | 437 | - The new method `Entry::or_default` inserts a default value in unoccupied 438 | entries, matching the new methods of `std` maps in Rust 1.28. 439 | 440 | ## 1.0.1 (2018-03-24) 441 | 442 | - Document Rust version policy for the crate (see rustdoc) 443 | 444 | ## 1.0.0 (2018-03-11) 445 | 446 | - This is the 1.0 release for `indexmap`! (the crate and datastructure 447 | formerly known as “ordermap”) 448 | - `OccupiedEntry::insert` changed its signature, to use `&mut self` for 449 | the method receiver, matching the equivalent method for a standard 450 | `HashMap`. Thanks to @dtolnay for finding this bug. 451 | - The deprecated old names from ordermap were removed: `OrderMap`, 452 | `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap` 453 | etc names instead. 454 | 455 | ## 0.4.1 (2018-02-14) 456 | 457 | - Renamed crate to `indexmap`; the `ordermap` crate is now deprecated 458 | and the types `OrderMap/Set` now have a deprecation notice. 459 | 460 | ## 0.4.0 (2018-02-02) 461 | 462 | - This is the last release series for this `ordermap` under that name, 463 | because the crate is **going to be renamed** to `indexmap` (with types 464 | `IndexMap`, `IndexSet`) and no change in functionality! 465 | - The map and its associated structs moved into the `map` submodule of the 466 | crate, so that the map and set are symmetric 467 | 468 | + The iterators, `Entry` and other structs are now under `ordermap::map::` 469 | 470 | - Internally refactored `OrderMap` so that all the main algorithms 471 | (insertion, lookup, removal etc) that don't use the `S` parameter (the 472 | hasher) are compiled without depending on `S`, which reduces generics bloat. 473 | 474 | - `Entry` no longer has a type parameter `S`, which is just like 475 | the standard `HashMap`'s entry. 476 | 477 | - Minimum Rust version requirement increased to Rust 1.18 478 | 479 | ## 0.3.5 (2018-01-14) 480 | 481 | - Documentation improvements 482 | 483 | ## 0.3.4 (2018-01-04) 484 | 485 | - The `.retain()` methods for `OrderMap` and `OrderSet` now 486 | traverse the elements in order, and the retained elements **keep their order** 487 | - Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and 488 | `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to 489 | sort the maps in place efficiently. 490 | 491 | ## 0.3.3 (2017-12-28) 492 | 493 | - Document insertion behaviour better by @lucab 494 | - Updated dependences (no feature changes) by @ignatenkobrain 495 | 496 | ## 0.3.2 (2017-11-25) 497 | 498 | - Add `OrderSet` by @cuviper! 499 | - `OrderMap::drain` is now (too) a double ended iterator. 500 | 501 | ## 0.3.1 (2017-11-19) 502 | 503 | - In all ordermap iterators, forward the `collect` method to the underlying 504 | iterator as well. 505 | - Add crates.io categories. 506 | 507 | ## 0.3.0 (2017-10-07) 508 | 509 | - The methods `get_pair`, `get_pair_index` were both replaced by 510 | `get_full` (and the same for the mutable case). 511 | - Method `swap_remove_pair` replaced by `swap_remove_full`. 512 | - Add trait `MutableKeys` for opt-in mutable key access. Mutable key access 513 | is only possible through the methods of this extension trait. 
514 | - Add new trait `Equivalent` for key equivalence. This extends the 515 | `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible 516 | way, just some minor type inference related issues may become apparent. 517 | See [#10] for more information. 518 | - Implement `Extend<(&K, &V)>` by @xfix. 519 | 520 | [#10]: https://github.com/indexmap-rs/indexmap/pull/10 521 | 522 | ## 0.2.13 (2017-09-30) 523 | 524 | - Fix deserialization to support custom hashers by @Techcable. 525 | - Add methods `.index()` on the entry types by @garro95. 526 | 527 | ## 0.2.12 (2017-09-11) 528 | 529 | - Add methods `.with_hasher()`, `.hasher()`. 530 | 531 | ## 0.2.11 (2017-08-29) 532 | 533 | - Support `ExactSizeIterator` for the iterators. By @Binero. 534 | - Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct. 535 | - Serde support, with crate feature `"serde-1"`. By @xfix. 536 | 537 | ## 0.2.10 (2017-04-29) 538 | 539 | - Add iterator `.drain(..)` by @stevej. 540 | 541 | ## 0.2.9 (2017-03-26) 542 | 543 | - Add method `.is_empty()` by @overvenus. 544 | - Implement `PartialEq, Eq` by @overvenus. 545 | - Add method `.sorted_by()`. 546 | 547 | ## 0.2.8 (2017-03-01) 548 | 549 | - Add iterators `.values()` and `.values_mut()`. 550 | - Fix compatibility with 32-bit platforms. 551 | 552 | ## 0.2.7 (2016-11-02) 553 | 554 | - Add `.retain()`. 555 | 556 | ## 0.2.6 (2016-11-02) 557 | 558 | - Add `OccupiedEntry::remove_entry` and other minor entry methods, 559 | so that it now has all the features of `HashMap`'s entries. 560 | 561 | ## 0.2.5 (2016-10-31) 562 | 563 | - Improved `.pop()` slightly. 564 | 565 | ## 0.2.4 (2016-10-22) 566 | 567 | - Improved performance of `.insert()` ([#3]) by @pczarn. 568 | 569 | [#3]: https://github.com/indexmap-rs/indexmap/pull/3 570 | 571 | ## 0.2.3 (2016-10-11) 572 | 573 | - Generalize `Entry` for now, so that it works on hashmaps with non-default 574 | hasher. However, there's a lingering compat issue since libstd `HashMap` 575 | does not parameterize its entries by the hasher (`S` typarm). 576 | - Special case some iterator methods like `.nth()`. 577 | 578 | ## 0.2.2 (2016-10-02) 579 | 580 | - Disable the verbose `Debug` impl by default. 581 | 582 | ## 0.2.1 (2016-10-02) 583 | 584 | - Fix doc links and clarify docs. 585 | 586 | ## 0.2.0 (2016-10-01) 587 | 588 | - Add more `HashMap` methods & compat with its API. 589 | - Experimental support for `.entry()` (the simplest parts of the API). 590 | - Add `.reserve()` (placeholder impl). 591 | - Add `.remove()` as synonym for `.swap_remove()`. 592 | - Changed `.insert()` to swap value if the entry already exists, and 593 | return `Option`. 594 | - Experimental support as an *indexed* hash map! Added methods 595 | `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`, 596 | `.get_pair_index()`, `.get_pair_index_mut()`. 597 | 598 | ## 0.1.2 (2016-09-19) 599 | 600 | - Implement the 32/32 split idea for `Pos` which improves cache utilization 601 | and lookup performance. 602 | 603 | ## 0.1.1 (2016-09-16) 604 | 605 | - Initial release. 606 | --------------------------------------------------------------------------------