├── .github
│   └── dependabot.yml
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   └── criterion.rs
└── src
    └── lib.rs

/.github/dependabot.yml:
--------------------------------------------------------------------------------
version: 2
updates:
  - package-ecosystem: cargo
    directory: "/"
    schedule:
      interval: daily
      time: "04:00"
    open-pull-requests-limit: 10
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Generated by Cargo
# will have compiled files and executables
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock
Cargo.lock
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "clockpro-cache"
description = "CLOCK-Pro cache replacement policy"
version = "0.1.12"
homepage = "https://github.com/jedisct1/rust-clockpro-cache"
keywords = ["cache"]
license = "MIT"
authors = ["Frank Denis "]
categories = ["algorithms", "caching"]
edition = "2018"

[badges]
appveyor = { repository = "jedisct1/rust-clockpro-cache" }

[dependencies]
slabigator = { version = "0.9.1", features = ["slot_usize", "releasefast"] }
bitflags = "2.4"

[dev-dependencies]
criterion = "0.5"
rand = "0.8.5"
rand_distr = "0.4.3"

[[bench]]
name = "criterion"
harness = false

[profile.release]
panic = "abort"
opt-level = 3
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2016-2021 Frank Denis

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
[![dependency status](https://deps.rs/repo/github/jedisct1/rust-clockpro-cache/status.svg)](https://deps.rs/repo/github/jedisct1/rust-clockpro-cache)

rust-clockpro-cache
===================
CLOCK-Pro cache replacement algorithm for Rust

Based on a [Python implementation](https://bitbucket.org/SamiLehtinen/pyclockpro)
by Sami Lehtinen
and a [Go implementation](https://github.com/dgryski/go-clockpro) by Damian Gryski.

Original paper:
[CLOCK-Pro: An Effective Improvement of the CLOCK Replacement](http://static.usenix.org/event/usenix05/tech/general/full_papers/jiang/jiang_html/html.html).
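
A minimal usage sketch (the key/value types and the capacity below are arbitrary
examples):

```rust
use clockpro_cache::ClockProCache;

fn main() {
    // The capacity must be at least 3 entries.
    let mut cache: ClockProCache<u64, String> = ClockProCache::new(16).unwrap();

    cache.insert(1, "one".to_string());
    assert_eq!(cache.get(&1), Some(&"one".to_string()));
    assert!(cache.get(&2).is_none());

    // Counters expose how entries are currently classified.
    assert_eq!(cache.recent_len(), 1);
    assert_eq!(cache.frequent_len(), 0);
}
```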
--------------------------------------------------------------------------------
/benches/criterion.rs:
--------------------------------------------------------------------------------
#[macro_use]
extern crate criterion;

use clockpro_cache::ClockProCache;
use criterion::{black_box, Criterion};
use rand::thread_rng;
use rand_distr::{Distribution, Normal, Uniform};

fn bench_sequence(c: &mut Criterion) {
    c.bench_function("bench_sequence", |b| {
        let mut cache: ClockProCache<u64, u64> = ClockProCache::new(68).unwrap();
        b.iter(|| {
            for i in 1..1000 {
                let n = i % 100;
                black_box(cache.insert(n, n));
            }
        });
        b.iter(|| {
            for i in 1..1000 {
                let n = i % 100;
                black_box(cache.get(&n));
            }
        });
    });
}

fn bench_composite(c: &mut Criterion) {
    c.bench_function("bench_composite", |b| {
        let mut cache: ClockProCache<u64, (Vec<u8>, u64)> = ClockProCache::new(68).unwrap();
        let mut rng = thread_rng();
        let uniform = Uniform::new(0, 100);
        let mut rand_iter = uniform.sample_iter(&mut rng);
        b.iter(|| {
            for _ in 1..1000 {
                let n = rand_iter.next().unwrap();
                black_box(cache.insert(n, (vec![0u8; 12], n)));
            }
        });
        b.iter(|| {
            for _ in 1..1000 {
                let n = rand_iter.next().unwrap();
                black_box(cache.get(&n));
            }
        });
    });
}

fn bench_composite_normal(c: &mut Criterion) {
    // The cache size is ~ 1x sigma (stddev) to retain roughly >68% of records
    const SIGMA: f64 = 50.0 / 3.0;

    c.bench_function("bench_composite_normal", |b| {
        let mut cache: ClockProCache<u64, (Vec<u8>, u64)> =
            ClockProCache::new(SIGMA as usize).unwrap();

        // This should roughly cover all elements (within 3-sigma)
        let mut rng = thread_rng();
        let normal = Normal::new(50.0, SIGMA).unwrap();
        let mut rand_iter = normal.sample_iter(&mut rng).map(|x| (x as u64) % 100);
        b.iter(|| {
            for _ in 1..1000 {
                let n = rand_iter.next().unwrap();
                black_box(cache.insert(n, (vec![0u8; 12], n)));
            }
        });
        b.iter(|| {
            for _ in 1..1000 {
                let n = rand_iter.next().unwrap();
                black_box(cache.get(&n));
            }
        });
    });
}

criterion_group!(
    benches,
    bench_sequence,
    bench_composite,
    bench_composite_normal
);
criterion_main!(benches);
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![doc = include_str!("../README.md")]

#[macro_use]
extern crate bitflags;

use crate::token_ring::{Token, TokenRing};
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;
use std::mem::MaybeUninit;

bitflags! {
    struct NodeType: u8 {
        const EMPTY = 0b00001;
        const HOT = 0b00010;
        const COLD = 0b00100;
        const TEST = 0b01000;
        const MASK = Self::EMPTY.bits() | Self::HOT.bits() | Self::COLD.bits() | Self::TEST.bits();
        const REFERENCE = 0b10000;
    }
}
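
// In CLOCK-Pro terms: HOT marks resident pages considered frequently used,
// COLD marks resident pages that were only recently admitted, and TEST marks
// non-resident entries whose value has already been dropped but whose key is
// still tracked, so that a prompt re-insertion can be recognized as a sign of
// frequency. REFERENCE is OR-ed in whenever an entry is accessed and is
// consumed by the clock hands below; MASK covers the mutually exclusive state
// bits so a node can switch states with one remove/insert pair.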

struct Node<K, V> {
    key: MaybeUninit<K>,
    value: Option<V>,
    node_type: NodeType,
}

impl<K, V> Default for Node<K, V> {
    fn default() -> Self {
        Node {
            key: MaybeUninit::uninit(),
            value: None,
            node_type: NodeType::EMPTY,
        }
    }
}

/// A CLOCK-Pro cache that maps keys to values.
pub struct ClockProCache<K, V> {
    capacity: usize,
    test_capacity: usize,
    cold_capacity: usize,
    map: HashMap<K, Token>,
    ring: TokenRing,
    nodes: Vec<Node<K, V>>,
    hand_hot: Token,
    hand_cold: Token,
    hand_test: Token,
    count_hot: usize,
    count_cold: usize,
    count_test: usize,
    inserted: u64,
    evicted: u64,
}
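
// The three `hand_*` tokens index into `ring`/`nodes` and correspond to the
// CLOCK-Pro clock hands: `hand_cold` is advanced to evict or promote cold
// pages, `hand_hot` demotes hot pages that were not referenced, and
// `hand_test` expires test entries. `cold_capacity` is the adaptive share of
// the capacity reserved for cold pages; it grows when a key is re-inserted
// during its test period (`insert`) and shrinks when a test entry expires
// (`run_hand_test`).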

impl<K, V> ClockProCache<K, V>
where
    K: Eq + Hash + Clone,
{
    /// Create a new cache with the given capacity.
    pub fn new(capacity: usize) -> Result<Self, &'static str> {
        Self::new_with_test_capacity(capacity, capacity)
    }

    /// Create a new cache with the given value and test capacities.
    ///
    /// The test capacity is used for tracking recently evicted entries, so that they will
    /// be considered frequently used if they get reinserted.
    pub fn new_with_test_capacity(
        capacity: usize,
        test_capacity: usize,
    ) -> Result<Self, &'static str> {
        if capacity < 3 {
            return Err("Cache size cannot be less than 3 entries");
        }
        let mut nodes = Vec::with_capacity(capacity + test_capacity);
        nodes.resize_with(capacity + test_capacity, Node::default);
        let cache = ClockProCache {
            capacity,
            test_capacity,
            cold_capacity: capacity,
            map: HashMap::with_capacity(capacity + test_capacity),
            ring: TokenRing::with_capacity(capacity + test_capacity),
            nodes,
            hand_hot: 0,
            hand_cold: 0,
            hand_test: 0,
            count_hot: 0,
            count_cold: 0,
            count_test: 0,
            inserted: 0,
            evicted: 0,
        };
        Ok(cache)
    }

    /// Returns the number of cached values.
    #[inline]
    pub fn len(&self) -> usize {
        self.count_cold + self.count_hot
    }

    /// Returns `true` when no values are currently cached.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the number of recently inserted values.
    #[inline]
    pub fn recent_len(&self) -> usize {
        self.count_cold
    }

    /// Returns the number of frequently fetched or updated values.
    #[inline]
    pub fn frequent_len(&self) -> usize {
        self.count_hot
    }

    /// Returns the number of test entries.
    #[inline]
    pub fn test_len(&self) -> usize {
        self.count_test
    }

    /// Returns how many values have been inserted into the cache overall.
    #[inline]
    pub fn inserted(&self) -> u64 {
        self.inserted
    }

    /// Returns how many values have been evicted from the cache.
    #[inline]
    pub fn evicted(&self) -> u64 {
        self.evicted
    }

    /// Get a mutable reference to the value in the cache mapped to by `key`.
    ///
    /// If no value exists for `key`, this returns `None`.
    pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash,
    {
        let token = *self.map.get(key)?;
        let node = &mut self.nodes[token];
        let value = node.value.as_mut()?;
        node.node_type.insert(NodeType::REFERENCE);
        Some(value)
    }

    /// Get an immutable reference to the value in the cache mapped to by `key`.
    ///
    /// If no value exists for `key`, this returns `None`.
    pub fn get<Q: ?Sized>(&mut self, key: &Q) -> Option<&V>
    where
        Q: Hash + Eq,
        K: Borrow<Q>,
    {
        let token = *self.map.get(key)?;
        let node = &mut self.nodes[token];
        let value = node.value.as_ref()?;
        node.node_type.insert(NodeType::REFERENCE);
        Some(value)
    }

    /// Returns `true` if there is a value in the cache mapped to by `key`.
    pub fn contains_key<Q: ?Sized>(&mut self, key: &Q) -> bool
    where
        Q: Hash + Eq,
        K: Borrow<Q>,
    {
        if let Some(&token) = self.map.get(key) {
            self.nodes[token].value.is_some()
        } else {
            false
        }
    }
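
    // `insert` below distinguishes three cases: an unknown key is admitted as
    // a new COLD page; a key whose node still holds a value is updated in
    // place and marked REFERENCE; and a key that only survives as a TEST entry
    // is re-admitted directly as HOT, with `cold_capacity` nudged upward
    // because the miss happened during the key's test period.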

    /// Map `key` to `value` in the cache, possibly evicting old entries.
    ///
    /// This method returns `true` when this is a new entry, and `false` if an existing entry was
    /// updated.
    pub fn insert(&mut self, key: K, value: V) -> bool {
        let token = match self.map.get(&key).cloned() {
            None => {
                self.meta_add(key, value, NodeType::COLD);
                self.count_cold += 1;
                self.inserted += 1;
                return true;
            }
            Some(token) => token,
        };
        {
            let mentry = &mut self.nodes[token];
            if mentry.value.is_some() {
                mentry.value = Some(value);
                mentry.node_type.insert(NodeType::REFERENCE);
                return false;
            }
        }
        if self.cold_capacity < self.capacity {
            self.cold_capacity += 1;
        }
        self.count_test -= 1;
        self.meta_del(token);
        self.meta_add(key, value, NodeType::HOT);
        self.count_hot += 1;
        true
    }

    /// Remove the cache entry mapped to by `key`.
    ///
    /// This method returns the value removed from the cache. If `key` did not map to any value,
    /// then this returns `None`.
    pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash,
    {
        let token = *self.map.get(key)?;
        let node = &mut self.nodes[token];
        let value = node.value.take();

        // The key is in map, so the node must be HOT or COLD
        if node.node_type.intersects(NodeType::HOT) {
            self.count_hot -= 1;
        } else if node.node_type.intersects(NodeType::COLD) {
            self.count_cold -= 1;
        }

        self.meta_del(token);
        value
    }

    fn meta_add(&mut self, key: K, value: V, node_type: NodeType) {
        self.evict();
        let token = self.ring.insert_after(self.hand_hot);
        self.nodes[token] = Node {
            key: MaybeUninit::new(key.clone()),
            value: Some(value),
            node_type,
        };
        self.map.insert(key, token);
        if self.hand_cold == self.hand_hot {
            self.hand_cold = self.ring.prev_for_token(self.hand_cold);
        }
    }
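
    // Eviction machinery: `evict` runs the cold hand until the number of
    // resident pages drops below `capacity`. The cold hand turns referenced
    // COLD pages into HOT ones and demotes unreferenced ones to non-resident
    // TEST entries; the hot hand clears the REFERENCE bit or demotes HOT pages
    // back to COLD; the test hand deletes stale TEST entries and shrinks
    // `cold_capacity`. This follows the hand movements described in the
    // CLOCK-Pro paper linked from the README.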

    fn evict(&mut self) {
        while self.count_hot + self.count_cold >= self.capacity {
            self.run_hand_cold();
        }
    }

    fn run_hand_cold(&mut self) {
        let mut run_hand_test = false;
        {
            let mentry = &mut self.nodes[self.hand_cold];
            if mentry.node_type.intersects(NodeType::COLD) {
                if mentry.node_type.intersects(NodeType::REFERENCE) {
                    mentry.node_type = NodeType::HOT;
                    self.count_cold -= 1;
                    self.count_hot += 1;
                } else {
                    mentry.node_type.remove(NodeType::MASK);
                    mentry.node_type.insert(NodeType::TEST);
                    mentry.value = None;
                    self.count_cold -= 1;
                    self.count_test += 1;
                    run_hand_test = true;
                }
            }
        }
        if run_hand_test {
            while self.count_test > self.test_capacity {
                self.run_hand_test();
            }
        }
        self.hand_cold = self.ring.next_for_token(self.hand_cold);
        while self.count_hot > self.capacity - self.cold_capacity {
            self.run_hand_hot();
        }
    }

    fn run_hand_hot(&mut self) {
        if self.hand_hot == self.hand_test {
            self.run_hand_test();
        }
        {
            let mentry = &mut self.nodes[self.hand_hot];
            if mentry.node_type.intersects(NodeType::HOT) {
                if mentry.node_type.intersects(NodeType::REFERENCE) {
                    mentry.node_type.remove(NodeType::REFERENCE);
                } else {
                    mentry.node_type.remove(NodeType::MASK);
                    mentry.node_type.insert(NodeType::COLD);
                    self.count_hot -= 1;
                    self.count_cold += 1;
                }
            }
        }
        self.hand_hot = self.ring.next_for_token(self.hand_hot);
    }

    fn run_hand_test(&mut self) {
        if self.hand_test == self.hand_cold {
            self.run_hand_cold();
        }
        if self.nodes[self.hand_test]
            .node_type
            .intersects(NodeType::TEST)
        {
            let prev = self.ring.prev_for_token(self.hand_test);
            let hand_test = self.hand_test;
            self.meta_del(hand_test);
            self.hand_test = prev;
            self.count_test -= 1;
            if self.cold_capacity > 1 {
                self.cold_capacity -= 1;
            }
        }
        self.hand_test = self.ring.next_for_token(self.hand_test);
    }

    fn meta_del(&mut self, token: Token) {
        {
            let mentry = &mut self.nodes[token];
            mentry.node_type.remove(NodeType::MASK);
            mentry.node_type.insert(NodeType::EMPTY);
            mentry.value = None;
            self.map.remove(unsafe { mentry.key.assume_init_ref() });
        }
        if token == self.hand_hot {
            self.hand_hot = self.ring.prev_for_token(self.hand_hot);
        }
        if token == self.hand_cold {
            self.hand_cold = self.ring.prev_for_token(self.hand_cold);
        }
        if token == self.hand_test {
            self.hand_test = self.ring.prev_for_token(self.hand_test);
        }
        self.ring.remove(token);
        self.evicted += 1;
    }
}

unsafe impl<K, V> Send for ClockProCache<K, V>
where
    K: Send,
    V: Send,
{
}

unsafe impl<K, V> Sync for ClockProCache<K, V>
where
    K: Sync,
    V: Sync,
{
}

// A slab-backed circular doubly-linked list of tokens; this is the "clock"
// that the hands above walk over, with O(1) insertion and removal.
mod token_ring {
    use slabigator::Slab;

    pub type Token = usize;
    const TOKEN_THUMBSTONE: Token = !0;

    pub struct Node {
        next: Token,
        prev: Token,
    }

    pub struct TokenRing {
        head: Token,
        tail: Token,
        slab: Slab<Node>,
    }

    impl TokenRing {
        pub fn with_capacity(capacity: usize) -> Self {
            if capacity < 1 {
                panic!("A ring cannot have a capacity smaller than 1");
            }
            let slab = Slab::with_capacity(capacity).expect("requested capacity is too large");
            TokenRing {
                head: TOKEN_THUMBSTONE,
                tail: TOKEN_THUMBSTONE,
                slab,
            }
        }

        #[allow(dead_code)]
        #[inline]
        pub fn len(&self) -> usize {
            self.slab.len()
        }

        #[inline]
        pub fn next_for_token(&self, token: Token) -> Token {
            let next = self.slab[token].next;
            if next == TOKEN_THUMBSTONE {
                assert!(self.head != TOKEN_THUMBSTONE);
                self.head
            } else {
                next
            }
        }

        #[inline]
        pub fn prev_for_token(&self, token: Token) -> Token {
            let prev = self.slab[token].prev;
            if prev == TOKEN_THUMBSTONE {
                assert!(self.tail != TOKEN_THUMBSTONE);
                self.tail
            } else {
                prev
            }
        }

        pub fn remove(&mut self, token: Token) {
            let (prev, next) = (self.slab[token].prev, self.slab[token].next);
            if prev != TOKEN_THUMBSTONE {
                self.slab[prev].next = next;
            } else {
                self.head = next;
            }
            if next != TOKEN_THUMBSTONE {
                self.slab[next].prev = prev;
            } else {
                self.tail = prev;
            }
            self.slab[token].prev = TOKEN_THUMBSTONE;
            self.slab[token].next = TOKEN_THUMBSTONE;
            self.slab.remove(token).expect("removed token not in slab");
        }

        pub fn insert_after(&mut self, to: Token) -> Token {
            if self.slab.is_empty() {
                let node = Node {
                    prev: TOKEN_THUMBSTONE,
                    next: TOKEN_THUMBSTONE,
                };
                let token = self.slab.push_front(node).expect("over capacity");
                self.head = token;
                self.tail = token;
                return token;
            }
            let to_prev = self.slab[to].prev;
            let old_second = to_prev;
            if old_second == TOKEN_THUMBSTONE {
                let old_second = self.tail;
                let node = Node {
                    prev: old_second,
                    next: TOKEN_THUMBSTONE,
                };
                let token = self.slab.push_front(node).expect("over capacity");
                self.slab[old_second].next = token;
                self.tail = token;
                token
            } else {
                let node = Node {
                    prev: old_second,
                    next: to,
                };
                let token = self.slab.push_front(node).expect("over capacity");
                self.slab[old_second].next = token;
                self.slab[to].prev = token;
                token
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ClockProCache;

    #[test]
    fn test_cache() {
        let mut cache = ClockProCache::new(3).unwrap();
        cache.insert("testkey", "testvalue");
        assert!(cache.contains_key("testkey"));
        cache.insert("testkey2", "testvalue2");
        assert!(cache.contains_key("testkey2"));
        cache.insert("testkey3", "testvalue3");
        assert!(cache.contains_key("testkey3"));
        cache.insert("testkey4", "testvalue4");
        assert!(cache.contains_key("testkey4"));
        assert!(cache.contains_key("testkey3"));
        assert!(!cache.contains_key("testkey2"));
        cache.insert("testkey", "testvalue");
        assert!(cache.get_mut("testkey").is_some());
        assert!(cache.get_mut("testkey-nx").is_none());
    }

    #[test]
    fn test_recycle() {
        let mut cache: ClockProCache<u64, u64> = ClockProCache::new(3).unwrap();
        for i in 0..7 {
            assert!(cache.insert(i, i));
        }
        for i in 0..2 {
            match cache.get(&i) {
                None => {}
                Some(x) => assert_eq!(*x, i),
            }
        }
    }

    #[test]
    fn test_composite() {
        let mut cache: ClockProCache<u64, (Vec<u8>, u64)> = ClockProCache::new(3).unwrap();
        for i in 0..7 {
            assert!(cache.insert(i, (vec![0u8; 12], i)));
        }
        for i in 0..2 {
            match cache.get(&i) {
                None => {}
                Some(x) => assert_eq!(x.1, i),
            }
        }
    }

    #[test]
    fn test_remove() {
        let mut cache: ClockProCache<u64, u64> = ClockProCache::new(4).unwrap();
        for i in 0..4 {
            assert!(cache.insert(i, i));
        }

        assert_eq!(cache.remove(&2), Some(2));
        assert_eq!(cache.remove(&3), Some(3));
        assert_eq!(cache.remove(&3), None);

        for i in 0..4 {
            match i {
                2 | 3 => assert_eq!(cache.get(&i), None),
                _ => assert_eq!(*cache.get(&i).unwrap(), i),
            };
        }

        // Reinsert removed entries
        for i in 2..4 {
            assert!(cache.insert(i, i));
        }

        // Check that all entries still exist
        for i in 0..4 {
            assert_eq!(*cache.get(&i).unwrap(), i);
        }
    }

    #[test]
    fn test_length_and_counters() {
        let mut cache: ClockProCache<usize, usize> = ClockProCache::new(5).unwrap();

        // Cache starts out empty.
        assert_eq!(cache.is_empty(), true);

        for i in 1..=5 {
            // Cache length should increase with each new item.
            assert!(cache.insert(i, i));
            assert_eq!(cache.len(), i);
        }

        // Cache is no longer empty.
        assert_eq!(cache.is_empty(), false);
        assert_eq!(cache.inserted(), 5);
        assert_eq!(cache.frequent_len(), 0);
        assert_eq!(cache.recent_len(), 5);

        // Cache length should be capped at capacity.
        assert!(cache.insert(6, 6));
        assert!(cache.insert(7, 7));

        assert_eq!(cache.len(), 5);
        assert_eq!(cache.inserted(), 7);
        assert_eq!(cache.frequent_len(), 0);
        assert_eq!(cache.recent_len(), 5);

        // Reference the two recent values and insert new ones to run the hand
        // and make the REFERENCED nodes HOT.
        assert_eq!(cache.get(&6), Some(&6));
        assert_eq!(cache.get(&7), Some(&7));

        for i in 8..=15 {
            assert!(cache.insert(i, i));
        }

        // Both 6 and 7 should be HOT and not have been evicted.
        assert_eq!(cache.get(&6), Some(&6));
        assert_eq!(cache.get(&7), Some(&7));

        assert_eq!(cache.len(), 5);
        assert_eq!(cache.inserted(), 15);
        assert_eq!(cache.frequent_len(), 2);
        assert_eq!(cache.recent_len(), 3);
        assert_eq!(cache.test_len(), 5);

        // Removing 6 and 15 should decrement HOT and COLD counters.
        assert_eq!(cache.remove(&6), Some(6));
        assert_eq!(cache.remove(&15), Some(15));
        assert_eq!(cache.frequent_len(), 1);
        assert_eq!(cache.recent_len(), 2);
    }

    #[test]
    fn test_evicted_to_hot() {
        let mut cache: ClockProCache<u64, u64> =
            ClockProCache::new_with_test_capacity(3, 30).unwrap();

        // Insert test capacity items.
        for i in 0..30 {
            assert!(cache.insert(i, i));
        }

        assert_eq!(cache.frequent_len(), 0);
        assert_eq!(cache.recent_len(), 3);
        assert_eq!(cache.test_len(), 27);

        // 10 should be evicted but still have a TEST node.
        assert_eq!(cache.get(&10), None);

        // Inserting 10 again should replace the TEST node with a HOT one.
        assert!(cache.insert(10, 10));
        assert_eq!(cache.frequent_len(), 1);
        assert_eq!(cache.recent_len(), 2);
        assert_eq!(cache.test_len(), 27);
    }
}
--------------------------------------------------------------------------------