├── .editorconfig
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── rustfmt.toml
└── src
    └── lib.rs

/.editorconfig:
--------------------------------------------------------------------------------
root = true

[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.sql]
insert_final_newline: false

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Generated by Cargo
# will have compiled files and executables
/target/

# These are backup files generated by rustfmt
**/*.rs.bk

.env

Cargo.lock

.DS_Store


# Added by cargo
#
# already existing elements were commented out

/target
#Cargo.lock

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
authors = ["Thomas Sieverding "]
edition = "2021"
name = "evlru"
version = "0.1.1"
description = "An eventually consistent LRU designed for lock-free concurrent reads"
readme = "./README.md"
license = "MIT"
repository = "https://github.com/Bajix/evlru/"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
crossbeam = "0.8"
evmap = "10.0.2"
rayon = "1.6"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Thomas Sieverding

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# EVLRU

![License](https://img.shields.io/badge/license-MIT-green.svg)
[![Cargo](https://img.shields.io/crates/v/evlru.svg)](https://crates.io/crates/evlru)
[![Documentation](https://docs.rs/evlru/badge.svg)](https://docs.rs/evlru)

An eventually consistent LRU designed for lock-free concurrent reads and eventual eviction.
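
A minimal usage sketch (illustrative, based on the API in `src/lib.rs`): writes are queued as pending operations and only become visible to readers once an apply cycle flushes them.

```rust
use evlru::EVLRU;

fn main() {
  // The flex-capacity bounds the access log; entries only become eviction
  // candidates once their access counters have aged out of it.
  let cache: EVLRU<&'static str, &'static str> = EVLRU::new(2);

  // Writes are buffered as pending ops until an apply cycle runs.
  cache.set("apple", "red".into());
  cache.set("banana", "yellow".into());
  cache.apply_blocking();

  assert_eq!(cache.get("apple").as_deref(), Some(&"red"));

  // `peek` reads the currently applied value without touching the access log.
  assert!(cache.peek("banana").is_some());
}
```

Pending changes can be flushed with `apply_blocking`, `apply_cooperatively`, or `background_apply_changes`, depending on whether blocking on the write lock is acceptable.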

--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
imports_granularity = "Crate"
newline_style = "Unix"
tab_spaces = 2

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
use crossbeam::{
  atomic::AtomicCell,
  queue::{ArrayQueue, SegQueue},
};
use evmap::{ReadHandle, ShallowCopy, WriteHandle};
use std::{
  collections::HashMap,
  hash::{Hash, Hasher},
  mem::ManuallyDrop,
  sync::{Arc, Mutex, MutexGuard},
};

pub trait Key: Send + Sync + Hash + Eq + Clone + 'static {}
impl<T: Send + Sync + Hash + Eq + Clone + 'static> Key for T {}

pub trait Value: Send + Sync + Clone + Eq + Hash + 'static {}
impl<T: Send + Sync + Clone + Eq + Hash + 'static> Value for T {}

// Operations buffered for the next apply cycle
enum EventualOp<K: Key, V: Value> {
  SetValue(K, Arc<V>),
  Extend(HashMap<K, Arc<V>>),
  InvalidateKey(K),
  GarbageCollect(K, Arc<AtomicCell<usize>>),
  PurgeCache,
}

// Pairs a value with a shared counter of the access-log entries currently referencing it
#[derive(Clone)]
struct ValueBox<T: Value> {
  value: Arc<T>,
  lru_counter: Arc<AtomicCell<usize>>,
}

impl<T> PartialEq for ValueBox<T>
where
  T: Value,
{
  fn eq(&self, other: &Self) -> bool {
    self.value.eq(&other.value)
  }
}

impl<T> Eq for ValueBox<T> where T: Value {}
impl<T> Hash for ValueBox<T>
where
  T: Value,
{
  fn hash<H: Hasher>(&self, state: &mut H) {
    self.value.hash(state);
  }
}

impl<T> ShallowCopy for ValueBox<T>
where
  T: Value,
{
  unsafe fn shallow_copy(&self) -> ManuallyDrop<Self> {
    ManuallyDrop::new(ValueBox {
      value: ManuallyDrop::into_inner(self.value.shallow_copy()),
      lru_counter: ManuallyDrop::into_inner(self.lru_counter.shallow_copy()),
    })
  }
}

impl<T> From<Arc<T>> for ValueBox<T>
where
  T: Value,
{
  fn from(value: Arc<T>) -> Self {
    let lru_counter = Arc::new(AtomicCell::new(0));
    ValueBox { value, lru_counter }
  }
}

// Write side: a lock-guarded evmap write handle, bounded access/recycling queues and the pending op queue
struct EventualWriter<K: Key, V: Value> {
  write_handle: Mutex<WriteHandle<K, ValueBox<V>>>,
  access_log: ArrayQueue<(K, Arc<AtomicCell<usize>>)>,
  recycling_bin: ArrayQueue<(K, Arc<AtomicCell<usize>>)>,
  pending_ops: SegQueue<EventualOp<K, V>>,
}

impl<K, V> EventualWriter<K, V>
where
  K: Key,
  V: Value,
{
  fn new(write_handle: WriteHandle<K, ValueBox<V>>, capacity: usize) -> Arc<Self> {
    let write_handle = Mutex::from(write_handle);
    let access_log = ArrayQueue::new(capacity);
    let recycling_bin = ArrayQueue::new(capacity);
    let pending_ops = SegQueue::new();

    Arc::new(EventualWriter {
      write_handle,
      access_log,
      recycling_bin,
      pending_ops,
    })
  }

  fn set(
    &self,
    key: K,
    value: ValueBox<V>,
    write_handle: &mut MutexGuard<WriteHandle<K, ValueBox<V>>>,
  ) {
    self.push_access_entry(key.clone(), &value);
    write_handle.update(key, value);
  }

  // Record an access; when the bounded access log overflows, the oldest entry is popped and, if it was
  // the last outstanding access for its key, it moves to the recycling bin as an eviction candidate
  fn push_access_entry(&self, key: K, value: &ValueBox<V>) {
    let lru_counter = value.lru_counter.to_owned();
    (&*lru_counter).fetch_add(1);

    let mut entry = (key, lru_counter);

    loop {
      entry = match self.access_log.push(entry) {
        Ok(()) => return,
        Err(entry) => {
          if let Some(entry) = self.access_log.pop() {
            if (&*entry.1).fetch_sub(1).eq(&1) {
              self.push_recycling_bin(entry);
            }
          }

          entry
        }
      };
    }
  }

  // When the bounded recycling bin overflows, entries whose counters are still zero are queued for
  // garbage collection on the next apply cycle
  fn push_recycling_bin(&self, mut entry: (K, Arc<AtomicCell<usize>>)) {
    loop {
      entry = match self.recycling_bin.push(entry) {
        Ok(()) => return,
        Err(entry) => {
          if let Some((key, lru_counter)) = self.recycling_bin.pop() {
            if lru_counter.as_ref().load().eq(&0) {
              self
                .pending_ops
                .push(EventualOp::GarbageCollect(key, lru_counter));
            }
          }

          entry
        }
      }
    }
  }

  fn apply_pending_ops(&self, write_handle: &mut MutexGuard<WriteHandle<K, ValueBox<V>>>) {
    let read_handle = write_handle.clone();

    while let Some(op) = self.pending_ops.pop() {
      self.apply_op(op, &read_handle, write_handle);
    }
  }

  fn apply_op(
    &self,
    op: EventualOp<K, V>,
    read_handle: &ReadHandle<K, ValueBox<V>>,
    write_handle: &mut MutexGuard<WriteHandle<K, ValueBox<V>>>,
  ) {
    match op {
      EventualOp::SetValue(key, value) => self.set(key, value.into(), write_handle),
      EventualOp::Extend(data) => {
        for (key, value) in data {
          self.set(key, value.into(), write_handle);
        }
      }
      EventualOp::InvalidateKey(key) => {
        write_handle.clear(key);
      }
      EventualOp::GarbageCollect(key, lru_counter) => {
        // Only evict if no new accesses occurred and the stored entry still owns this counter
        // (i.e. the key was not overwritten in the meantime)
        if lru_counter.as_ref().load().eq(&0) {
          if let Some(container) = read_handle.get_one(&key) {
            if Arc::ptr_eq(&container.lru_counter, &lru_counter) {
              write_handle.clear(key);
            }
          }
        }
      }
      EventualOp::PurgeCache => {
        while self.access_log.pop().is_some() {}

        write_handle.purge();
      }
    }
  }

  fn is_pending_empty(&self) -> bool {
    self.pending_ops.is_empty()
  }
}

/// An eventually consistent LRU designed for lock-free concurrent reads. This is `!Sync`, but it can be cloned and used thread-locally
#[derive(Clone)]
pub struct EVLRU<K: Key, V: Value> {
  reader: ReadHandle<K, ValueBox<V>>,
  writer: Arc<EventualWriter<K, V>>,
}

impl<K, V> EVLRU<K, V>
where
  K: Key,
  V: Value,
{
  /// Create a new EVLRU instance with a flex-capacity.
  pub fn new(flex_capacity: usize) -> Self {
    let (reader, write_handle) = evmap::new();
    let writer = EventualWriter::new(write_handle, flex_capacity);

    EVLRU { reader, writer }
  }

  /// Get the current value of a key as applied and append a new access counter to the access log.
  /// At capacity, the oldest access counter is dropped; should that be the last access counter for its
  /// key/value pair, the slot will be evicted during the next apply cycle if no reads or writes occur
  /// in the interim.
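  ///
  /// A usage sketch (illustrative; key/value types and capacity are arbitrary):
  ///
  /// ```
  /// use evlru::EVLRU;
  ///
  /// let cache: EVLRU<&'static str, &'static str> = EVLRU::new(16);
  /// cache.set("apple", "red".into());
  /// cache.apply_blocking();
  ///
  /// // `get` records an access in the access log; `peek` does not.
  /// assert!(cache.get("apple").is_some());
  /// assert!(cache.peek("apple").is_some());
  /// ```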
  pub fn get(&self, key: K) -> Option<Arc<V>> {
    if let Some(container) = self.reader.get_one(&key).map(|guard| guard.clone()) {
      self.writer.push_access_entry(key, &container);

      Some(container.value)
    } else {
      None
    }
  }

  /// Get the current value of a key as applied without updating the access log
  pub fn peek(&self, key: K) -> Option<Arc<V>> {
    if let Some(container) = self.reader.get_one(&key).map(|guard| guard.clone()) {
      Some(container.value)
    } else {
      None
    }
  }

  /// Returns a bool indicating whether the given key is in the cache as currently applied, without
  /// updating the access log
  pub fn contains(&self, key: &K) -> bool {
    self.reader.contains_key(key)
  }

  /// Set a value on the next apply cycle
  pub fn set(&self, key: K, value: Arc<V>) {
    self
      .writer
      .pending_ops
      .push(EventualOp::SetValue(key, value));
  }

  /// Extend with key/value pairs on the next apply cycle
  pub fn extend<I: IntoIterator<Item = (K, Arc<V>)>>(&self, iter: I) {
    let data: HashMap<K, Arc<V>> = HashMap::from_iter(iter);

    self.writer.pending_ops.push(EventualOp::Extend(data));
  }

  /// Mark a key to be invalidated on the next apply cycle
  pub fn invalidate_key(&self, key: K) {
    self.writer.pending_ops.push(EventualOp::InvalidateKey(key));
  }

  /// Clear pending operations and mark the cache to be purged on the next apply cycle
  pub fn purge(&self) {
    while self.writer.pending_ops.pop().is_some() {}
    self.writer.pending_ops.push(EventualOp::PurgeCache);
  }

  /// Block to acquire the write lock and apply all pending changes. It is preferable to use
  /// [`EVLRU::apply_cooperatively`] or [`EVLRU::background_apply_changes`] instead whenever possible,
  /// because those only apply changes when the lock can be acquired and otherwise delegate the
  /// responsibility for applying changes to the current lock holder.
  pub fn apply_blocking(&self) {
    let mut write_handle = self.writer.write_handle.lock().unwrap();
    self.writer.apply_pending_ops(&mut write_handle);
    write_handle.refresh();
  }

  /// Cooperatively apply pending changes in cycles until no pending ops remain or another lock holder
  /// takes over between cycles. This ensures all pending work is eventually processed, either directly
  /// or indirectly, without ever blocking on lock acquisition.
  pub fn apply_cooperatively(&self) {
    while let Ok(mut write_handle) = self.writer.write_handle.try_lock() {
      self.writer.apply_pending_ops(&mut write_handle);
      write_handle.refresh();
      drop(write_handle);

      if self.writer.pending_ops.is_empty() {
        break;
      }
    }
  }

  /// Cooperatively apply pending changes in cycles using rayon's thread pool. Each cycle, the
  /// responsibility to apply pending ops and then flush updates to readers is delegated via lock
  /// acquisition, and this repeats until it is guaranteed that no pending ops are left unprocessed.
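  ///
  /// A fire-and-forget sketch (illustrative; the update becomes visible to readers only after the
  /// background refresh completes):
  ///
  /// ```no_run
  /// use evlru::EVLRU;
  ///
  /// let cache: EVLRU<&'static str, &'static str> = EVLRU::new(16);
  /// cache.set("apple", "red".into());
  ///
  /// // Applies pending ops on rayon's global thread pool without blocking this thread.
  /// cache.background_apply_changes();
  /// ```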
  pub fn background_apply_changes(&self) {
    let writer = self.writer.clone();

    if !writer.is_pending_empty() {
      rayon::spawn(move || {
        // If there is already a lock holder, the responsibility for handling pending ops is delegated to it
        while let Ok(mut write_handle) = writer.write_handle.try_lock() {
          writer.apply_pending_ops(&mut write_handle);
          write_handle.refresh();
          drop(write_handle);

          // This ensures exactly one writer drives all pending ops to completion, so that ops pushed
          // while flushing are delegated rather than left unprocessed
          if writer.pending_ops.is_empty() {
            break;
          }
        }
      });
    }
  }
}

#[cfg(test)]
mod tests {
  use crate::EVLRU;

  #[test]
  fn it_expires_oldest() {
    let cache: EVLRU<&'static str, &'static str> = EVLRU::new(2);

    cache.set("apple", "red".into());
    cache.set("banana", "yellow".into());

    cache.apply_blocking();

    assert!(cache.get("apple").is_some());
    assert!(cache.get("banana").is_some());

    cache.set("pear", "green".into());
    cache.set("peach", "orange".into());
    cache.set("coconut", "brown".into());

    cache.apply_blocking();

    assert!(cache.get("banana").is_some());
    assert!(cache.get("pear").is_some());
    assert!(cache.get("apple").is_none());
  }

  #[test]
  fn it_purges() {
    let cache: EVLRU<&'static str, &'static str> = EVLRU::new(2);

    cache.set("apple", "red".into());
    cache.set("banana", "yellow".into());

    cache.apply_blocking();

    assert!(cache.get("apple").is_some());
    assert!(cache.get("banana").is_some());

    cache.purge();
    cache.apply_blocking();

    assert!(cache.get("pear").is_none());
    assert!(cache.get("apple").is_none());
  }

  #[test]
  fn it_invalidates() {
    let cache: EVLRU<&'static str, &'static str> = EVLRU::new(2);

    cache.set("apple", "red".into());

    cache.apply_blocking();

    assert!(cache.get("apple").is_some());

    cache.invalidate_key("apple");

    cache.apply_blocking();

    assert!(cache.get("apple").is_none());
  }
}
--------------------------------------------------------------------------------