├── .gitattributes ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── memlog ├── Cargo.toml ├── src │ ├── lib.rs │ └── log.rs └── tests │ ├── atomics_and_locks.rs │ ├── common │ ├── harness.rs │ ├── mod.rs │ └── utils.rs │ ├── exchange_ordering.rs │ ├── preshing.rs │ ├── seq_cst.rs │ ├── stackoverflow.rs │ ├── standard_atomic.rs │ ├── standard_fence.rs │ ├── system.rs │ ├── update.rs │ └── williams.rs ├── readme.md ├── src ├── lib.rs ├── main.rs └── temper │ ├── memory │ ├── core.rs │ └── mod.rs │ ├── mod.rs │ ├── system │ ├── core.rs │ └── mod.rs │ └── utils │ ├── mod.rs │ └── sleepwait.rs └── tests ├── atom.rs ├── common ├── mod.rs └── utils.rs └── memory.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | env: 4 | CARGO_TERM_COLOR: always 5 | 6 | on: 7 | push: 8 | branches: [ '*' ] 9 | pull_request: 10 | branches: [ '*' ] 11 | 12 | jobs: 13 | build_and_test: 14 | name: Temper 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | - uses: actions-rs/toolchain@v1 19 | with: 20 | toolchain: stable 21 | - uses: actions-rs/cargo@v1 22 | with: 23 | command: test 24 | args: --release --workspace -- --test-threads=1 25 | 26 | clippy: 27 | name: Clippy 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v2 31 | - uses: actions-rs/toolchain@v1 32 | with: 33 | profile: minimal 34 | toolchain: stable 35 | override: true 36 | - run: rustup component add clippy 37 | - uses: actions-rs/cargo@v1 38 | with: 39 | command: clippy 40 | args: --tests --workspace -- -D warnings -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | /target 2 | .idea -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 3 4 | 5 | [[package]] 6 | name = "autocfg" 7 | version = "1.1.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 10 | 11 | [[package]] 12 | name = "cfg-if" 13 | version = "1.0.0" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 16 | 17 | [[package]] 18 | name = "chrono" 19 | version = "0.4.19" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" 22 | dependencies = [ 23 | "libc", 24 | "num-integer", 25 | "num-traits", 26 | "time", 27 | "winapi", 28 | ] 29 | 30 | [[package]] 31 | name = "getrandom" 32 | version = "0.2.5" 33 | source = "registry+https://github.com/rust-lang/crates.io-index" 34 | checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" 35 | dependencies = [ 36 | "cfg-if", 37 | "libc", 38 | "wasi", 39 | ] 40 | 41 | [[package]] 42 | name = "hermit-abi" 43 | version = "0.1.19" 44 | source = "registry+https://github.com/rust-lang/crates.io-index" 45 | checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" 46 | dependencies = [ 47 | "libc", 48 | ] 49 | 50 | [[package]] 51 | name = "libc" 52 | version = "0.2.119" 53 | source = "registry+https://github.com/rust-lang/crates.io-index" 54 | checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4" 55 | 56 | [[package]] 57 | name = "memlog" 58 | version = "0.1.0" 
59 | dependencies = [ 60 | "rand", 61 | "rand_chacha", 62 | ] 63 | 64 | [[package]] 65 | name = "num-integer" 66 | version = "0.1.44" 67 | source = "registry+https://github.com/rust-lang/crates.io-index" 68 | checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" 69 | dependencies = [ 70 | "autocfg", 71 | "num-traits", 72 | ] 73 | 74 | [[package]] 75 | name = "num-traits" 76 | version = "0.2.14" 77 | source = "registry+https://github.com/rust-lang/crates.io-index" 78 | checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" 79 | dependencies = [ 80 | "autocfg", 81 | ] 82 | 83 | [[package]] 84 | name = "num_cpus" 85 | version = "1.13.1" 86 | source = "registry+https://github.com/rust-lang/crates.io-index" 87 | checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" 88 | dependencies = [ 89 | "hermit-abi", 90 | "libc", 91 | ] 92 | 93 | [[package]] 94 | name = "ppv-lite86" 95 | version = "0.2.16" 96 | source = "registry+https://github.com/rust-lang/crates.io-index" 97 | checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" 98 | 99 | [[package]] 100 | name = "rand" 101 | version = "0.8.5" 102 | source = "registry+https://github.com/rust-lang/crates.io-index" 103 | checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" 104 | dependencies = [ 105 | "libc", 106 | "rand_chacha", 107 | "rand_core", 108 | ] 109 | 110 | [[package]] 111 | name = "rand_chacha" 112 | version = "0.3.1" 113 | source = "registry+https://github.com/rust-lang/crates.io-index" 114 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 115 | dependencies = [ 116 | "ppv-lite86", 117 | "rand_core", 118 | ] 119 | 120 | [[package]] 121 | name = "rand_core" 122 | version = "0.6.3" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" 125 | dependencies = [ 126 | 
"getrandom", 127 | ] 128 | 129 | [[package]] 130 | name = "temper" 131 | version = "0.1.0" 132 | dependencies = [ 133 | "chrono", 134 | "rand", 135 | "rand_chacha", 136 | "threadpool", 137 | "uuid", 138 | ] 139 | 140 | [[package]] 141 | name = "threadpool" 142 | version = "1.8.1" 143 | source = "registry+https://github.com/rust-lang/crates.io-index" 144 | checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" 145 | dependencies = [ 146 | "num_cpus", 147 | ] 148 | 149 | [[package]] 150 | name = "time" 151 | version = "0.1.44" 152 | source = "registry+https://github.com/rust-lang/crates.io-index" 153 | checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" 154 | dependencies = [ 155 | "libc", 156 | "wasi", 157 | "winapi", 158 | ] 159 | 160 | [[package]] 161 | name = "uuid" 162 | version = "0.8.2" 163 | source = "registry+https://github.com/rust-lang/crates.io-index" 164 | checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" 165 | dependencies = [ 166 | "getrandom", 167 | ] 168 | 169 | [[package]] 170 | name = "wasi" 171 | version = "0.10.0+wasi-snapshot-preview1" 172 | source = "registry+https://github.com/rust-lang/crates.io-index" 173 | checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" 174 | 175 | [[package]] 176 | name = "winapi" 177 | version = "0.3.9" 178 | source = "registry+https://github.com/rust-lang/crates.io-index" 179 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 180 | dependencies = [ 181 | "winapi-i686-pc-windows-gnu", 182 | "winapi-x86_64-pc-windows-gnu", 183 | ] 184 | 185 | [[package]] 186 | name = "winapi-i686-pc-windows-gnu" 187 | version = "0.4.0" 188 | source = "registry+https://github.com/rust-lang/crates.io-index" 189 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 190 | 191 | [[package]] 192 | name = "winapi-x86_64-pc-windows-gnu" 193 | version = "0.4.0" 194 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 195 | checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 196 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "memlog", 5 | ] 6 | 7 | [package] 8 | name = "temper" 9 | version = "0.1.0" 10 | edition = "2021" 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [dependencies] 15 | uuid = {version="0.8.2",features=["v4"]} 16 | rand_chacha = "0.3.1" 17 | rand = "0.8.5" 18 | threadpool = "1.8.1" 19 | chrono = "0.4.19" -------------------------------------------------------------------------------- /memlog/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "memlog" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | rand_chacha = "0.3.1" 10 | rand = "0.8.5" -------------------------------------------------------------------------------- /memlog/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod log; 2 | -------------------------------------------------------------------------------- /memlog/src/log.rs: -------------------------------------------------------------------------------- 1 | use rand::{Rng, RngCore, SeedableRng}; 2 | use rand_chacha::ChaCha8Rng; 3 | use std::collections::HashMap; 4 | use std::sync::atomic::Ordering; 5 | 6 | #[derive(Default, Debug, Clone)] 7 | pub struct MemorySequence { 8 | pub sequence: HashMap, 9 | } 10 | 11 | impl MemorySequence { 12 | pub fn synchronize(&mut self, other: &MemorySequence) { 13 | for (k, v) in other.sequence.iter() { 14 | let res = if let Some(ev) = self.sequence.get(k) { 15 
| (*ev).max(*v) 16 | } else { 17 | *v 18 | }; 19 | 20 | self.sequence.insert(*k, res); 21 | } 22 | } 23 | } 24 | 25 | #[derive(Debug)] 26 | pub struct MemoryOperation { 27 | pub thread: usize, 28 | pub thread_sequence: usize, 29 | pub global_sequence: usize, 30 | pub level: Ordering, 31 | pub address: usize, 32 | pub value: usize, 33 | pub release_chain: bool, 34 | pub source_sequence: MemorySequence, 35 | pub source_fence_sequence: FenceSequence, 36 | } 37 | 38 | #[derive(Default, Clone, Debug)] 39 | pub struct FenceSequence { 40 | pub atomic: MemorySequence, 41 | pub fence: MemorySequence, 42 | } 43 | 44 | impl FenceSequence { 45 | pub fn synchronize(&mut self, other: &FenceSequence) { 46 | self.atomic.synchronize(&other.atomic); 47 | self.fence.synchronize(&other.fence); 48 | } 49 | 50 | pub fn mask_atomic(&self) -> FenceSequence { 51 | FenceSequence { 52 | atomic: Default::default(), 53 | ..self.clone() 54 | } 55 | } 56 | } 57 | 58 | #[derive(Default)] 59 | pub struct ThreadView { 60 | pub sequence: usize, 61 | pub min_seq_cst_sequence: usize, 62 | pub mem_sequence: MemorySequence, 63 | pub fence_sequence: FenceSequence, 64 | pub read_fence_sequence: FenceSequence, 65 | } 66 | 67 | pub struct MemorySystem { 68 | pub global_sequence: usize, 69 | pub seq_cst_sequence: MemorySequence, 70 | pub log: Vec, 71 | pub acc: Vec, 72 | pub threads: Vec, 73 | } 74 | 75 | impl MemorySystem { 76 | fn op Option>( 77 | &mut self, 78 | thread: usize, 79 | addr: usize, 80 | f: F, 81 | success: Ordering, 82 | failure: Ordering, 83 | ) -> Result { 84 | assert!( 85 | failure == Ordering::SeqCst 86 | || failure == Ordering::Acquire 87 | || failure == Ordering::Relaxed 88 | ); 89 | 90 | let view = &mut self.threads[thread]; 91 | 92 | let all_ops = std::iter::once(&self.acc[addr]).chain(self.log.iter()); 93 | 94 | let choice: &MemoryOperation = all_ops.filter(|mo| mo.address == addr).last().unwrap(); 95 | 96 | let (load_ordering, store_ordering) = if success == Ordering::AcqRel { 97 | 
(Ordering::Acquire, Ordering::Release) 98 | } else { 99 | (success, success) 100 | }; 101 | 102 | let v = choice.value; 103 | let res = f(v); 104 | 105 | if res.is_none() { 106 | Self::read_synchronize(view, choice, failure); 107 | 108 | return Err(v); 109 | } 110 | 111 | Self::read_synchronize(view, choice, load_ordering); 112 | 113 | Self::write_synchronize(view, &mut self.global_sequence, addr, store_ordering); 114 | 115 | let choice_seqs = ( 116 | choice.source_sequence.clone(), 117 | choice.source_fence_sequence.clone(), 118 | ); 119 | 120 | let this_seqs = (view.mem_sequence.clone(), view.fence_sequence.clone()); 121 | 122 | let combined_seqs = { 123 | let mut ms = view.mem_sequence.clone(); 124 | ms.synchronize(&choice.source_sequence); 125 | let mut fs = view.fence_sequence.clone(); 126 | fs.synchronize(&choice.source_fence_sequence); 127 | (ms, fs) 128 | }; 129 | 130 | // Are we continuing a release chain? 131 | let release_chain = choice.level != Ordering::Relaxed; 132 | 133 | let seqs = if choice.level == Ordering::Relaxed { 134 | this_seqs 135 | } else if success == Ordering::Release 136 | || success == Ordering::AcqRel 137 | || success == Ordering::SeqCst 138 | { 139 | combined_seqs 140 | } else { 141 | choice_seqs 142 | }; 143 | 144 | let fence_sequence = if store_ordering == Ordering::Relaxed { 145 | seqs.1.mask_atomic() 146 | } else { 147 | seqs.1.clone() 148 | }; 149 | 150 | self.log.push(MemoryOperation { 151 | thread, 152 | thread_sequence: view.sequence, 153 | global_sequence: self.global_sequence, 154 | source_fence_sequence: fence_sequence, 155 | level: store_ordering, 156 | release_chain, 157 | source_sequence: seqs.0, 158 | address: addr, 159 | value: res.unwrap(), 160 | }); 161 | 162 | Ok(v) 163 | } 164 | 165 | // Used to implement fetch_add, fetch_sub etc 166 | pub fn fetch_op usize>( 167 | &mut self, 168 | thread: usize, 169 | addr: usize, 170 | f: F, 171 | level: Ordering, 172 | ) -> usize { 173 | // Relaxed is passed in for failure 
ordering - this operation can't fail 174 | self.op(thread, addr, |v| Some(f(v)), level, Ordering::Relaxed) 175 | .unwrap() 176 | } 177 | 178 | pub fn compare_exchange( 179 | &mut self, 180 | thread: usize, 181 | addr: usize, 182 | current: usize, 183 | new: usize, 184 | success: Ordering, 185 | failure: Ordering, 186 | ) -> Result { 187 | self.op( 188 | thread, 189 | addr, 190 | |v| if v == current { Some(new) } else { None }, 191 | success, 192 | failure, 193 | ) 194 | } 195 | 196 | pub fn compare_exchange_weak( 197 | &mut self, 198 | thread: usize, 199 | addr: usize, 200 | current: usize, 201 | new: usize, 202 | success: Ordering, 203 | failure: Ordering, 204 | ) -> Result { 205 | let s = std::time::UNIX_EPOCH.elapsed().unwrap().as_nanos() as u64; 206 | let mut rng = ChaCha8Rng::seed_from_u64(s); 207 | 208 | if rng.gen_bool(0.5) { 209 | self.op( 210 | thread, 211 | addr, 212 | |v| if v == current { Some(new) } else { None }, 213 | success, 214 | failure, 215 | ) 216 | } else { 217 | Err(self.load(thread, addr, failure)) 218 | } 219 | } 220 | 221 | pub fn fetch_update Option>( 222 | &mut self, 223 | thread: usize, 224 | addr: usize, 225 | f: F, 226 | set_order: Ordering, 227 | fetch_order: Ordering, 228 | ) -> Result { 229 | loop { 230 | let current = self.load(thread, addr, fetch_order); 231 | match f(current) { 232 | None => return Err(current), 233 | Some(new) => { 234 | if self 235 | .compare_exchange_weak(thread, addr, current, new, set_order, fetch_order) 236 | .is_ok() 237 | { 238 | return Ok(current); 239 | } 240 | } 241 | } 242 | } 243 | } 244 | 245 | pub fn fence(&mut self, thread: usize, level: Ordering) { 246 | assert!( 247 | level == Ordering::Acquire 248 | || level == Ordering::Release 249 | || level == Ordering::SeqCst 250 | || level == Ordering::AcqRel 251 | ); 252 | 253 | self.global_sequence += 1; 254 | 255 | let view = &mut self.threads[thread]; 256 | 257 | if level == Ordering::SeqCst { 258 | 
view.mem_sequence.synchronize(&self.seq_cst_sequence); 259 | self.seq_cst_sequence.synchronize(&view.mem_sequence); 260 | view.min_seq_cst_sequence = self.global_sequence; 261 | } 262 | 263 | if level == Ordering::Release || level == Ordering::SeqCst || level == Ordering::AcqRel { 264 | view.fence_sequence.fence.synchronize(&view.mem_sequence); 265 | } 266 | 267 | if level == Ordering::Acquire || level == Ordering::SeqCst || level == Ordering::AcqRel { 268 | view.mem_sequence 269 | .synchronize(&view.read_fence_sequence.fence); 270 | view.mem_sequence 271 | .synchronize(&view.read_fence_sequence.atomic); 272 | } 273 | } 274 | 275 | pub fn store(&mut self, thread: usize, addr: usize, val: usize, level: Ordering) { 276 | assert!( 277 | level == Ordering::Relaxed || level == Ordering::Release || level == Ordering::SeqCst 278 | ); 279 | 280 | let view = &mut self.threads[thread]; 281 | 282 | Self::write_synchronize(view, &mut self.global_sequence, addr, level); 283 | 284 | let fence_sequence = if level == Ordering::Relaxed { 285 | view.fence_sequence.mask_atomic() 286 | } else { 287 | view.fence_sequence.clone() 288 | }; 289 | 290 | self.log.push(MemoryOperation { 291 | thread, 292 | thread_sequence: view.sequence, 293 | global_sequence: self.global_sequence, 294 | source_fence_sequence: fence_sequence, 295 | level, 296 | release_chain: false, 297 | source_sequence: view.mem_sequence.clone(), 298 | address: addr, 299 | value: val, 300 | }); 301 | } 302 | 303 | fn write_synchronize( 304 | view: &mut ThreadView, 305 | global_sequence: &mut usize, 306 | addr: usize, 307 | level: Ordering, 308 | ) { 309 | *global_sequence += 1; 310 | view.sequence += 1; 311 | view.mem_sequence.sequence.insert(addr, *global_sequence); 312 | 313 | if level == Ordering::SeqCst || level == Ordering::Release { 314 | view.fence_sequence.atomic.synchronize(&view.mem_sequence); 315 | } 316 | } 317 | 318 | fn read_synchronize(view: &mut ThreadView, choice: &MemoryOperation, level: Ordering) { 319 | 
if (choice.level == Ordering::Release 320 | || choice.level == Ordering::SeqCst 321 | || choice.release_chain) 322 | && (level == Ordering::SeqCst || level == Ordering::Acquire) 323 | { 324 | view.mem_sequence.synchronize(&choice.source_sequence); 325 | } 326 | 327 | if level == Ordering::Acquire || level == Ordering::SeqCst { 328 | view.mem_sequence 329 | .synchronize(&choice.source_fence_sequence.fence); 330 | } 331 | 332 | view.read_fence_sequence 333 | .synchronize(&choice.source_fence_sequence); 334 | 335 | view.mem_sequence 336 | .sequence 337 | .insert(choice.address, choice.global_sequence); 338 | } 339 | 340 | pub fn load(&mut self, thread: usize, addr: usize, level: Ordering) -> usize { 341 | assert!( 342 | level == Ordering::Relaxed || level == Ordering::Acquire || level == Ordering::SeqCst 343 | ); 344 | let s = std::time::UNIX_EPOCH.elapsed().unwrap().as_nanos() as u64; 345 | let mut rng = ChaCha8Rng::seed_from_u64(s); 346 | 347 | let view = &mut self.threads[thread]; 348 | 349 | let all_ops = std::iter::once(&self.acc[addr]).chain(self.log.iter()); 350 | 351 | let possible: Vec<&MemoryOperation> = all_ops.filter(|mo| mo.address == addr).collect(); 352 | 353 | let seq_cst_ops = possible.iter().filter(|mo| mo.level == Ordering::SeqCst); 354 | 355 | let minimum_op = if level == Ordering::SeqCst { 356 | // A seq_cst load will see the latest seq_cst store if it exists 357 | let latest_seq_cst_op = seq_cst_ops 358 | .last() 359 | .map(|mo| mo.global_sequence) 360 | .unwrap_or(0_usize); 361 | 362 | // A seq_cst load will see all stores (regardless of level) prior to a seq_cst memory fence 363 | let latest_fence_op = self 364 | .seq_cst_sequence 365 | .sequence 366 | .get(&addr) 367 | .unwrap_or(&0_usize); 368 | 369 | latest_seq_cst_op.max(*latest_fence_op) 370 | } else { 371 | // A seq_cst fence on this thread causes the latest prior seq_cst store to be the minimum 372 | seq_cst_ops 373 | .filter(|mo| mo.global_sequence < view.min_seq_cst_sequence) 374 | 
.last() 375 | .map(|v| v.global_sequence) 376 | .unwrap_or(0_usize) 377 | }; 378 | 379 | let first_ind = possible 380 | .iter() 381 | .position(|mo| { 382 | mo.global_sequence 383 | >= *view 384 | .mem_sequence 385 | .sequence 386 | .get(&addr) 387 | .unwrap_or(&0_usize) 388 | .max(&minimum_op) 389 | }) 390 | .unwrap(); 391 | 392 | let possible = &possible[first_ind..]; 393 | 394 | let choice = possible[(rng.next_u32() as usize) % possible.len()]; 395 | 396 | Self::read_synchronize(view, choice, level); 397 | 398 | choice.value 399 | } 400 | } 401 | 402 | impl Default for MemorySystem { 403 | fn default() -> Self { 404 | MemorySystem { 405 | threads: vec![], 406 | acc: vec![], 407 | global_sequence: 10, 408 | seq_cst_sequence: Default::default(), 409 | log: vec![], 410 | } 411 | } 412 | } 413 | 414 | impl MemorySystem { 415 | pub fn add_thread(&mut self) -> usize { 416 | let v = self.threads.len(); 417 | 418 | self.threads.push(ThreadView::default()); 419 | 420 | v 421 | } 422 | 423 | pub fn malloc(&mut self, size: usize) -> usize { 424 | let base = self.acc.len(); 425 | 426 | for i in 0..size { 427 | self.acc.push(MemoryOperation { 428 | thread: 0, 429 | thread_sequence: 0, 430 | global_sequence: 0, 431 | level: Ordering::Relaxed, 432 | release_chain: false, 433 | address: i, 434 | value: 0, 435 | source_sequence: Default::default(), 436 | source_fence_sequence: Default::default(), 437 | }) 438 | } 439 | 440 | base 441 | } 442 | } 443 | -------------------------------------------------------------------------------- /memlog/tests/atomics_and_locks.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest}; 2 | use crate::common::utils::{run_until, run_until_pred}; 3 | use std::collections::HashSet; 4 | use std::sync::atomic::Ordering; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | mod common; 8 | 9 | // Tests from "Rust Atomics and Locks" by Mara Bos 10 | // Code examples taken fom 
https://github.com/m-ou-se/rust-atomics-and-locks/tree/main/examples 11 | 12 | // The book touches on basically all of Rust concurrency. Memlog was specifically built to 13 | // model non coherent low level memory access, so much of the book isn't specifically relevant. 14 | 15 | /* 16 | Chapter 1 & 2 skipped except where marked. Not in scope for memlog: 17 | * Thread spawning, joining, scoping 18 | * Rc, Cell, Refcell, Mutex 19 | * Thread sleeping, parking and waking 20 | * Reified time 21 | * Condvars 22 | */ 23 | 24 | // Listing 2.4 25 | // Tests single thread lazy loads and stores, in isolation a single thread should emulate SeqCst 26 | 27 | #[test] 28 | fn test_2_4() { 29 | fn inner() -> Vec<[usize; 2]> { 30 | let mut lt = LogTest::default(); 31 | 32 | lt.add(|mut eg: Environment| { 33 | let mut get_value = || { 34 | let mut v = eg.a.load(Ordering::Relaxed); 35 | if v == 0 { 36 | v = 123; 37 | eg.a.store(v, Ordering::Relaxed); 38 | } 39 | v 40 | }; 41 | 42 | [get_value(), get_value()] 43 | }); 44 | 45 | lt.run() 46 | } 47 | 48 | assert!(run_until(inner, vec![vec![[123_usize, 123_usize]]])); 49 | } 50 | 51 | // Listing 2.4 52 | // Tests single thread fetch adds, in isolation a single thread should emulate SeqCst 53 | 54 | #[test] 55 | fn test_2_5() { 56 | fn inner() -> Vec<[usize; 2]> { 57 | let mut lt = LogTest::default(); 58 | 59 | lt.add(|mut eg: Environment| { 60 | let v0 = eg.a.fetch_op(|x| x + 23, Ordering::Relaxed); 61 | let v1 = eg.a.load(Ordering::Relaxed); 62 | 63 | [v0, v1] 64 | }); 65 | 66 | lt.run() 67 | } 68 | 69 | assert!(run_until(inner, vec![vec![[0_usize, 23_usize]]])); 70 | } 71 | 72 | // Listing 2.8 and 2.9 skipped - memlog testing harness only supports 64 bit ints 73 | 74 | // Listing 2.10 75 | // Emulating overflow in an ugly way, this test shows that duplicate IDs can be generated 76 | // due to a race condition between the add and sub + panic checks when generating IDs. 
77 | // Test harness is reused for 2.12's fixed version 78 | 79 | #[test] 80 | fn test_2_10_and_2_12() { 81 | fn inner(fixed_version: bool) -> Vec { 82 | let mut lt = LogTest::default(); 83 | 84 | const MAX_ID: usize = 2; 85 | const MAX_REPR: usize = 4; 86 | const PANIC_ID: usize = 100; 87 | 88 | // Broken allocate_id from 2.10 89 | let allocate_id = |eg: &mut Environment| { 90 | // The use of logic in fetch_op here is _not_ something fetch_add etc can provide. But as it's simulating 91 | // wrapping, it'll work for our purposes 92 | let id = 93 | eg.a.fetch_op(|v| if v == MAX_REPR { 0 } else { v + 1 }, Ordering::Relaxed); 94 | 95 | if id > MAX_ID { 96 | eg.a.fetch_op(|v| if v == 0 { MAX_REPR } else { v - 1 }, Ordering::Relaxed); 97 | PANIC_ID 98 | } else { 99 | id 100 | } 101 | }; 102 | 103 | // Safe allocate_id from 2.12 104 | let allocate_id_safe = |eg: &mut Environment| { 105 | let mut id = eg.a.load(Ordering::Relaxed); 106 | loop { 107 | if id > MAX_ID { 108 | return PANIC_ID; 109 | } else { 110 | match eg 111 | .a 112 | .exchange_weak(id, id + 1, Ordering::Relaxed, Ordering::Relaxed) 113 | { 114 | Ok(_) => return id, 115 | Err(v) => id = v, 116 | } 117 | } 118 | } 119 | }; 120 | 121 | for _ in 0..4 { 122 | lt.add(move |mut eg: Environment| { 123 | let mut seen_error = false; 124 | let mut seen = HashSet::::new(); 125 | for _ in 0..10 { 126 | let id = if fixed_version { 127 | allocate_id_safe(&mut eg) 128 | } else { 129 | allocate_id(&mut eg) 130 | }; 131 | 132 | if id == PANIC_ID { 133 | break; 134 | } 135 | 136 | if seen.contains(&id) { 137 | seen_error = true; 138 | }; 139 | 140 | seen.insert(id); 141 | } 142 | seen_error 143 | }); 144 | } 145 | 146 | lt.run() 147 | } 148 | 149 | let check = |hs: &HashSet>| hs.iter().any(|v| v.contains(&true)); 150 | 151 | assert!(run_until_pred(|| inner(false), check)); 152 | assert!(run_until( 153 | || inner(true), 154 | vec![vec![false, false, false, false]] 155 | )); 156 | } 157 | 158 | // Listing 2.10 159 | // Tests an 
increment based on compare_exchange, using multiple threads to ensure success 160 | #[test] 161 | fn test_2_11() { 162 | fn inner() -> Vec { 163 | let mut lt = LogTest::default(); 164 | 165 | let increment = |eg: &mut Environment| { 166 | let mut current = eg.a.load(Ordering::Relaxed); 167 | 168 | loop { 169 | let new = current + 1; 170 | 171 | // Since we're in a loop, I'm using exchange_weak 172 | match eg 173 | .a 174 | .exchange_weak(current, new, Ordering::Relaxed, Ordering::Relaxed) 175 | { 176 | Ok(_) => return, 177 | Err(v) => current = v, 178 | } 179 | } 180 | }; 181 | 182 | for _ in 0..2 { 183 | lt.add(move |mut eg: Environment| { 184 | increment(&mut eg); 185 | increment(&mut eg); 186 | 187 | // Relaxed is sufficient ordering here, as one of the two threads will successfully write 4, and 188 | // that same thread is guaranteed to reread that same value 189 | eg.a.load(Ordering::Relaxed) 190 | }); 191 | } 192 | 193 | lt.run() 194 | } 195 | 196 | assert!(run_until_pred(inner, |a| a.iter().any(|v| v.contains(&4)))); 197 | } 198 | 199 | // Listing 2.13 200 | // Uses compare_exchange to ensure consistent initialisation of a global constant 201 | // generate_random_key can be called multiple times, but all threads must perceive the same get_key result 202 | #[test] 203 | fn test_2_13() { 204 | fn inner() -> Vec { 205 | let mut lt = LogTest::default(); 206 | 207 | let random_seed: Arc> = Arc::new(Mutex::new(0)); 208 | 209 | let generate_random_key = |random_seed: &Arc>| { 210 | // Doesn't need an rng - just needs to return something new each time it's called 211 | let mut m = random_seed.lock().unwrap(); 212 | *m += 1; 213 | *m 214 | }; 215 | 216 | let get_key = move |eg: &mut Environment, random_seed| { 217 | let key = eg.a.load(Ordering::Relaxed); 218 | 219 | if key == 0 { 220 | let new_key = generate_random_key(&random_seed); 221 | 222 | match eg 223 | .a 224 | .exchange(key, new_key, Ordering::Relaxed, Ordering::Relaxed) 225 | { 226 | Ok(_) => new_key, 227 
| Err(k) => k, 228 | } 229 | } else { 230 | key 231 | } 232 | }; 233 | 234 | for _ in 0..4 { 235 | let random_seed = random_seed.clone(); 236 | lt.add(move |mut eg: Environment| get_key(&mut eg, random_seed.clone())); 237 | } 238 | 239 | lt.run() 240 | } 241 | 242 | let same_id = |arr: &Vec| arr.iter().all(|v| *v == arr[0]); 243 | assert!(run_until_pred(inner, |a| a.iter().all(same_id))); 244 | } 245 | 246 | // Listing 3.1 247 | // Demonstrates relaxed reads can be perceived out of store order 248 | #[test] 249 | fn test_3_1() { 250 | fn inner() -> Vec<[usize; 2]> { 251 | let mut lt = LogTest::default(); 252 | 253 | lt.add(|mut eg: Environment| { 254 | eg.a.store(1, Ordering::Relaxed); 255 | eg.b.store(1, Ordering::Relaxed); 256 | 257 | [0, 0] 258 | }); 259 | 260 | lt.add(|mut eg: Environment| { 261 | let a = eg.a.load(Ordering::Relaxed); 262 | let b = eg.b.load(Ordering::Relaxed); 263 | 264 | [a, b] 265 | }); 266 | 267 | lt.run() 268 | } 269 | 270 | // The writes can be perceived in any order, or not at all 271 | assert!(run_until( 272 | inner, 273 | vec![ 274 | vec![[0, 0], [0, 0]], 275 | vec![[0, 0], [0, 1]], 276 | vec![[0, 0], [1, 0]], 277 | vec![[0, 0], [1, 1]] 278 | ] 279 | )); 280 | } 281 | 282 | // Listing 3.2 skipped - memlog does not support thread joining 283 | 284 | // Listing 3.3 285 | // Demonstrates that even for Relaxed stores/loads, stores are perceived in order 286 | #[test] 287 | fn test_3_3() { 288 | fn inner() -> Vec { 289 | let mut lt = LogTest::default(); 290 | 291 | lt.add(|mut eg: Environment| { 292 | eg.a.store(1, Ordering::Relaxed); 293 | eg.a.store(2, Ordering::Relaxed); 294 | true 295 | }); 296 | 297 | lt.add(|mut eg: Environment| { 298 | let v0 = eg.a.load(Ordering::Relaxed); 299 | let v1 = eg.a.load(Ordering::Relaxed); 300 | let v2 = eg.a.load(Ordering::Relaxed); 301 | let v3 = eg.a.load(Ordering::Relaxed); 302 | 303 | v0 <= v1 && v1 <= v2 && v2 <= v3 304 | }); 305 | 306 | lt.run() 307 | } 308 | 309 | // Writes to the same variable 
should be perceived in order 310 | assert!(run_until(inner, vec![vec![true, true]])); 311 | } 312 | 313 | // Listing 3.4 314 | // Demonstrates that all threads perceive the same modification order of a single atomic variable 315 | #[test] 316 | fn test_3_4() { 317 | fn inner() -> Vec { 318 | let mut lt = LogTest::default(); 319 | 320 | lt.add(|mut eg: Environment| { 321 | eg.a.fetch_op(|v| v + 5, Ordering::Relaxed); 322 | 0 323 | }); 324 | lt.add(|mut eg: Environment| { 325 | eg.a.fetch_op(|v| v + 10, Ordering::Relaxed); 326 | 0 327 | }); 328 | 329 | for _ in 0..2 { 330 | lt.add(|mut eg: Environment| eg.a.load(Ordering::Relaxed)); 331 | } 332 | 333 | lt.run() 334 | } 335 | 336 | // Reader threads can perceive before, after, or in the middle of the writes 337 | // If both perceive a middle write, the modification order must be preserved, which means 338 | // a reading of [5 10] or [10 5] from them is not possible. 339 | 340 | let check_result = |r: &Vec| { 341 | let hs: HashSet<&usize> = HashSet::from_iter(r.iter()); 342 | !(hs.contains(&5) && hs.contains(&10)) 343 | }; 344 | 345 | assert!(run_until_pred(inner, |hs| hs.iter().all(check_result))); 346 | } 347 | 348 | // Listing 3.5 skipped - memlog does not support witchcraft 349 | 350 | // Listing 3.6 351 | // The reader can't terminate until the writer has written to b, at which point a is definitely available 352 | #[test] 353 | fn test_3_6() { 354 | fn inner() -> Vec { 355 | let mut lt = LogTest::default(); 356 | 357 | lt.add(|mut eg: Environment| { 358 | eg.a.store(123, Ordering::Relaxed); 359 | eg.b.store(1, Ordering::Release); 360 | 0 361 | }); 362 | 363 | lt.add(|mut eg: Environment| { 364 | while eg.b.load(Ordering::Acquire) == 0 {} 365 | eg.a.load(Ordering::Relaxed) 366 | }); 367 | 368 | lt.run() 369 | } 370 | 371 | // Second thread should always perceive the 123 write 372 | assert!(run_until(inner, vec![vec![0, 123]])); 373 | } 374 | 375 | // Listing 3.7 skipped - memlog does not yet support nonatomic 
access 376 | // Todo: Implement 3.7 when non atomics are implemented 377 | 378 | // Listing 3.8 379 | // Implements a lock using Acquire and Release 380 | // Adapted to run the lock operation in a loop so we've got a sensible victory condition 381 | #[test] 382 | fn test_3_8() { 383 | const THREAD_COUNT: usize = 10; 384 | fn inner() -> Vec { 385 | let mut lt = LogTest::default(); 386 | 387 | for _ in 0..THREAD_COUNT { 388 | lt.add(|mut eg: Environment| { 389 | loop { 390 | // This wasn't a weak exchange in the book - but we're putting it in a loop here 391 | if eg 392 | .a 393 | .exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed) 394 | .is_ok() 395 | { 396 | let old = eg.b.load(Ordering::Relaxed); 397 | eg.b.store(old + 1, Ordering::Relaxed); 398 | eg.a.store(0, Ordering::Release); 399 | break; 400 | } 401 | } 402 | eg.b.load(Ordering::Relaxed) 403 | }); 404 | } 405 | 406 | lt.run() 407 | } 408 | 409 | // Exactly one thread should see the final Mutex write 410 | assert!(run_until_pred(inner, |v| v 411 | .iter() 412 | .all(|a| a.contains(&THREAD_COUNT)))); 413 | } 414 | 415 | // Listing 3.9 skipped - memlog does not yet support AtomicPtr 416 | // Otherwise identical to 2.13 417 | 418 | // Listing 3.10 419 | // Tests sequential consistent flags protecting data access 420 | #[test] 421 | fn test_3_10() { 422 | fn inner() -> Vec { 423 | let mut lt = LogTest::default(); 424 | 425 | lt.add(|mut eg: Environment| { 426 | eg.b.store(1, Ordering::SeqCst); 427 | if eg.a.load(Ordering::SeqCst) == 0 { 428 | // Todo: Use nonatomic stores here for eg.c 429 | eg.c.fetch_op(|v| v + 1, Ordering::Relaxed); 430 | } 431 | 432 | eg.c.load(Ordering::Relaxed) 433 | }); 434 | 435 | lt.add(|mut eg: Environment| { 436 | eg.a.store(1, Ordering::SeqCst); 437 | if eg.b.load(Ordering::SeqCst) == 0 { 438 | eg.c.fetch_op(|v| v + 1, Ordering::Relaxed); 439 | } 440 | 441 | eg.c.load(Ordering::Relaxed) 442 | }); 443 | 444 | lt.run() 445 | } 446 | 447 | // At most one store is made to C, and 
// Listing 3.11
// Atomic-fence synchronisation: a Release store in a writer thread pairs with
// an Acquire fence in the reader, ordering the earlier relaxed payload stores.
#[test]
fn test_3_11() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        // Writer 1: payload a, then flag b (Release).
        lt.add(|mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed);
            eg.b.store(1, Ordering::Release);
            0
        });

        // Writer 2: payload c, then flag d (Release).
        lt.add(|mut eg: Environment| {
            eg.c.store(1, Ordering::Relaxed);
            eg.d.store(1, Ordering::Release);
            0
        });

        lt.add(|mut eg: Environment| {
            let flag_b = eg.b.load(Ordering::Relaxed);
            let flag_d = eg.d.load(Ordering::Relaxed);

            // The fence upgrades the relaxed flag loads above: any Release
            // store they observed now synchronizes-with this thread.
            if flag_b == 1 || flag_d == 1 {
                eg.fence(Ordering::Acquire);
            }

            let mut stale_reads = 0;

            if flag_b == 1 && eg.a.load(Ordering::Relaxed) == 0 {
                stale_reads += 1;
            }

            if flag_d == 1 && eg.c.load(Ordering::Relaxed) == 0 {
                stale_reads += 1;
            }

            stale_reads
        });

        lt.run()
    }

    // Once a flag is seen set, the matching payload must be visible; the
    // observer thread never counts a stale read.
    assert!(run_until(inner, vec![vec![0, 0, 0]]));
}

// Listing 8.1 skipped - memlog does not support futex
barrier: Arc, 13 | } 14 | 15 | impl ThreadState { 16 | pub fn wait(thread_state: &Arc>) { 17 | { 18 | let mut ts = thread_state.lock().unwrap(); 19 | ts.waiting = true; 20 | let barrier = ts.barrier.clone(); 21 | drop(ts); 22 | barrier.wait(); 23 | } 24 | 25 | while thread_state.lock().unwrap().waiting {} 26 | } 27 | } 28 | 29 | pub struct Value { 30 | pub thread: usize, 31 | pub addr: usize, 32 | pub thread_state: Arc>, 33 | pub memory: Arc>, 34 | } 35 | 36 | impl Value { 37 | pub fn wait(&mut self) { 38 | ThreadState::wait(&self.thread_state); 39 | } 40 | 41 | #[allow(unused)] 42 | pub fn fetch_update Option>( 43 | &mut self, 44 | f: F, 45 | set_order: Ordering, 46 | fetch_order: Ordering, 47 | ) -> Result { 48 | self.wait(); 49 | let mut mem = self.memory.lock().unwrap(); 50 | mem.fetch_update(self.thread, self.addr, f, set_order, fetch_order) 51 | } 52 | 53 | #[allow(unused)] 54 | pub fn exchange_weak( 55 | &mut self, 56 | old: usize, 57 | new: usize, 58 | success: Ordering, 59 | failure: Ordering, 60 | ) -> Result { 61 | self.wait(); 62 | let mut mem = self.memory.lock().unwrap(); 63 | 64 | mem.compare_exchange_weak(self.thread, self.addr, old, new, success, failure) 65 | } 66 | 67 | #[allow(unused)] 68 | pub fn exchange( 69 | &mut self, 70 | old: usize, 71 | new: usize, 72 | success: Ordering, 73 | failure: Ordering, 74 | ) -> Result { 75 | self.wait(); 76 | let mut mem = self.memory.lock().unwrap(); 77 | 78 | mem.compare_exchange(self.thread, self.addr, old, new, success, failure) 79 | } 80 | 81 | // Used for fetch_add, fetch_sub, etc 82 | #[allow(unused)] 83 | pub fn fetch_op usize>(&mut self, f: F, ordering: Ordering) -> usize { 84 | self.wait(); 85 | let mut mem = self.memory.lock().unwrap(); 86 | mem.fetch_op(self.thread, self.addr, f, ordering) 87 | } 88 | 89 | pub fn load(&mut self, ordering: Ordering) -> usize { 90 | self.wait(); 91 | let mut mem = self.memory.lock().unwrap(); 92 | mem.load(self.thread, self.addr, ordering) 93 | } 94 | 95 | pub fn 
store(&mut self, val: usize, ordering: Ordering) { 96 | self.wait(); 97 | let mut mem = self.memory.lock().unwrap(); 98 | mem.store(self.thread, self.addr, val, ordering); 99 | } 100 | } 101 | 102 | #[allow(unused)] 103 | pub struct Environment { 104 | pub thread_state: Arc>, 105 | pub a: Value, 106 | pub b: Value, 107 | pub c: Value, 108 | pub d: Value, 109 | pub e: Value, 110 | } 111 | 112 | impl Environment { 113 | #[allow(unused)] 114 | pub fn fence(&mut self, ordering: Ordering) { 115 | ThreadState::wait(&self.thread_state); 116 | let mut mem = self.a.memory.lock().unwrap(); 117 | mem.fence(self.a.thread, ordering) 118 | } 119 | } 120 | 121 | pub struct Thread { 122 | pub thread_state: Arc>, 123 | pub handle: JoinHandle, 124 | } 125 | 126 | #[derive(Default)] 127 | pub struct LogTest { 128 | pub fns: Vec T + Send>>, 129 | } 130 | 131 | impl LogTest { 132 | pub fn add T + Send + 'static + Sized>(&mut self, f: F) { 133 | self.fns.push(Box::new(f)) 134 | } 135 | 136 | pub fn spawn_thread T + Send + 'static + Sized>( 137 | ms: Arc>, 138 | i: usize, 139 | mut f: F, 140 | ) -> Thread { 141 | ms.lock().unwrap().add_thread(); 142 | 143 | let ts = Arc::new(Mutex::new(ThreadState { 144 | finished: false, 145 | waiting: false, 146 | barrier: Arc::new(Barrier::new(2)), 147 | })); 148 | 149 | let mut addr = 0; 150 | 151 | let mut build_value = || { 152 | let res = Value { 153 | thread: i, 154 | addr, 155 | thread_state: ts.clone(), 156 | memory: ms.clone(), 157 | }; 158 | 159 | addr += 1; 160 | res 161 | }; 162 | 163 | let env = Environment { 164 | thread_state: ts.clone(), 165 | a: build_value(), 166 | b: build_value(), 167 | c: build_value(), 168 | d: build_value(), 169 | e: build_value(), 170 | }; 171 | 172 | Thread { 173 | thread_state: ts.clone(), 174 | handle: thread::spawn(move || { 175 | let res: T = f(env); 176 | ts.lock().unwrap().finished = true; 177 | res 178 | }), 179 | } 180 | } 181 | 182 | pub fn drive(mut threads: Vec>) -> Vec { 183 | let s = 
std::time::UNIX_EPOCH.elapsed().unwrap().as_nanos() as u64; 184 | let mut rng = ChaCha8Rng::seed_from_u64(s); 185 | 186 | loop { 187 | let mut all_finished = true; 188 | let mut all_waiting = true; 189 | for tsm in threads.iter() { 190 | let ts = tsm.thread_state.lock().unwrap(); 191 | if !ts.finished { 192 | all_finished = false; 193 | 194 | if !ts.waiting { 195 | all_waiting = false; 196 | } 197 | } 198 | } 199 | 200 | if all_finished { 201 | break; 202 | } 203 | 204 | if all_waiting { 205 | let ind = (rng.next_u32() as usize) % threads.len(); 206 | let r = &mut threads[ind]; 207 | let mut l = r.thread_state.lock().unwrap(); 208 | if l.waiting { 209 | l.waiting = false; 210 | l.barrier.wait(); 211 | } 212 | } 213 | } 214 | 215 | let mut res = vec![]; 216 | 217 | for h in threads.drain(..) { 218 | res.push(h.handle.join().unwrap()) 219 | } 220 | 221 | res 222 | } 223 | 224 | // Runs all threads randomly interleaved 225 | #[allow(unused)] 226 | pub fn run(&mut self) -> Vec { 227 | let ms = Arc::new(Mutex::new(MemorySystem::default())); 228 | ms.lock().unwrap().malloc(5); 229 | 230 | let mut threads = vec![]; 231 | 232 | for (i, f) in self.fns.drain(..).enumerate() { 233 | threads.push(Self::spawn_thread(ms.clone(), i, f)); 234 | } 235 | 236 | Self::drive(threads) 237 | } 238 | 239 | // Runs Thread A fully, then Thread B, etc 240 | #[allow(unused)] 241 | pub fn run_sequential(&mut self) -> Vec { 242 | let ms = Arc::new(Mutex::new(MemorySystem::default())); 243 | ms.lock().unwrap().malloc(5); 244 | 245 | let mut results = vec![]; 246 | 247 | for (i, f) in self.fns.drain(..).enumerate() { 248 | results.push(Self::drive(vec![Self::spawn_thread(ms.clone(), i, f)])[0]); 249 | } 250 | 251 | results 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /memlog/tests/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod harness; 2 | pub mod utils; 3 | 
#[allow(unused)]
pub const ALL_ORDERINGS: [Ordering; 5] = [
    Ordering::Relaxed,
    Ordering::SeqCst,
    Ordering::Acquire,
    Ordering::Release,
    Ordering::AcqRel,
];

// Shared thresholds for the result-sampling runners below.
// Previously both runners repeated the magic numbers 10_000 and 200 inline.
/// Maximum number of samples taken before declaring failure.
const MAX_ITERATIONS: usize = 10_000;
/// Minimum number of samples required before success is accepted, so that
/// rare interleavings get a chance to surface.
const MIN_CONFIRM_ITERATIONS: usize = 200;

/// True when `arr`, viewed as a set, equals `hs` exactly.
#[allow(unused)]
fn check_set<T: Eq + Hash + Clone>(hs: &HashSet<T>, arr: &[T]) -> bool {
    let expected: HashSet<T> = arr.iter().cloned().collect();
    expected == *hs
}

/// Repeatedly samples `f` until the set of observed results equals `expected`.
/// Fails fast if a result outside `expected` ever appears (the set then has
/// more distinct members than `expected` allows), or when the iteration cap
/// is hit without converging.
#[allow(unused)]
pub fn run_until<T: Clone + Eq + Hash + Debug, F: FnMut() -> T>(
    mut f: F,
    expected: Vec<T>,
) -> bool {
    let mut seen = HashSet::new();

    for iteration in 0..MAX_ITERATIONS {
        seen.insert(f());

        if check_set(&seen, &expected) && iteration > MIN_CONFIRM_ITERATIONS {
            return true;
        }

        // An outcome not in `expected` was observed: illegal execution.
        if seen.len() > expected.len() {
            println!("Failed {:?} {:?}", seen, expected);
            return false;
        }
    }

    println!("Failed {:?} {:?}", seen, expected);
    false
}

/// Like `run_until`, but success is decided by an arbitrary predicate over
/// the set of observed results rather than exact set equality.
#[allow(unused)]
pub fn run_until_pred<
    T: Clone + Eq + Hash + Debug,
    F: FnMut() -> T,
    FP: Fn(&HashSet<T>) -> bool,
>(
    mut f: F,
    verify: FP,
) -> bool {
    let mut seen = HashSet::new();

    for iteration in 0..MAX_ITERATIONS {
        seen.insert(f());

        if verify(&seen) && iteration > MIN_CONFIRM_ITERATIONS {
            return true;
        }
    }

    println!("Failed {:?}", seen);
    false
}

/// Cartesian product: every way of picking one element from each inner vec.
/// `permutations(vec![])` yields `vec![vec![]]` (the empty selection).
pub fn permutations<T: Copy>(possible: Vec<Vec<T>>) -> Vec<Vec<T>> {
    let mut out = vec![vec![]];

    for choices in possible {
        let mut nout = vec![];

        for v in choices {
            for prefix in out.iter() {
                let mut new_val = prefix.clone();
                new_val.push(v);
                nout.push(new_val);
            }
        }

        out = nout;
    }

    out
}

/// Returns `v` sorted; convenience for order-insensitive comparisons.
fn sorted<T: Ord>(mut v: Vec<T>) -> Vec<T> {
    v.sort();
    v
}
// Sanity checks for `permutations`, compared order-insensitively via `sorted`.
#[test]
fn test_permutations() {
    // A single pool degenerates to one-element selections.
    assert_eq!(
        sorted(permutations(vec![vec![0, 1, 2, 3]])),
        sorted(vec![vec![0], vec![1], vec![2], vec![3]])
    );

    // Two pools produce the full cross product.
    assert_eq!(
        sorted(permutations(vec![vec![0, 1], vec![1, 2]])),
        sorted(vec![vec![0, 1], vec![0, 2], vec![1, 1], vec![1, 2]])
    );
}
#[test]
fn release_acquire_three_threads() {
    fn inner(
        first_order: Ordering,
        second_order: Ordering,
        second_order_failure: Ordering,
    ) -> Vec<usize> {
        let mut lt = LogTest::default();

        // Thread 1: payload store to a, then flag store to b.
        lt.add(move |mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed);
            eg.b.store(1, first_order);
            0
        });

        // Thread 2: any RMW on the flag continues a release chain.
        lt.add(move |mut eg: Environment| {
            eg.c.store(10, Ordering::Relaxed);
            while eg
                .b
                .exchange_weak(1, 2, second_order, second_order_failure)
                .is_err()
            {}
            // May extend the chain without itself observing the store to a.
            eg.a.load(Ordering::Relaxed)
        });

        // Thread 3: acquire-waits for the RMW result, then samples a and c.
        lt.add(move |mut eg: Environment| {
            while eg.b.load(Ordering::Acquire) < 2 {}
            eg.a.load(Ordering::Relaxed) + eg.c.load(Ordering::Relaxed)
        });

        lt.run()
    }

    // Runs the scenario until every outcome in the cross product of `vals`
    // has been observed (and nothing outside it).
    fn check(
        first_order: Ordering,
        second_order: Ordering,
        second_order_failure: Ordering,
        vals: Vec<Vec<usize>>,
    ) {
        assert!(run_until(
            || inner(first_order, second_order, second_order_failure),
            permutations(vals)
        ));
    }

    // No release anywhere on the first thread: all bets are off regardless
    // of the exchange ordering.
    check(
        Ordering::Relaxed,
        Ordering::Relaxed,
        Ordering::Relaxed,
        vec![vec![0], vec![0, 1], vec![0, 1, 10, 11]],
    );

    // Release on the exchange: thread 3 sees thread 2's store to c, but not
    // necessarily thread 1's store to a.
    check(
        Ordering::Relaxed,
        Ordering::Release,
        Ordering::Relaxed,
        vec![vec![0], vec![0, 1], vec![10, 11]],
    );

    // Release only on thread 1: thread 2 may miss the store to a, yet via
    // the release chain thread 3 always sees it — while possibly missing
    // thread 2's store. Would any real platform ever produce this?
    check(
        Ordering::Release,
        Ordering::Relaxed,
        Ordering::Relaxed,
        vec![vec![0], vec![0, 1], vec![1, 11]],
    );

    // Release on both: thread 3 sees every store; thread 2 still may not
    // see thread 1's.
    check(
        Ordering::Release,
        Ordering::Release,
        Ordering::Relaxed,
        vec![vec![0], vec![0, 1], vec![11]],
    );

    // Acquire on the exchange: thread 2 sees thread 1's store, as does
    // thread 3 through the chain; thread 3 may miss thread 2's store.
    check(
        Ordering::Release,
        Ordering::Acquire,
        Ordering::Acquire,
        vec![vec![0], vec![1], vec![1, 11]],
    );

    // AcqRel exchange: all threads see all stores.
    check(
        Ordering::Release,
        Ordering::AcqRel,
        Ordering::Acquire,
        vec![vec![0], vec![1], vec![11]],
    );

    // SeqCst exchange: all threads see all stores.
    check(
        Ordering::Release,
        Ordering::SeqCst,
        Ordering::SeqCst,
        vec![vec![0], vec![1], vec![11]],
    );
}
#[test]
fn test_seqlock() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        // Writer: claim an even->odd version via CAS, bump both payload
        // cells, then publish the next even version with Release.
        let write_fn = |mut eg: Environment| loop {
            let version = eg.a.load(Ordering::Acquire);
            if version & 1 == 1 {
                // Odd version: another writer is mid-flight; retry.
                continue;
            }

            if eg
                .a
                .exchange_weak(version, version + 1, Ordering::Relaxed, Ordering::Relaxed)
                .is_err()
            {
                // Lost the race for the write lock; retry.
                continue;
            }

            // Orders the payload writes below after the version bump above.
            eg.fence(Ordering::Release);

            let old_b = eg.b.load(Ordering::Relaxed);
            let old_c = eg.c.load(Ordering::Relaxed);

            eg.b.store(old_b + 1, Ordering::Relaxed);
            eg.c.store(old_c + 1, Ordering::Relaxed);

            eg.a.store(version + 2, Ordering::Release);
            return 0;
        };

        lt.add(write_fn);
        lt.add(write_fn);

        // Reader: optimistic snapshot — valid only if the version was even
        // and unchanged across both payload reads.
        lt.add(|mut eg: Environment| loop {
            let version = eg.a.load(Ordering::Acquire);
            if version & 1 == 1 {
                continue;
            }

            let b = eg.b.load(Ordering::Relaxed);
            let c = eg.c.load(Ordering::Relaxed);

            eg.fence(Ordering::Acquire);

            let current_version = eg.a.load(Ordering::Relaxed);

            if current_version == version {
                return b + c;
            }
        });

        lt.run()
    }

    // The reader observes 0, 1, or 2 complete atomic writes (sums 0, 2, 4).
    // A torn snapshot would surface as an odd sum.
    assert!(run_until(
        inner,
        vec![vec![0, 0, 0], vec![0, 0, 2], vec![0, 0, 4]]
    ));
}
.exchange_weak(0, 1, Ordering::Relaxed, Ordering::Relaxed) 213 | .is_err() 214 | {} 215 | eg.fence(Ordering::Release); 216 | } 217 | AcquireChainStrategy::AcqRelExchange => { 218 | // AcqRel only guarantees Acquire on load, Release on store. 219 | // Relaxed stores below are _not_ guaranteed to not be reordered before this store 220 | // See https://en.cppreference.com/w/cpp/atomic/memory_order - memory_order_acq_rel 221 | while eg 222 | .a 223 | .exchange_weak(0, 1, Ordering::AcqRel, Ordering::Acquire) 224 | .is_err() 225 | {} 226 | } 227 | AcquireChainStrategy::StoreRelease => { 228 | // Isn't even an edge case. This should obviously not work. 229 | // Exists as a regression test against a memlog bug where relaxed stores 230 | // and loads combined with an Acquire fence were erroneously creating a 231 | // release chain and providing additional guarantees. 232 | eg.a.store(1, Ordering::Release); 233 | } 234 | } 235 | 236 | eg.b.store(1, Ordering::Relaxed); 237 | eg.b.store(2, Ordering::Relaxed); 238 | 239 | eg.a.store(2, Ordering::Release); 240 | 241 | 0 242 | }); 243 | 244 | lt.add(|mut eg: Environment| { 245 | let a = eg.a.load(Ordering::Acquire); 246 | let b = eg.b.load(Ordering::Relaxed); 247 | 248 | eg.fence(Ordering::Acquire); 249 | 250 | let a_2 = eg.a.load(Ordering::Relaxed); 251 | 252 | if a_2 == a && a != 1 { 253 | b 254 | } else { 255 | 0 256 | } 257 | }); 258 | 259 | lt.run() 260 | } 261 | 262 | // AcqRel does not provide the required guarantees according to the C++ memory model 263 | // If an implementation generates an atomic CAS operation, it will incidentally work 264 | // However, something like Load-Link/Store-Conditional could generate different barriers 265 | // for load and store, meaning that a write may be reordered between them. 266 | // The standard only guarantees Acquire on load, Release on store. 
267 | assert!(run_until( 268 | || tiny_test(AcquireChainStrategy::AcqRelExchange), 269 | vec![vec![0, 0], vec![0, 1], vec![0, 2]] 270 | )); 271 | 272 | // Regression test against previous release chain/fence bug 273 | assert!(run_until( 274 | || tiny_test(AcquireChainStrategy::StoreRelease), 275 | vec![vec![0, 0], vec![0, 1], vec![0, 2]] 276 | )); 277 | 278 | // Should work correctly - no partial writes observed. 279 | assert!(run_until( 280 | || tiny_test(AcquireChainStrategy::WeakExchangeFence), 281 | vec![vec![0, 0], vec![0, 2]] 282 | )); 283 | } 284 | -------------------------------------------------------------------------------- /memlog/tests/preshing.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest}; 2 | use crate::common::utils::run_until; 3 | use std::sync::atomic::Ordering; 4 | 5 | mod common; 6 | 7 | /* Tests from Preshing on Programming */ 8 | 9 | // https://preshing.com/20130823/the-synchronizes-with-relation/ 10 | #[test] 11 | fn test_basic_acq_rel() { 12 | fn intel_failure_inner() -> Vec { 13 | let mut lt = LogTest::default(); 14 | 15 | lt.add(|mut eg: Environment| { 16 | eg.a.store(1, Ordering::Relaxed); 17 | eg.b.store(1, Ordering::Release); 18 | 0 19 | }); 20 | 21 | lt.add(|mut eg: Environment| { 22 | while eg.b.load(Ordering::Acquire) == 0 {} 23 | eg.a.load(Ordering::Relaxed) 24 | }); 25 | 26 | lt.run() 27 | } 28 | 29 | assert!(run_until(intel_failure_inner, vec![vec![0, 1]])); 30 | } 31 | 32 | // https://preshing.com/20130922/acquire-and-release-fences/ 33 | #[test] 34 | fn test_basic_acq_rel_rel_fence() { 35 | fn intel_failure_inner() -> Vec { 36 | let mut lt = LogTest::default(); 37 | 38 | lt.add(|mut eg: Environment| { 39 | eg.a.store(1, Ordering::Relaxed); 40 | eg.fence(Ordering::Release); 41 | eg.b.store(1, Ordering::Relaxed); 42 | 0 43 | }); 44 | 45 | lt.add(|mut eg: Environment| { 46 | while eg.b.load(Ordering::Acquire) == 0 {} 47 | 
// An Acquire fence after a relaxed flag load gives the same guarantee as an
// acquire load of the flag itself.
#[test]
fn test_basic_acq_rel_acq_fence() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed);
            eg.b.store(1, Ordering::Release);
            0
        });

        lt.add(|mut eg: Environment| {
            while eg.b.load(Ordering::Relaxed) == 0 {}
            eg.fence(Ordering::Acquire);
            eg.a.load(Ordering::Relaxed)
        });

        lt.run()
    }

    assert!(run_until(inner, vec![vec![0, 1]]));
}

// https://preshing.com/20131125/acquire-and-release-fences-dont-work-the-way-youd-expect/
// A Release *store* only orders earlier writes before itself; the later
// relaxed store to b may become visible first, so a stale read of a is legal.
#[test]
fn test_release_reorder() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            eg.a.store(1, Ordering::Release);
            eg.b.store(1, Ordering::Relaxed);
            0
        });

        lt.add(|mut eg: Environment| {
            while eg.b.load(Ordering::Acquire) == 0 {}
            eg.a.load(Ordering::Relaxed)
        });

        lt.run()
    }

    assert!(run_until(inner, vec![vec![0, 0], vec![0, 1]]));
}
/*
For an atomic operation B that reads the value of an atomic object M,
if there is a memory_order_seq_cst fence X sequenced before B,
then B observes either the last memory_order_seq_cst modification of M
preceding X in the total order S, or a later modification of M in its
modification order.
*/

#[test]
fn test_fence_read() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(move |mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed); // Non-SeqCst modification, hidden behind A
            eg.a.store(2, Ordering::SeqCst); // Operation A
            eg.a.store(3, Ordering::Relaxed); // Later than A in modification order

            0
        });

        lt.add(move |mut eg: Environment| {
            eg.fence(Ordering::SeqCst); // Fence X
            eg.a.load(Ordering::Relaxed) // Operation B
        });

        lt.run_sequential()
    }

    // B must return 2 or 3; the pre-A value 1 can never be observed.
    assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]])));
}

/*
For atomic operations A and B on an atomic object M,
where A modifies M and B takes its value,
if there is a memory_order_seq_cst fence X such that A is sequenced before X
and B follows X in S,
then B observes either the effects of A or a later modification of M in its
modification order.
*/

#[test]
fn test_fence_write() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(move |mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed); // Non-SeqCst modification, hidden behind A
            eg.a.store(2, Ordering::Relaxed); // Operation A
            eg.fence(Ordering::SeqCst); // Fence X
            eg.a.store(3, Ordering::Relaxed);

            0
        });

        lt.add(move |mut eg: Environment| {
            eg.a.load(Ordering::SeqCst) // Operation B
        });

        lt.run_sequential()
    }

    // B sees the effect of A (2) or something later (3); never 1.
    assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]])));
}
/*
For atomic operations A and B on an atomic object M,
where A modifies M and B takes its value,
if there are memory_order_seq_cst fences X and Y such that A is sequenced
before X, Y is sequenced before B, and X precedes Y in S,
then B observes either the effects of A or a later modification of M in its
modification order.
*/

#[test]
fn test_fence_fence() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(move |mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed); // Non-SeqCst modification, hidden behind A
            eg.a.store(2, Ordering::Relaxed); // Operation A
            eg.fence(Ordering::SeqCst); // Fence X
            eg.a.store(3, Ordering::Relaxed);

            0
        });

        lt.add(move |mut eg: Environment| {
            eg.fence(Ordering::SeqCst); // Fence Y
            eg.a.load(Ordering::Relaxed) // Operation B
        });

        lt.run_sequential()
    }

    // B sees the effect of A (2) or something later (3); never 1.
    assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]])));
}

// Store buffer litmus test. SeqCst rules out 0, 0 as a result
#[test]
fn test_intel_failure() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            eg.a.store(1, Ordering::SeqCst);
            eg.b.load(Ordering::SeqCst)
        });

        lt.add(|mut eg: Environment| {
            eg.b.store(1, Ordering::SeqCst);
            eg.a.load(Ordering::SeqCst)
        });

        lt.run()
    }

    // The single total order S forces at least one thread to see the other's
    // store, so [0, 0] is excluded.
    assert!(run_until(
        inner,
        vec![vec![0, 1], vec![1, 0], vec![1, 1]]
    ));
}
// https://stackoverflow.com/questions/52606524/what-exact-rules-in-the-c-memory-model-prevent-reordering-before-acquire-opera
#[test]
fn test_exchange() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            let _ = eg.a.exchange_weak(0, 1, Ordering::AcqRel, Ordering::Acquire);
            eg.b.load(Ordering::Relaxed)
        });

        lt.add(|mut eg: Environment| {
            let _ = eg.b.exchange_weak(0, 1, Ordering::AcqRel, Ordering::Acquire);
            eg.a.load(Ordering::Relaxed)
        });

        lt.run()
    }

    // Neither exchange reads a value the other thread released, so no
    // synchronizes-with edge ever forms: every outcome is legal.
    assert!(run_until(
        inner,
        vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]]
    ));
}
#[test]
fn test_exchange_fence() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            eg.a.store(1, Ordering::Relaxed);
            eg.fence(Ordering::AcqRel);
            eg.b.store(1, Ordering::Relaxed);
            0
        });

        lt.add(|mut eg: Environment| {
            // Observing b == 1 means the store sequenced after the writer's
            // fence is visible...
            while eg.b.load(Ordering::Relaxed) == 0 {}
            // ...so this fence synchronizes with that fence, and the earlier
            // write to a must now be visible too.
            eg.fence(Ordering::AcqRel);
            eg.a.load(Ordering::Relaxed)
        });

        lt.run()
    }

    assert!(run_until(inner, vec![vec![0, 1]]));
}

// https://stackoverflow.com/questions/71509935/how-does-mixing-relaxed-and-acquire-release-accesses-on-the-same-atomic-variable
#[test]
fn test_broken_release_chain() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(|mut eg: Environment| {
            eg.a.store(42, Ordering::Relaxed);
            eg.b.store(1, Ordering::Release);
            0
        });

        // A plain relaxed store (not an RMW) interrupts the release sequence.
        lt.add(|mut eg: Environment| {
            if eg.b.load(Ordering::Relaxed) == 1 {
                eg.b.store(2, Ordering::Relaxed);
            }
            0
        });

        lt.add(|mut eg: Environment| {
            let v = eg.b.load(Ordering::Acquire);
            let ov = eg.a.load(Ordering::Relaxed);

            v + ov
        });

        lt.run()
    }

    assert!(run_until(
        inner,
        vec![
            vec![0, 0, 0],
            vec![0, 0, 2], // Broken release chain: flag seen, payload missed
            vec![0, 0, 42],
            vec![0, 0, 43],
            vec![0, 0, 44]
        ]
    ));
}
Ordering::Release); 146 | eg.b.load(Ordering::SeqCst) 147 | }); 148 | 149 | lt.add(|mut eg: Environment| { 150 | eg.b.store(1, Ordering::Release); 151 | eg.a.load(Ordering::SeqCst) 152 | }); 153 | 154 | lt.run() 155 | } 156 | 157 | assert!(run_until( 158 | intel_failure_inner, 159 | vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]] 160 | )); 161 | } 162 | -------------------------------------------------------------------------------- /memlog/tests/standard_atomic.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest}; 2 | use crate::common::utils::{permutations, run_until}; 3 | use std::sync::atomic::Ordering; 4 | 5 | mod common; 6 | 7 | // https://en.cppreference.com/w/cpp/atomic/memory_order 8 | // Atomic synchronization 9 | 10 | #[test] 11 | fn explanation_relaxed_ordering_impossible() { 12 | fn inner() -> Vec { 13 | let mut lt = LogTest::default(); 14 | 15 | lt.add(move |mut eg: Environment| { 16 | let v = eg.b.load(Ordering::Relaxed); 17 | eg.a.store(v, Ordering::Relaxed); 18 | v 19 | }); 20 | 21 | lt.add(move |mut eg: Environment| { 22 | let v = eg.a.load(Ordering::Relaxed); 23 | eg.b.store(42, Ordering::Relaxed); 24 | v 25 | }); 26 | 27 | lt.run() 28 | } 29 | 30 | // a = b = 42 is valid here. 
// "Out of thin air" values: 42 can only appear through a circular dependency
// between the two threads. memlog cannot produce it; listed for completeness.
#[test]
fn explanation_world_not_insane() {
    fn inner() -> Vec<usize> {
        let mut lt = LogTest::default();

        lt.add(move |mut eg: Environment| {
            let v = eg.a.load(Ordering::Relaxed);
            if v == 42 {
                eg.b.store(v, Ordering::Relaxed);
            }
            v
        });

        lt.add(move |mut eg: Environment| {
            let v = eg.b.load(Ordering::Relaxed);
            if v == 42 {
                eg.a.store(v, Ordering::Relaxed);
            }
            v
        });

        lt.run()
    }

    assert!(run_until(inner, vec![vec![0, 0]]));
}

// Relaxed fetch_op increments: each RMW is atomic, so no update is ever lost
// even though there is no ordering between the threads.
#[test]
fn explanation_relaxed_increment() {
    let mut lt = LogTest::default();

    // Two identical incrementing threads.
    for _ in 0..2 {
        lt.add(move |mut eg: Environment| {
            for _ in 0..50 {
                eg.a.fetch_op(|v| v + 1, Ordering::Relaxed);
            }
            eg.a.load(Ordering::Relaxed)
        });
    }

    let res = lt.run();

    // Whichever thread finishes last must observe the full total.
    assert_eq!(res[0].max(res[1]), 100);
}
vec![vec![0, 1]])); 111 | } 112 | 113 | #[test] 114 | fn release_acquire_three_threads() { 115 | fn inner() -> Vec { 116 | let mut lt = LogTest::default(); 117 | 118 | lt.add(move |mut eg: Environment| { 119 | eg.a.store(1, Ordering::Relaxed); 120 | eg.b.store(1, Ordering::Release); 121 | 0 122 | }); 123 | 124 | lt.add(move |mut eg: Environment| { 125 | // Any RMW continues the release chain 126 | while eg 127 | .b 128 | .exchange_weak(1, 2, Ordering::Relaxed, Ordering::Relaxed) 129 | .is_err() 130 | {} 131 | // Continue the release chain without necessarily seeing the store to a 132 | eg.a.load(Ordering::Relaxed) 133 | }); 134 | 135 | lt.add(move |mut eg: Environment| { 136 | while eg.b.load(Ordering::Acquire) < 2 {} 137 | eg.a.load(Ordering::Relaxed) 138 | }); 139 | 140 | lt.run() 141 | } 142 | 143 | assert!(run_until(inner, vec![vec![0, 0, 1], vec![0, 1, 1]])); 144 | } 145 | 146 | // Sequential Consistency 147 | 148 | /* 149 | Each memory_order_seq_cst operation B that loads from atomic variable M, observes one of the following: 150 | * the result of the last operation A that modified M, which appears before B in the single total order 151 | * OR, if there was such an A, B may observe the result of some modification on M that is not memory_order_seq_cst and does not happen-before A 152 | * OR, if there wasn't such an A, B may observe the result of some unrelated modification of M that is not memory_order_seq_cst 153 | */ 154 | 155 | #[test] 156 | fn seq_cst_basic() { 157 | fn inner() -> Vec { 158 | let mut lt = LogTest::default(); 159 | 160 | lt.add(move |mut eg: Environment| { 161 | eg.a.store(1, Ordering::Relaxed); // Unrelated modification of M that is not memory_order_seq_cst 162 | eg.a.store(2, Ordering::SeqCst); // Operation A 163 | eg.a.store(3, Ordering::Relaxed); // Does not happen before A 164 | 0 165 | }); 166 | 167 | lt.add(move |mut eg: Environment| { 168 | eg.a.load(Ordering::SeqCst) // Operation B 169 | }); 170 | 171 | lt.run_sequential() 172 | } 
173 | 174 | assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]]))); 175 | } 176 | 177 | /* If there was a memory_order_seq_cst std::atomic_thread_fence operation X sequenced-before B, then B observes one of the following: 178 | 179 | * the last memory_order_seq_cst modification of M that appears before X in the single total order 180 | * some unrelated modification of M that appears later in M's modification order 181 | */ 182 | 183 | #[test] 184 | fn seq_cst_fence() { 185 | fn inner() -> Vec { 186 | let mut lt = LogTest::default(); 187 | 188 | lt.add(move |mut eg: Environment| { 189 | eg.a.store(1, Ordering::Relaxed); // Unrelated modification of M that is not memory_order_seq_cst 190 | eg.a.store(2, Ordering::SeqCst); // Operation A 191 | eg.a.store(3, Ordering::Relaxed); // Does not happen before A 192 | 193 | 0 194 | }); 195 | 196 | lt.add(move |mut eg: Environment| { 197 | eg.fence(Ordering::SeqCst); 198 | eg.a.load(Ordering::Relaxed) // Operation B 199 | }); 200 | 201 | lt.run_sequential() 202 | } 203 | 204 | assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]]))); 205 | } 206 | 207 | /* 208 | For a pair of atomic operations on M called A and B, where A writes and B reads M's value, 209 | if there are two memory_order_seq_cst std::atomic_thread_fences X and Y, 210 | and if A is sequenced-before X, Y is sequenced-before B, 211 | and X appears before Y in the Single Total Order, then B observes either: 212 | * the effect of A 213 | * some unrelated modification of M that appears after A in M's modification order 214 | */ 215 | 216 | #[test] 217 | fn seq_cst_two_fence() { 218 | fn inner() -> Vec { 219 | let mut lt = LogTest::default(); 220 | 221 | lt.add(move |mut eg: Environment| { 222 | eg.a.store(1, Ordering::Relaxed); // Unrelated modification of M that is not memory_order_seq_cst 223 | eg.a.store(2, Ordering::Relaxed); // Operation A 224 | eg.fence(Ordering::SeqCst); 225 | eg.a.store(3, Ordering::Relaxed); // Does not happen before A 
226 | 227 | 0 228 | }); 229 | 230 | lt.add(move |mut eg: Environment| { 231 | eg.fence(Ordering::SeqCst); 232 | eg.a.load(Ordering::Relaxed) // Operation B 233 | }); 234 | 235 | lt.run_sequential() 236 | } 237 | 238 | assert!(run_until(inner, permutations(vec![vec![0], vec![2, 3]]))); 239 | } 240 | 241 | /* The single total order might not be consistent with happens-before. 242 | This allows more efficient implementation of memory_order_acquire and memory_order_release on some CPUs. 243 | It can produce surprising results when memory_order_acquire and memory_order_release are mixed with memory_order_seq_cst. 244 | 245 | For example, with x and y initially zero, 246 | is allowed to produce r1 == 1 && r2 == 3 && r3 == 0, 247 | (Note: this checks stated outcome is possible, I have not verified other results I assume to be valid) 248 | */ 249 | 250 | #[test] 251 | fn sto_happens_before() { 252 | fn inner() -> Vec { 253 | let mut lt = LogTest::default(); 254 | 255 | lt.add(move |mut eg: Environment| { 256 | eg.a.store(1, Ordering::SeqCst); 257 | eg.b.store(1, Ordering::Release); 258 | 0 259 | }); 260 | 261 | lt.add(move |mut eg: Environment| { 262 | let a = eg.b.fetch_op(|v| v + 1, Ordering::SeqCst); 263 | let b = eg.b.load(Ordering::Relaxed); 264 | 265 | a + b 266 | }); 267 | 268 | lt.add(move |mut eg: Environment| { 269 | eg.b.store(3, Ordering::SeqCst); 270 | eg.a.load(Ordering::SeqCst) 271 | }); 272 | 273 | lt.run() 274 | } 275 | 276 | assert!(run_until( 277 | inner, 278 | permutations(vec![vec![0], vec![1, 3, 4, 7], vec![0, 1]]) 279 | )); 280 | } 281 | 282 | /* This example demonstrates a situation where sequential ordering is necessary. 283 | Any other ordering may trigger the assert because it would be possible for the 284 | threads c and d to observe changes to the atomics x and y in opposite order. 
*/ 285 | 286 | #[test] 287 | fn test_seq_cst() { 288 | fn inner() -> Vec { 289 | let mut lt = LogTest::default(); 290 | 291 | lt.add(move |mut eg: Environment| { 292 | eg.a.store(1, Ordering::SeqCst); 293 | 0 294 | }); 295 | 296 | lt.add(move |mut eg: Environment| { 297 | eg.b.store(1, Ordering::SeqCst); 298 | 0 299 | }); 300 | 301 | lt.add(move |mut eg: Environment| { 302 | while eg.a.load(Ordering::SeqCst) == 0 {} 303 | eg.b.load(Ordering::SeqCst) 304 | }); 305 | 306 | lt.add(move |mut eg: Environment| { 307 | while eg.b.load(Ordering::SeqCst) == 0 {} 308 | eg.a.load(Ordering::SeqCst) 309 | }); 310 | 311 | lt.run() 312 | } 313 | 314 | assert!(run_until( 315 | inner, 316 | vec![vec![0, 0, 0, 1], vec![0, 0, 1, 0], vec![0, 0, 1, 1]] 317 | )); 318 | } 319 | -------------------------------------------------------------------------------- /memlog/tests/standard_fence.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest}; 2 | use crate::common::utils::run_until; 3 | use std::sync::atomic::Ordering; 4 | 5 | mod common; 6 | 7 | // https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence 8 | // Fence-atomic synchronization 9 | 10 | /* 11 | Fence-atomic synchronization 12 | 13 | A release fence F in thread A synchronizes-with atomic acquire operation Y in thread B, if 14 | 15 | * there exists an atomic store X (with any memory order) 16 | * Y reads the value written by X (or the value would be written by release sequence headed by X if X were a release operation) 17 | * F is sequenced-before X in thread A 18 | 19 | In this case, all non-atomic and relaxed atomic stores that are sequenced-before F in thread A will happen-before all non-atomic and relaxed atomic loads from the same locations made in thread B after Y. 
20 | */ 21 | 22 | #[test] 23 | fn test_fence_atomic() { 24 | fn inner(release_chain: bool, fence: bool) -> Vec { 25 | let mut lt = LogTest::default(); 26 | 27 | lt.add(move |mut eg: Environment| { 28 | eg.a.store(1, Ordering::Relaxed); // Target 29 | 30 | // Fence behind conditional to ensure when fence is missing, wrong values result 31 | if fence { 32 | eg.fence(Ordering::Release); // F Sequenced before X in thread A 33 | } 34 | 35 | if release_chain { 36 | // Atomic store X that would create a release chain to c were it Release 37 | eg.b.store(1, Ordering::Relaxed); 38 | } else { 39 | // Atomic store X that writes directly to c 40 | eg.c.store(1, Ordering::Relaxed); 41 | } 42 | 0 43 | }); 44 | 45 | lt.add(move |mut eg: Environment| { 46 | if release_chain { 47 | // If modelling a release chain, spin on an intermediate flag 48 | while eg.b.load(Ordering::Acquire) == 0 {} 49 | 50 | eg.c.store(1, Ordering::Release) // Continue release chain from store X 51 | } 52 | 0 53 | }); 54 | 55 | lt.add(|mut eg: Environment| { 56 | // After loop, Y reads value written by X, or a value that would be written by the release chain 57 | while eg.c.load(Ordering::Acquire) == 0 {} 58 | 59 | // This should always see the store 60 | eg.a.load(Ordering::Relaxed) 61 | }); 62 | 63 | lt.run() 64 | } 65 | 66 | // Assert success when fences are present 67 | assert!(run_until(|| inner(true, true), vec![vec![0, 0, 1]])); 68 | assert!(run_until(|| inner(false, true), vec![vec![0, 0, 1]])); 69 | 70 | // Assert failure when fences are missing 71 | assert!(run_until( 72 | || inner(true, false), 73 | vec![vec![0, 0, 0], vec![0, 0, 1]] 74 | )); 75 | assert!(run_until( 76 | || inner(false, false), 77 | vec![vec![0, 0, 0], vec![0, 0, 1]] 78 | )); 79 | } 80 | 81 | /* 82 | Atomic-fence synchronization 83 | 84 | An atomic release operation X in thread A synchronizes-with an acquire fence F in thread B, if 85 | 86 | * there exists an atomic read Y (with any memory order) 87 | * Y reads the value written 
by X (or by the release sequence headed by X) 88 | * Y is sequenced-before F in thread B 89 | 90 | In this case, all non-atomic and relaxed atomic stores that are sequenced-before X in thread A will happen-before all non-atomic and relaxed atomic loads from the same locations made in thread B after F. 91 | */ 92 | 93 | #[test] 94 | fn test_atomic_fence() { 95 | fn inner(release_chain: bool, fence: bool) -> Vec { 96 | let mut lt = LogTest::default(); 97 | 98 | lt.add(move |mut eg: Environment| { 99 | eg.a.store(1, Ordering::Relaxed); // Target 100 | 101 | if release_chain { 102 | // Atomic store X that creates a release chain to c 103 | eg.b.store(1, Ordering::Release); 104 | } else { 105 | // Atomic store X that writes directly to c 106 | eg.c.store(1, Ordering::Release); 107 | } 108 | 0 109 | }); 110 | 111 | lt.add(move |mut eg: Environment| { 112 | if release_chain { 113 | // If modelling a release chain, spin on an intermediate flag 114 | while eg.b.load(Ordering::Acquire) == 0 {} 115 | 116 | eg.c.store(1, Ordering::Release) // Continue release chain from store X 117 | } 118 | 0 119 | }); 120 | 121 | lt.add(move |mut eg: Environment| { 122 | // Y reads value written by X, or a value that would be written by the release chain 123 | while eg.c.load(Ordering::Relaxed) == 0 {} // Atomic Read Y 124 | 125 | // Fence behind conditional to ensure when fence is missing, wrong values result 126 | if fence { 127 | eg.fence(Ordering::Acquire); // Fence F 128 | } 129 | 130 | // This should always see the store 131 | eg.a.load(Ordering::Relaxed) 132 | }); 133 | 134 | lt.run() 135 | } 136 | 137 | // Assert success when fences are present 138 | assert!(run_until(|| inner(true, true), vec![vec![0, 0, 1]])); 139 | assert!(run_until(|| inner(false, true), vec![vec![0, 0, 1]])); 140 | 141 | // Assert failure when fences are missing 142 | assert!(run_until( 143 | || inner(true, false), 144 | vec![vec![0, 0, 0], vec![0, 0, 1]] 145 | )); 146 | assert!(run_until( 147 | || inner(false, 
false), 148 | vec![vec![0, 0, 0], vec![0, 0, 1]] 149 | )); 150 | } 151 | 152 | /* 153 | Fence-fence synchronization 154 | 155 | A release fence FA in thread A synchronizes-with an acquire fence FB in thread B, if 156 | 157 | * There exists an atomic object M, 158 | * There exists an atomic write X (with any memory order) that modifies M in thread A 159 | * FA is sequenced-before X in thread A 160 | * There exists an atomic read Y (with any memory order) in thread B 161 | * Y reads the value written by X (or the value would be written by release sequence headed by X if X were a release operation) 162 | * Y is sequenced-before FB in thread B 163 | * In this case, all non-atomic and relaxed atomic stores that are sequenced-before FA in thread A will happen-before all non-atomic and relaxed atomic loads from the same locations made in thread B after FB 164 | */ 165 | 166 | #[test] 167 | fn test_fence_fence() { 168 | fn inner(release_chain: bool, failure: usize) -> Vec { 169 | let mut lt = LogTest::default(); 170 | 171 | lt.add(move |mut eg: Environment| { 172 | eg.a.store(1, Ordering::Relaxed); // Target 173 | 174 | // Fence behind conditional to ensure when fence is missing, wrong values result 175 | if failure != 1 { 176 | eg.fence(Ordering::Release); // Fence FA 177 | } 178 | 179 | if release_chain { 180 | // Atomic store X that would create a release chain to c were it release 181 | eg.b.store(1, Ordering::Relaxed); 182 | } else { 183 | // Atomic store X that writes directly to c 184 | eg.c.store(1, Ordering::Relaxed); 185 | } 186 | 0 187 | }); 188 | 189 | lt.add(move |mut eg: Environment| { 190 | if release_chain { 191 | // If modelling a release chain, spin on an intermediate flag 192 | while eg.b.load(Ordering::Acquire) == 0 {} 193 | 194 | eg.c.store(1, Ordering::Release) // Atomic store X 195 | } 196 | 0 197 | }); 198 | 199 | lt.add(move |mut eg: Environment| { 200 | while eg.c.load(Ordering::Relaxed) == 0 {} // Atomic Read Y 201 | 202 | // Fence behind 
conditional to ensure when fence is missing, wrong values result 203 | if failure != 2 { 204 | eg.fence(Ordering::Acquire); // Fence FB 205 | } 206 | 207 | // This should always see the store 208 | eg.a.load(Ordering::Relaxed) 209 | }); 210 | 211 | lt.run() 212 | } 213 | 214 | // Assert success when fences are present 215 | assert!(run_until(|| inner(true, 0), vec![vec![0, 0, 1]])); 216 | assert!(run_until(|| inner(false, 0), vec![vec![0, 0, 1]])); 217 | 218 | // Assert failure when first fence is missing 219 | assert!(run_until( 220 | || inner(true, 1), 221 | vec![vec![0, 0, 0], vec![0, 0, 1]] 222 | )); 223 | assert!(run_until( 224 | || inner(false, 1), 225 | vec![vec![0, 0, 0], vec![0, 0, 1]] 226 | )); 227 | 228 | // Assert failure when second fence is missing 229 | assert!(run_until( 230 | || inner(true, 2), 231 | vec![vec![0, 0, 0], vec![0, 0, 1]] 232 | )); 233 | assert!(run_until( 234 | || inner(false, 2), 235 | vec![vec![0, 0, 0], vec![0, 0, 1]] 236 | )); 237 | } 238 | 239 | /* 240 | Example under "Notes" 241 | 242 | Test ensures that, after seeing the write to C, the second thread can use an Acquire fence to 243 | synchronize with the release fence and see all of the first thread's prior stores 244 | */ 245 | #[test] 246 | fn test_fence_fence_example_a() { 247 | fn inner(failure: usize) -> Vec { 248 | let mut lt = LogTest::default(); 249 | 250 | lt.add(move |mut eg: Environment| { 251 | eg.a.store(1, Ordering::Relaxed); // Relaxed write a 252 | eg.b.store(1, Ordering::Relaxed); // Relaxed write b 253 | 254 | // Fence behind conditional to ensure when fence is missing, wrong values result 255 | if failure != 1 { 256 | eg.fence(Ordering::Release); 257 | } 258 | eg.c.store(1, Ordering::Relaxed); // Target 259 | 0 260 | }); 261 | 262 | lt.add(move |mut eg: Environment| { 263 | if eg.c.load(Ordering::Relaxed) == 1 { 264 | // Fence behind conditional to ensure when fence is missing, wrong values result 265 | if failure != 2 { 266 |
eg.fence(Ordering::Acquire); 267 | } 268 | // After a fence, we must see writes to a and b 269 | eg.a.load(Ordering::Relaxed) + eg.b.load(Ordering::Relaxed) 270 | } else { 271 | 0 272 | } 273 | }); 274 | 275 | lt.run() 276 | } 277 | 278 | // When fences are present, if thread two perceives the write to c, it should see both a & b 279 | assert!(run_until(|| inner(0), vec![vec![0, 0], vec![0, 2]])); 280 | // Without first fence, all bets are off 281 | assert!(run_until( 282 | || inner(1), 283 | vec![vec![0, 0], vec![0, 1], vec![0, 2]] 284 | )); 285 | // Without second fence, all bets are off 286 | assert!(run_until( 287 | || inner(2), 288 | vec![vec![0, 0], vec![0, 1], vec![0, 2]] 289 | )); 290 | } 291 | 292 | #[test] 293 | fn test_fence_fence_example_b() { 294 | fn inner(failure: usize) -> Vec { 295 | let mut lt = LogTest::default(); 296 | 297 | lt.add(move |mut eg: Environment| { 298 | // Transaction 1 299 | // Write data to a, bump write pointer c 300 | eg.a.store(1, Ordering::Relaxed); 301 | eg.c.store(1, Ordering::Release); 302 | 303 | // Transaction 2 304 | // Write data to b, bump write pointer c 305 | eg.b.store(1, Ordering::Relaxed); 306 | eg.c.store(2, Ordering::Release); 307 | 0 308 | }); 309 | 310 | lt.add(move |mut eg: Environment| { 311 | // Wait for write pointer to indicate Transaction 1 ready 312 | while eg.c.load(Ordering::Relaxed) < 1 {} 313 | 314 | // Fence behind conditional to ensure when fence is missing, wrong values result 315 | if failure != 1 { 316 | eg.fence(Ordering::Acquire); 317 | } 318 | 319 | eg.a.load(Ordering::Relaxed) 320 | }); 321 | 322 | lt.add(move |mut eg: Environment| { 323 | // Wait for write pointer to indicate Transaction 2 ready 324 | while eg.c.load(Ordering::Relaxed) < 2 {} 325 | 326 | // Fence behind conditional to ensure when fence is missing, wrong values result 327 | if failure != 2 { 328 | eg.fence(Ordering::Acquire); 329 | } 330 | 331 | eg.a.load(Ordering::Relaxed) 332 | }); 333 | 334 | lt.run() 335 | } 336 | 337 | 
// When fences are present, if thread two perceives the write to c, it should see both a & b 338 | assert!(run_until(|| inner(0), vec![vec![0, 1, 1]])); 339 | 340 | // Drop fence from first reader. It is now not guaranteed to perceive the store to a 341 | assert!(run_until(|| inner(1), vec![vec![0, 0, 1], vec![0, 1, 1]])); 342 | // Drop fence from second reader. It is now not guaranteed to perceive the store to b 343 | assert!(run_until(|| inner(2), vec![vec![0, 1, 0], vec![0, 1, 1]])); 344 | } 345 | -------------------------------------------------------------------------------- /memlog/tests/system.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest}; 2 | use crate::common::utils::run_until; 3 | use std::sync::atomic::Ordering; 4 | 5 | mod common; 6 | 7 | #[test] 8 | fn test_harness() { 9 | let mut lt = LogTest::default(); 10 | 11 | const ITERS: usize = 100; 12 | 13 | lt.add(|mut eg: Environment| { 14 | let mut last = None; 15 | for _ in 0..=ITERS { 16 | let l = eg.a.load(Ordering::Relaxed); 17 | if let Some(v) = last { 18 | assert!(v <= l); 19 | } 20 | last = Some(l); 21 | } 22 | }); 23 | 24 | lt.add(|mut eg: Environment| { 25 | for x in 0..=ITERS { 26 | eg.a.store(x, Ordering::Relaxed); 27 | } 28 | }); 29 | 30 | lt.run(); 31 | } 32 | 33 | #[test] 34 | fn test_same_thread_reads() { 35 | let mut lt = LogTest::default(); 36 | const ITERS: usize = 100; 37 | 38 | lt.add(|mut eg: Environment| { 39 | for x in 0..=ITERS { 40 | eg.a.store(x, Ordering::Relaxed); 41 | assert_eq!(x, eg.a.load(Ordering::Relaxed)); 42 | } 43 | }); 44 | 45 | lt.run(); 46 | } 47 | 48 | #[test] 49 | fn test_intel_failure() { 50 | fn intel_failure_inner() -> Vec { 51 | let mut lt = LogTest::default(); 52 | 53 | lt.add(|mut eg: Environment| { 54 | eg.a.store(1, Ordering::Relaxed); 55 | eg.b.load(Ordering::Relaxed) 56 | }); 57 | 58 | lt.add(|mut eg: Environment| { 59 | eg.b.store(1, Ordering::Relaxed); 60 | 
eg.a.load(Ordering::Relaxed) 61 | }); 62 | 63 | lt.run() 64 | } 65 | 66 | assert!(run_until( 67 | intel_failure_inner, 68 | vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]] 69 | )); 70 | } 71 | -------------------------------------------------------------------------------- /memlog/tests/update.rs: -------------------------------------------------------------------------------- 1 | use crate::common::harness::{Environment, LogTest, Value}; 2 | use crate::common::utils::{permutations, run_until, run_until_pred}; 3 | use std::collections::HashSet; 4 | use std::sync::atomic::Ordering; 5 | 6 | mod common; 7 | 8 | /* 9 | Test cases that fully exercise orderings for fetch and modify operations. It's a little messy - the tests are: 10 | * If using Acquire ordering, Thread #2 should synchronise with the Release in Thread #1, perceiving the write to c 11 | * If using Release ordering, the Acquire in Thread #3 should synchronise with Thread #2, perceiving the write to b 12 | * A fairly standard SeqCst test, using the fetch_add op as a SeqCst write to a flag read by Thread #4 13 | 14 | The operations tested are: 15 | * compare_exchange (happy path) 16 | * fetch_op (fetch_add, etc) 17 | * fetch_update (happy path) 18 | */ 19 | 20 | #[derive(Copy, Clone)] 21 | enum ModifyTestType { 22 | CompareExchange, 23 | FetchOp, 24 | FetchUpdate, 25 | } 26 | 27 | #[test] 28 | fn test_fetch_and_modify() { 29 | const ANOMALY_STALE_B: usize = 10; 30 | const ANOMALY_STALE_C: usize = 100; 31 | const ANOMALY_SEQ_CST: usize = 1000; 32 | 33 | fn inner(test_type: ModifyTestType, ordering: Ordering) -> Vec { 34 | let mut lt = LogTest::default(); 35 | 36 | lt.add(move |mut eg: Environment| { 37 | eg.c.store(1, Ordering::Relaxed); 38 | eg.a.fetch_op(|v| v + 1, Ordering::Release); 39 | 40 | 0 41 | }); 42 | 43 | lt.add(move |mut eg: Environment| { 44 | eg.b.store(1, Ordering::Relaxed); 45 | 46 | // Operation with variable ordering 47 | let res = match test_type { 48 | ModifyTestType::CompareExchange 
=> { 48 | eg.a.exchange_weak(1, 11, ordering, Ordering::Relaxed) 49 | } 50 | ModifyTestType::FetchOp => Ok(eg.a.fetch_op(|v| v + 10, ordering)), 51 | ModifyTestType::FetchUpdate => { 52 | eg.a.fetch_update(|v| Some(v + 10), ordering, Ordering::Relaxed) 53 | } 54 | }; 55 | 56 | if let Ok(a) = res { 57 | let c = eg.c.load(Ordering::Relaxed); 58 | 59 | if eg.d.load(Ordering::SeqCst) == 0 { 60 | eg.e.fetch_op(|v| v + 1, Ordering::Release); 61 | } 62 | 63 | // If we perceive Thread 1's write to a, we should see its write to c under Acquire, AcqRel, SeqCst 64 | if a == 1 && c == 0 { 65 | ANOMALY_STALE_C 66 | } else { 67 | 0 68 | } 69 | } else { 70 | 0 71 | } 72 | }); 73 | 74 | lt.add(move |mut eg: Environment| { 75 | let a = eg.a.load(Ordering::Acquire); 76 | let b = eg.b.load(Ordering::Relaxed); 77 | 78 | // If we perceive Thread 2's write to a, we should see its write to b under Release, AcqRel, SeqCst 79 | if b == 0 && a > 5 { 80 | ANOMALY_STALE_B 81 | } else { 82 | 0 83 | } 84 | }); 85 | 86 | lt.add(move |mut eg: Environment| { 87 | eg.d.store(1, Ordering::SeqCst); 88 | 89 | let a = eg.a.load(Ordering::SeqCst); 90 | 91 | // If we don't see Thread 2's write to a, we should see its write to e under SeqCst 92 | if a == 0 && eg.e.load(Ordering::Relaxed) != 0 { 93 | ANOMALY_SEQ_CST 94 | } else { 95 | 0 96 | } 97 | }); 98 | 99 | lt.run() 100 | } 101 | 102 | let check_result = |res: &HashSet>, expected: Vec| { 103 | let mut seen = HashSet::::new(); 104 | 105 | for entry in res { 106 | for v in entry { 107 | seen.insert(*v); 108 | } 109 | } 110 | let expected: HashSet = expected.into_iter().collect(); 111 | seen.eq(&expected) 112 | }; 113 | 114 | for op in [ 115 | ModifyTestType::FetchOp, 116 | ModifyTestType::CompareExchange, 117 | ModifyTestType::FetchUpdate, 118 | ] { 119 | assert!(run_until_pred( 120 | || inner(op, Ordering::Relaxed), 121 | |v| { 122 | check_result( 123 | v, 124 | vec![0, ANOMALY_STALE_B, ANOMALY_STALE_C, ANOMALY_SEQ_CST], 125 | ) 126 | } 127 | )); 128 
| 130 | assert!(run_until_pred( 131 | || inner(op, Ordering::Release), 132 | |v| { check_result(v, vec![0, ANOMALY_STALE_C, ANOMALY_SEQ_CST],) } 133 | )); 134 | 135 | assert!(run_until_pred( 136 | || inner(op, Ordering::Acquire), 137 | |v| { check_result(v, vec![0, ANOMALY_STALE_B, ANOMALY_SEQ_CST],) } 138 | )); 139 | 140 | assert!(run_until_pred( 141 | || inner(op, Ordering::AcqRel), 142 | |v| { check_result(v, vec![0, ANOMALY_SEQ_CST],) } 143 | )); 144 | 145 | assert!(run_until_pred( 146 | || inner(op, Ordering::SeqCst), 147 | |v| { check_result(v, vec![0],) } 148 | )); 149 | } 150 | } 151 | 152 | /* 153 | Test the distinction between compare_exchange, and compare_exchange_weak. 154 | This test uses run_sequential, and as such is deterministic. The exchange always has the correct old value, 155 | so compare_exchange will always succeed, but compare_exchange_weak will spuriously fail. 156 | */ 157 | 158 | #[test] 159 | fn test_compare_exchange_weakness() { 160 | fn inner(weak: bool) -> Vec { 161 | let mut lt = LogTest::default(); 162 | 163 | lt.add(move |mut eg: Environment| { 164 | if weak { 165 | let _ = 166 | eg.a.exchange_weak(0, 1, Ordering::Relaxed, Ordering::Relaxed); 167 | } else { 168 | let _ = eg.a.exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed); 169 | } 170 | 171 | eg.b.store(1, Ordering::Release); 172 | 173 | 0 174 | }); 175 | 176 | lt.add(move |mut eg: Environment| { 177 | while eg.b.load(Ordering::Acquire) == 0 {} 178 | eg.a.load(Ordering::Acquire) 179 | }); 180 | 181 | lt.run_sequential() 182 | } 183 | 184 | // compare_exchange_weak can spuriously fail 185 | assert!(run_until(|| inner(true), vec![vec![0, 0], vec![0, 1]])); 186 | // However, compare_exchange should succeed if the correct old value is passed in 187 | assert!(run_until(|| inner(false), vec![vec![0, 1]])); 188 | } 189 | 190 | /* 191 | Tests the failure ordering of fetch modify ops. The call uses the incorrect old value, and thus always fails.
192 | A Relaxed failure ordering doesn't always perceive the write to a, however higher levels will. SeqCst is used as 193 | the success ordering, ensuring it's not being erroneously used. 194 | */ 195 | #[test] 196 | fn test_fetch_modify_failure_ordering() { 197 | fn inner(test_type: ModifyTestType, failure_ordering: Ordering) -> Vec { 198 | let mut lt = LogTest::default(); 199 | 200 | lt.add(move |mut eg: Environment| { 201 | eg.a.store(1, Ordering::Relaxed); 202 | eg.b.store(1, Ordering::Release); 203 | 204 | 0 205 | }); 206 | 207 | lt.add(move |mut eg: Environment| { 208 | // Failed exchange. Using the strongest possible success ordering, which should be ignored. 209 | let b = match test_type { 210 | ModifyTestType::CompareExchange => { 211 | eg.b.exchange_weak(2, 3, Ordering::SeqCst, failure_ordering) 212 | } 213 | ModifyTestType::FetchOp => panic!(), 214 | ModifyTestType::FetchUpdate => { 215 | eg.b.fetch_update(|_| None, Ordering::SeqCst, failure_ordering) 216 | } 217 | } 218 | .unwrap_err(); 219 | 220 | let a = eg.a.load(Ordering::Relaxed); 221 | 222 | if b == 1 { 223 | a 224 | } else { 225 | 1 226 | } 227 | }); 228 | 229 | lt.run_sequential() 230 | } 231 | 232 | for test in [ModifyTestType::CompareExchange, ModifyTestType::FetchUpdate] { 233 | // Relaxed can see the store to b without seeing the store to a 234 | assert!(run_until( 235 | || inner(test, Ordering::Relaxed), 236 | vec![vec![0, 0], vec![0, 1]] 237 | )); 238 | 239 | assert!(run_until( 240 | || inner(test, Ordering::Acquire), 241 | vec![vec![0, 1]] 242 | )); 243 | 244 | assert!(run_until( 245 | || inner(test, Ordering::SeqCst), 246 | vec![vec![0, 1]] 247 | )); 248 | } 249 | } 250 | 251 | /* 252 | Additional failure test to ensure SeqCst failure ordering has correct behaviour with fetch modify ops. 253 | This is a standard SeqCst test, however we're using failed exchanges in place of a SeqCst load. 
254 | */ 255 | #[test] 256 | fn test_fetch_modify_failure_seq_cst_ordering() { 257 | fn inner(test_type: ModifyTestType, failure_ordering: Ordering) -> Vec { 258 | let mut lt = LogTest::default(); 259 | 260 | let failed_read_op = move |v: &mut Value| { 261 | match test_type { 262 | ModifyTestType::CompareExchange => { 263 | v.exchange_weak(2, 3, Ordering::SeqCst, failure_ordering) 264 | } 265 | ModifyTestType::FetchOp => panic!(), 266 | ModifyTestType::FetchUpdate => { 267 | v.fetch_update(|_| None, Ordering::SeqCst, failure_ordering) 268 | } 269 | } 270 | .unwrap_err() 271 | }; 272 | 273 | lt.add(move |mut eg: Environment| { 274 | eg.a.store(1, Ordering::SeqCst); 275 | let b = failed_read_op(&mut eg.b); 276 | 277 | if b == 0 { 278 | eg.c.fetch_op(|v| v + 1, Ordering::Relaxed); 279 | } 280 | 281 | eg.c.load(Ordering::Relaxed) 282 | }); 283 | 284 | lt.add(move |mut eg: Environment| { 285 | eg.b.store(1, Ordering::SeqCst); 286 | let a = failed_read_op(&mut eg.a); 287 | 288 | if a == 0 { 289 | eg.c.fetch_op(|v| v + 1, Ordering::Relaxed); 290 | } 291 | 292 | eg.c.load(Ordering::Relaxed) 293 | }); 294 | 295 | lt.run() 296 | } 297 | for test in [ModifyTestType::CompareExchange, ModifyTestType::FetchUpdate] { 298 | let non_seq_cst_outcomes = vec![ 299 | vec![0, 0], 300 | vec![0, 1], 301 | vec![1, 0], 302 | vec![1, 1], 303 | vec![2, 1], 304 | vec![1, 2], 305 | vec![2, 2], 306 | ]; 307 | 308 | assert!(run_until( 309 | || inner(test, Ordering::Relaxed), 310 | non_seq_cst_outcomes.clone() 311 | )); 312 | assert!(run_until( 313 | || inner(test, Ordering::Acquire), 314 | non_seq_cst_outcomes 315 | )); 316 | 317 | assert!(run_until( 318 | || inner(test, Ordering::SeqCst), 319 | permutations(vec![vec![0, 1], vec![0, 1]]) 320 | )); 321 | } 322 | } 323 | 324 | // Todo: Test fetch ordering in fetch_update in the case of success 325 | // Currently, the test harness is not capable of reentrant access to memlog, which means this cannot be tested. 
326 | -------------------------------------------------------------------------------- /memlog/tests/williams.rs: -------------------------------------------------------------------------------- 1 | /* 2 | Tests the model against examples that demonstrate the behaviour described in C++ Concurrency in 3 | Action by Anthony Williams. The implementations differ significantly. 4 | */ 5 | use crate::common::harness::{Environment, LogTest}; 6 | use crate::common::utils::run_until; 7 | use std::sync::atomic::Ordering; 8 | 9 | mod common; 10 | 11 | // Listing 5.4 12 | // Tests to ensure a global ordering for SeqCst operations 13 | #[test] 14 | fn test_5_4() { 15 | fn inner() -> Vec { 16 | let mut lt = LogTest::default(); 17 | 18 | lt.add(|mut eg: Environment| { 19 | eg.a.store(1, Ordering::SeqCst); 20 | 0 21 | }); 22 | 23 | lt.add(|mut eg: Environment| { 24 | eg.b.store(1, Ordering::SeqCst); 25 | 0 26 | }); 27 | 28 | lt.add(|mut eg: Environment| { 29 | while eg.a.load(Ordering::SeqCst) == 0 {} 30 | eg.b.load(Ordering::SeqCst) 31 | }); 32 | 33 | lt.add(|mut eg: Environment| { 34 | while eg.b.load(Ordering::SeqCst) == 0 {} 35 | eg.a.load(Ordering::SeqCst) 36 | }); 37 | 38 | lt.run() 39 | } 40 | 41 | // 0,0 should not be possible, as it would imply the reader threads experienced different orders 42 | assert!(run_until( 43 | inner, 44 | vec![vec![0, 0, 1, 1], vec![0, 0, 0, 1], vec![0, 0, 1, 0]] 45 | )); 46 | } 47 | 48 | // Listing 5.5 49 | // Relaxed stores can be perceived in either order 50 | #[test] 51 | fn test_5_5() { 52 | fn inner() -> Vec { 53 | let mut lt = LogTest::default(); 54 | 55 | lt.add(|mut eg: Environment| { 56 | eg.a.store(1, Ordering::Relaxed); 57 | eg.b.store(1, Ordering::Relaxed); 58 | 0 59 | }); 60 | 61 | lt.add(|mut eg: Environment| { 62 | while eg.b.load(Ordering::Relaxed) == 0 {} 63 | eg.a.load(Ordering::Relaxed) 64 | }); 65 | 66 | lt.run() 67 | } 68 | 69 | assert!(run_until(inner, vec![vec![0, 0], vec![0, 1]])); 70 | } 71 | 72 | // Listing 5.6 73 | // 
Threads should immediately see their own writes 74 | // Threads should only ever see forward progress from other threads 75 | #[test] 76 | fn test_5_6() { 77 | let mut lt = LogTest::default(); 78 | 79 | lt.add(|mut eg: Environment| { 80 | let mut last = None; 81 | for x in 0..5 { 82 | eg.a.store(x, Ordering::Relaxed); 83 | let r = eg.b.load(Ordering::Relaxed); 84 | 85 | if let Some(l) = last { 86 | assert!(r >= l); 87 | } 88 | 89 | last = Some(r); 90 | 91 | assert_eq!(eg.a.load(Ordering::Relaxed), x); 92 | } 93 | }); 94 | 95 | lt.add(|mut eg: Environment| { 96 | let mut last = None; 97 | for x in 0..5 { 98 | eg.b.store(x, Ordering::Relaxed); 99 | let r = eg.a.load(Ordering::Relaxed); 100 | 101 | if let Some(l) = last { 102 | assert!(r >= l); 103 | } 104 | 105 | last = Some(r); 106 | 107 | assert_eq!(eg.b.load(Ordering::Relaxed), x); 108 | } 109 | }); 110 | 111 | lt.run(); 112 | } 113 | 114 | // Listing 5.7 115 | // With Release/Acquire ordering, threads 3 and 4 can perceive the writes 116 | // to threads 1 and 2 in different orders, the 0,0 case 117 | #[test] 118 | fn test_5_7() { 119 | fn inner() -> Vec { 120 | let mut lt = LogTest::default(); 121 | 122 | lt.add(|mut eg: Environment| { 123 | eg.a.store(1, Ordering::Release); 124 | 0 125 | }); 126 | 127 | lt.add(|mut eg: Environment| { 128 | eg.b.store(1, Ordering::Release); 129 | 0 130 | }); 131 | 132 | lt.add(|mut eg: Environment| { 133 | while eg.a.load(Ordering::Acquire) == 0 {} 134 | eg.b.load(Ordering::Acquire) 135 | }); 136 | 137 | lt.add(|mut eg: Environment| { 138 | while eg.b.load(Ordering::Acquire) == 0 {} 139 | eg.a.load(Ordering::Acquire) 140 | }); 141 | 142 | lt.run() 143 | } 144 | 145 | assert!(run_until( 146 | inner, 147 | vec![ 148 | vec![0, 0, 0, 0], 149 | vec![0, 0, 1, 0], 150 | vec![0, 0, 0, 1], 151 | vec![0, 0, 1, 1] 152 | ] 153 | )); 154 | } 155 | 156 | // Listing 5.8 157 | // Acquire and Release synchronizing on A means that if the second thread reads 1 from A, 158 | // it must also read 1 
from B 159 | #[test] 160 | fn test_5_8() { 161 | fn inner() -> Vec { 162 | let mut lt = LogTest::default(); 163 | 164 | lt.add(|mut eg: Environment| { 165 | eg.b.store(1, Ordering::Relaxed); 166 | eg.a.store(1, Ordering::Release); 167 | 0 168 | }); 169 | 170 | lt.add(|mut eg: Environment| { 171 | let a = eg.a.load(Ordering::Acquire); 172 | let b = eg.b.load(Ordering::Relaxed); 173 | // The acquire on A should synchronize with the other thread's release on A 174 | // If the value of (b-a) is negative, this thread has seen the write to A but not B 175 | (b as isize) - (a as isize) 176 | }); 177 | 178 | lt.run() 179 | } 180 | 181 | assert!(run_until(inner, vec![vec![0, 0], vec![0, 1]])); 182 | } 183 | 184 | // Listing 5.9 185 | // Acquire and Release are transitive 186 | #[test] 187 | fn test_5_9() { 188 | fn inner() -> Vec { 189 | let mut lt = LogTest::default(); 190 | 191 | lt.add(|mut eg: Environment| { 192 | eg.a.store(1, Ordering::Relaxed); 193 | eg.b.store(1, Ordering::Release); 194 | 0 195 | }); 196 | 197 | lt.add(|mut eg: Environment| { 198 | while eg.b.load(Ordering::Acquire) == 0 {} 199 | eg.c.store(1, Ordering::Release); 200 | 0 201 | }); 202 | 203 | lt.add(|mut eg: Environment| { 204 | while eg.c.load(Ordering::Acquire) == 0 {} 205 | eg.a.load(Ordering::Relaxed) 206 | }); 207 | 208 | lt.run() 209 | } 210 | 211 | assert!(run_until(inner, vec![vec![0, 0, 1]])); 212 | } 213 | 214 | // Listing 5.10 pertains to consume memory order, and is skipped 215 | 216 | // Listing 5.11 217 | // Exchange weak, even with Ordering::Relaxed, continues the release chain from threads 1-3 218 | // Ref: https://en.cppreference.com/w/cpp/atomic/memory_order 219 | // Release / Acquire ordering 220 | // and RMWs (with any ordering) following a release form a release sequence 221 | #[test] 222 | fn test_5_11() { 223 | fn inner(exchange_order: Ordering) -> Vec { 224 | let mut lt = LogTest::default(); 225 | 226 | lt.add(|mut eg: Environment| { 227 | eg.a.store(1, Ordering::Relaxed); 
228 | eg.b.store(1, Ordering::Release); 229 | 0 230 | }); 231 | 232 | lt.add(move |mut eg: Environment| { 233 | let _ = eg.b.exchange_weak(1, 1, exchange_order, Ordering::Relaxed); 234 | 0 235 | }); 236 | 237 | lt.add(|mut eg: Environment| { 238 | while eg.b.load(Ordering::Acquire) == 0 {} 239 | eg.a.load(Ordering::Relaxed) 240 | }); 241 | 242 | lt.run() 243 | } 244 | 245 | assert!(run_until(|| inner(Ordering::AcqRel), vec![vec![0, 0, 1]])); 246 | assert!(run_until(|| inner(Ordering::Relaxed), vec![vec![0, 0, 1]])); 247 | } 248 | 249 | // Listing 5.12 250 | // Tests release and acquire fences 251 | #[test] 252 | fn test_5_12() { 253 | fn inner() -> Vec { 254 | let mut lt = LogTest::default(); 255 | 256 | lt.add(|mut eg: Environment| { 257 | eg.a.store(1, Ordering::Relaxed); 258 | eg.fence(Ordering::Release); 259 | eg.b.store(1, Ordering::Relaxed); 260 | 0 261 | }); 262 | 263 | lt.add(|mut eg: Environment| { 264 | while eg.b.load(Ordering::Relaxed) == 0 {} 265 | eg.fence(Ordering::Acquire); 266 | eg.a.load(Ordering::Relaxed) 267 | }); 268 | 269 | lt.run() 270 | } 271 | 272 | assert!(run_until(inner, vec![vec![0, 1]])); 273 | } 274 | 275 | // Listing 5.12 adapted 276 | // Tests sequentially consistent fences 277 | #[test] 278 | fn test_5_12_seq() { 279 | fn inner() -> Vec { 280 | let mut lt = LogTest::default(); 281 | 282 | lt.add(|mut eg: Environment| { 283 | eg.a.store(1, Ordering::Relaxed); 284 | eg.fence(Ordering::SeqCst); 285 | eg.b.store(1, Ordering::Relaxed); 286 | 0 287 | }); 288 | 289 | lt.add(|mut eg: Environment| { 290 | while eg.b.load(Ordering::Relaxed) == 0 {} 291 | eg.fence(Ordering::SeqCst); 292 | eg.a.load(Ordering::Relaxed) 293 | }); 294 | 295 | lt.run() 296 | } 297 | 298 | assert!(run_until(inner, vec![vec![0, 1]])); 299 | } 300 | 301 | // Listing 5.13 pertains to non atomics, and so isn't included here. 302 | // Memlog doesn't draw a distinction between non atomics and relaxed access. 303 | // Temper deals with this at a higher level. 
304 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Temper 2 | 3 | ## About 4 | 5 | Temper is a framework for modelling concurrency and failure in distributed systems. The name comes from Temporal Fuzzing, a term coined by [Rachel Kroll](https://rachelbythebay.com/w/2011/11/19/sleep/). 6 | 7 | Similar to [Loom](https://github.com/tokio-rs/loom), programs targeting Temper should be able to switch between simulation mode, and calling real APIs in release mode. 8 | 9 | It is in early development, and is not yet more than an experiment. It currently features: 10 | 11 | * Low level x86/ARM memory models 12 | * Rust/C++ 11 memory model 13 | 14 | Planned features: 15 | 16 | * MESI protocol simulation to measure cache line contention and false sharing 17 | * Data race detection 18 | * TCP/IP, including congestion, asymmetric net splits, and Byzantine faults 19 | * Disk operations, including fsync and [power failure corruption](https://danluu.com/file-consistency/) 20 | * SQL transactional isolation 21 | 22 | Related Work: 23 | 24 | * [Madsim](https://github.com/madsim-rs/madsim), a similar project with an emphasis on networking 25 | * FoundationDB's [testing strategy](https://www.youtube.com/watch?v=4fFDFbi3toc) 26 | * TigerBeetle's [fault injection](https://www.youtube.com/watch?v=BH2jvJ74npM) testing 27 | * [Loom](https://github.com/tokio-rs/loom), which exhaustively tests on a single node 28 | * [Timecraft](https://github.com/stealthrocket/timecraft), a distributed system testing tool for WebAssembly 29 | * [Coyote](https://microsoft.github.io/coyote), a similar project for the .Net runtime 30 | * [Antithesis](https://antithesis.com/), a deterministic hypervisor that can test arbitrary software 31 | * [Turmoil](https://github.com/tokio-rs/turmoil), a testing framework for Tokio that also does network fault injection 32 | * FrostDB's 
[testing strategy](https://www.polarsignals.com/blog/posts/2024/05/28/mostly-dst-in-go), compiling Go to 33 | WASM 34 | 35 | Reading: 36 | 37 | * [Files are fraught with peril](https://danluu.com/deconstruct-files/) by Dan Luu 38 | * [What's the big deal about Deterministic Simulation Testing?](https://notes.eatonphil.com/2024-08-20-deterministic-simulation-testing.html) 39 | by Phil Eaton 40 | 41 | ## Components 42 | 43 | ### Memlog 44 | 45 | Memlog simulates the Rust memory model (C++ 11 without Consume). Combined with operation reordering in Temper, its goal is full coverage. It contains a series of test cases derived from [Preshing on Programming](https://preshing.com/), [C++ Concurrency in Action](https://www.amazon.com.au/C-Concurrency-Action-Practical-Multithreading/dp/1933988770), the [C++ Standard](https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence), [blog posts](https://puzpuzpuz.dev/seqlock-based-atomic-memory-snapshots) and [many](https://stackoverflow.com/questions/47520748/c-memory-model-do-seq-cst-loads-synchronize-with-seq-cst-stores) [Stack](https://stackoverflow.com/questions/52606524/what-exact-rules-in-the-c-memory-model-prevent-reordering-before-acquire-opera) [Overflow](https://stackoverflow.com/questions/71509935/how-does-mixing-relaxed-and-acquire-release-accesses-on-the-same-atomic-variable) [questions](https://stackoverflow.com/questions/67693687/possible-orderings-with-memory-order-seq-cst-and-memory-order-release). 46 | 47 | Todo: 48 | * Detect [data races](https://en.cppreference.com/w/cpp/language/memory_model) in non-atomic datatypes 49 | * Expose API to declare what can be reordered 50 | * MESI protocol simulation 51 | * Locks 52 | * Seeded randomness 53 | * Reentry support for fetch_update 54 | * Support multiple datatypes 55 | 56 | ### Low Level 57 | 58 | Temper contains a low level simulation of x86/ARM memory models.
It is intended for experimentation, as the operations cannot be translated to standard Rust calls in release mode. 59 | 60 | Todo: 61 | * Non-coherent memory models (Alpha) 62 | * Locks 63 | * CAS 64 | * Platform specific barriers 65 | * Spin forever under contention 66 | 67 | ### Future Work 68 | 69 | * Crate swap mechanism for release 70 | * Sample lock free algorithms, such as a MPMC queue 71 | * Deterministic testing with seeds and reproducibility 72 | * Disk w/ fsync, power failure, corruption 73 | * Sample Disk LSM system 74 | * TCP with net splits, latency and Byzantine faults 75 | * Sample Raft protocol 76 | * Visualisation -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //#![warn(clippy::panic, clippy::unwrap_used, clippy::expect_used)] 2 | #![allow(clippy::ptr_arg)] 3 | 4 | use crate::temper::memory::core::{set_model, Atomic, MemoryModel}; 5 | use std::sync::atomic::AtomicUsize; 6 | use std::sync::atomic::Ordering::Relaxed; 7 | use std::sync::Arc; 8 | 9 | extern crate uuid; 10 | use crate::temper::system::core::System; 11 | use threadpool::ThreadPool; 12 | 13 | pub mod temper; 14 | 15 | #[derive(Clone)] 16 | struct Test { 17 | a: Arc>, 18 | b: Arc>, 19 | } 20 | 21 | fn test_left(t: &mut Test) { 22 | t.b.set(1); 23 | Atomic::<()>::fence(); 24 | let _res = t.a.get(); 25 | // println!("Got A {}", *res); 26 | } 27 | 28 | fn test_right(t: &mut Test) { 29 | t.a.set(1); 30 | Atomic::<()>::fence(); 31 | let _res = t.b.get(); 32 | // println!("Got B {}", *res); 33 | } 34 | 35 | fn run_test() { 36 | set_model(MemoryModel::Intel); 37 | let s = System::new(); 38 | 39 | let t = Test { 40 | a: Arc::new(Atomic::new(0)), 41 | b: Arc::new(Atomic::new(0)), 42 | }; 43 | 44 | let mut ta = t.clone(); 45 | let mut tb = t; 46 | 47 | let fns: Vec> = vec![ 48 | Box::new(move || test_left(&mut ta)), 49 | Box::new(move || test_right(&mut tb)), 50 | ]; 
51 | 52 | s.run(fns); 53 | } 54 | 55 | pub fn run_bench() { 56 | let now = std::time::SystemTime::now(); 57 | let n_workers = 1; 58 | let pool = ThreadPool::new(n_workers); 59 | 60 | let num = 1_000; 61 | let fin = Arc::new(AtomicUsize::new(0)); 62 | 63 | for _ in 0..num { 64 | let fin = fin.clone(); 65 | pool.execute(move || { 66 | run_test(); 67 | fin.fetch_add(1, Relaxed); 68 | }); 69 | } 70 | 71 | pool.join(); 72 | 73 | while fin.load(Relaxed) != num { 74 | std::thread::sleep(std::time::Duration::from_millis(1)); 75 | } 76 | 77 | println!("Done {:?}", now.elapsed().unwrap().as_millis()); 78 | } 79 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use temper::run_bench; 2 | 3 | fn main() { 4 | run_bench(); 5 | } 6 | -------------------------------------------------------------------------------- /src/temper/memory/core.rs: -------------------------------------------------------------------------------- 1 | use crate::temper::system::core::{with_system, Op, Operation}; 2 | use crate::temper::utils::sleepwait::SleepWait; 3 | use std::any::Any; 4 | use std::cell::UnsafeCell; 5 | use std::ops::Deref; 6 | use std::rc::Rc; 7 | use std::sync::atomic::Ordering; 8 | use std::sync::{Arc, Mutex, MutexGuard}; 9 | use uuid::Uuid; 10 | 11 | #[derive(Copy, Clone, PartialEq)] 12 | pub enum MemoryModel { 13 | ARM, 14 | Intel, 15 | } 16 | 17 | #[derive(Copy, Clone, PartialEq, Debug)] 18 | pub enum MemoryOpType { 19 | Get, 20 | Set, 21 | Fence, 22 | } 23 | 24 | thread_local! 
{ 25 | pub static MODEL: Mutex> = const { Mutex::new(None) }; 26 | } 27 | 28 | pub fn get_model() -> Option { 29 | MODEL.with(|v| *v.lock().unwrap()) 30 | } 31 | 32 | pub fn set_model(model: MemoryModel) { 33 | MODEL.with(|v| *v.lock().unwrap() = Some(model)) 34 | } 35 | 36 | pub struct MemoryOp { 37 | pub op: MemoryOpType, 38 | thread: usize, 39 | location: Uuid, 40 | pub func: Box, 41 | } 42 | 43 | impl Op for MemoryOp { 44 | fn blocks(&self, other: &(dyn Op + Send)) -> bool { 45 | if let Some(other) = other.as_any().downcast_ref::() { 46 | self.blocks(other, get_model().unwrap()) 47 | } else { 48 | false 49 | } 50 | } 51 | 52 | fn as_any(&self) -> &dyn Any { 53 | self 54 | } 55 | 56 | fn execute(&self) { 57 | (self.func)() 58 | } 59 | } 60 | 61 | impl MemoryOp { 62 | pub fn blocks(&self, other: &MemoryOp, model: MemoryModel) -> bool { 63 | let standard_op = |a| a == MemoryOpType::Set || a == MemoryOpType::Get; 64 | 65 | if self.thread != other.thread { 66 | return false; 67 | } 68 | 69 | if other.location == self.location { 70 | return true; 71 | } 72 | 73 | if model == MemoryModel::ARM && standard_op(self.op) && standard_op(other.op) { 74 | return false; 75 | } 76 | 77 | #[allow(clippy::match_like_matches_macro)] 78 | match (&self.op, &other.op) { 79 | (MemoryOpType::Set, MemoryOpType::Get) => false, 80 | _ => true, 81 | } 82 | } 83 | } 84 | 85 | pub struct PendingResult { 86 | result: Arc>>, 87 | value: Rc>, 88 | sleep_wait: Arc, 89 | } 90 | 91 | pub struct Atomic { 92 | value: Arc>, 93 | id: Uuid, 94 | } 95 | 96 | pub struct SharedMemory { 97 | arr: Vec>, 98 | } 99 | 100 | impl SharedMemory { 101 | pub fn new(len: usize) -> Self { 102 | SharedMemory { 103 | arr: (0..len).map(|_| Atomic::new(T::default())).collect(), 104 | } 105 | } 106 | 107 | pub fn get(&self, ind: usize) -> PendingResult { 108 | self.arr[ind].get() 109 | } 110 | 111 | pub fn set(&self, ind: usize, val: T) -> PendingResult { 112 | self.arr[ind].set(val) 113 | } 114 | } 115 | 116 | impl Deref 
for PendingResult { 117 | type Target = T; 118 | 119 | fn deref(&self) -> &T { 120 | let mut taken = false; 121 | 122 | while self.result.lock().unwrap().is_none() { 123 | // We can't park if the value exists; this will cause race conditions 124 | if !taken { 125 | with_system(|s| s.parked.fetch_add(1, Ordering::SeqCst)); 126 | self.sleep_wait.wait(); 127 | taken = true; 128 | } 129 | } 130 | 131 | if taken { 132 | with_system(|s| s.parked.fetch_sub(1, Ordering::SeqCst)); 133 | } 134 | 135 | let v = self.result.lock().unwrap(); 136 | 137 | unsafe { 138 | *self.value.get() = v.unwrap(); 139 | &*self.value.get() 140 | } 141 | } 142 | } 143 | 144 | impl Atomic { 145 | pub fn new(value: T) -> Self { 146 | Self { 147 | id: Uuid::new_v4(), 148 | value: Arc::new(Mutex::new(value)), 149 | } 150 | } 151 | 152 | pub fn queue_op(id: Uuid, op_type: MemoryOpType, op: F) { 153 | let op = { 154 | Operation::build(MemoryOp { 155 | op: op_type, 156 | location: id, 157 | thread: with_system(|s| s.thread), 158 | func: Box::new(op), 159 | }) 160 | }; 161 | 162 | with_system(move |s| s.chan.send(op).unwrap()); 163 | } 164 | 165 | pub fn fence() { 166 | Self::queue_op(Uuid::new_v4(), MemoryOpType::Fence, move || {}); 167 | } 168 | 169 | pub fn self_op) -> T + Send + 'static>( 170 | &self, 171 | op: MemoryOpType, 172 | f: F, 173 | ) -> PendingResult { 174 | let value = Rc::new(UnsafeCell::new(T::default())); 175 | 176 | let vclone = self.value.clone(); 177 | let result = Arc::new(Mutex::new(None)); 178 | let sleep_wait = Arc::new(SleepWait::default()); 179 | 180 | { 181 | let value_slot = result.clone(); 182 | let sleep_wait = sleep_wait.clone(); 183 | 184 | Self::queue_op(self.id, op, move || { 185 | let v = vclone.lock().unwrap(); 186 | 187 | *value_slot.lock().unwrap() = Some(f(v)); 188 | sleep_wait.signal(); 189 | }); 190 | } 191 | 192 | PendingResult { 193 | value, 194 | result, 195 | sleep_wait, 196 | } 197 | } 198 | 199 | pub fn get(&self) -> PendingResult { 200 | 
self.self_op(MemoryOpType::Get, move |v| *v) 201 | } 202 | 203 | pub fn set(&self, val: T) -> PendingResult { 204 | self.self_op(MemoryOpType::Set, move |mut v| { 205 | *v = val; 206 | val 207 | }) 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/temper/memory/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod core; 2 | -------------------------------------------------------------------------------- /src/temper/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod memory; 2 | pub mod system; 3 | pub mod utils; 4 | -------------------------------------------------------------------------------- /src/temper/system/core.rs: -------------------------------------------------------------------------------- 1 | use rand::{RngCore, SeedableRng}; 2 | use rand_chacha::ChaCha8Rng; 3 | use std::any::Any; 4 | use std::sync::atomic::AtomicUsize; 5 | use std::sync::atomic::Ordering::SeqCst; 6 | use std::sync::mpsc::{channel, Sender}; 7 | use std::sync::Arc; 8 | use std::sync::Mutex; 9 | use std::thread; 10 | 11 | #[derive(Clone)] 12 | pub struct SystemInfo { 13 | pub thread: usize, 14 | pub chan: Sender, 15 | pub parked: Arc, 16 | } 17 | 18 | thread_local! 
{ 19 | pub static SYSTEM: Mutex> = const { Mutex::new(None) }; 20 | } 21 | 22 | pub fn with_system T>(f: F) -> T { 23 | SYSTEM.with(|a| f(a.lock().unwrap().as_ref().unwrap())) 24 | } 25 | 26 | pub trait Op { 27 | fn blocks(&self, other: &(dyn Op + Send)) -> bool; 28 | fn as_any(&self) -> &dyn Any; 29 | fn execute(&self); 30 | } 31 | 32 | pub struct Operation { 33 | pub op: Box, 34 | } 35 | 36 | impl Operation { 37 | pub fn build(op: T) -> Operation { 38 | Operation { op: Box::new(op) } 39 | } 40 | 41 | pub fn execute(&self) { 42 | self.op.execute(); 43 | } 44 | } 45 | 46 | #[derive(Default)] 47 | pub struct System {} 48 | 49 | impl System { 50 | pub fn new() -> Self { 51 | Self {} 52 | } 53 | 54 | pub fn get_op(&self, ops: &mut Vec, ind: usize) -> Option { 55 | if ops.is_empty() { 56 | return None; 57 | } 58 | 59 | let ind = ind % ops.len(); 60 | 61 | for x in 0..ind { 62 | if ops[x].op.blocks(ops[ind].op.as_ref()) { 63 | return None; 64 | } 65 | } 66 | 67 | Some(ops.remove(ind)) 68 | } 69 | 70 | pub fn run(self, mut fns: Vec>) { 71 | let s = std::time::UNIX_EPOCH.elapsed().unwrap().as_nanos() as u64; 72 | let mut rng = ChaCha8Rng::seed_from_u64(s); 73 | 74 | //println!("Executing with Seed {}", s); 75 | let mut handles = vec![]; 76 | let finished = Arc::new(AtomicUsize::new(0)); 77 | 78 | let (sender, receiver) = channel(); 79 | 80 | let mut sys_info = SystemInfo { 81 | chan: sender, 82 | thread: 0, 83 | parked: Arc::new(AtomicUsize::new(0)), 84 | }; 85 | 86 | for mut f in fns.drain(..) 
{ 87 | let finished = finished.clone(); 88 | 89 | sys_info.thread += 1; 90 | let sys_info = sys_info.clone(); 91 | 92 | handles.push(thread::spawn(move || { 93 | SYSTEM.with(|v| *v.lock().unwrap() = Some(sys_info)); 94 | f(); 95 | finished.fetch_add(1, SeqCst); 96 | })); 97 | } 98 | 99 | let mut operations = vec![]; 100 | 101 | while finished.load(SeqCst) < handles.len() { 102 | while let Ok(v) = receiver.try_recv() { 103 | operations.push(v); 104 | } 105 | 106 | let finished_count = finished.load(SeqCst); 107 | let parked_count = sys_info.parked.load(SeqCst); 108 | 109 | if finished_count + parked_count == handles.len() { 110 | if let Some(o) = self.get_op(&mut operations, rng.next_u64() as usize) { 111 | o.execute(); 112 | } 113 | } 114 | } 115 | 116 | for h in handles { 117 | h.join().unwrap() 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/temper/system/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod core; 2 | -------------------------------------------------------------------------------- /src/temper/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod sleepwait; 2 | -------------------------------------------------------------------------------- /src/temper/utils/sleepwait.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Condvar, Mutex}; 2 | 3 | #[derive(Default)] 4 | #[allow(clippy::mutex_atomic)] 5 | pub struct SleepWait { 6 | ready: Mutex, 7 | signal: Condvar, 8 | } 9 | 10 | #[allow(clippy::mutex_atomic)] 11 | impl SleepWait { 12 | pub fn signal(&self) { 13 | *self.ready.lock().unwrap() = true; 14 | self.signal.notify_all(); 15 | } 16 | pub fn wait(&self) { 17 | let mut ready = self.ready.lock().unwrap(); 18 | 19 | while !*ready { 20 | ready = self.signal.wait(ready).unwrap(); 21 | } 22 | } 23 | } 24 | 25 | #[cfg(test)] 26 | mod 
test { 27 | use crate::temper::utils::sleepwait::SleepWait; 28 | use std::sync::atomic::{AtomicUsize, Ordering}; 29 | use std::sync::Arc; 30 | use std::thread; 31 | 32 | #[test] 33 | pub fn test_wait() { 34 | let sw = Arc::new(SleepWait::default()); 35 | let prog = Arc::new(AtomicUsize::new(0)); 36 | 37 | { 38 | let sw = sw.clone(); 39 | let prog = prog.clone(); 40 | thread::spawn(move || { 41 | prog.store(1, Ordering::SeqCst); 42 | sw.wait(); 43 | prog.store(2, Ordering::SeqCst); 44 | sw.wait(); 45 | }); 46 | }; 47 | 48 | // These sleeps are lazy, and could cause flake. 49 | // The irony of building a system to test concurrency bugs and writing code like this 50 | // is not lost on me. 51 | std::thread::sleep(std::time::Duration::from_millis(20)); 52 | assert_eq!(prog.load(Ordering::SeqCst), 1); 53 | sw.signal(); 54 | std::thread::sleep(std::time::Duration::from_millis(20)); 55 | assert_eq!(prog.load(Ordering::SeqCst), 2); 56 | sw.signal(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /tests/atom.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU32, Ordering}; 2 | 3 | // Assembly tests for Godbolt 4 | 5 | // --target=arm-unknown-linux-gnueabihf 6 | 7 | pub fn store_release(x: &AtomicU32) { 8 | x.store(1, Ordering::Release) 9 | } 10 | 11 | pub fn store_seq(x: &AtomicU32) { 12 | x.store(1, Ordering::SeqCst) 13 | } 14 | 15 | pub fn load_release(x: &AtomicU32) -> u32 { 16 | x.load(Ordering::Acquire) 17 | } 18 | 19 | pub fn load_seqcst(x: &AtomicU32) -> u32 { 20 | x.load(Ordering::SeqCst) 21 | } 22 | 23 | pub fn twostore_release(x: &AtomicU32, y: &AtomicU32) { 24 | x.store(1, Ordering::Relaxed); 25 | y.store(1, Ordering::Release) 26 | } 27 | 28 | pub fn twostore_seq(x: &AtomicU32, y: &AtomicU32) { 29 | x.store(1, Ordering::SeqCst); 30 | y.store(1, Ordering::SeqCst) 31 | } 32 | 33 | pub fn twoload_set_seq(x: &AtomicU32, _y: &AtomicU32) { 34 | 
x.store(1, Ordering::SeqCst) 35 | } 36 | 37 | pub fn twoload_set_release(_x: &AtomicU32, y: &AtomicU32) { 38 | y.store(1, Ordering::Release) 39 | } 40 | 41 | pub fn twoload_release(x: &AtomicU32, y: &AtomicU32) -> u32 { 42 | while x.load(Ordering::Acquire) == 0 {} 43 | y.load(Ordering::Acquire) 44 | } 45 | 46 | pub fn twoload_seqcst(x: &AtomicU32, y: &AtomicU32) -> u32 { 47 | while x.load(Ordering::SeqCst) == 0 {} 48 | y.load(Ordering::SeqCst) 49 | } 50 | 51 | pub fn barrier_loadstore(x: &AtomicU32, y: &AtomicU32) { 52 | x.store(1, Ordering::Relaxed); 53 | std::sync::atomic::fence(Ordering::AcqRel); 54 | y.store(1, Ordering::Relaxed) 55 | } 56 | 57 | pub fn barrier_seq(x: &AtomicU32, y: &AtomicU32) { 58 | x.store(1, Ordering::Relaxed); 59 | std::sync::atomic::fence(Ordering::SeqCst); 60 | y.store(1, Ordering::Relaxed) 61 | } 62 | 63 | pub fn two_acqrel(x: &AtomicU32, y: &AtomicU32) { 64 | x.load(Ordering::Acquire); 65 | y.store(1, Ordering::Release) 66 | } 67 | 68 | pub fn two_fence(x: &AtomicU32, y: &AtomicU32) { 69 | std::sync::atomic::fence(Ordering::AcqRel); 70 | x.load(Ordering::Relaxed); 71 | y.store(1, Ordering::Relaxed); 72 | std::sync::atomic::fence(Ordering::AcqRel); 73 | } 74 | 75 | pub fn two_fence_seq(x: &AtomicU32, y: &AtomicU32) { 76 | std::sync::atomic::fence(Ordering::SeqCst); 77 | x.load(Ordering::Relaxed); 78 | y.store(1, Ordering::Relaxed); 79 | std::sync::atomic::fence(Ordering::SeqCst); 80 | } 81 | 82 | pub fn two_load_acq(x: &AtomicU32, y: &AtomicU32) { 83 | x.load(Ordering::Acquire); 84 | y.load(Ordering::Acquire); 85 | } 86 | pub fn two_load_seq(x: &AtomicU32, y: &AtomicU32) { 87 | x.load(Ordering::SeqCst); 88 | y.load(Ordering::SeqCst); 89 | } 90 | -------------------------------------------------------------------------------- /tests/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod utils; 2 | -------------------------------------------------------------------------------- 
/tests/common/utils.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::fmt::Debug; 3 | use std::hash::Hash; 4 | use std::sync::{Arc, Mutex}; 5 | use temper::temper::memory::core::{Atomic, SharedMemory}; 6 | 7 | /* Default test environment provides for four variables */ 8 | 9 | #[derive(Clone)] 10 | #[allow(unused)] 11 | pub struct Test { 12 | pub a: Arc>, 13 | pub b: Arc>, 14 | pub c: Arc>, 15 | pub d: Arc>, 16 | 17 | pub arr: Arc>, 18 | 19 | pub results: Arc>>, 20 | } 21 | 22 | impl Default for Test { 23 | fn default() -> Self { 24 | Test { 25 | a: Arc::new(Atomic::new(0usize)), 26 | b: Arc::new(Atomic::new(0usize)), 27 | c: Arc::new(Atomic::new(0usize)), 28 | d: Arc::new(Atomic::new(0usize)), 29 | arr: Arc::new(SharedMemory::new(1024)), 30 | results: Arc::new(Mutex::new(vec![])), 31 | } 32 | } 33 | } 34 | 35 | impl Test { 36 | pub fn report_result(&self, index: usize, result: usize) { 37 | let mut res = self.results.lock().unwrap(); 38 | while res.len() <= index { 39 | res.push(0); 40 | } 41 | res[index] = result; 42 | } 43 | } 44 | 45 | fn check_set(hs: &HashSet, arr: &Vec) -> bool { 46 | let mut ns = HashSet::new(); 47 | for x in arr { 48 | ns.insert(x.clone()); 49 | } 50 | ns == *hs 51 | } 52 | 53 | pub fn run_until T>( 54 | mut f: F, 55 | expected: Vec, 56 | ) -> bool { 57 | let mut res = HashSet::new(); 58 | 59 | for x in 0..10_000 { 60 | res.insert(f()); 61 | 62 | if check_set(&res, &expected) && x > 100 { 63 | return true; 64 | } 65 | 66 | if res.len() > expected.len() { 67 | println!("Failed {:?} {:?}", res, expected); 68 | return false; 69 | } 70 | } 71 | 72 | println!("Failed {:?} {:?}", res, expected); 73 | false 74 | } 75 | -------------------------------------------------------------------------------- /tests/memory.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::ptr_arg)] 2 | 3 | mod common; 4 | 5 | use 
common::utils::{run_until, Test}; 6 | 7 | use temper::temper::memory::core::{set_model, Atomic, MemoryModel}; 8 | use temper::temper::system::core::System; 9 | 10 | /* From Intel's memory model documentation 11 | 12 | Thread 1: 13 | a = 1 14 | print(b) 15 | 16 | Thread 2: 17 | b = 1 18 | print(a) 19 | 20 | Can print any of (0,0) (0,1) (1,0) (1,1) 21 | If a memfence is present, (0,0) is not a valid result 22 | */ 23 | 24 | fn test_a(memfence: bool) -> Vec { 25 | set_model(MemoryModel::Intel); 26 | let s = System::new(); 27 | 28 | let test = Test::default(); 29 | 30 | let fa = { 31 | let test = test.clone(); 32 | move || { 33 | test.b.set(1); 34 | if memfence { 35 | Atomic::<()>::fence() 36 | } 37 | let res = *test.a.get(); 38 | test.report_result(0, res); 39 | } 40 | }; 41 | 42 | let fb = { 43 | let test = test.clone(); 44 | move || { 45 | test.a.set(1); 46 | if memfence { 47 | Atomic::<()>::fence() 48 | } 49 | let res = *test.b.get(); 50 | test.report_result(1, res); 51 | } 52 | }; 53 | 54 | let fns: Vec> = vec![Box::new(fa), Box::new(fb)]; 55 | 56 | s.run(fns); 57 | 58 | let tr = test.results.lock().unwrap(); 59 | (*tr).clone() 60 | } 61 | 62 | #[test] 63 | fn test_a_runner() { 64 | assert!(run_until( 65 | || test_a(false), 66 | vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]], 67 | )); 68 | 69 | assert!(run_until( 70 | || test_a(true), 71 | vec![vec![0, 1], vec![1, 0], vec![1, 1]], 72 | )); 73 | } 74 | 75 | fn test_queue(iters: usize, model: MemoryModel) -> Vec { 76 | //let start = Utc::now(); 77 | set_model(model); 78 | let system = System::new(); 79 | 80 | let test = Test::default(); 81 | 82 | let fa = { 83 | let test = test.clone(); 84 | move || { 85 | for x in 0..iters { 86 | let i = *test.a.get(); 87 | test.arr.set(i, x); 88 | 89 | // ARM requires fence here 90 | if model == MemoryModel::ARM { 91 | Atomic::<()>::fence(); 92 | } 93 | 94 | test.a.set(i + 1); 95 | } 96 | } 97 | }; 98 | 99 | let fb = { 100 | let test = test.clone(); 101 | move || { 102 | let 
mut o = 0; 103 | for _ in 0..iters { 104 | let res = loop { 105 | let a = test.a.get(); 106 | let b = test.b.get(); 107 | 108 | if *a > *b { 109 | test.b.set(*b + 1); 110 | break *test.arr.get(*b); 111 | } 112 | }; 113 | o += res; 114 | } 115 | 116 | test.report_result(0, o); 117 | } 118 | }; 119 | 120 | let fns: Vec> = vec![Box::new(fa), Box::new(fb)]; 121 | 122 | system.run(fns); 123 | 124 | //println!("Elapsed {}", (Utc::now() - start)); 125 | 126 | let tr = test.results.lock().unwrap(); 127 | (*tr).clone() 128 | } 129 | 130 | #[test] 131 | fn test_queue_runner() { 132 | let size = 20; 133 | let expected = (0..size).sum(); 134 | assert!(run_until( 135 | || test_queue(size, MemoryModel::ARM), 136 | vec![vec![expected]] 137 | )); 138 | assert!(run_until( 139 | || test_queue(size, MemoryModel::Intel), 140 | vec![vec![expected]] 141 | )); 142 | } --------------------------------------------------------------------------------