├── .gitignore
├── .travis.yml
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
    ├── fallback.rs
    ├── lib.rs
    └── ops.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | Cargo.lock
3 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: rust
2 | sudo: false
3 | 
4 | rust:
5 |   - nightly
6 |   - beta
7 |   - stable
8 |   - 1.45.0
9 | 
10 | script:
11 |   - cargo build
12 |   - cargo test
13 |   - cargo doc
14 |   - if [ $TRAVIS_RUST_VERSION = nightly ]; then rustup target add aarch64-unknown-none; fi
15 |   - if [ $TRAVIS_RUST_VERSION = nightly ]; then RUSTFLAGS="-Zcrate-attr=feature(integer_atomics)" cargo check --target=aarch64-unknown-none; fi
16 | 
17 | notifications:
18 |   email: false
19 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "atomic"
3 | version = "0.6.0"
4 | edition = "2018"
5 | authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
6 | description = "Generic Atomic<T> wrapper type"
7 | license = "Apache-2.0/MIT"
8 | repository = "https://github.com/Amanieu/atomic-rs"
9 | readme = "README.md"
10 | keywords = ["atomic", "no_std"]
11 | 
12 | [features]
13 | default = ["fallback"]
14 | std = []
15 | fallback = []
16 | nightly = []
17 | 
18 | [dependencies]
19 | bytemuck = "1.13.1"
20 | 
21 | [dev-dependencies]
22 | bytemuck = { version = "1.13.1", features = ["derive"] }
23 | 
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016 The Rust Project Developers
2 | 
3 | Permission is hereby granted, free of charge, to any
4 | person obtaining a copy of this software and associated
5 | documentation files (the "Software"), to deal in the
6 | Software without restriction, including without
7 | limitation the rights to use, copy, modify, merge,
8 | publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software
10 | is furnished to do so, subject to the following
11 | conditions:
12 | 
13 | The above copyright notice and this permission notice
14 | shall be included in all copies or substantial portions
15 | of the Software.
16 | 
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 | DEALINGS IN THE SOFTWARE.
26 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Generic `Atomic<T>` for Rust
2 | ============================
3 | 
4 | [![Build Status](https://travis-ci.org/Amanieu/atomic-rs.svg?branch=master)](https://travis-ci.org/Amanieu/atomic-rs) [![Crates.io](https://img.shields.io/crates/v/atomic.svg)](https://crates.io/crates/atomic)
5 | 
6 | A Rust library which provides a generic `Atomic<T>` type for all `T: NoUninit` types, unlike the standard library which only provides a few fixed atomic types (`AtomicBool`, `AtomicIsize`, `AtomicUsize`, `AtomicPtr`). The `NoUninit` bound is from the [bytemuck] crate, and indicates that a type has no internal padding bytes. You will need to derive or implement this trait for all types used with `Atomic<T>`.
7 | 
8 | This library will use native atomic instructions if possible, and will otherwise fall back to a lock-based mechanism. You can use the `Atomic::<T>::is_lock_free()` function to check whether native atomic operations are supported for a given type. Note that a type must have a power-of-2 size and alignment in order to be used by native atomic instructions.
9 | 
10 | This crate uses `#![no_std]` and only depends on libcore.
11 | 
12 | [bytemuck]: https://docs.rs/bytemuck
13 | 
14 | [Documentation](https://docs.rs/atomic)
15 | 
16 | ## Usage
17 | 
18 | Add this to your `Cargo.toml`:
19 | 
20 | ```toml
21 | [dependencies]
22 | atomic = "0.6"
23 | ```
24 | 
25 | and this to your crate root:
26 | 
27 | ```rust
28 | extern crate atomic;
29 | ```
30 | 
31 | ## License
32 | 
33 | Licensed under either of
34 | 
35 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
36 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
37 | 
38 | at your option.
39 | 
40 | ### Contribution
41 | 
42 | Unless you explicitly state otherwise, any contribution intentionally submitted
43 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
44 | additional terms or conditions. 
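
Editor's note: to flesh out the Usage section above, here is a minimal end-to-end sketch. It is not part of the upstream README; it assumes `bytemuck` is also added as a dependency with its `derive` feature enabled (mirroring this crate's own dev-dependencies), and the `Pair` type and variable names are illustrative only.

```rust
use std::sync::Arc;
use std::thread;

use atomic::{Atomic, Ordering};
use bytemuck::NoUninit;

// A user-defined type with no padding bytes, so `NoUninit` can be derived.
// Its alignment is 1, so (like the crate's own `Foo(u8, u8)` test) it is
// expected to take the lock-based fallback rather than native atomics.
#[derive(Copy, Clone, PartialEq, Debug, NoUninit)]
#[repr(C)]
struct Pair(u8, u8);

fn main() {
    println!("lock-free: {}", Atomic::<Pair>::is_lock_free());

    let shared = Arc::new(Atomic::new(Pair(0, 0)));

    // Atomic<T> is Sync for T: Copy + Send, so it can be shared via Arc.
    let writer = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || shared.store(Pair(1, 2), Ordering::Release))
    };
    writer.join().unwrap();

    // compare_exchange compares the raw bytes of the stored value.
    assert_eq!(
        shared.compare_exchange(Pair(1, 2), Pair(3, 4), Ordering::AcqRel, Ordering::Acquire),
        Ok(Pair(1, 2))
    );
    assert_eq!(shared.load(Ordering::Acquire), Pair(3, 4));
}
```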
45 | -------------------------------------------------------------------------------- /src/fallback.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::cmp; 9 | use core::hint; 10 | use core::num::Wrapping; 11 | use core::ops; 12 | use core::ptr; 13 | use core::sync::atomic::{AtomicUsize, Ordering}; 14 | 15 | use bytemuck::NoUninit; 16 | 17 | // We use an AtomicUsize instead of an AtomicBool because it performs better 18 | // on architectures that don't have byte-sized atomics. 19 | // 20 | // We give each spinlock its own cache line to avoid false sharing. 21 | #[repr(align(64))] 22 | struct SpinLock(AtomicUsize); 23 | 24 | impl SpinLock { 25 | fn lock(&self) { 26 | while self 27 | .0 28 | .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed) 29 | .is_err() 30 | { 31 | while self.0.load(Ordering::Relaxed) != 0 { 32 | hint::spin_loop(); 33 | } 34 | } 35 | } 36 | 37 | fn unlock(&self) { 38 | self.0.store(0, Ordering::Release); 39 | } 40 | } 41 | 42 | // A big array of spinlocks which we use to guard atomic accesses. A spinlock is 43 | // chosen based on a hash of the address of the atomic object, which helps to 44 | // reduce contention compared to a single global lock. 45 | macro_rules! array { 46 | (@accum (0, $($_es:expr),*) -> ($($body:tt)*)) 47 | => {array!(@as_expr [$($body)*])}; 48 | (@accum (1, $($es:expr),*) -> ($($body:tt)*)) 49 | => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))}; 50 | (@accum (2, $($es:expr),*) -> ($($body:tt)*)) 51 | => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))}; 52 | (@accum (4, $($es:expr),*) -> ($($body:tt)*)) 53 | => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))}; 54 | (@accum (8, $($es:expr),*) -> ($($body:tt)*)) 55 | => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))}; 56 | (@accum (16, $($es:expr),*) -> ($($body:tt)*)) 57 | => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))}; 58 | (@accum (32, $($es:expr),*) -> ($($body:tt)*)) 59 | => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))}; 60 | (@accum (64, $($es:expr),*) -> ($($body:tt)*)) 61 | => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))}; 62 | 63 | (@as_expr $e:expr) => {$e}; 64 | 65 | [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) }; 66 | } 67 | static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64]; 68 | 69 | // Spinlock pointer hashing function from compiler-rt 70 | #[inline] 71 | fn lock_for_addr(addr: usize) -> &'static SpinLock { 72 | // Disregard the lowest 4 bits. We want all values that may be part of the 73 | // same memory operation to hash to the same value and therefore use the same 74 | // lock. 
75 | let mut hash = addr >> 4; 76 | // Use the next bits as the basis for the hash 77 | let low = hash & (SPINLOCKS.len() - 1); 78 | // Now use the high(er) set of bits to perturb the hash, so that we don't 79 | // get collisions from atomic fields in a single object 80 | hash >>= 16; 81 | hash ^= low; 82 | // Return a pointer to the lock to use 83 | &SPINLOCKS[hash & (SPINLOCKS.len() - 1)] 84 | } 85 | 86 | #[inline] 87 | fn lock(addr: usize) -> LockGuard { 88 | let lock = lock_for_addr(addr); 89 | lock.lock(); 90 | LockGuard(lock) 91 | } 92 | 93 | struct LockGuard(&'static SpinLock); 94 | impl Drop for LockGuard { 95 | #[inline] 96 | fn drop(&mut self) { 97 | self.0.unlock(); 98 | } 99 | } 100 | 101 | #[inline] 102 | pub unsafe fn atomic_load(dst: *mut T) -> T { 103 | let _l = lock(dst as usize); 104 | ptr::read(dst) 105 | } 106 | 107 | #[inline] 108 | pub unsafe fn atomic_store(dst: *mut T, val: T) { 109 | let _l = lock(dst as usize); 110 | ptr::write(dst, val); 111 | } 112 | 113 | #[inline] 114 | pub unsafe fn atomic_swap(dst: *mut T, val: T) -> T { 115 | let _l = lock(dst as usize); 116 | ptr::replace(dst, val) 117 | } 118 | 119 | #[inline] 120 | pub unsafe fn atomic_compare_exchange( 121 | dst: *mut T, 122 | current: T, 123 | new: T, 124 | ) -> Result { 125 | let _l = lock(dst as usize); 126 | let result = ptr::read(dst); 127 | // compare_exchange compares with memcmp instead of Eq 128 | let a = bytemuck::bytes_of(&result); 129 | let b = bytemuck::bytes_of(¤t); 130 | if a == b { 131 | ptr::write(dst, new); 132 | Ok(result) 133 | } else { 134 | Err(result) 135 | } 136 | } 137 | 138 | #[inline] 139 | pub unsafe fn atomic_add(dst: *mut T, val: T) -> T 140 | where 141 | Wrapping: ops::Add>, 142 | { 143 | let _l = lock(dst as usize); 144 | let result = ptr::read(dst); 145 | ptr::write(dst, (Wrapping(result) + Wrapping(val)).0); 146 | result 147 | } 148 | 149 | #[inline] 150 | pub unsafe fn atomic_sub(dst: *mut T, val: T) -> T 151 | where 152 | Wrapping: ops::Sub>, 153 | { 154 | let _l = lock(dst as usize); 155 | let result = ptr::read(dst); 156 | ptr::write(dst, (Wrapping(result) - Wrapping(val)).0); 157 | result 158 | } 159 | 160 | #[inline] 161 | pub unsafe fn atomic_and>(dst: *mut T, val: T) -> T { 162 | let _l = lock(dst as usize); 163 | let result = ptr::read(dst); 164 | ptr::write(dst, result & val); 165 | result 166 | } 167 | 168 | #[inline] 169 | pub unsafe fn atomic_or>(dst: *mut T, val: T) -> T { 170 | let _l = lock(dst as usize); 171 | let result = ptr::read(dst); 172 | ptr::write(dst, result | val); 173 | result 174 | } 175 | 176 | #[inline] 177 | pub unsafe fn atomic_xor>(dst: *mut T, val: T) -> T { 178 | let _l = lock(dst as usize); 179 | let result = ptr::read(dst); 180 | ptr::write(dst, result ^ val); 181 | result 182 | } 183 | 184 | #[inline] 185 | pub unsafe fn atomic_min(dst: *mut T, val: T) -> T { 186 | let _l = lock(dst as usize); 187 | let result = ptr::read(dst); 188 | ptr::write(dst, cmp::min(result, val)); 189 | result 190 | } 191 | 192 | #[inline] 193 | pub unsafe fn atomic_max(dst: *mut T, val: T) -> T { 194 | let _l = lock(dst as usize); 195 | let result = ptr::read(dst); 196 | ptr::write(dst, cmp::max(result, val)); 197 | result 198 | } 199 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Generic `Atomic` wrapper type 9 | //! 10 | //! Atomic types provide primitive shared-memory communication between 11 | //! threads, and are the building blocks of other concurrent types. 12 | //! 13 | //! This library defines a generic atomic wrapper type `Atomic` for all 14 | //! `T: NoUninit` types. 15 | //! Atomic types present operations that, when used correctly, synchronize 16 | //! updates between threads. 17 | //! 18 | //! The `NoUninit` bound is from the [bytemuck] crate, and indicates that a 19 | //! type has no internal padding bytes. You will need to derive or implement 20 | //! this trait for all types used with `Atomic`. 21 | //! 22 | //! Each method takes an `Ordering` which represents the strength of 23 | //! the memory barrier for that operation. These orderings are the 24 | //! same as [LLVM atomic orderings][1]. 25 | //! 26 | //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations 27 | //! 28 | //! Atomic variables are safe to share between threads (they implement `Sync`) 29 | //! but they do not themselves provide the mechanism for sharing. The most 30 | //! common way to share an atomic variable is to put it into an `Arc` (an 31 | //! atomically-reference-counted shared pointer). 32 | //! 33 | //! Most atomic types may be stored in static variables, initialized using 34 | //! the `const fn` constructors. Atomic statics are often used for lazy global 35 | //! initialization. 36 | //! 37 | //! [bytemuck]: https://docs.rs/bytemuck 38 | 39 | #![warn(missing_docs)] 40 | #![warn(rust_2018_idioms)] 41 | #![no_std] 42 | #![cfg_attr(feature = "nightly", feature(integer_atomics))] 43 | 44 | #[cfg(any(test, feature = "std"))] 45 | #[macro_use] 46 | extern crate std; 47 | 48 | use core::mem::MaybeUninit; 49 | // Re-export some useful definitions from libcore 50 | pub use core::sync::atomic::{fence, Ordering}; 51 | 52 | use core::cell::UnsafeCell; 53 | use core::fmt; 54 | 55 | #[cfg(feature = "std")] 56 | use std::panic::RefUnwindSafe; 57 | 58 | use bytemuck::NoUninit; 59 | 60 | #[cfg(feature = "fallback")] 61 | mod fallback; 62 | mod ops; 63 | 64 | /// A generic atomic wrapper type which allows an object to be safely shared 65 | /// between threads. 66 | #[repr(transparent)] 67 | pub struct Atomic { 68 | // The MaybeUninit is here to work around rust-lang/rust#87341. 69 | v: UnsafeCell>, 70 | } 71 | 72 | // Atomic is only Sync if T is Send 73 | unsafe impl Sync for Atomic {} 74 | 75 | // Given that atomicity is guaranteed, Atomic is RefUnwindSafe if T is 76 | // 77 | // This is trivially correct for native lock-free atomic types. For those whose 78 | // atomicity is emulated using a spinlock, it is still correct because the 79 | // `Atomic` API does not allow doing any panic-inducing operation after writing 80 | // to the target object. 81 | #[cfg(feature = "std")] 82 | impl RefUnwindSafe for Atomic {} 83 | 84 | impl Default for Atomic { 85 | #[inline] 86 | fn default() -> Self { 87 | Self::new(Default::default()) 88 | } 89 | } 90 | 91 | impl fmt::Debug for Atomic { 92 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 93 | f.debug_tuple("Atomic") 94 | .field(&self.load(Ordering::SeqCst)) 95 | .finish() 96 | } 97 | } 98 | 99 | impl Atomic { 100 | /// Creates a new `Atomic`. 
101 | #[inline] 102 | pub const fn new(v: T) -> Atomic { 103 | Atomic { 104 | v: UnsafeCell::new(MaybeUninit::new(v)), 105 | } 106 | } 107 | 108 | /// Checks if `Atomic` objects of this type are lock-free. 109 | /// 110 | /// If an `Atomic` is not lock-free then it may be implemented using locks 111 | /// internally, which makes it unsuitable for some situations (such as 112 | /// communicating with a signal handler). 113 | #[inline] 114 | pub const fn is_lock_free() -> bool { 115 | ops::atomic_is_lock_free::() 116 | } 117 | } 118 | 119 | impl Atomic { 120 | #[inline] 121 | fn inner_ptr(&self) -> *mut T { 122 | self.v.get() as *mut T 123 | } 124 | 125 | /// Returns a mutable reference to the underlying type. 126 | /// 127 | /// This is safe because the mutable reference guarantees that no other threads are 128 | /// concurrently accessing the atomic data. 129 | #[inline] 130 | pub fn get_mut(&mut self) -> &mut T { 131 | unsafe { &mut *self.inner_ptr() } 132 | } 133 | 134 | /// Consumes the atomic and returns the contained value. 135 | /// 136 | /// This is safe because passing `self` by value guarantees that no other threads are 137 | /// concurrently accessing the atomic data. 138 | #[inline] 139 | pub fn into_inner(self) -> T { 140 | unsafe { self.v.into_inner().assume_init() } 141 | } 142 | 143 | /// Loads a value from the `Atomic`. 144 | /// 145 | /// `load` takes an `Ordering` argument which describes the memory ordering 146 | /// of this operation. 147 | /// 148 | /// # Panics 149 | /// 150 | /// Panics if `order` is `Release` or `AcqRel`. 151 | #[inline] 152 | pub fn load(&self, order: Ordering) -> T { 153 | unsafe { ops::atomic_load(self.inner_ptr(), order) } 154 | } 155 | 156 | /// Stores a value into the `Atomic`. 157 | /// 158 | /// `store` takes an `Ordering` argument which describes the memory ordering 159 | /// of this operation. 160 | /// 161 | /// # Panics 162 | /// 163 | /// Panics if `order` is `Acquire` or `AcqRel`. 164 | #[inline] 165 | pub fn store(&self, val: T, order: Ordering) { 166 | unsafe { 167 | ops::atomic_store(self.inner_ptr(), val, order); 168 | } 169 | } 170 | 171 | /// Stores a value into the `Atomic`, returning the old value. 172 | /// 173 | /// `swap` takes an `Ordering` argument which describes the memory ordering 174 | /// of this operation. 175 | #[inline] 176 | pub fn swap(&self, val: T, order: Ordering) -> T { 177 | unsafe { ops::atomic_swap(self.inner_ptr(), val, order) } 178 | } 179 | 180 | /// Stores a value into the `Atomic` if the current value is the same as the 181 | /// `current` value. 182 | /// 183 | /// The return value is a result indicating whether the new value was 184 | /// written and containing the previous value. On success this value is 185 | /// guaranteed to be equal to `new`. 186 | /// 187 | /// `compare_exchange` takes two `Ordering` arguments to describe the memory 188 | /// ordering of this operation. The first describes the required ordering if 189 | /// the operation succeeds while the second describes the required ordering 190 | /// when the operation fails. The failure ordering can't be `Release` or 191 | /// `AcqRel` and must be equivalent or weaker than the success ordering. 
192 | #[inline] 193 | pub fn compare_exchange( 194 | &self, 195 | current: T, 196 | new: T, 197 | success: Ordering, 198 | failure: Ordering, 199 | ) -> Result { 200 | unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) } 201 | } 202 | 203 | /// Stores a value into the `Atomic` if the current value is the same as the 204 | /// `current` value. 205 | /// 206 | /// Unlike `compare_exchange`, this function is allowed to spuriously fail 207 | /// even when the comparison succeeds, which can result in more efficient 208 | /// code on some platforms. The return value is a result indicating whether 209 | /// the new value was written and containing the previous value. 210 | /// 211 | /// `compare_exchange` takes two `Ordering` arguments to describe the memory 212 | /// ordering of this operation. The first describes the required ordering if 213 | /// the operation succeeds while the second describes the required ordering 214 | /// when the operation fails. The failure ordering can't be `Release` or 215 | /// `AcqRel` and must be equivalent or weaker than the success ordering. 216 | /// success ordering. 217 | #[inline] 218 | pub fn compare_exchange_weak( 219 | &self, 220 | current: T, 221 | new: T, 222 | success: Ordering, 223 | failure: Ordering, 224 | ) -> Result { 225 | unsafe { 226 | ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure) 227 | } 228 | } 229 | 230 | /// Fetches the value, and applies a function to it that returns an optional 231 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else 232 | /// `Err(previous_value)`. 233 | /// 234 | /// Note: This may call the function multiple times if the value has been changed from other threads in 235 | /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied 236 | /// only once to the stored value. 237 | /// 238 | /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. 239 | /// The first describes the required ordering for when the operation finally succeeds while the second 240 | /// describes the required ordering for loads. These correspond to the success and failure orderings of 241 | /// [`compare_exchange`] respectively. 242 | /// 243 | /// Using [`Acquire`] as success ordering makes the store part 244 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load 245 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] 246 | /// and must be equivalent to or weaker than the success ordering. 
247 | /// 248 | /// [`compare_exchange`]: #method.compare_exchange 249 | /// [`Ordering`]: enum.Ordering.html 250 | /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed 251 | /// [`Release`]: enum.Ordering.html#variant.Release 252 | /// [`Acquire`]: enum.Ordering.html#variant.Acquire 253 | /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst 254 | /// 255 | /// # Examples 256 | /// 257 | /// ```rust 258 | /// use atomic::{Atomic, Ordering}; 259 | /// 260 | /// let x = Atomic::new(7); 261 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7)); 262 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7)); 263 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8)); 264 | /// assert_eq!(x.load(Ordering::SeqCst), 9); 265 | /// ``` 266 | #[inline] 267 | pub fn fetch_update( 268 | &self, 269 | set_order: Ordering, 270 | fetch_order: Ordering, 271 | mut f: F, 272 | ) -> Result 273 | where 274 | F: FnMut(T) -> Option, 275 | { 276 | let mut prev = self.load(fetch_order); 277 | while let Some(next) = f(prev) { 278 | match self.compare_exchange_weak(prev, next, set_order, fetch_order) { 279 | x @ Ok(_) => return x, 280 | Err(next_prev) => prev = next_prev, 281 | } 282 | } 283 | Err(prev) 284 | } 285 | } 286 | 287 | impl Atomic { 288 | /// Logical "and" with a boolean value. 289 | /// 290 | /// Performs a logical "and" operation on the current value and the argument 291 | /// `val`, and sets the new value to the result. 292 | /// 293 | /// Returns the previous value. 294 | #[inline] 295 | pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { 296 | unsafe { ops::atomic_and(self.inner_ptr(), val, order) } 297 | } 298 | 299 | /// Logical "or" with a boolean value. 300 | /// 301 | /// Performs a logical "or" operation on the current value and the argument 302 | /// `val`, and sets the new value to the result. 303 | /// 304 | /// Returns the previous value. 305 | #[inline] 306 | pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { 307 | unsafe { ops::atomic_or(self.inner_ptr(), val, order) } 308 | } 309 | 310 | /// Logical "xor" with a boolean value. 311 | /// 312 | /// Performs a logical "xor" operation on the current value and the argument 313 | /// `val`, and sets the new value to the result. 314 | /// 315 | /// Returns the previous value. 316 | #[inline] 317 | pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { 318 | unsafe { ops::atomic_xor(self.inner_ptr(), val, order) } 319 | } 320 | } 321 | 322 | macro_rules! atomic_ops_common { 323 | ($($t:ty)*) => ($( 324 | impl Atomic<$t> { 325 | /// Add to the current value, returning the previous value. 326 | #[inline] 327 | pub fn fetch_add(&self, val: $t, order: Ordering) -> $t { 328 | unsafe { ops::atomic_add(self.inner_ptr(), val, order) } 329 | } 330 | 331 | /// Subtract from the current value, returning the previous value. 332 | #[inline] 333 | pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t { 334 | unsafe { ops::atomic_sub(self.inner_ptr(), val, order) } 335 | } 336 | 337 | /// Bitwise and with the current value, returning the previous value. 338 | #[inline] 339 | pub fn fetch_and(&self, val: $t, order: Ordering) -> $t { 340 | unsafe { ops::atomic_and(self.inner_ptr(), val, order) } 341 | } 342 | 343 | /// Bitwise or with the current value, returning the previous value. 
344 | #[inline] 345 | pub fn fetch_or(&self, val: $t, order: Ordering) -> $t { 346 | unsafe { ops::atomic_or(self.inner_ptr(), val, order) } 347 | } 348 | 349 | /// Bitwise xor with the current value, returning the previous value. 350 | #[inline] 351 | pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t { 352 | unsafe { ops::atomic_xor(self.inner_ptr(), val, order) } 353 | } 354 | } 355 | )*); 356 | } 357 | macro_rules! atomic_ops_signed { 358 | ($($t:ty)*) => ( 359 | atomic_ops_common!{ $($t)* } 360 | $( 361 | impl Atomic<$t> { 362 | /// Minimum with the current value. 363 | #[inline] 364 | pub fn fetch_min(&self, val: $t, order: Ordering) -> $t { 365 | unsafe { ops::atomic_min(self.inner_ptr(), val, order) } 366 | } 367 | 368 | /// Maximum with the current value. 369 | #[inline] 370 | pub fn fetch_max(&self, val: $t, order: Ordering) -> $t { 371 | unsafe { ops::atomic_max(self.inner_ptr(), val, order) } 372 | } 373 | } 374 | )* 375 | ); 376 | } 377 | macro_rules! atomic_ops_unsigned { 378 | ($($t:ty)*) => ( 379 | atomic_ops_common!{ $($t)* } 380 | $( 381 | impl Atomic<$t> { 382 | /// Minimum with the current value. 383 | #[inline] 384 | pub fn fetch_min(&self, val: $t, order: Ordering) -> $t { 385 | unsafe { ops::atomic_umin(self.inner_ptr(), val, order) } 386 | } 387 | 388 | /// Maximum with the current value. 389 | #[inline] 390 | pub fn fetch_max(&self, val: $t, order: Ordering) -> $t { 391 | unsafe { ops::atomic_umax(self.inner_ptr(), val, order) } 392 | } 393 | } 394 | )* 395 | ); 396 | } 397 | atomic_ops_signed! { i8 i16 i32 i64 isize i128 } 398 | atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 } 399 | 400 | #[cfg(test)] 401 | mod tests { 402 | use super::{Atomic, Ordering::*}; 403 | use bytemuck::NoUninit; 404 | use core::mem; 405 | 406 | #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)] 407 | #[repr(C)] 408 | struct Foo(u8, u8); 409 | #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)] 410 | #[repr(C)] 411 | struct Bar(u64, u64); 412 | #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)] 413 | #[repr(C)] 414 | struct Quux(u32); 415 | 416 | #[test] 417 | fn atomic_bool() { 418 | let a = Atomic::new(false); 419 | assert_eq!( 420 | Atomic::::is_lock_free(), 421 | cfg!(target_has_atomic = "8"), 422 | ); 423 | assert_eq!(format!("{:?}", a), "Atomic(false)"); 424 | assert_eq!(a.load(SeqCst), false); 425 | a.store(true, SeqCst); 426 | assert_eq!(a.swap(false, SeqCst), true); 427 | assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false)); 428 | assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false)); 429 | assert_eq!(a.fetch_and(false, SeqCst), true); 430 | assert_eq!(a.fetch_or(true, SeqCst), false); 431 | assert_eq!(a.fetch_xor(false, SeqCst), true); 432 | assert_eq!(a.load(SeqCst), true); 433 | } 434 | 435 | #[test] 436 | fn atomic_i8() { 437 | let a = Atomic::new(0i8); 438 | assert_eq!(Atomic::::is_lock_free(), cfg!(target_has_atomic = "8")); 439 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 440 | assert_eq!(a.load(SeqCst), 0); 441 | a.store(1, SeqCst); 442 | assert_eq!(a.swap(2, SeqCst), 1); 443 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 444 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 445 | assert_eq!(a.fetch_add(123, SeqCst), 3); 446 | // Make sure overflows are handled correctly 447 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 448 | assert_eq!(a.fetch_and(7, SeqCst), -74); 449 | assert_eq!(a.fetch_or(64, SeqCst), 6); 450 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 451 | 
assert_eq!(a.fetch_min(30, SeqCst), 71); 452 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 453 | assert_eq!(a.load(SeqCst), 30); 454 | } 455 | 456 | #[test] 457 | fn atomic_i16() { 458 | let a = Atomic::new(0i16); 459 | assert_eq!( 460 | Atomic::::is_lock_free(), 461 | cfg!(target_has_atomic = "16") 462 | ); 463 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 464 | assert_eq!(a.load(SeqCst), 0); 465 | a.store(1, SeqCst); 466 | assert_eq!(a.swap(2, SeqCst), 1); 467 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 468 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 469 | assert_eq!(a.fetch_add(123, SeqCst), 3); 470 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 471 | assert_eq!(a.fetch_and(7, SeqCst), 182); 472 | assert_eq!(a.fetch_or(64, SeqCst), 6); 473 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 474 | assert_eq!(a.fetch_min(30, SeqCst), 71); 475 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 476 | assert_eq!(a.load(SeqCst), 30); 477 | } 478 | 479 | #[test] 480 | fn atomic_i32() { 481 | let a = Atomic::new(0i32); 482 | assert_eq!( 483 | Atomic::::is_lock_free(), 484 | cfg!(target_has_atomic = "32") 485 | ); 486 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 487 | assert_eq!(a.load(SeqCst), 0); 488 | a.store(1, SeqCst); 489 | assert_eq!(a.swap(2, SeqCst), 1); 490 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 491 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 492 | assert_eq!(a.fetch_add(123, SeqCst), 3); 493 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 494 | assert_eq!(a.fetch_and(7, SeqCst), 182); 495 | assert_eq!(a.fetch_or(64, SeqCst), 6); 496 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 497 | assert_eq!(a.fetch_min(30, SeqCst), 71); 498 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 499 | assert_eq!(a.load(SeqCst), 30); 500 | } 501 | 502 | #[test] 503 | fn atomic_i64() { 504 | let a = Atomic::new(0i64); 505 | assert_eq!( 506 | Atomic::::is_lock_free(), 507 | cfg!(target_has_atomic = "64") && mem::align_of::() == 8 508 | ); 509 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 510 | assert_eq!(a.load(SeqCst), 0); 511 | a.store(1, SeqCst); 512 | assert_eq!(a.swap(2, SeqCst), 1); 513 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 514 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 515 | assert_eq!(a.fetch_add(123, SeqCst), 3); 516 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 517 | assert_eq!(a.fetch_and(7, SeqCst), 182); 518 | assert_eq!(a.fetch_or(64, SeqCst), 6); 519 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 520 | assert_eq!(a.fetch_min(30, SeqCst), 71); 521 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 522 | assert_eq!(a.load(SeqCst), 30); 523 | } 524 | 525 | #[test] 526 | fn atomic_i128() { 527 | let a = Atomic::new(0i128); 528 | assert_eq!( 529 | Atomic::::is_lock_free(), 530 | cfg!(feature = "nightly") & cfg!(target_has_atomic = "128") 531 | ); 532 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 533 | assert_eq!(a.load(SeqCst), 0); 534 | a.store(1, SeqCst); 535 | assert_eq!(a.swap(2, SeqCst), 1); 536 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 537 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 538 | assert_eq!(a.fetch_add(123, SeqCst), 3); 539 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 540 | assert_eq!(a.fetch_and(7, SeqCst), 182); 541 | assert_eq!(a.fetch_or(64, SeqCst), 6); 542 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 543 | assert_eq!(a.fetch_min(30, SeqCst), 71); 544 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 545 | assert_eq!(a.load(SeqCst), 30); 546 | } 547 | 548 
| #[test] 549 | fn atomic_isize() { 550 | let a = Atomic::new(0isize); 551 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 552 | assert_eq!(a.load(SeqCst), 0); 553 | a.store(1, SeqCst); 554 | assert_eq!(a.swap(2, SeqCst), 1); 555 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 556 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 557 | assert_eq!(a.fetch_add(123, SeqCst), 3); 558 | assert_eq!(a.fetch_sub(-56, SeqCst), 126); 559 | assert_eq!(a.fetch_and(7, SeqCst), 182); 560 | assert_eq!(a.fetch_or(64, SeqCst), 6); 561 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 562 | assert_eq!(a.fetch_min(30, SeqCst), 71); 563 | assert_eq!(a.fetch_max(-25, SeqCst), 30); 564 | assert_eq!(a.load(SeqCst), 30); 565 | } 566 | 567 | #[test] 568 | fn atomic_u8() { 569 | let a = Atomic::new(0u8); 570 | assert_eq!(Atomic::::is_lock_free(), cfg!(target_has_atomic = "8")); 571 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 572 | assert_eq!(a.load(SeqCst), 0); 573 | a.store(1, SeqCst); 574 | assert_eq!(a.swap(2, SeqCst), 1); 575 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 576 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 577 | assert_eq!(a.fetch_add(123, SeqCst), 3); 578 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 579 | assert_eq!(a.fetch_and(7, SeqCst), 70); 580 | assert_eq!(a.fetch_or(64, SeqCst), 6); 581 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 582 | assert_eq!(a.fetch_min(30, SeqCst), 71); 583 | assert_eq!(a.fetch_max(25, SeqCst), 30); 584 | assert_eq!(a.load(SeqCst), 30); 585 | } 586 | 587 | #[test] 588 | fn atomic_u16() { 589 | let a = Atomic::new(0u16); 590 | assert_eq!( 591 | Atomic::::is_lock_free(), 592 | cfg!(target_has_atomic = "16") 593 | ); 594 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 595 | assert_eq!(a.load(SeqCst), 0); 596 | a.store(1, SeqCst); 597 | assert_eq!(a.swap(2, SeqCst), 1); 598 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 599 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 600 | assert_eq!(a.fetch_add(123, SeqCst), 3); 601 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 602 | assert_eq!(a.fetch_and(7, SeqCst), 70); 603 | assert_eq!(a.fetch_or(64, SeqCst), 6); 604 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 605 | assert_eq!(a.fetch_min(30, SeqCst), 71); 606 | assert_eq!(a.fetch_max(25, SeqCst), 30); 607 | assert_eq!(a.load(SeqCst), 30); 608 | } 609 | 610 | #[test] 611 | fn atomic_u32() { 612 | let a = Atomic::new(0u32); 613 | assert_eq!( 614 | Atomic::::is_lock_free(), 615 | cfg!(target_has_atomic = "32") 616 | ); 617 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 618 | assert_eq!(a.load(SeqCst), 0); 619 | a.store(1, SeqCst); 620 | assert_eq!(a.swap(2, SeqCst), 1); 621 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 622 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 623 | assert_eq!(a.fetch_add(123, SeqCst), 3); 624 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 625 | assert_eq!(a.fetch_and(7, SeqCst), 70); 626 | assert_eq!(a.fetch_or(64, SeqCst), 6); 627 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 628 | assert_eq!(a.fetch_min(30, SeqCst), 71); 629 | assert_eq!(a.fetch_max(25, SeqCst), 30); 630 | assert_eq!(a.load(SeqCst), 30); 631 | } 632 | 633 | #[test] 634 | fn atomic_u64() { 635 | let a = Atomic::new(0u64); 636 | assert_eq!( 637 | Atomic::::is_lock_free(), 638 | cfg!(target_has_atomic = "64") && mem::align_of::() == 8 639 | ); 640 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 641 | assert_eq!(a.load(SeqCst), 0); 642 | a.store(1, SeqCst); 643 | 
assert_eq!(a.swap(2, SeqCst), 1); 644 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 645 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 646 | assert_eq!(a.fetch_add(123, SeqCst), 3); 647 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 648 | assert_eq!(a.fetch_and(7, SeqCst), 70); 649 | assert_eq!(a.fetch_or(64, SeqCst), 6); 650 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 651 | assert_eq!(a.fetch_min(30, SeqCst), 71); 652 | assert_eq!(a.fetch_max(25, SeqCst), 30); 653 | assert_eq!(a.load(SeqCst), 30); 654 | } 655 | 656 | #[test] 657 | fn atomic_u128() { 658 | let a = Atomic::new(0u128); 659 | assert_eq!( 660 | Atomic::::is_lock_free(), 661 | cfg!(feature = "nightly") & cfg!(target_has_atomic = "128") 662 | ); 663 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 664 | assert_eq!(a.load(SeqCst), 0); 665 | a.store(1, SeqCst); 666 | assert_eq!(a.swap(2, SeqCst), 1); 667 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 668 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 669 | assert_eq!(a.fetch_add(123, SeqCst), 3); 670 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 671 | assert_eq!(a.fetch_and(7, SeqCst), 70); 672 | assert_eq!(a.fetch_or(64, SeqCst), 6); 673 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 674 | assert_eq!(a.fetch_min(30, SeqCst), 71); 675 | assert_eq!(a.fetch_max(25, SeqCst), 30); 676 | assert_eq!(a.load(SeqCst), 30); 677 | } 678 | 679 | #[test] 680 | fn atomic_usize() { 681 | let a = Atomic::new(0usize); 682 | assert_eq!(format!("{:?}", a), "Atomic(0)"); 683 | assert_eq!(a.load(SeqCst), 0); 684 | a.store(1, SeqCst); 685 | assert_eq!(a.swap(2, SeqCst), 1); 686 | assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2)); 687 | assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2)); 688 | assert_eq!(a.fetch_add(123, SeqCst), 3); 689 | assert_eq!(a.fetch_sub(56, SeqCst), 126); 690 | assert_eq!(a.fetch_and(7, SeqCst), 70); 691 | assert_eq!(a.fetch_or(64, SeqCst), 6); 692 | assert_eq!(a.fetch_xor(1, SeqCst), 70); 693 | assert_eq!(a.fetch_min(30, SeqCst), 71); 694 | assert_eq!(a.fetch_max(25, SeqCst), 30); 695 | assert_eq!(a.load(SeqCst), 30); 696 | } 697 | 698 | #[test] 699 | fn atomic_foo() { 700 | let a = Atomic::default(); 701 | assert_eq!(Atomic::::is_lock_free(), false); 702 | assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))"); 703 | assert_eq!(a.load(SeqCst), Foo(0, 0)); 704 | a.store(Foo(1, 1), SeqCst); 705 | assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1)); 706 | assert_eq!( 707 | a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst), 708 | Err(Foo(2, 2)) 709 | ); 710 | assert_eq!( 711 | a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst), 712 | Ok(Foo(2, 2)) 713 | ); 714 | assert_eq!(a.load(SeqCst), Foo(3, 3)); 715 | } 716 | 717 | #[test] 718 | fn atomic_bar() { 719 | let a = Atomic::default(); 720 | assert_eq!(Atomic::::is_lock_free(), false); 721 | assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))"); 722 | assert_eq!(a.load(SeqCst), Bar(0, 0)); 723 | a.store(Bar(1, 1), SeqCst); 724 | assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1)); 725 | assert_eq!( 726 | a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst), 727 | Err(Bar(2, 2)) 728 | ); 729 | assert_eq!( 730 | a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst), 731 | Ok(Bar(2, 2)) 732 | ); 733 | assert_eq!(a.load(SeqCst), Bar(3, 3)); 734 | } 735 | 736 | #[test] 737 | fn atomic_quxx() { 738 | let a = Atomic::default(); 739 | assert_eq!( 740 | Atomic::::is_lock_free(), 741 | cfg!(target_has_atomic = "32") 742 | ); 743 | 
assert_eq!(format!("{:?}", a), "Atomic(Quux(0))"); 744 | assert_eq!(a.load(SeqCst), Quux(0)); 745 | a.store(Quux(1), SeqCst); 746 | assert_eq!(a.swap(Quux(2), SeqCst), Quux(1)); 747 | assert_eq!( 748 | a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst), 749 | Err(Quux(2)) 750 | ); 751 | assert_eq!( 752 | a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst), 753 | Ok(Quux(2)) 754 | ); 755 | assert_eq!(a.load(SeqCst), Quux(3)); 756 | } 757 | } 758 | -------------------------------------------------------------------------------- /src/ops.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use bytemuck::NoUninit; 9 | 10 | #[cfg(feature = "fallback")] 11 | use crate::fallback; 12 | use core::cmp; 13 | use core::mem; 14 | use core::num::Wrapping; 15 | use core::ops; 16 | use core::sync::atomic::Ordering; 17 | 18 | macro_rules! match_atomic { 19 | ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => { 20 | match mem::size_of::<$type>() { 21 | #[cfg(target_has_atomic = "8")] 22 | 1 if mem::align_of::<$type>() >= 1 => { 23 | type $atomic = core::sync::atomic::AtomicU8; 24 | 25 | $impl 26 | } 27 | #[cfg(target_has_atomic = "16")] 28 | 2 if mem::align_of::<$type>() >= 2 => { 29 | type $atomic = core::sync::atomic::AtomicU16; 30 | 31 | $impl 32 | } 33 | #[cfg(target_has_atomic = "32")] 34 | 4 if mem::align_of::<$type>() >= 4 => { 35 | type $atomic = core::sync::atomic::AtomicU32; 36 | 37 | $impl 38 | } 39 | #[cfg(target_has_atomic = "64")] 40 | 8 if mem::align_of::<$type>() >= 8 => { 41 | type $atomic = core::sync::atomic::AtomicU64; 42 | 43 | $impl 44 | } 45 | #[cfg(all(feature = "nightly", target_has_atomic = "128"))] 46 | 16 if mem::align_of::<$type>() >= 16 => { 47 | type $atomic = core::sync::atomic::AtomicU128; 48 | 49 | $impl 50 | } 51 | #[cfg(feature = "fallback")] 52 | _ => $fallback_impl, 53 | #[cfg(not(feature = "fallback"))] 54 | _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()), 55 | } 56 | }; 57 | } 58 | 59 | macro_rules! 
match_signed_atomic { 60 | ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => { 61 | match mem::size_of::<$type>() { 62 | #[cfg(target_has_atomic = "8")] 63 | 1 if mem::align_of::<$type>() >= 1 => { 64 | type $atomic = core::sync::atomic::AtomicI8; 65 | 66 | $impl 67 | } 68 | #[cfg(target_has_atomic = "16")] 69 | 2 if mem::align_of::<$type>() >= 2 => { 70 | type $atomic = core::sync::atomic::AtomicI16; 71 | 72 | $impl 73 | } 74 | #[cfg(target_has_atomic = "32")] 75 | 4 if mem::align_of::<$type>() >= 4 => { 76 | type $atomic = core::sync::atomic::AtomicI32; 77 | 78 | $impl 79 | } 80 | #[cfg(target_has_atomic = "64")] 81 | 8 if mem::align_of::<$type>() >= 8 => { 82 | type $atomic = core::sync::atomic::AtomicI64; 83 | 84 | $impl 85 | } 86 | #[cfg(all(feature = "nightly", target_has_atomic = "128"))] 87 | 16 if mem::align_of::<$type>() >= 16 => { 88 | type $atomic = core::sync::atomic::AtomicI128; 89 | 90 | $impl 91 | } 92 | #[cfg(feature = "fallback")] 93 | _ => $fallback_impl, 94 | #[cfg(not(feature = "fallback"))] 95 | _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()), 96 | } 97 | }; 98 | } 99 | 100 | #[inline] 101 | pub const fn atomic_is_lock_free() -> bool { 102 | let size = mem::size_of::(); 103 | let align = mem::align_of::(); 104 | 105 | (cfg!(target_has_atomic = "8") & (size == 1) & (align >= 1)) 106 | | (cfg!(target_has_atomic = "16") & (size == 2) & (align >= 2)) 107 | | (cfg!(target_has_atomic = "32") & (size == 4) & (align >= 4)) 108 | | (cfg!(target_has_atomic = "64") & (size == 8) & (align >= 8)) 109 | | (cfg!(feature = "nightly") 110 | & cfg!(target_has_atomic = "128") 111 | & (size == 16) 112 | & (align >= 16)) 113 | } 114 | 115 | #[inline] 116 | pub unsafe fn atomic_load(dst: *mut T, order: Ordering) -> T { 117 | match_atomic!( 118 | T, 119 | A, 120 | mem::transmute_copy(&(*(dst as *const A)).load(order)), 121 | fallback::atomic_load(dst) 122 | ) 123 | } 124 | 125 | #[inline] 126 | pub unsafe fn atomic_store(dst: *mut T, val: T, order: Ordering) { 127 | match_atomic!( 128 | T, 129 | A, 130 | (*(dst as *const A)).store(mem::transmute_copy(&val), order), 131 | fallback::atomic_store(dst, val) 132 | ) 133 | } 134 | 135 | #[inline] 136 | pub unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { 137 | match_atomic!( 138 | T, 139 | A, 140 | mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)), 141 | fallback::atomic_swap(dst, val) 142 | ) 143 | } 144 | 145 | #[inline] 146 | unsafe fn map_result(r: Result) -> Result { 147 | match r { 148 | Ok(x) => Ok(mem::transmute_copy(&x)), 149 | Err(x) => Err(mem::transmute_copy(&x)), 150 | } 151 | } 152 | 153 | #[inline] 154 | pub unsafe fn atomic_compare_exchange( 155 | dst: *mut T, 156 | current: T, 157 | new: T, 158 | success: Ordering, 159 | failure: Ordering, 160 | ) -> Result { 161 | match_atomic!( 162 | T, 163 | A, 164 | map_result((*(dst as *const A)).compare_exchange( 165 | mem::transmute_copy(¤t), 166 | mem::transmute_copy(&new), 167 | success, 168 | failure, 169 | )), 170 | fallback::atomic_compare_exchange(dst, current, new) 171 | ) 172 | } 173 | 174 | #[inline] 175 | pub unsafe fn atomic_compare_exchange_weak( 176 | dst: *mut T, 177 | current: T, 178 | new: T, 179 | success: Ordering, 180 | failure: Ordering, 181 | ) -> Result { 182 | match_atomic!( 183 | T, 184 | A, 185 | map_result((*(dst as *const A)).compare_exchange_weak( 186 | mem::transmute_copy(¤t), 187 | 
mem::transmute_copy(&new), 188 | success, 189 | failure, 190 | )), 191 | fallback::atomic_compare_exchange(dst, current, new) 192 | ) 193 | } 194 | 195 | #[inline] 196 | pub unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T 197 | where 198 | Wrapping: ops::Add>, 199 | { 200 | match_atomic!( 201 | T, 202 | A, 203 | mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order),), 204 | fallback::atomic_add(dst, val) 205 | ) 206 | } 207 | 208 | #[inline] 209 | pub unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T 210 | where 211 | Wrapping: ops::Sub>, 212 | { 213 | match_atomic!( 214 | T, 215 | A, 216 | mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order),), 217 | fallback::atomic_sub(dst, val) 218 | ) 219 | } 220 | 221 | #[inline] 222 | pub unsafe fn atomic_and>( 223 | dst: *mut T, 224 | val: T, 225 | order: Ordering, 226 | ) -> T { 227 | match_atomic!( 228 | T, 229 | A, 230 | mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order),), 231 | fallback::atomic_and(dst, val) 232 | ) 233 | } 234 | 235 | #[inline] 236 | pub unsafe fn atomic_or>( 237 | dst: *mut T, 238 | val: T, 239 | order: Ordering, 240 | ) -> T { 241 | match_atomic!( 242 | T, 243 | A, 244 | mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order),), 245 | fallback::atomic_or(dst, val) 246 | ) 247 | } 248 | 249 | #[inline] 250 | pub unsafe fn atomic_xor>( 251 | dst: *mut T, 252 | val: T, 253 | order: Ordering, 254 | ) -> T { 255 | match_atomic!( 256 | T, 257 | A, 258 | mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order),), 259 | fallback::atomic_xor(dst, val) 260 | ) 261 | } 262 | 263 | #[inline] 264 | pub unsafe fn atomic_min(dst: *mut T, val: T, order: Ordering) -> T { 265 | match_signed_atomic!( 266 | T, 267 | A, 268 | mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),), 269 | fallback::atomic_min(dst, val) 270 | ) 271 | } 272 | 273 | #[inline] 274 | pub unsafe fn atomic_max(dst: *mut T, val: T, order: Ordering) -> T { 275 | match_signed_atomic!( 276 | T, 277 | A, 278 | mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),), 279 | fallback::atomic_max(dst, val) 280 | ) 281 | } 282 | 283 | #[inline] 284 | pub unsafe fn atomic_umin(dst: *mut T, val: T, order: Ordering) -> T { 285 | match_atomic!( 286 | T, 287 | A, 288 | mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),), 289 | fallback::atomic_min(dst, val) 290 | ) 291 | } 292 | 293 | #[inline] 294 | pub unsafe fn atomic_umax(dst: *mut T, val: T, order: Ordering) -> T { 295 | match_atomic!( 296 | T, 297 | A, 298 | mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),), 299 | fallback::atomic_max(dst, val) 300 | ) 301 | } 302 | --------------------------------------------------------------------------------
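
Editor's note: as a closing illustration of the dispatch pattern used by `match_atomic!`/`match_signed_atomic!` in `src/ops.rs`, here is a small standalone sketch, not part of the crate. It shows a single arm of the idea: when `T` has the size and alignment of `u32`, the operation is routed through `core::sync::atomic::AtomicU32` and the bits are moved across with `mem::transmute_copy`; any other layout would go to the spinlock fallback. The helper name `load_via_u32` is hypothetical.

```rust
use core::mem;
use core::sync::atomic::{AtomicU32, Ordering};

/// Hypothetical helper mirroring one arm of `match_atomic!`: perform a load of `T`
/// through `AtomicU32` when the layout allows it.
///
/// # Safety
/// As with `ops::atomic_load`, `src` must point to a live `T` that is only ever
/// accessed through these atomic helpers.
unsafe fn load_via_u32<T: Copy>(src: *mut T, order: Ordering) -> Option<T> {
    if mem::size_of::<T>() == 4 && mem::align_of::<T>() >= 4 {
        // Reinterpret the storage as an AtomicU32, load it, and copy the raw
        // bits back into a value of type T.
        let bits: u32 = (*(src as *const AtomicU32)).load(order);
        Some(mem::transmute_copy(&bits))
    } else {
        // A real implementation dispatches to the other widths here, or to the
        // lock-based path in src/fallback.rs when no native width matches.
        None
    }
}
```

The real `ops.rs` applies the same bit-copy trick to every supported width (8 through 128 bits, the last behind the `nightly` feature) and only reaches `fallback.rs` when the size/alignment check fails or native atomics are unavailable.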