#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use crate::primitive::sync::atomic::compiler_fence;
#[cfg(not(crossbeam_no_atomic))]
use core::sync::atomic::Ordering;

/// Trait which allows reading from primitive atomic types with "consume" ordering.
pub trait AtomicConsume {
    /// Type returned by `load_consume`.
    type Val;

    /// Loads a value from the atomic using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the
    /// load. However, consume loads are usually much faster than acquire
    /// loads on architectures with a weak memory model, since they don't
    /// require memory fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as
    /// you would expect in practice, since a lot of software, especially the
    /// Linux kernel, relies on this behavior.
    ///
    /// This is currently only implemented on ARM and AArch64, where a fence
    /// can be avoided. On other architectures this will fall back to a simple
    /// `load(Ordering::Acquire)`.
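    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming this trait is reachable as
    /// `crossbeam_utils::atomic::AtomicConsume`. The read through the loaded
    /// pointer depends on the value of the load, so consume ordering is
    /// sufficient to observe the pointee:
    ///
    /// ```
    /// use core::sync::atomic::AtomicPtr;
    /// use crossbeam_utils::atomic::AtomicConsume;
    ///
    /// static DATA: i32 = 42;
    /// let ptr = AtomicPtr::new(&DATA as *const i32 as *mut i32);
    ///
    /// // Dereferencing `p` depends on the loaded pointer value, so this
    /// // read is ordered after the load even under consume semantics.
    /// let p = ptr.load_consume();
    /// assert_eq!(unsafe { *p }, 42);
    /// ```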
    fn load_consume(&self) -> Self::Val;
}

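// On ARM and AArch64, the hardware already orders dependent loads: a load
// whose address or data depends on a previous load cannot be reordered before
// it. A relaxed load followed by a compiler fence therefore gives consume
// semantics; the fence only stops the compiler from reordering or breaking
// the dependency chain, and no fence instruction is emitted.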
#[cfg(not(crossbeam_no_atomic))]
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            let result = self.load(Ordering::Relaxed);
            compiler_fence(Ordering::Acquire);
            result
        }
    };
}

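// On other architectures there is no portable way to take advantage of
// dependency ordering, so fall back to a plain acquire load.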
#[cfg(not(crossbeam_no_atomic))]
#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            self.load(Ordering::Acquire)
        }
    };
}

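// Implements `AtomicConsume` for the given atomic type: for the `core` atomic
// when atomics are available, and for its loom counterpart under
// `cfg(crossbeam_loom)` so the trait can also be used in loom-based tests.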
macro_rules! impl_atomic {
    ($atomic:ident, $val:ty) => {
        #[cfg(not(crossbeam_no_atomic))]
        impl AtomicConsume for core::sync::atomic::$atomic {
            type Val = $val;
            impl_consume!();
        }
        #[cfg(crossbeam_loom)]
        impl AtomicConsume for loom::sync::atomic::$atomic {
            type Val = $val;
            impl_consume!();
        }
    };
}

impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
impl_atomic!(AtomicIsize, isize);
impl_atomic!(AtomicU8, u8);
impl_atomic!(AtomicI8, i8);
impl_atomic!(AtomicU16, u16);
impl_atomic!(AtomicI16, i16);
impl_atomic!(AtomicU32, u32);
impl_atomic!(AtomicI32, i32);
#[cfg(not(crossbeam_no_atomic_64))]
impl_atomic!(AtomicU64, u64);
#[cfg(not(crossbeam_no_atomic_64))]
impl_atomic!(AtomicI64, i64);

#[cfg(not(crossbeam_no_atomic))]
impl<T> AtomicConsume for core::sync::atomic::AtomicPtr<T> {
    type Val = *mut T;
    impl_consume!();
}

#[cfg(crossbeam_loom)]
impl<T> AtomicConsume for loom::sync::atomic::AtomicPtr<T> {
    type Val = *mut T;
    impl_consume!();
}