// SPDX-License-Identifier: Apache-2.0 OR MIT

/*
Wrap the standard library's atomic types in newtypes.

This is not a re-export, because we want to backport changes like
https://github.com/rust-lang/rust/pull/98383 to old compilers.
*/

use core::{cell::UnsafeCell, marker::PhantomData, sync::atomic::Ordering};

// core::panic::RefUnwindSafe is only available on Rust 1.56+, so on pre-1.56
// Rust, we implement RefUnwindSafe when the "std" feature is enabled.
// However, on pre-1.56 Rust, the standard library's atomic types implement
// RefUnwindSafe when "linked to std", and that is behavior that our other atomic
// implementations can't emulate, so use PhantomData<NotRefUnwindSafe> to match
// the conditions where our other atomic implementations implement RefUnwindSafe.
//
// If we don't do this, downstream code that is only tested on x86_64, for example,
// may incorrectly assume that AtomicU64 always implements RefUnwindSafe even on
// older rustc, and may be broken on platforms where std's AtomicU64 is not available.
struct NotRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this is a marker type and we never access the value.
unsafe impl Sync for NotRefUnwindSafe {}
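
// A minimal sketch of the effect, assuming a toolchain where std is linked:
// `UnsafeCell<()>` is `!RefUnwindSafe`, so embedding `PhantomData<NotRefUnwindSafe>`
// in the wrappers below suppresses the `RefUnwindSafe` auto-impl that the `inner`
// std atomic would otherwise propagate.
//
//     fn assert_ref_unwind_safe<T: std::panic::RefUnwindSafe>() {}
//     assert_ref_unwind_safe::<core::sync::atomic::AtomicUsize>(); // OK
//     // assert_ref_unwind_safe::<AtomicUsize>(); // the wrapper below: error, by design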

#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
    // Prevent RefUnwindSafe from being propagated from the std atomic type. See NotRefUnwindSafe for more.
    _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
}
impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(v: *mut T) -> Self {
        Self { inner: core::sync::atomic::AtomicPtr::new(v), _not_ref_unwind_safe: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::IS_ALWAYS_LOCK_FREE
    }
    pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true;
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.store(ptr, order);
    }
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut *mut T {
            // SAFETY: Self is #[repr(transparent)] over the std atomic type, which is
            // internally UnsafeCell<*mut T>. See also https://github.com/rust-lang/rust/pull/66705 and
            // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
            unsafe { (*(self as *const Self as *const UnsafeCell<*mut T>)).get() }
        }
    }
}
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl<T> AtomicPtr<T> {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
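        // Note: on old rustc without https://github.com/rust-lang/rust/pull/98383,
        // the failure ordering must not be stronger than the success ordering, so
        // the success ordering is upgraded to keep the pair valid (see the module comment).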
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
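// Methods not overridden above (e.g. `swap` and the pointer `fetch_*` ops) are
// reached through this `Deref` to the underlying standard library atomic type.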
impl<T> core::ops::Deref for AtomicPtr<T> {
    type Target = core::sync::atomic::AtomicPtr<T>;
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

macro_rules! atomic_int {
    ($atomic_type:ident, $int_type:ident) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            inner: core::sync::atomic::$atomic_type,
            // Prevent RefUnwindSafe from being propagated from the std atomic type. See NotRefUnwindSafe for more.
            _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        #[cfg(not(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        )))]
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self {
                    inner: core::sync::atomic::$atomic_type::new(v),
                    _not_ref_unwind_safe: PhantomData,
                }
            }
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
            // ESP-IDF targets' 64-bit atomics are not lock-free.
            // https://github.com/rust-lang/rust/pull/115577#issuecomment-1732259297
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = cfg!(not(all(
                any(target_arch = "riscv32", target_arch = "xtensa"),
                target_os = "espidf",
            ))) | (core::mem::size_of::<$int_type>() < 8);
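            // For example, with the expression above, a 64-bit atomic on an ESP-IDF
            // RISC-V/Xtensa target evaluates to `false`, while 8/16/32-bit atomics
            // still evaluate to `true`.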
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.load(order)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.store(val, order);
            }
            const_fn! {
                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
                #[inline]
                pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                    // SAFETY: Self is #[repr(transparent)] over the std atomic type, which is
                    // internally UnsafeCell<$int_type>. See also https://github.com/rust-lang/rust/pull/66705 and
                    // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
                    unsafe {
                        (*(self as *const Self as *const UnsafeCell<$int_type>)).get()
                    }
                }
            }
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl $atomic_type {
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange(current, new, success, failure)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange_weak(current, new, success, failure)
            }
            #[allow(dead_code)]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $int_type
            where
                F: FnMut($int_type) -> $int_type,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
                let mut prev = self.load(Ordering::Relaxed);
                loop {
                    let next = f(prev);
                    match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
                        Ok(x) => return x,
                        Err(next_prev) => prev = next_prev,
                    }
                }
            }
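            // In effect this behaves like std's `fetch_update(order, Ordering::Relaxed, |x| Some(f(x)))`,
            // except that `f` is infallible, so the previous value is returned directly rather than as a `Result`.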
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::max(x, val));
                        }
                    }
                    self.inner.fetch_max(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::max(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::min(x, val));
                        }
                    }
                    self.inner.fetch_min(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::min(x, val))
                }
            }
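            // `!x == x ^ !0` for integers, so `fetch_not` below is implemented via
            // `fetch_xor` with an all-ones mask.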
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.fetch_xor(!0, order)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
            // TODO: provide asm-based implementation on AArch64 without FEAT_LSE, Armv7, RISC-V, etc.
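            // `fetch_neg` has no direct counterpart in `core::sync::atomic`, so it is
            // emulated with a CAS loop (`fetch_update_`) applying two's-complement
            // negation (`wrapping_neg`).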
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
                self.fetch_update_(order, $int_type::wrapping_neg)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
        impl core::ops::Deref for $atomic_type {
            type Target = core::sync::atomic::$atomic_type;
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn deref(&self) -> &Self::Target {
                &self.inner
            }
        }
    };
}

atomic_int!(AtomicIsize, isize);
atomic_int!(AtomicUsize, usize);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI8, i8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU8, u8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI16, i16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU16, u16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicI32, i32);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicU32, u32);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicI64, i64);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicU64, u64);
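
// A minimal sanity-check sketch of the wrappers above, assuming a host target where
// the standard library's atomics (and CAS) are available; the module name and test
// are illustrative and not part of the crate's regular test suite.
#[cfg(test)]
mod usage_sketch {
    use core::sync::atomic::Ordering;

    use super::AtomicUsize;

    #[test]
    fn basic_ops() {
        let a = AtomicUsize::new(1);
        assert_eq!(a.load(Ordering::Relaxed), 1);
        a.store(2, Ordering::Relaxed);
        // compare_exchange is defined above (gated on CAS support).
        assert_eq!(a.compare_exchange(2, 3, Ordering::AcqRel, Ordering::Acquire), Ok(2));
        // `swap` is not overridden by the wrapper; it is reached through `Deref`
        // to the standard library atomic type.
        assert_eq!(a.swap(4, Ordering::Relaxed), 3);
        // `fetch_not` stores the bitwise NOT and returns the previous value.
        assert_eq!(a.fetch_not(Ordering::Relaxed), 4);
        assert_eq!(a.load(Ordering::Relaxed), !4);
    }
}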