// SPDX-License-Identifier: Apache-2.0 OR MIT

// Wrap the standard library's atomic types in newtype.
//
// This is not a reexport, because we want to backport changes like
// https://github.com/rust-lang/rust/pull/98383 to old compilers.

use core::{cell::UnsafeCell, marker::PhantomData, sync::atomic::Ordering};

// core::panic::RefUnwindSafe is only available on Rust 1.56+, so on pre-1.56
// Rust, we implement RefUnwindSafe when the "std" feature is enabled.
// However, on pre-1.56 Rust, the standard library's atomic types implement
// RefUnwindSafe when "linked to std", and that's behavior that our other atomic
// implementations can't emulate, so we use PhantomData<NotRefUnwindSafe> to match
// the conditions under which our other atomic implementations implement RefUnwindSafe.
// If we do not do this, for example, a downstream crate that is only tested on x86_64
// may incorrectly assume that AtomicU64 always implements RefUnwindSafe even on
// older rustc, and may be broken on platforms where std AtomicU64 is not available.
struct NotRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this is a marker type and we'll never access the value.
unsafe impl Sync for NotRefUnwindSafe {}
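// Illustration (editorial, not in the original source): RefUnwindSafe is an
// auto trait, and UnsafeCell<()> does not implement it, so embedding
// PhantomData<NotRefUnwindSafe> removes the auto impl from the wrapper types
// below. A hypothetical compile-time check on Rust 1.56+:
//
//     fn assert_ref_unwind_safe<T: core::panic::RefUnwindSafe>() {}
//     assert_ref_unwind_safe::<AtomicPtr<u8>>(); // error: NotRefUnwindSafe is not RefUnwindSafe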

#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
    // Prevent RefUnwindSafe from being propagated from the std atomic type. See NotRefUnwindSafe for more.
    _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
}
impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(v: *mut T) -> Self {
        Self { inner: core::sync::atomic::AtomicPtr::new(v), _not_ref_unwind_safe: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::is_always_lock_free()
    }
    #[inline]
    pub(crate) const fn is_always_lock_free() -> bool {
        true
    }
    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut *mut T {
        self.inner.get_mut()
    }
    #[inline]
    pub(crate) fn into_inner(self) -> *mut T {
        self.inner.into_inner()
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.store(ptr, order);
    }
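    // Note (editorial, not in the original source): assert_load_ordering rejects
    // orderings that are invalid for loads (Release and AcqRel), and
    // assert_store_ordering rejects orderings invalid for stores (Acquire and
    // AcqRel), matching std's panic behavior. For example, a call like
    // `a.load(Ordering::Release)` panics here just as it does with
    // core::sync::atomic::AtomicPtr.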
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut *mut T {
            // SAFETY: Self is #[repr(transparent)] over the std atomic type, which is
            // internally UnsafeCell<*mut T>.
            // See also https://github.com/rust-lang/rust/pull/66705 and
            // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
            unsafe { (*(self as *const Self as *const UnsafeCell<*mut T>)).get() }
        }
    }
}
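// Note (editorial): `const_fn!` is a crate-internal helper macro; it emits the
// function as `const fn` when the `const_if` cfg holds and as a plain `fn`
// otherwise, so `as_ptr` is const only on compilers that allow dereferencing
// raw pointers in const contexts.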
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl<T> AtomicPtr<T> {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
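// Usage sketch (editorial, not in the original source): compare_exchange_weak
// is intended for CAS retry loops, e.g. swinging a pointer from the observed
// value to `new`:
//
//     let mut cur = a.load(Ordering::Relaxed);
//     loop {
//         match a.compare_exchange_weak(cur, new, Ordering::AcqRel, Ordering::Relaxed) {
//             Ok(_) => break,              // the swap happened
//             Err(actual) => cur = actual, // another thread won; retry with the observed value
//         }
//     }
//
// `a` and `new` are hypothetical names for illustration.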
impl<T> core::ops::Deref for AtomicPtr<T> {
    type Target = core::sync::atomic::AtomicPtr<T>;
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
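// Note (editorial): methods not re-wrapped above (e.g. swap) resolve through
// this Deref impl to the std atomic type, so only the methods that need
// backports or extra ordering checks are redefined here.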

macro_rules! atomic_int {
    ($atomic_type:ident, $int_type:ident) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            inner: core::sync::atomic::$atomic_type,
            // Prevent RefUnwindSafe from being propagated from the std atomic type. See NotRefUnwindSafe for more.
            _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        #[cfg(not(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            not(portable_atomic_no_asm),
        )))]
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self {
                    inner: core::sync::atomic::$atomic_type::new(v),
                    _not_ref_unwind_safe: PhantomData,
                }
            }
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::is_always_lock_free()
            }
            #[inline]
            pub(crate) const fn is_always_lock_free() -> bool {
                // ESP-IDF targets' 64-bit atomics are not lock-free.
                // https://github.com/rust-lang/rust/pull/115577#issuecomment-1732259297
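                // Note (editorial): the non-short-circuiting `|` below keeps this
                // expression valid in a `const fn` on pre-1.46 compilers, where
                // `||` was not allowed in const contexts.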
                cfg!(not(all(
                    any(target_arch = "riscv32", target_arch = "xtensa"),
                    target_os = "espidf",
                ))) | (core::mem::size_of::<$int_type>() < 8)
            }
            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
                self.inner.get_mut()
            }
            #[inline]
            pub(crate) fn into_inner(self) -> $int_type {
                self.inner.into_inner()
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.load(order)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.store(val, order);
            }
            const_fn! {
                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
                #[inline]
                pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                    // SAFETY: Self is #[repr(transparent)] over the std atomic type, which is
                    // internally UnsafeCell<$int_type>.
                    // See also https://github.com/rust-lang/rust/pull/66705 and
                    // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
                    unsafe {
                        (*(self as *const Self as *const UnsafeCell<$int_type>)).get()
                    }
                }
            }
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl $atomic_type {
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange(current, new, success, failure)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange_weak(current, new, success, failure)
            }
            #[allow(dead_code)]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $int_type
            where
                F: FnMut($int_type) -> $int_type,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
                let mut prev = self.load(Ordering::Relaxed);
                loop {
                    let next = f(prev);
                    match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
                        Ok(x) => return x,
                        Err(next_prev) => prev = next_prev,
                    }
                }
            }
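            // Note (editorial): fetch_update_ is the classic CAS retry loop; it
            // returns the value observed just before the successful exchange, so
            // callers such as fetch_max below return the previous value even when
            // they fall back to this loop instead of a native instruction.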
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            target_arch = "aarch64",
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::max(x, val));
                        }
                    }
                    self.inner.fetch_max(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::max(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            target_arch = "aarch64",
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::min(x, val));
                        }
                    }
                    self.inner.fetch_min(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::min(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.fetch_xor(!0, order)
            }
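            // Note (editorial): fetch_not expresses bitwise NOT as XOR with
            // all-ones (for u8, !0 == 0xFF, so every bit flips); fetch_xor itself
            // is reached on the std type through the Deref impl below.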
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                not(portable_atomic_no_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
                self.fetch_update_(order, $int_type::wrapping_neg)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                not(portable_atomic_no_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
        impl core::ops::Deref for $atomic_type {
            type Target = core::sync::atomic::$atomic_type;
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn deref(&self) -> &Self::Target {
                &self.inner
            }
        }
    };
}

atomic_int!(AtomicIsize, isize);
atomic_int!(AtomicUsize, usize);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI8, i8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU8, u8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI16, i16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU16, u16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicI32, i32);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicU32, u32);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicI64, i64);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicU64, u64);
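// Usage sketch (editorial, not in the original source): the generated wrappers
// behave like their std counterparts, e.g.:
//
//     let x = AtomicUsize::new(0);
//     x.store(1, Ordering::Release);
//     assert_eq!(x.load(Ordering::Acquire), 1);
//     assert_eq!(x.fetch_max(5, Ordering::AcqRel), 1); // returns the previous value
//     assert_eq!(x.load(Ordering::Relaxed), 5);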