// SPDX-License-Identifier: Apache-2.0 OR MIT

// Wrap the standard library's atomic types in a newtype.
//
// This is not a reexport, because we want to backport changes like
// https://github.com/rust-lang/rust/pull/98383 to old compilers.

use core::{cell::UnsafeCell, marker::PhantomData, sync::atomic::Ordering};

// core::panic::RefUnwindSafe is only available on Rust 1.56+, so on pre-1.56
// Rust, we implement RefUnwindSafe only when the "std" feature is enabled.
// However, on pre-1.56 Rust, the standard library's atomic types implement
// RefUnwindSafe when "linked to std", behavior that our other atomic
// implementations can't emulate, so we use PhantomData<NoRefUnwindSafe> to match
// the conditions under which our other atomic implementations implement RefUnwindSafe.
// If we did not do this, a downstream crate that is only tested on x86_64 might
// incorrectly assume that AtomicU64 always implements RefUnwindSafe even on
// older rustc, and be broken on platforms where the std AtomicU64 is not available.
struct NoRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this is a marker type and we'll never access the value.
unsafe impl Sync for NoRefUnwindSafe {}
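// RefUnwindSafe is an auto trait, so a struct implements it only when all of its
// fields do. UnsafeCell<()> is not RefUnwindSafe, so embedding
// PhantomData<NoRefUnwindSafe> in the wrappers below opts them out of RefUnwindSafe
// without changing their layout (PhantomData is zero-sized and the wrappers are
// #[repr(transparent)]) and without affecting Send/Sync; the manual Sync impl above
// exists precisely so that the marker does not remove Sync from the wrappers.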

#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
    // Prevent RefUnwindSafe from being propagated from the std atomic type.
    _marker: PhantomData<NoRefUnwindSafe>,
}
impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(v: *mut T) -> Self {
        Self { inner: core::sync::atomic::AtomicPtr::new(v), _marker: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::is_always_lock_free()
    }
    #[inline]
    pub(crate) const fn is_always_lock_free() -> bool {
        true
    }
    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut *mut T {
        self.inner.get_mut()
    }
    #[inline]
    pub(crate) fn into_inner(self) -> *mut T {
        self.inner.into_inner()
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
        self.inner.store(ptr, order);
    }
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut *mut T {
            // SAFETY: Self is #[repr(transparent)] and wraps core::sync::atomic::AtomicPtr<T>,
            // which is internally UnsafeCell<*mut T>.
            // See also https://github.com/rust-lang/rust/pull/66705 and
            // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
            unsafe { (*(self as *const Self as *const UnsafeCell<*mut T>)).get() }
        }
    }
}
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl<T> AtomicPtr<T> {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
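        // Before https://github.com/rust-lang/rust/pull/98383, the failure ordering could
        // not be stronger than the success ordering, so on such compilers the success
        // ordering is adjusted (see crate::utils::upgrade_success_ordering) to keep the
        // pair passed to the standard library valid.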
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
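// Any other method of the standard library type is reached through this Deref impl;
// only the methods that need ordering assertions, backports, or workarounds are
// wrapped explicitly above.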
impl<T> core::ops::Deref for AtomicPtr<T> {
    type Target = core::sync::atomic::AtomicPtr<T>;
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

macro_rules! atomic_int {
    ($atomic_type:ident, $int_type:ident) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            inner: core::sync::atomic::$atomic_type,
            // Prevent RefUnwindSafe from being propagated from the std atomic type.
            _marker: PhantomData<NoRefUnwindSafe>,
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        #[cfg(not(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            not(portable_atomic_no_asm),
        )))]
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self { inner: core::sync::atomic::$atomic_type::new(v), _marker: PhantomData }
            }
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::is_always_lock_free()
            }
            #[inline]
            pub(crate) const fn is_always_lock_free() -> bool {
                // ESP-IDF targets' 64-bit atomics are not lock-free.
                // https://github.com/rust-lang/rust/pull/115577#issuecomment-1732259297
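                // Note: the non-short-circuiting `|` below is presumably used (rather than
                // `||`) so that this remains a valid `const fn` on old compilers where the
                // lazy boolean operators are not allowed in const contexts.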
                cfg!(not(all(
                    any(target_arch = "riscv32", target_arch = "xtensa"),
                    target_os = "espidf",
                ))) | (core::mem::size_of::<$int_type>() < 8)
            }
            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
                self.inner.get_mut()
            }
            #[inline]
            pub(crate) fn into_inner(self) -> $int_type {
                self.inner.into_inner()
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.load(order)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order); // for track_caller (compiler can omit double check)
                self.inner.store(val, order);
            }
            const_fn! {
                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
                #[inline]
                pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                    // SAFETY: Self is #[repr(transparent)] and wraps core::sync::atomic::$atomic_type,
                    // which is internally UnsafeCell<$int_type>.
                    // See also https://github.com/rust-lang/rust/pull/66705 and
                    // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
                    unsafe {
                        (*(self as *const Self as *const UnsafeCell<$int_type>)).get()
                    }
                }
            }
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl $atomic_type {
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange(current, new, success, failure)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure); // for track_caller (compiler can omit double check)
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange_weak(current, new, success, failure)
            }
            #[allow(dead_code)]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $int_type
            where
                F: FnMut($int_type) -> $int_type,
            {
                // This is a private function and all instances of `f` only operate on the value
                // loaded, so there is no need to synchronize the first load/failed CAS.
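                // compare_exchange_weak may also fail spuriously; in either case the failed
                // CAS returns the freshly observed value, which is fed back into `f` and
                // retried until the exchange succeeds.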
                let mut prev = self.load(Ordering::Relaxed);
                loop {
                    let next = f(prev);
                    match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
                        Ok(x) => return x,
                        Err(next_prev) => prev = next_prev,
                    }
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            target_arch = "aarch64",
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::max(x, val));
                        }
                    }
                    self.inner.fetch_max(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::max(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            target_arch = "aarch64",
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        // HACK: the following operations are currently broken (at least on qemu-user):
                        // - aarch64's `AtomicI{8,16}::fetch_{max,min}` (release mode + lse)
                        // - armv5te's `Atomic{I,U}{8,16}::fetch_{max,min}`
                        // - mips's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mipsel's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - mips64's `AtomicI8::fetch_{max,min}` (release mode)
                        // - mips64el's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc's `AtomicI{8,16}::fetch_{max,min}`
                        // - powerpc64's `AtomicI{8,16}::fetch_{max,min}` (debug mode, at least)
                        // - powerpc64le's `AtomicU{8,16}::fetch_{max,min}` (release mode + fat LTO)
                        // See also:
                        // https://github.com/llvm/llvm-project/issues/61880
                        // https://github.com/llvm/llvm-project/issues/61881
                        // https://github.com/llvm/llvm-project/issues/61882
                        // https://github.com/taiki-e/portable-atomic/issues/2
                        // https://github.com/rust-lang/rust/issues/100650
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::min(x, val));
                        }
                    }
                    self.inner.fetch_min(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::min(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                const NOT_MASK: $int_type = (0 as $int_type).wrapping_sub(1);
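                // NOT_MASK has every bit set, so XOR-ing with it flips every bit:
                // `x ^ NOT_MASK == !x`.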
                self.fetch_xor(NOT_MASK, order)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                not(portable_atomic_no_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
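                // Two's-complement (wrapping) negation implemented as a CAS loop; the
                // standard library's atomic integers have no fetch_neg method.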
                self.fetch_update_(order, $int_type::wrapping_neg)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                not(portable_atomic_no_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
        impl core::ops::Deref for $atomic_type {
            type Target = core::sync::atomic::$atomic_type;
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            fn deref(&self) -> &Self::Target {
                &self.inner
            }
        }
    };
}

atomic_int!(AtomicIsize, isize);
atomic_int!(AtomicUsize, usize);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI8, i8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU8, u8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI16, i16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU16, u16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicI32, i32);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicU32, u32);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicI64, i64);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicU64, u64);
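
// A minimal test sketch, not part of the upstream crate: the module name and the
// chosen orderings are illustrative. It exercises the wrapper methods defined above
// on the always-available AtomicUsize/AtomicIsize and is gated like the impl blocks
// above, so it only compiles where pointer-width CAS is available.
#[cfg(test)]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
mod sketch_tests {
    use core::sync::atomic::Ordering;

    #[test]
    fn usize_load_store_cas_max() {
        let a = super::AtomicUsize::new(5);
        assert!(super::AtomicUsize::is_lock_free());
        assert_eq!(a.load(Ordering::Relaxed), 5);
        a.store(7, Ordering::Relaxed);
        // compare_exchange returns the previous value on success...
        assert_eq!(a.compare_exchange(7, 8, Ordering::AcqRel, Ordering::Acquire), Ok(7));
        // ...and the currently stored value on failure.
        assert_eq!(a.compare_exchange(7, 9, Ordering::AcqRel, Ordering::Acquire), Err(8));
        // fetch_max returns the previous value and stores the maximum.
        assert_eq!(a.fetch_max(3, Ordering::Relaxed), 8);
        assert_eq!(a.load(Ordering::Relaxed), 8);
    }

    #[test]
    fn isize_not_neg() {
        let a = super::AtomicIsize::new(5);
        // fetch_not flips every bit: !5 == -6 in two's complement.
        assert_eq!(a.fetch_not(Ordering::Relaxed), 5);
        assert_eq!(a.load(Ordering::Relaxed), !5);
        a.store(5, Ordering::Relaxed);
        // fetch_neg is wrapping negation.
        assert_eq!(a.fetch_neg(Ordering::Relaxed), 5);
        assert_eq!(a.load(Ordering::Relaxed), -5);
    }
}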