1// SPDX-License-Identifier: Apache-2.0 OR MIT
2
3#![cfg_attr(not(all(test, feature = "float")), allow(dead_code, unused_macros))]
4
5#[macro_use]
6#[path = "gen/utils.rs"]
7mod gen;
8
9use core::sync::atomic::Ordering;
10
// Compile-time assertion: compilation fails when `$cond` is `false` in a const
// context.
macro_rules! static_assert {
    ($cond:expr $(,)?) => {{
        // `true as usize - cond as usize` is 0 when the condition holds, so the
        // empty-array pattern `[]` matches; when the condition is false the
        // array has length 1 and the irrefutable-pattern check rejects it.
        let [] = [(); true as usize - $crate::utils::_assert_is_bool($cond) as usize];
    }};
}
/// Identity function used by `static_assert!` to force the asserted expression
/// to have type `bool` (a non-`bool` argument fails to compile at the call site).
pub(crate) const fn _assert_is_bool(v: bool) -> bool {
    v
}
19
// Statically checks that `$atomic_type` is the same size as `$value_type` and
// that its alignment equals its size (atomics are generally required to be
// aligned to their size).
macro_rules! static_assert_layout {
    ($atomic_type:ty, $value_type:ty) => {
        static_assert!(
            core::mem::align_of::<$atomic_type>() == core::mem::size_of::<$atomic_type>()
        );
        static_assert!(core::mem::size_of::<$atomic_type>() == core::mem::size_of::<$value_type>());
    };
}
28
// #[doc = concat!(...)] requires Rust 1.54
// Attaches the given string expression as a `#[doc]` attribute to the item that
// follows, emulating `#[doc = concat!(...)]` on older compilers.
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
36
37// Adapted from https://github.com/BurntSushi/memchr/blob/2.4.1/src/memchr/x86/mod.rs#L9-L71.
38/// # Safety
39///
/// - the caller must uphold the safety contract for the function returned by $detect_body.
/// - the memory pointed to by the function pointer returned by $detect_body must be visible to all threads.
42///
43/// The second requirement is always met if the function pointer is to the function definition.
44/// (Currently, all uses of this macro in our code are in this case.)
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "powerpc64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
// Indirect-function (ifunc)-style dispatch: the first call runs `$detect_body`
// to select an implementation, caches the chosen function pointer in a static
// `AtomicPtr`, and calls it; later calls load and call the cached pointer.
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)? { $($detect_body:tt)* }) => {{
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        // Initially points at `detect`; overwritten with the detected
        // implementation on first call.
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as *mut ());
        #[cold]
        unsafe fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = { $($detect_body)* };
            FUNC.store(func as *mut (), core::sync::atomic::Ordering::Relaxed);
            // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
            unsafe { func($($arg_pat),*) }
        }
        // SAFETY: `FnTy` is a function pointer, which is always safe to transmute with a `*mut ()`.
        // (To force the caller to use unsafe block for this macro, do not use
        // unsafe block here.)
        let func = {
            core::mem::transmute::<*mut (), FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
        // (To force the caller to use unsafe block for this macro, do not use
        // unsafe block here.)
        func($($arg_pat),*)
    }};
}
77
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "powerpc64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
// Defines `$new` as a thin wrapper that forwards to `$from`, appending
// `$last_args` after the shared argument list. The macro recurses so several
// aliases sharing the same signature can be declared in one invocation.
macro_rules! fn_alias {
    (
        $(#[$($fn_attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
        $(#[$($alias_attr:tt)*])*
        $new:ident = $from:ident($($last_args:tt)*);
        $($rest:tt)*
    ) => {
        $(#[$($fn_attr)*])*
        $(#[$($alias_attr)*])*
        $vis unsafe fn $new($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { $from($($arg_pat,)* $($last_args)*) }
        }
        // Recurse to process the remaining alias declarations.
        fn_alias! {
            $(#[$($fn_attr)*])*
            $vis unsafe fn($($arg_pat: $arg_ty),*) $(-> $ret_ty)?;
            $($rest)*
        }
    };
    // Base case: no more aliases to define.
    (
        $(#[$($attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
    ) => {}
}
111
/// Make the given function const if the given condition is true.
///
/// Emits the function twice under mutually exclusive cfgs: as `const fn` when
/// `$cfg` holds, and as a plain `fn` otherwise.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const fn $($rest:tt)*
    ) => {
        // `const` version, enabled when the condition holds.
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const fn $($rest)*
        // Non-`const` version, enabled otherwise.
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis fn $($rest)*
    };
}
127
/// Implements `core::fmt::Debug` and `serde::{Serialize, Deserialize}` (when serde
/// feature is enabled) for atomic bool, integer, or float.
///
/// All three impls go through `load(Relaxed)` / `Self::new`, so the generated
/// code only requires `$atomic_type` to have `load` and `new`.
macro_rules! impl_debug_and_serde {
    ($atomic_type:ident) => {
        impl fmt::Debug for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // fmt is not hot path
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // std atomic types use Relaxed in Debug::fmt: https://github.com/rust-lang/rust/blob/1.70.0/library/core/src/sync/atomic.rs#L2024
                fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl serde::ser::Serialize for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::ser::Serializer,
            {
                // Serialize the inner value, matching serde's impls for std atomics:
                // https://github.com/serde-rs/serde/blob/v1.0.152/serde/src/ser/impls.rs#L958-L959
                self.load(Ordering::Relaxed).serialize(serializer)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl<'de> serde::de::Deserialize<'de> for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::de::Deserializer<'de>,
            {
                // Deserialize the inner value, then wrap it in a fresh atomic.
                serde::de::Deserialize::deserialize(deserializer).map(Self::new)
            }
        }
    };
}
164
// Provides `add`/`sub`/`and`/`or`/`xor` convenience wrappers that discard the
// return value of the corresponding `fetch_*` method.
// We do not provide `nand` because it cannot be optimized on either x86 or MSP430.
// https://godbolt.org/z/7TzjKqYvE
macro_rules! impl_default_no_fetch_ops {
    // bool only gets the logical operations.
    ($atomic_type:ident, bool) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: bool, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: bool, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: bool, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
    // Integers additionally get add/sub.
    ($atomic_type:ident, $int_type:ident) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.fetch_add(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.fetch_sub(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
}
// Provides default single-bit operations (`bit_set`/`bit_clear`/`bit_toggle`)
// implemented via `fetch_or`/`fetch_and`/`fetch_xor` with a one-bit mask; each
// returns the previous value of the targeted bit. `wrapping_shl` masks the
// shift amount by the type's bit width, so an out-of-range `bit` wraps instead
// of panicking.
macro_rules! impl_default_bit_opts {
    ($atomic_type:ident, $int_type:ident) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_set(&self, bit: u32, order: Ordering) -> bool {
                let mask = (1 as $int_type).wrapping_shl(bit);
                self.fetch_or(mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_clear(&self, bit: u32, order: Ordering) -> bool {
                let mask = (1 as $int_type).wrapping_shl(bit);
                self.fetch_and(!mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_toggle(&self, bit: u32, order: Ordering) -> bool {
                let mask = (1 as $int_type).wrapping_shl(bit);
                self.fetch_xor(mask, order) & mask != 0
            }
        }
    };
}
241
// This just outputs the input as is, but can be used like an item-level block by using it with cfg.
// e.g. `#[cfg(...)] items! { ... }` applies one cfg to every contained item at once.
macro_rules! items {
    ($($tt:tt)*) => {
        $($tt)*
    };
}
248
// `cfg_has_atomic_ptr!` passes its tokens through when pointer-width atomic
// load/store is available, and swallows them otherwise. The two identically
// named modules below are mutually exclusive via cfg; exactly one of them
// defines the exported macro.
#[cfg(not(all(
    portable_atomic_no_atomic_load_store,
    not(any(
        target_arch = "avr",
        target_arch = "bpf",
        target_arch = "msp430",
        target_arch = "riscv32",
        target_arch = "riscv64",
        feature = "critical-section",
    )),
)))]
#[macro_use]
mod atomic_ptr_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_ptr {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (negated condition): the macro expands to nothing.
#[cfg(all(
    portable_atomic_no_atomic_load_store,
    not(any(
        target_arch = "avr",
        target_arch = "bpf",
        target_arch = "msp430",
        target_arch = "riscv32",
        target_arch = "riscv64",
        feature = "critical-section",
    )),
))]
#[macro_use]
mod atomic_ptr_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_ptr {
        ($($tt:tt)*) => {};
    }
}
289
// `cfg_has_atomic_8!`/`cfg_has_atomic_16!` pass their tokens through when
// 8-bit/16-bit atomic load/store is available, and swallow them otherwise.
// The two identically named modules are mutually exclusive via cfg.
#[cfg(not(all(
    portable_atomic_no_atomic_load_store,
    not(any(
        target_arch = "avr",
        target_arch = "msp430",
        target_arch = "riscv32",
        target_arch = "riscv64",
        feature = "critical-section",
    )),
)))]
#[macro_use]
mod atomic_8_16_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_8 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_16 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (negated condition): both macros expand to nothing.
#[cfg(all(
    portable_atomic_no_atomic_load_store,
    not(any(
        target_arch = "avr",
        target_arch = "msp430",
        target_arch = "riscv32",
        target_arch = "riscv64",
        feature = "critical-section",
    )),
))]
#[macro_use]
mod atomic_8_16_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_8 {
        ($($tt:tt)*) => {};
    }
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_16 {
        ($($tt:tt)*) => {};
    }
}
340
// `cfg_has_atomic_32!` passes its tokens through when 32-bit atomics are
// usable: either the pointer width is not 16-bit, or the "fallback" feature
// supplies an implementation — and atomic load/store is not entirely absent.
#[cfg(all(
    any(not(target_pointer_width = "16"), feature = "fallback"),
    not(all(
        portable_atomic_no_atomic_load_store,
        not(any(
            target_arch = "avr",
            target_arch = "msp430",
            target_arch = "riscv32",
            target_arch = "riscv64",
            feature = "critical-section",
        )),
    )),
))]
#[macro_use]
mod atomic_32_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_32 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (negated condition): the macro expands to nothing.
#[cfg(not(all(
    any(not(target_pointer_width = "16"), feature = "fallback"),
    not(all(
        portable_atomic_no_atomic_load_store,
        not(any(
            target_arch = "avr",
            target_arch = "msp430",
            target_arch = "riscv32",
            target_arch = "riscv64",
            feature = "critical-section",
        )),
    )),
)))]
#[macro_use]
mod atomic_32_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_32 {
        ($($tt:tt)*) => {};
    }
}
385
// `cfg_has_atomic_64!` passes its tokens through when 64-bit atomics are
// available — natively, via a wide-enough pointer width, or via the fallback
// implementation. The condition is written twice: the first `cfg_attr` is for
// compilers without `cfg(target_has_atomic)` (using the `portable_atomic_no_*`
// cfgs set by the build script), the second for compilers with it.
#[cfg_attr(
    portable_atomic_no_cfg_target_has_atomic,
    cfg(any(
        all(
            feature = "fallback",
            any(
                not(portable_atomic_no_atomic_cas),
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
                target_arch = "avr",
                target_arch = "msp430",
            ),
        ),
        not(portable_atomic_no_atomic_64),
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        all(
            feature = "fallback",
            any(
                target_has_atomic = "ptr",
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
                target_arch = "avr",
                target_arch = "msp430",
            ),
        ),
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
#[macro_use]
mod atomic_64_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_64 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (same conditions, negated): the macro expands to nothing.
#[cfg_attr(
    portable_atomic_no_cfg_target_has_atomic,
    cfg(not(any(
        all(
            feature = "fallback",
            any(
                not(portable_atomic_no_atomic_cas),
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
                target_arch = "avr",
                target_arch = "msp430",
            ),
        ),
        not(portable_atomic_no_atomic_64),
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    )))
)]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(not(any(
        all(
            feature = "fallback",
            any(
                target_has_atomic = "ptr",
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
                target_arch = "avr",
                target_arch = "msp430",
            ),
        ),
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    )))
)]
#[macro_use]
mod atomic_64_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_64 {
        ($($tt:tt)*) => {};
    }
}
472
// `cfg_has_atomic_128!` passes its tokens through when 128-bit atomics are
// available. Without the "fallback" feature, that means a native/asm
// implementation exists (aarch64; x86_64 with cmpxchg16b; powerpc64 with
// quadword-atomics or runtime detection; s390x). With "fallback", it instead
// requires that some CAS-capable implementation is usable (second and third
// `cfg_attr`s, written for compilers without/with `cfg(target_has_atomic)`).
#[cfg_attr(
    not(feature = "fallback"),
    cfg(any(
        all(
            target_arch = "aarch64",
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        ),
        all(
            target_arch = "x86_64",
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            any(
                target_feature = "cmpxchg16b",
                portable_atomic_target_feature = "cmpxchg16b",
                all(
                    feature = "fallback",
                    not(portable_atomic_no_cmpxchg16b_target_feature),
                    not(portable_atomic_no_outline_atomics),
                    not(any(target_env = "sgx", miri)),
                ),
            ),
        ),
        all(
            target_arch = "powerpc64",
            portable_atomic_unstable_asm_experimental_arch,
            any(
                target_feature = "quadword-atomics",
                portable_atomic_target_feature = "quadword-atomics",
                all(
                    feature = "fallback",
                    not(portable_atomic_no_outline_atomics),
                    portable_atomic_outline_atomics, // TODO(powerpc64): currently disabled by default
                    any(
                        all(
                            target_os = "linux",
                            any(
                                target_env = "gnu",
                                all(
                                    any(target_env = "musl", target_env = "ohos"),
                                    not(target_feature = "crt-static"),
                                ),
                                portable_atomic_outline_atomics,
                            ),
                        ),
                        target_os = "android",
                        target_os = "freebsd",
                    ),
                    not(any(miri, portable_atomic_sanitize_thread)),
                ),
            ),
        ),
        all(target_arch = "s390x", portable_atomic_unstable_asm_experimental_arch),
    ))
)]
#[cfg_attr(
    all(feature = "fallback", portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        not(portable_atomic_no_atomic_cas),
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    ))
)]
#[cfg_attr(
    all(feature = "fallback", not(portable_atomic_no_cfg_target_has_atomic)),
    cfg(any(
        target_has_atomic = "ptr",
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    ))
)]
#[macro_use]
mod atomic_128_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_128 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (same conditions, negated): the macro expands to nothing.
#[cfg_attr(
    not(feature = "fallback"),
    cfg(not(any(
        all(
            target_arch = "aarch64",
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        ),
        all(
            target_arch = "x86_64",
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            any(
                target_feature = "cmpxchg16b",
                portable_atomic_target_feature = "cmpxchg16b",
                all(
                    feature = "fallback",
                    not(portable_atomic_no_cmpxchg16b_target_feature),
                    not(portable_atomic_no_outline_atomics),
                    not(any(target_env = "sgx", miri)),
                ),
            ),
        ),
        all(
            target_arch = "powerpc64",
            portable_atomic_unstable_asm_experimental_arch,
            any(
                target_feature = "quadword-atomics",
                portable_atomic_target_feature = "quadword-atomics",
                all(
                    feature = "fallback",
                    not(portable_atomic_no_outline_atomics),
                    portable_atomic_outline_atomics, // TODO(powerpc64): currently disabled by default
                    any(
                        all(
                            target_os = "linux",
                            any(
                                target_env = "gnu",
                                all(
                                    any(target_env = "musl", target_env = "ohos"),
                                    not(target_feature = "crt-static"),
                                ),
                                portable_atomic_outline_atomics,
                            ),
                        ),
                        target_os = "android",
                        target_os = "freebsd",
                    ),
                    not(any(miri, portable_atomic_sanitize_thread)),
                ),
            ),
        ),
        all(target_arch = "s390x", portable_atomic_unstable_asm_experimental_arch),
    )))
)]
#[cfg_attr(
    all(feature = "fallback", portable_atomic_no_cfg_target_has_atomic),
    cfg(not(any(
        not(portable_atomic_no_atomic_cas),
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    )))
)]
#[cfg_attr(
    all(feature = "fallback", not(portable_atomic_no_cfg_target_has_atomic)),
    cfg(not(any(
        target_has_atomic = "ptr",
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    )))
)]
#[macro_use]
mod atomic_128_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_128 {
        ($($tt:tt)*) => {};
    }
}
637
// `cfg_has_atomic_cas!` passes its tokens through when some compare-and-swap
// capable implementation is usable (native CAS, assume-single-core,
// critical-section, or the avr/msp430 implementations). As above, the
// condition is written twice for compilers without/with
// `cfg(target_has_atomic)`.
#[cfg_attr(
    portable_atomic_no_cfg_target_has_atomic,
    cfg(any(
        not(portable_atomic_no_atomic_cas),
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    ))
)]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "ptr",
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    ))
)]
#[macro_use]
mod atomic_cas_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_cas {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
// Disabled variant (same conditions, negated): the macro expands to nothing.
#[cfg_attr(
    portable_atomic_no_cfg_target_has_atomic,
    cfg(not(any(
        not(portable_atomic_no_atomic_cas),
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    )))
)]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(not(any(
        target_has_atomic = "ptr",
        portable_atomic_unsafe_assume_single_core,
        feature = "critical-section",
        target_arch = "avr",
        target_arch = "msp430",
    )))
)]
#[macro_use]
mod atomic_cas_macros {
    #[doc(hidden)] // Not public API. (please submit an issue if you want this to be public API)
    #[macro_export]
    macro_rules! cfg_has_atomic_cas {
        ($($tt:tt)*) => {};
    }
}
696
697// https://github.com/rust-lang/rust/blob/1.70.0/library/core/src/sync/atomic.rs#L3155
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
/// Panics if `order` is not a valid memory ordering for an atomic load.
///
/// `Relaxed`, `Acquire`, and `SeqCst` are accepted; `Release` and `AcqRel`
/// panic with the same messages std uses.
pub(crate) fn assert_load_ordering(order: Ordering) {
    // Reject the write-side orderings first; everything else is a valid load
    // ordering (`Ordering` is non-exhaustive, hence the final catch-all).
    match order {
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        _ => unreachable!("{:?}", order),
    }
}
708
709// https://github.com/rust-lang/rust/blob/1.70.0/library/core/src/sync/atomic.rs#L3140
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
/// Panics if `order` is not a valid memory ordering for an atomic store.
///
/// `Relaxed`, `Release`, and `SeqCst` are accepted; `Acquire` and `AcqRel`
/// panic with the same messages std uses.
pub(crate) fn assert_store_ordering(order: Ordering) {
    // Reject the read-side orderings first; everything else is a valid store
    // ordering (`Ordering` is non-exhaustive, hence the final catch-all).
    match order {
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        Ordering::Relaxed | Ordering::Release | Ordering::SeqCst => {}
        _ => unreachable!("{:?}", order),
    }
}
720
721// https://github.com/rust-lang/rust/blob/1.70.0/library/core/src/sync/atomic.rs#L3221
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
/// Panics if `failure` is not a valid failure ordering for `compare_exchange`.
///
/// Any of the five orderings is allowed for `success`; `failure` must not
/// contain a release component (`Release`/`AcqRel` panic, matching std).
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    // Validate `success`: every known ordering is acceptable here; the
    // catch-all only exists because `Ordering` is non-exhaustive.
    match success {
        Ordering::Relaxed
        | Ordering::Acquire
        | Ordering::Release
        | Ordering::AcqRel
        | Ordering::SeqCst => {}
        _ => unreachable!("{:?}, {:?}", success, failure),
    }
    // Validate `failure`: a failed exchange performs no write, so release
    // semantics are meaningless.
    match failure {
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        _ => unreachable!("{:?}, {:?}", success, failure),
    }
}
740
741// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
742// https://github.com/rust-lang/rust/pull/98383
#[allow(dead_code)]
#[inline]
/// Strengthens `success` so it is at least as strong as `failure`, for
/// implementations where the success ordering must subsume the failure one.
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    if failure == Ordering::SeqCst {
        // SeqCst failure requires a SeqCst success ordering.
        Ordering::SeqCst
    } else if failure == Ordering::Acquire {
        // An acquire failure adds an acquire component to the success ordering.
        match success {
            Ordering::Relaxed => Ordering::Acquire,
            Ordering::Release => Ordering::AcqRel,
            other => other,
        }
    } else {
        // Relaxed failure (or anything weaker than success): keep as-is.
        success
    }
}
753
754/// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
755/// This is used for 64-bit architecture's 32-bit ABI (e.g., AArch64 ILP32 ABI).
756/// See ptr_reg! macro in src/gen/utils.rs for details.
#[cfg(not(portable_atomic_no_asm_maybe_uninit))]
#[cfg(target_pointer_width = "32")]
#[allow(dead_code)]
#[inline]
pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
    // Lay out the 32-bit pointer next to a 32-bit zero pad so the pair forms a
    // zero-extended 64-bit value regardless of byte order: the pad occupies
    // the high-order half (first field on big-endian, second on little-endian).
    #[repr(C)]
    struct ZeroExtended {
        #[cfg(target_endian = "big")]
        pad: *mut (),
        v: *mut (),
        #[cfg(target_endian = "little")]
        pad: *mut (),
    }
    // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
    unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
}
773
#[allow(dead_code)]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "x86_64",
))]
/// A 128-bit value represented as a pair of 64-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U128 {
    /// The value viewed as a single `u128`.
    pub(crate) whole: u128,
    /// The value viewed as its 64-bit halves (field order per `Pair`).
    pub(crate) pair: Pair<u64>,
}
#[allow(dead_code)]
#[cfg(target_arch = "arm")]
/// A 64-bit value represented as a pair of 32-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U64 {
    /// The value viewed as a single `u64`.
    pub(crate) whole: u64,
    /// The value viewed as its 32-bit halves (field order per `Pair`).
    pub(crate) pair: Pair<u32>,
}
/// Low/high halves of a double-width value; field order follows the target's
/// byte order so that the union layouts above match the in-memory value.
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct Pair<T: Copy> {
    // little endian order
    // NOTE(review): aarch64/arm use lo/hi field order even on big-endian
    // variants — presumably required by the asm implementations for those
    // targets; confirm against the arch-specific code.
    #[cfg(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm"))]
    pub(crate) lo: T,
    pub(crate) hi: T,
    // big endian order
    #[cfg(not(any(target_endian = "little", target_arch = "aarch64", target_arch = "arm")))]
    pub(crate) lo: T,
}
815
// Word size used when emulating sub-word atomics with word-sized operations
// (see `create_sub_word_mask_values` below).
#[allow(dead_code)]
type MinWord = u32;
// Native general-purpose register width of the target.
#[cfg(target_arch = "riscv32")]
type RegSize = u32;
#[cfg(target_arch = "riscv64")]
type RegSize = u64;
822// Adapted from https://github.com/taiki-e/atomic-maybe-uninit/blob/v0.3.0/src/utils.rs#L210.
823// Helper for implementing sub-word atomic operations using word-sized LL/SC loop or CAS loop.
824//
825// Refs: https://github.com/llvm/llvm-project/blob/llvmorg-17.0.0-rc2/llvm/lib/CodeGen/AtomicExpandPass.cpp#L699
826// (aligned_ptr, shift, mask)
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RegSize, RegSize) {
    use core::mem;
    // On these ISAs the asm uses wrapping shift instructions (see the comment
    // below), so the explicit masking of the byte offset — and the pre-shifting
    // of `mask` — can be skipped.
    const SHIFT_MASK: bool = !cfg!(any(
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "loongarch64",
        target_arch = "s390x",
    ));
    let ptr_mask = mem::size_of::<MinWord>() - 1;
    // Round the pointer down to the start of the containing aligned word.
    let aligned_ptr = strict::with_addr(ptr, ptr as usize & !ptr_mask) as *mut MinWord;
    let ptr_lsb = if SHIFT_MASK {
        ptr as usize & ptr_mask
    } else {
        // We use 32-bit wrapping shift instructions in asm on these platforms.
        ptr as usize
    };
    // Bit offset of `T` within the word; on big-endian targets (s390x aside)
    // the byte offset is mirrored within the word first.
    let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) {
        ptr_lsb.wrapping_mul(8)
    } else {
        (ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8)
    };
    let mut mask: RegSize = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RegSize
    if SHIFT_MASK {
        mask <<= shift;
    }
    (aligned_ptr, shift as RegSize, mask)
}
857
858/// Emulate strict provenance.
859///
860/// Once strict_provenance is stable, migrate to the standard library's APIs.
#[cfg(any(miri, target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[allow(clippy::cast_possible_wrap)]
pub(crate) mod strict {
    /// Replace the address portion of this pointer with a new address.
    ///
    /// Provenance-preserving stand-in for `pointer::with_addr` from the
    /// strict_provenance API.
    #[inline]
    #[must_use]
    pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = ptr as usize as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation.
        (ptr as *mut u8).wrapping_offset(offset) as *mut T
    }

    /// Run an operation of some kind on a pointer.
    ///
    /// Stand-in for `pointer::map_addr`: applies `f` to the address while
    /// keeping the original pointer's provenance.
    #[inline]
    #[must_use]
    pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T {
        with_addr(ptr, f(ptr as usize))
    }
}
889