// SPDX-License-Identifier: Apache-2.0 OR MIT

#![cfg_attr(not(all(test, feature = "float")), allow(dead_code, unused_macros))]

#[macro_use]
#[path = "gen/utils.rs"]
mod gen;

use core::sync::atomic::Ordering;

macro_rules! static_assert {
    ($cond:expr $(,)?) => {{
        let [] = [(); true as usize - $crate::utils::_assert_is_bool($cond) as usize];
    }};
}
pub(crate) const fn _assert_is_bool(v: bool) -> bool {
    v
}
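// Usage sketch (illustrative only): `static_assert!` expands to a block, so it is
// invoked inside a function body. If the condition is false, the array has length 1
// and the empty pattern `[]` fails to match, producing a compile error;
// `_assert_is_bool` ensures the condition is actually a `bool`.
//
//     fn _static_assert_example() {
//         static_assert!(core::mem::size_of::<u32>() == 4); // ok
//         // static_assert!(core::mem::size_of::<u32>() == 8); // compile error
//     }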

macro_rules! static_assert_layout {
    ($atomic_type:ty, $value_type:ty) => {
        static_assert!(
            core::mem::align_of::<$atomic_type>() == core::mem::size_of::<$atomic_type>()
        );
        static_assert!(core::mem::size_of::<$atomic_type>() == core::mem::size_of::<$value_type>());
    };
}
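// Usage sketch (illustrative only): asserts that an atomic type's alignment equals
// its size and that it is exactly as large as the value type it wraps.
//
//     fn _layout_example() {
//         static_assert_layout!(core::sync::atomic::AtomicU32, u32);
//     }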

// #[doc = concat!(...)] requires Rust 1.54
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
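// Usage sketch (illustrative only; the item is hypothetical): the computed string
// becomes the doc comment of the item that follows it.
//
//     doc_comment! {
//         concat!("Returns the size of `", stringify!(u32), "` in bytes."),
//         pub(crate) fn size_of_u32() -> usize {
//             core::mem::size_of::<u32>()
//         }
//     }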

// Adapted from https://github.com/BurntSushi/memchr/blob/2.4.1/src/memchr/x86/mod.rs#L9-L71.
/// # Safety
///
/// - the caller must uphold the safety contract for the function returned by $detect_body.
/// - the memory pointed to by the function pointer returned by $detect_body must be visible to all threads.
///
/// The second requirement is always met if the function pointer points to a function definition.
/// (Currently, all uses of this macro in our code fall into this case.)
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)? { $($detect_body:tt)* }) => {{
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as *mut ());
        #[cold]
        unsafe fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = { $($detect_body)* };
            FUNC.store(func as *mut (), core::sync::atomic::Ordering::Relaxed);
            // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
            unsafe { func($($arg_pat),*) }
        }
        // SAFETY: `FnTy` is a function pointer, which is always safe to transmute with a `*mut ()`.
        // (To force the caller to use unsafe block for this macro, do not use
        // unsafe block here.)
        let func = {
            core::mem::transmute::<*mut (), FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
        // (To force the caller to use unsafe block for this macro, do not use
        // unsafe block here.)
        func($($arg_pat),*)
    }};
}
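// Usage sketch (illustrative only; `has_fast_path`, `op_fast`, and `op_fallback` are
// hypothetical): the first call runs the detection body, caches the chosen function
// pointer in `FUNC`, and later calls dispatch through the cached pointer.
//
//     unsafe fn op(src: *mut u8) -> u8 {
//         // SAFETY: the caller must uphold the safety contract of the selected function.
//         unsafe {
//             ifunc!(unsafe fn(src: *mut u8) -> u8 {
//                 if has_fast_path() { op_fast } else { op_fallback }
//             })
//         }
//     }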

#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! fn_alias {
    (
        $(#[$($fn_attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
        $(#[$($alias_attr:tt)*])*
        $new:ident = $from:ident($($last_args:tt)*);
        $($rest:tt)*
    ) => {
        $(#[$($fn_attr)*])*
        $(#[$($alias_attr)*])*
        $vis unsafe fn $new($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { $from($($arg_pat,)* $($last_args)*) }
        }
        fn_alias! {
            $(#[$($fn_attr)*])*
            $vis unsafe fn($($arg_pat: $arg_ty),*) $(-> $ret_ty)?;
            $($rest)*
        }
    };
    (
        $(#[$($attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
    ) => {}
}
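// Usage sketch (illustrative only; `atomic_load`, `atomic_load_acquire`, and
// `atomic_load_seqcst` are hypothetical): each alias forwards its arguments to the
// named function and appends the fixed trailing arguments.
//
//     fn_alias! {
//         pub(crate) unsafe fn(src: *mut u32) -> u32;
//         atomic_load_acquire = atomic_load(Ordering::Acquire);
//         atomic_load_seqcst = atomic_load(Ordering::SeqCst);
//     }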

/// Make the given function const if the given condition is true.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const $($rest:tt)*
    ) => {
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const $($rest)*
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis $($rest)*
    };
}
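// Usage sketch (illustrative only; the cfg name and function are hypothetical): the
// item is emitted as a `const fn` when the cfg is enabled and as a plain `fn` otherwise.
//
//     const_fn! {
//         const_if: #[cfg(not(portable_atomic_no_const_example))];
//         pub(crate) const fn double(v: u32) -> u32 {
//             v * 2
//         }
//     }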

/// Implements `core::fmt::Debug` and `serde::{Serialize, Deserialize}` (when serde
/// feature is enabled) for atomic bool, integer, or float.
macro_rules! impl_debug_and_serde {
    ($atomic_type:ident) => {
        impl fmt::Debug for $atomic_type {
            #[inline] // fmt is not hot path, but #[inline] on fmt seems to still be useful: https://github.com/rust-lang/rust/pull/117727
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // std atomic types use Relaxed in Debug::fmt: https://github.com/rust-lang/rust/blob/1.80.0/library/core/src/sync/atomic.rs#L2166
                fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl serde::ser::Serialize for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::ser::Serializer,
            {
                // https://github.com/serde-rs/serde/blob/v1.0.152/serde/src/ser/impls.rs#L958-L959
                self.load(Ordering::Relaxed).serialize(serializer)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl<'de> serde::de::Deserialize<'de> for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::de::Deserializer<'de>,
            {
                serde::de::Deserialize::deserialize(deserializer).map(Self::new)
            }
        }
    };
}
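// Usage sketch (illustrative only): invoked once per atomic type, in a module that
// imports `fmt` and `Ordering` and whose type provides `load` and `new`:
//
//     impl_debug_and_serde!(AtomicU32);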

// We do not provide `nand` because it cannot be optimized on either x86 or MSP430.
// https://godbolt.org/z/ahWejchbT
macro_rules! impl_default_no_fetch_ops {
    ($atomic_type:ident, bool) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: bool, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: bool, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: bool, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.fetch_add(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.fetch_sub(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
}
macro_rules! impl_default_bit_opts {
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_set(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_or(mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_clear(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_and(!mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_toggle(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_xor(mask, order) & mask != 0
            }
        }
    };
}
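// Behavior sketch (illustrative; `AtomicU8` stands in for any invoking type): after
// `impl_default_bit_opts!(AtomicU8, u8);`, given a value `a` currently holding
// `0b0000_0001`, `a.bit_set(3, order)` ORs in the mask `1 << 3 == 0b0000_1000`,
// returns `false` (bit 3 was previously clear), and leaves the value `0b0000_1001`.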

// This just outputs the input as is, but can be used like an item-level block by using it with cfg.
macro_rules! items {
    ($($tt:tt)*) => {
        $($tt)*
    };
}
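// Usage sketch (illustrative only; the module names are hypothetical): applying a
// single cfg to a group of items without introducing a module.
//
//     #[cfg(feature = "float")]
//     items! {
//         mod float_imp;
//         pub(crate) use self::float_imp::*;
//     }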

#[allow(dead_code)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
// Stable version of https://doc.rust-lang.org/nightly/std/hint/fn.assert_unchecked.html.
// TODO: use real core::hint::assert_unchecked on 1.81+ https://github.com/rust-lang/rust/pull/123588
#[inline(always)]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) unsafe fn assert_unchecked(cond: bool) {
    if !cond {
        if cfg!(debug_assertions) {
            unreachable!()
        } else {
            // SAFETY: the caller promised `cond` is true.
            unsafe { core::hint::unreachable_unchecked() }
        }
    }
}

// https://github.com/rust-lang/rust/blob/1.80.0/library/core/src/sync/atomic.rs#L3294
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_load_ordering(order: Ordering) {
    match order {
        Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        _ => unreachable!(),
    }
}

// https://github.com/rust-lang/rust/blob/1.80.0/library/core/src/sync/atomic.rs#L3279
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_store_ordering(order: Ordering) {
    match order {
        Ordering::Release | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        _ => unreachable!(),
    }
}

// https://github.com/rust-lang/rust/blob/1.80.0/library/core/src/sync/atomic.rs#L3360
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    match success {
        Ordering::AcqRel
        | Ordering::Acquire
        | Ordering::Relaxed
        | Ordering::Release
        | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
    match failure {
        Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        _ => unreachable!(),
    }
}

// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
// https://github.com/rust-lang/rust/pull/98383
#[allow(dead_code)]
#[inline]
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    match (success, failure) {
        (Ordering::Relaxed, Ordering::Acquire) => Ordering::Acquire,
        (Ordering::Release, Ordering::Acquire) => Ordering::AcqRel,
        (_, Ordering::SeqCst) => Ordering::SeqCst,
        _ => success,
    }
}
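// Behavior sketch (follows directly from the match arms above): the returned success
// ordering is never weaker than the failure ordering, e.g.
//
//     assert_eq!(upgrade_success_ordering(Ordering::Relaxed, Ordering::Acquire), Ordering::Acquire);
//     assert_eq!(upgrade_success_ordering(Ordering::Release, Ordering::Acquire), Ordering::AcqRel);
//     assert_eq!(upgrade_success_ordering(Ordering::Release, Ordering::SeqCst), Ordering::SeqCst);
//     assert_eq!(upgrade_success_ordering(Ordering::Acquire, Ordering::Relaxed), Ordering::Acquire);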

/// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
/// This is used for the 32-bit ABIs of 64-bit architectures (e.g., the AArch64 ILP32 ABI).
/// See ptr_reg! macro in src/gen/utils.rs for details.
#[cfg(not(portable_atomic_no_asm_maybe_uninit))]
#[cfg(target_pointer_width = "32")]
#[allow(dead_code)]
#[inline]
pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
    #[repr(C)]
    struct ZeroExtended {
        #[cfg(target_endian = "big")]
        pad: *mut (),
        v: *mut (),
        #[cfg(target_endian = "little")]
        pad: *mut (),
    }
    // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
    unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
}

#[allow(dead_code)]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "x86_64",
))]
/// A 128-bit value represented as a pair of 64-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U128 {
    pub(crate) whole: u128,
    pub(crate) pair: Pair<u64>,
}
#[allow(dead_code)]
#[cfg(any(target_arch = "arm", target_arch = "riscv32"))]
/// A 64-bit value represented as a pair of 32-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U64 {
    pub(crate) whole: u64,
    pub(crate) pair: Pair<u32>,
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct Pair<T: Copy> {
    // little endian order
    #[cfg(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    ))]
    pub(crate) lo: T,
    pub(crate) hi: T,
    // big endian order
    #[cfg(not(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    )))]
    pub(crate) lo: T,
}
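// Behavior sketch (illustrative; assumes a little-endian target, where `lo` is the
// least significant half): the union lets a 128-bit value be assembled from, or split
// into, the register pair the architecture-specific asm expects.
//
//     let u = U128 { whole: 0x0000_0000_0000_0002_0000_0000_0000_0001 };
//     // SAFETY: both union fields are plain-old-data types of the same size.
//     let pair = unsafe { u.pair };
//     // pair.lo == 1, pair.hi == 2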

#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type MinWord = u32;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type RetInt = u32;
// Adapted from https://github.com/taiki-e/atomic-maybe-uninit/blob/v0.3.4/src/utils.rs#L255.
// Helper for implementing sub-word atomic operations using word-sized LL/SC loop or CAS loop.
//
// Refs: https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/CodeGen/AtomicExpandPass.cpp#L737
// (aligned_ptr, shift, mask)
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) {
    use core::mem;
    // RISC-V, MIPS, SPARC, LoongArch, Xtensa: shift amount of 32-bit shift instructions is 5 bits unsigned (0-31).
    // PowerPC, C-SKY: shift amount of 32-bit shift instructions is 6 bits unsigned (0-63) and shift amount 32-63 means "clear".
    // Arm: shift amount of 32-bit shift instructions is 8 bits unsigned (0-255).
    // Hexagon: shift amount of 32-bit shift instructions is 7 bits signed (-64-63) and negative shift amount means "reverse the direction of the shift".
    // (On s390x, we don't use the mask returned from this function.)
    const SHIFT_MASK: bool = !cfg!(any(
        target_arch = "loongarch64",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc",
        target_arch = "sparc64",
        target_arch = "xtensa",
    ));
    let ptr_mask = mem::size_of::<MinWord>() - 1;
    let aligned_ptr = strict::with_addr(ptr, ptr as usize & !ptr_mask) as *mut MinWord;
    let ptr_lsb = if SHIFT_MASK {
        ptr as usize & ptr_mask
    } else {
        // We use 32-bit wrapping shift instructions in asm on these platforms.
        ptr as usize
    };
    let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) {
        ptr_lsb.wrapping_mul(8)
    } else {
        (ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8)
    };
    let mut mask: RetInt = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RetInt
    if SHIFT_MASK {
        mask <<= shift;
    }
    (aligned_ptr, shift as RetInt, mask)
}
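// Worked example (illustrative; the numbers follow directly from the code above): for
// a `*mut u8` whose address is 0x1002 on little-endian RISC-V, `aligned_ptr` is 0x1000.
// Since SHIFT_MASK is false on RISC-V, `ptr_lsb` is the full address and `shift` is
// `0x1002 * 8`, whose low 5 bits (all a 32-bit shift instruction uses) are 16, and
// `mask` stays 0xFF; the asm shifts the mask into place with the same wrapping shift.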

// TODO: use stabilized core::ptr strict_provenance helpers https://github.com/rust-lang/rust/pull/130350
#[cfg(any(miri, target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
pub(crate) mod strict {
    #[inline]
    #[must_use]
    pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T {
        // This should probably be an intrinsic to avoid doing any sort of arithmetic, but
        // meanwhile, we can implement it with `wrapping_offset`, which preserves the pointer's
        // provenance.
        let offset = addr.wrapping_sub(ptr as usize);
        (ptr as *mut u8).wrapping_add(offset) as *mut T
    }

    #[cfg(miri)]
    #[inline]
    #[must_use]
    pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T {
        with_addr(ptr, f(ptr as usize))
    }
}