// SPDX-License-Identifier: Apache-2.0 OR MIT

#![cfg_attr(not(all(test, feature = "float")), allow(dead_code, unused_macros))]

#[macro_use]
#[path = "gen/utils.rs"]
mod generated;

use core::sync::atomic::Ordering;

macro_rules! static_assert {
    ($cond:expr $(,)?) => {{
        let [] = [(); true as usize - $crate::utils::_assert_is_bool($cond) as usize];
    }};
}
pub(crate) const fn _assert_is_bool(v: bool) -> bool {
    v
}
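
// Illustrative usage (hypothetical): the expansion is a block evaluating to
// `()`, usable in a function body or a const initializer. A false condition
// produces `[(); 1]`, which fails to match the `[]` pattern at compile time:
//
//     static_assert!(core::mem::size_of::<u32>() == 4); // compiles
//     static_assert!(core::mem::size_of::<u32>() == 8); // compile error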

macro_rules! static_assert_layout {
    ($atomic_type:ty, $value_type:ty) => {
        static_assert!(
            core::mem::align_of::<$atomic_type>() == core::mem::size_of::<$atomic_type>()
        );
        static_assert!(core::mem::size_of::<$atomic_type>() == core::mem::size_of::<$value_type>());
    };
}
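
// For example (illustrative), this checks that a 64-bit atomic is exactly
// 8 bytes in size, 8-byte aligned, and the same size as its value type:
//
//     static_assert_layout!(AtomicU64, u64);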

// #[doc = concat!(...)] requires Rust 1.54
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
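
// For example (illustrative), on pre-1.54 compilers that reject
// `#[doc = concat!(..)]` directly, the computed string is smuggled in
// through an `expr` metavariable instead:
//
//     doc_comment! {
//         concat!("Returns the size of `", stringify!(u32), "`."),
//         fn size_of_u32() -> usize { core::mem::size_of::<u32>() }
//     }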

// Adapted from https://github.com/BurntSushi/memchr/blob/2.4.1/src/memchr/x86/mod.rs#L9-L71.
/// # Safety
///
/// - the caller must uphold the safety contract for the function returned by $detect_body.
/// - the memory pointed to by the function pointer returned by $detect_body must be visible to all threads.
///
/// The second requirement is always met when the function pointer points to a function definition.
/// (Currently, all uses of this macro in our code are of this kind.)
#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)? { $($detect_body:tt)* }) => {{
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as *mut ());
        #[cold]
        unsafe fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = { $($detect_body)* };
            FUNC.store(func as *mut (), core::sync::atomic::Ordering::Relaxed);
            // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
            unsafe { func($($arg_pat),*) }
        }
        // SAFETY: `FnTy` is a function pointer, which is always safe to transmute with a `*mut ()`.
        // (To force the caller to use an unsafe block for this macro, do not use
        // an unsafe block here.)
        let func = {
            core::mem::transmute::<*mut (), FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        // SAFETY: the caller must uphold the safety contract for the function returned by $detect_body.
        // (To force the caller to use an unsafe block for this macro, do not use
        // an unsafe block here.)
        func($($arg_pat),*)
    }};
}
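
// Minimal usage sketch (hypothetical names): FUNC initially points to
// `detect`; the first call evaluates $detect_body once, caches the selected
// implementation in FUNC, and calls it, so later calls jump straight to the
// cached function:
//
//     unsafe {
//         ifunc!(unsafe fn(dst: *mut u64, val: u64) -> u64 {
//             if has_cpu_feature() { swap_fast } else { swap_fallback }
//         })
//     }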

#[allow(unused_macros)]
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv32",
    target_arch = "riscv64",
    all(target_arch = "x86_64", not(any(target_env = "sgx", miri))),
))]
macro_rules! fn_alias {
    (
        $(#[$($fn_attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
        $(#[$($alias_attr:tt)*])*
        $new:ident = $from:ident($($last_args:tt)*);
        $($rest:tt)*
    ) => {
        $(#[$($fn_attr)*])*
        $(#[$($alias_attr)*])*
        $vis unsafe fn $new($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // SAFETY: the caller must uphold the safety contract.
            unsafe { $from($($arg_pat,)* $($last_args)*) }
        }
        fn_alias! {
            $(#[$($fn_attr)*])*
            $vis unsafe fn($($arg_pat: $arg_ty),*) $(-> $ret_ty)?;
            $($rest)*
        }
    };
    (
        $(#[$($attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
    ) => {}
}
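
// Minimal usage sketch (hypothetical names): each alias forwards its
// arguments to `$from`, appending the partially-applied trailing arguments:
//
//     fn_alias! {
//         pub(crate) unsafe fn(dst: *mut u32, val: u32) -> u32;
//         atomic_swap_acquire = atomic_swap(Ordering::Acquire);
//         atomic_swap_release = atomic_swap(Ordering::Release);
//     }
//
// expands to wrappers that call `atomic_swap(dst, val, Ordering::Acquire)` etc.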

/// Make the given function const if the given condition is true.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const $($rest:tt)*
    ) => {
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const $($rest)*
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis $($rest)*
    };
}
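
// Minimal usage sketch (hypothetical cfg and function): the item is emitted
// twice under opposite cfgs, once with `const` and once without, so exactly
// one definition survives for any given compiler:
//
//     const_fn! {
//         const_if: #[cfg(not(portable_atomic_no_const_fn_trait_bound))];
//         pub const fn new(v: u32) -> Self { Self { v } }
//     }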

/// Implements `core::fmt::Debug` and `serde::{Serialize, Deserialize}` (when the
/// `serde` feature is enabled) for atomic bool, integer, and float types.
macro_rules! impl_debug_and_serde {
    // TODO(f16_and_f128): Implement serde traits for f16 & f128 once stabilized.
    (AtomicF16) => {
        impl_debug!(AtomicF16);
    };
    (AtomicF128) => {
        impl_debug!(AtomicF128);
    };
    ($atomic_type:ident) => {
        impl_debug!($atomic_type);
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl serde::ser::Serialize for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::ser::Serializer,
            {
                // https://github.com/serde-rs/serde/blob/v1.0.152/serde/src/ser/impls.rs#L958-L959
                self.load(Ordering::Relaxed).serialize(serializer)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl<'de> serde::de::Deserialize<'de> for $atomic_type {
            #[allow(clippy::missing_inline_in_public_items)] // serde doesn't use inline on std atomic's Serialize/Deserialize impl
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::de::Deserializer<'de>,
            {
                serde::de::Deserialize::deserialize(deserializer).map(Self::new)
            }
        }
    };
}
macro_rules! impl_debug {
    ($atomic_type:ident) => {
        impl fmt::Debug for $atomic_type {
            #[inline] // fmt is not hot path, but #[inline] on fmt seems to still be useful: https://github.com/rust-lang/rust/pull/117727
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // std atomic types use Relaxed in Debug::fmt: https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/sync/atomic.rs#L2188
                fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
            }
        }
    };
}

// We do not provide `nand` because it cannot be optimized on either x86 or MSP430.
// https://godbolt.org/z/ahWejchbT
macro_rules! impl_default_no_fetch_ops {
    ($atomic_type:ident, bool) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: bool, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: bool, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: bool, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.fetch_add(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.fetch_sub(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
}
macro_rules! impl_default_bit_opts {
    ($atomic_type:ident, $int_type:ty) => {
        impl $atomic_type {
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_set(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_or(mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_clear(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_and(!mask, order) & mask != 0
            }
            #[inline]
            #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
            pub(crate) fn bit_toggle(&self, bit: u32, order: Ordering) -> bool {
                let mask = <$int_type>::wrapping_shl(1, bit);
                self.fetch_xor(mask, order) & mask != 0
            }
        }
    };
}
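
// Worked example (illustrative): starting from a value of 0b0100,
// `bit_set(2, order)` returns true (bit 2 was already set) and leaves the
// value unchanged, while `bit_set(0, order)` returns false and updates the
// value to 0b0101. Each method reports the *previous* state of the targeted
// bit, and `wrapping_shl` wraps the bit index modulo the integer's bit width.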

// This just outputs the input as-is, but can be used like an item-level block by using it with cfg.
macro_rules! items {
    ($($tt:tt)*) => {
        $($tt)*
    };
}
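
// For example (illustrative), a single cfg attribute can then gate a whole
// group of items at once:
//
//     #[cfg(target_arch = "aarch64")]
//     items! {
//         mod imp;
//         pub(crate) use self::imp::AtomicU128;
//     }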

#[allow(dead_code)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
// Stable version of https://doc.rust-lang.org/nightly/std/hint/fn.assert_unchecked.html.
// TODO: use real core::hint::assert_unchecked on 1.81+ https://github.com/rust-lang/rust/pull/123588
#[inline(always)]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) unsafe fn assert_unchecked(cond: bool) {
    if !cond {
        if cfg!(debug_assertions) {
            unreachable!()
        } else {
            // SAFETY: the caller promised `cond` is true.
            unsafe { core::hint::unreachable_unchecked() }
        }
    }
}

// https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/sync/atomic.rs#L3338
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_load_ordering(order: Ordering) {
    match order {
        Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        _ => unreachable!(),
    }
}
// https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/sync/atomic.rs#L3323
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_store_ordering(order: Ordering) {
    match order {
        Ordering::Release | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        _ => unreachable!(),
    }
}
// https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/sync/atomic.rs#L3404
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    match success {
        Ordering::AcqRel
        | Ordering::Acquire
        | Ordering::Relaxed
        | Ordering::Release
        | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
    match failure {
        Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => {}
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        _ => unreachable!(),
    }
}
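
// For example, a caller passing Ordering::Release to a load goes through
// assert_load_ordering and panics with "there is no such thing as a release
// load", matching std's behavior for invalid orderings.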

// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0418r2.html
// https://github.com/rust-lang/rust/pull/98383
#[allow(dead_code)]
#[inline]
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    match (success, failure) {
        (Ordering::Relaxed, Ordering::Acquire) => Ordering::Acquire,
        (Ordering::Release, Ordering::Acquire) => Ordering::AcqRel,
        (_, Ordering::SeqCst) => Ordering::SeqCst,
        _ => success,
    }
}
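
// For example, a compare_exchange with (success: Release, failure: Acquire)
// is performed with AcqRel on backends whose CAS takes a single ordering,
// since plain Release would not provide the Acquire semantics required on
// the failure path (see P0418 linked above).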

/// Zero-extends the given 32-bit pointer to `MaybeUninit<u64>`.
/// This is used for 64-bit architecture's 32-bit ABI (e.g., AArch64 ILP32 ABI).
/// See ptr_reg! macro in src/gen/utils.rs for details.
#[cfg(not(portable_atomic_no_asm_maybe_uninit))]
#[cfg(target_pointer_width = "32")]
#[allow(dead_code)]
#[inline]
pub(crate) fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
    #[repr(C)]
    struct ZeroExtended {
        #[cfg(target_endian = "big")]
        pad: *mut (),
        v: *mut (),
        #[cfg(target_endian = "little")]
        pad: *mut (),
    }
    // SAFETY: we can safely transmute any 64-bit value to MaybeUninit<u64>.
    unsafe { core::mem::transmute(ZeroExtended { v, pad: core::ptr::null_mut() }) }
}
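
// For example (illustrative), on a little-endian ILP32 target a pointer with
// address 0x1234_5678 is laid out as { v, pad: null }, so the resulting
// 64-bit value is 0x0000_0000_1234_5678; on big-endian targets the field
// order is swapped so the value is still zero-extended.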

#[allow(dead_code)]
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "powerpc64",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "x86_64",
))]
/// A 128-bit value represented as a pair of 64-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U128 {
    pub(crate) whole: u128,
    pub(crate) pair: Pair<u64>,
}
#[allow(dead_code)]
#[cfg(any(target_arch = "arm", target_arch = "riscv32"))]
/// A 64-bit value represented as a pair of 32-bit values.
///
/// This type is `#[repr(C)]`, both fields have the same in-memory representation
/// and are plain old data types, so access to the fields is always safe.
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) union U64 {
    pub(crate) whole: u64,
    pub(crate) pair: Pair<u32>,
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct Pair<T: Copy> {
    // little endian order
    #[cfg(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    ))]
    pub(crate) lo: T,
    pub(crate) hi: T,
    // big endian order
    #[cfg(not(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    )))]
    pub(crate) lo: T,
}
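
// For example (illustrative), on a little-endian target:
//
//     let u = U64 { whole: 0x1122_3344_5566_7788 };
//     // SAFETY: both fields are plain old data with the same size.
//     assert_eq!(unsafe { u.pair.lo }, 0x5566_7788);
//     assert_eq!(unsafe { u.pair.hi }, 0x1122_3344);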

#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type MinWord = u32;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type RetInt = u32;
// Adapted from https://github.com/taiki-e/atomic-maybe-uninit/blob/v0.3.6/src/utils.rs#L255.
// Helper for implementing sub-word atomic operations using word-sized LL/SC loop or CAS loop.
//
// Refs: https://github.com/llvm/llvm-project/blob/llvmorg-20.1.0-rc1/llvm/lib/CodeGen/AtomicExpandPass.cpp#L799
// (aligned_ptr, shift, mask)
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) {
    #[cfg(portable_atomic_no_strict_provenance)]
    use self::ptr::PtrExt as _;
    use core::mem;
    // RISC-V, MIPS, SPARC, LoongArch, Xtensa, BPF: shift amount of 32-bit shift instructions is 5 bits unsigned (0-31).
    // PowerPC, C-SKY: shift amount of 32-bit shift instructions is 6 bits unsigned (0-63) and shift amount 32-63 means "clear".
    // Arm: shift amount of 32-bit shift instructions is 8 bits unsigned (0-255).
    // Hexagon: shift amount of 32-bit shift instructions is 7 bits signed (-64-63) and negative shift amount means "reverse the direction of the shift".
    // (On s390x, we don't use the mask returned from this function.)
    // (See also https://devblogs.microsoft.com/oldnewthing/20230904-00/?p=108704 for others)
    const SHIFT_MASK: bool = !cfg!(any(
        target_arch = "bpf",
        target_arch = "loongarch64",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc",
        target_arch = "sparc64",
        target_arch = "xtensa",
    ));
    let ptr_mask = mem::size_of::<MinWord>() - 1;
    let aligned_ptr = ptr.with_addr(ptr.addr() & !ptr_mask) as *mut MinWord;
    let ptr_lsb = if SHIFT_MASK {
        ptr.addr() & ptr_mask
    } else {
        // We use 32-bit wrapping shift instructions in asm on these platforms.
        ptr.addr()
    };
    let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) {
        ptr_lsb.wrapping_mul(8)
    } else {
        (ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8)
    };
    let mut mask: RetInt = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RetInt
    if SHIFT_MASK {
        mask <<= shift;
    }
    (aligned_ptr, shift as RetInt, mask)
}
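
// Worked example (illustrative): a u8 at address 0x1003 on little-endian
// RISC-V. ptr_mask = 3, so aligned_ptr = 0x1000. SHIFT_MASK is false here
// (RISC-V's 32-bit shifts implicitly mask the amount to 5 bits), so ptr_lsb
// is the full address and shift = 0x1003 * 8; the hardware masking makes the
// effective shift 24. mask stays 0xFF and is shifted into place in the asm,
// so the byte is accessed as `(word >> 24) & 0xFF` via a word-sized CAS on
// the aligned address 0x1000.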

// This module provides core::ptr strict_provenance/exposed_provenance polyfill for pre-1.84 rustc.
#[allow(dead_code)]
pub(crate) mod ptr {
    #[cfg(portable_atomic_no_strict_provenance)]
    use core::mem;
    #[cfg(not(portable_atomic_no_strict_provenance))]
    #[allow(unused_imports)]
    pub(crate) use core::ptr::{with_exposed_provenance, with_exposed_provenance_mut};

    #[cfg(portable_atomic_no_strict_provenance)]
    #[inline(always)]
    #[must_use]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub(crate) fn with_exposed_provenance<T>(addr: usize) -> *const T {
        addr as *const T
    }
    #[cfg(portable_atomic_no_strict_provenance)]
    #[inline(always)]
    #[must_use]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub(crate) fn with_exposed_provenance_mut<T>(addr: usize) -> *mut T {
        addr as *mut T
    }

    #[cfg(portable_atomic_no_strict_provenance)]
    pub(crate) trait PtrExt<T: ?Sized>: Copy {
        #[must_use]
        fn addr(self) -> usize;
        #[must_use]
        fn with_addr(self, addr: usize) -> Self
        where
            T: Sized;
    }
    #[cfg(portable_atomic_no_strict_provenance)]
    impl<T: ?Sized> PtrExt<T> for *mut T {
        #[inline(always)]
        #[must_use]
        fn addr(self) -> usize {
            // A pointer-to-integer transmute currently has exactly the right semantics: it returns the
            // address without exposing the provenance. Note that this is *not* a stable guarantee about
            // transmute semantics, it relies on sysroot crates having special status.
            // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
            // provenance).
            #[allow(clippy::transmutes_expressible_as_ptr_casts)]
            unsafe {
                mem::transmute(self as *mut ())
            }
        }
        #[allow(clippy::cast_possible_wrap)]
        #[inline]
        #[must_use]
        fn with_addr(self, addr: usize) -> Self
        where
            T: Sized,
        {
            // This should probably be an intrinsic to avoid doing any sort of arithmetic, but
            // meanwhile, we can implement it with `wrapping_offset`, which preserves the pointer's
            // provenance.
            let self_addr = self.addr() as isize;
            let dest_addr = addr as isize;
            let offset = dest_addr.wrapping_sub(self_addr);
            (self as *mut u8).wrapping_offset(offset) as *mut T
        }
    }
}
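
// For example (illustrative), `p.with_addr(p.addr() & !3)` rounds `p` down to
// a 4-byte boundary while preserving its provenance; a plain
// `p as usize as *mut T` round-trip would not preserve provenance under the
// strict provenance model.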

// This module provides:
// - core::ffi polyfill (c_* type aliases and CStr) for pre-1.64 rustc compatibility.
//   (core::ffi::* (except c_void) requires Rust 1.64)
// - safe abstraction (c! macro) for creating static C strings without runtime checks.
//   (c"..." requires Rust 1.77)
#[cfg(any(test, not(any(windows, target_arch = "x86", target_arch = "x86_64"))))]
#[cfg(any(not(portable_atomic_no_asm), portable_atomic_unstable_asm))]
#[allow(dead_code, non_camel_case_types, unused_macros)]
#[macro_use]
pub(crate) mod ffi {
    pub(crate) type c_void = core::ffi::c_void;
    // c_{,u}int is {i,u}16 on 16-bit targets, otherwise {i,u}32.
    // https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/ffi/mod.rs#L156
    #[cfg(target_pointer_width = "16")]
    pub(crate) type c_int = i16;
    #[cfg(target_pointer_width = "16")]
    pub(crate) type c_uint = u16;
    #[cfg(not(target_pointer_width = "16"))]
    pub(crate) type c_int = i32;
    #[cfg(not(target_pointer_width = "16"))]
    pub(crate) type c_uint = u32;
    // c_{,u}long is {i,u}64 on non-Windows 64-bit targets, otherwise {i,u}32.
    // https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/ffi/mod.rs#L168
    #[cfg(all(target_pointer_width = "64", not(windows)))]
    pub(crate) type c_long = i64;
    #[cfg(all(target_pointer_width = "64", not(windows)))]
    pub(crate) type c_ulong = u64;
    #[cfg(not(all(target_pointer_width = "64", not(windows))))]
    pub(crate) type c_long = i32;
    #[cfg(not(all(target_pointer_width = "64", not(windows))))]
    pub(crate) type c_ulong = u32;
    // c_size_t is currently always usize.
    // https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/ffi/mod.rs#L76
    pub(crate) type c_size_t = usize;
    // c_char is u8 by default on non-Apple/non-Windows Arm/C-SKY/Hexagon/MSP430/PowerPC/RISC-V/s390x/Xtensa targets, otherwise i8 by default.
    // See references in https://github.com/rust-lang/rust/issues/129945 for details.
    #[cfg(all(
        not(any(target_vendor = "apple", windows)),
        any(
            target_arch = "aarch64",
            target_arch = "arm",
            target_arch = "csky",
            target_arch = "hexagon",
            target_arch = "msp430",
            target_arch = "powerpc",
            target_arch = "powerpc64",
            target_arch = "riscv32",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "xtensa",
        ),
    ))]
    pub(crate) type c_char = u8;
    #[cfg(not(all(
        not(any(target_vendor = "apple", windows)),
        any(
            target_arch = "aarch64",
            target_arch = "arm",
            target_arch = "csky",
            target_arch = "hexagon",
            target_arch = "msp430",
            target_arch = "powerpc",
            target_arch = "powerpc64",
            target_arch = "riscv32",
            target_arch = "riscv64",
            target_arch = "s390x",
            target_arch = "xtensa",
        ),
    )))]
    pub(crate) type c_char = i8;

    // Static assertions for C type definitions.
    #[cfg(test)]
    const _: fn() = || {
        let _: c_int = 0 as std::os::raw::c_int;
        let _: c_uint = 0 as std::os::raw::c_uint;
        let _: c_long = 0 as std::os::raw::c_long;
        let _: c_ulong = 0 as std::os::raw::c_ulong;
        #[cfg(unix)]
        let _: c_size_t = 0 as libc::size_t; // std::os::raw::c_size_t is unstable
        let _: c_char = 0 as std::os::raw::c_char;
    };

    #[repr(transparent)]
    pub(crate) struct CStr([c_char]);
    impl CStr {
        #[inline]
        #[must_use]
        pub(crate) const fn as_ptr(&self) -> *const c_char {
            self.0.as_ptr()
        }
        /// # Safety
        ///
        /// The provided slice **must** be nul-terminated and not contain any interior
        /// nul bytes.
        #[inline]
        #[must_use]
        pub(crate) unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
            // SAFETY: Casting to CStr is safe because *our* CStr is #[repr(transparent)]
            // and its internal representation is a [u8] too. (Note that std's CStr
            // is not #[repr(transparent)].)
            // Dereferencing the obtained pointer is safe because it comes from a
            // reference. Making a reference is then safe because its lifetime
            // is bound by the lifetime of the given `bytes`.
            unsafe { &*(bytes as *const [u8] as *const CStr) }
        }
        #[cfg(test)]
        #[inline]
        #[must_use]
        pub(crate) fn to_bytes_with_nul(&self) -> &[u8] {
            // SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
            // is safe on all supported targets.
            #[allow(clippy::unnecessary_cast)] // triggered for targets where c_char is u8
            unsafe {
                &*(&self.0 as *const [c_char] as *const [u8])
            }
        }
    }

    macro_rules! c {
        ($s:expr) => {{
            const BYTES: &[u8] = concat!($s, "\0").as_bytes();
            const _: () = static_assert!(crate::utils::ffi::_const_is_c_str(BYTES));
            #[allow(unused_unsafe)]
            // SAFETY: we've checked `BYTES` is a valid C string
            unsafe {
                crate::utils::ffi::CStr::from_bytes_with_nul_unchecked(BYTES)
            }
        }};
    }
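
    // For example (illustrative), `c!("value")` appends the nul terminator via
    // `concat!` and yields a `&'static CStr` with bytes b"value\0"; a literal
    // containing an interior nul fails the static_assert at compile time.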

    #[must_use]
    pub(crate) const fn _const_is_c_str(bytes: &[u8]) -> bool {
        #[cfg(portable_atomic_no_track_caller)]
        {
            // const_if_match/const_loop was stabilized (nightly-2020-06-30) 2 days before
            // track_caller was stabilized (nightly-2020-07-02), so we reuse the cfg for
            // track_caller here instead of emitting a cfg for const_if_match/const_loop.
            // https://github.com/rust-lang/rust/pull/72437
            // track_caller was stabilized 11 days after the oldest nightly version
            // that uses this module, and is included in the same 1.46 stable release.
            // The check here is insufficient in this case, but this is fine because this function
            // is internal code that is not used to process input from the user and our CI checks
            // all builtin targets and some custom targets with some versions of newer compilers.
            !bytes.is_empty()
        }
        #[cfg(not(portable_atomic_no_track_caller))]
        {
            // Based on https://github.com/rust-lang/rust/blob/1.84.0/library/core/src/ffi/c_str.rs#L417
            // - bytes must be nul-terminated.
            // - bytes must not contain any interior nul bytes.
            if bytes.is_empty() {
                return false;
            }
            let mut i = bytes.len() - 1;
            if bytes[i] != 0 {
                return false;
            }
            // Ending nul byte exists, skip to the rest.
            while i != 0 {
                i -= 1;
                if bytes[i] == 0 {
                    return false;
                }
            }
            true
        }
    }
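
    // Worked example: for b"ab\0", the last byte is nul and the backwards scan
    // over b'b' and b'a' finds no interior nul, so the result is true; for
    // b"a\0b\0", the scan hits the interior nul at index 1 and returns false.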

    #[allow(
        clippy::alloc_instead_of_core,
        clippy::std_instead_of_alloc,
        clippy::std_instead_of_core,
        clippy::undocumented_unsafe_blocks,
        clippy::wildcard_imports
    )]
    #[cfg(test)]
    mod tests {
        #[test]
        fn test_c_macro() {
            #[track_caller]
            fn t(s: &crate::utils::ffi::CStr, raw: &[u8]) {
                assert_eq!(s.to_bytes_with_nul(), raw);
            }
            t(c!(""), b"\0");
            t(c!("a"), b"a\0");
            t(c!("abc"), b"abc\0");
            t(c!(concat!("abc", "d")), b"abcd\0");
        }

        #[test]
        fn test_is_c_str() {
            #[track_caller]
            fn t(bytes: &[u8]) {
                assert_eq!(
                    super::_const_is_c_str(bytes),
                    std::ffi::CStr::from_bytes_with_nul(bytes).is_ok()
                );
            }
            t(b"\0");
            t(b"a\0");
            t(b"abc\0");
            t(b"");
            t(b"a");
            t(b"abc");
            t(b"\0a");
            t(b"\0a\0");
            t(b"ab\0c\0");
            t(b"\0\0");
        }
    }
}