use super::sealed::Sealed;
use crate::simd::{
    cmp::SimdPartialEq, intrinsics, num::SimdUint, LaneCount, Mask, Simd, SupportedLaneCount,
};

/// Operations on SIMD vectors of constant pointers.
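///
/// # Example
///
/// A minimal usage sketch, assuming a nightly toolchain with the `portable_simd`
/// feature enabled (the values are illustrative only):
/// ```
/// # #![feature(portable_simd)]
/// # use core::simd::prelude::*;
/// let x = 42u32;
/// let ptrs = Simd::<*const u32, 4>::splat(&x);
/// assert!(!ptrs.is_null().any());
/// assert!(!ptrs.cast::<u8>().is_null().any());
/// ```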
pub trait SimdConstPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of elements.
    type Usize;

    /// Vector of `isize` with the same number of elements.
    type Isize;

    /// Vector of const pointers with the same number of elements.
    type CastPtr<T>;

    /// Vector of mutable pointers to the same type.
    type MutPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each element that is null.
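    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = 0u32;
    /// let ptrs = Simd::<*const u32, 4>::from_array([core::ptr::null(), &x, core::ptr::null(), &x]);
    /// assert_eq!(ptrs.is_null(), Mask::from_array([true, false, true, false]));
    /// ```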
    fn is_null(self) -> Self::Mask;

    /// Casts to a pointer of another type.
    ///
    /// Equivalent to calling [`pointer::cast`] on each element.
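    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let words = Simd::<*const u32, 4>::splat(core::ptr::null());
    /// let bytes = words.cast::<u8>();
    /// assert!(bytes.is_null().all());
    /// ```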
    fn cast<T>(self) -> Self::CastPtr<T>;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_mut`] on each element.
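    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled
    /// (`is_null` on the result comes from `SimdMutPtr`):
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let p = Simd::<*const u32, 4>::splat(core::ptr::null());
    /// let m: Simd<*mut u32, 4> = p.cast_mut();
    /// assert!(m.is_null().all());
    /// ```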
    fn cast_mut(self) -> Self::MutPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// This method semantically discards *provenance* and
    /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
    ///
    /// Equivalent to calling [`pointer::addr`] on each element.
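    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled; the
    /// `as usize` comparison is for illustration only, since `addr` itself does
    /// not expose provenance:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// assert_eq!(ptrs.addr(), Simd::splat(&x as *const u32 as usize));
    /// ```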
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each element.
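    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled:
    /// advance each lane by one `u32` while keeping the original provenance.
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = [1u32, 2];
    /// let p = Simd::<*const u32, 2>::splat(x.as_ptr());
    /// let q = p.with_addr(p.addr() + Simd::splat(core::mem::size_of::<u32>()));
    /// assert_eq!(q.addr(), p.addr() + Simd::splat(4));
    /// ```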
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
    /// in [`Self::from_exposed_addr`].
    fn expose_addr(self) -> Self::Usize;

    /// Converts an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each element.
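    ///
    /// # Example
    ///
    /// A round-trip sketch with [`Self::expose_addr`], assuming a nightly
    /// toolchain with `portable_simd` enabled:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = 7u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// let addrs = ptrs.expose_addr();
    /// let back = Simd::<*const u32, 4>::from_exposed_addr(addrs);
    /// assert_eq!(back.addr(), ptrs.addr());
    /// ```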
    fn from_exposed_addr(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
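    ///
    /// # Example
    ///
    /// A sketch, assuming a nightly toolchain with `portable_simd` enabled;
    /// offsets are per-lane and in units of `T`:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = [1u32, 2, 3, 4];
    /// let p = Simd::<*const u32, 4>::splat(x.as_ptr().wrapping_add(1));
    /// // Lanes may step backward or forward independently: the result points
    /// // at x[0], x[1], x[2], and x[3].
    /// let q = p.wrapping_offset(Simd::from_array([-1, 0, 1, 2]));
    /// assert_eq!(q[0], x.as_ptr());
    /// ```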
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each element.
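    ///
    /// # Example
    ///
    /// A sketch that fans one base pointer out across an array, assuming a
    /// nightly toolchain with `portable_simd` enabled:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = [10u32, 20, 30, 40];
    /// let ptrs = Simd::<*const u32, 4>::splat(x.as_ptr()).wrapping_add(Simd::from_array([0, 1, 2, 3]));
    /// // Each lane now points at the matching element of `x`.
    /// assert_eq!(ptrs[3], &x[3] as *const u32);
    /// ```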
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
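    ///
    /// # Example
    ///
    /// A sketch stepping every lane back by one element, assuming a nightly
    /// toolchain with `portable_simd` enabled:
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::prelude::*;
    /// let x = [1u32, 2, 3, 4];
    /// let p = Simd::<*const u32, 4>::splat(x.as_ptr().wrapping_add(3));
    /// let q = p.wrapping_sub(Simd::splat(1));
    /// assert_eq!(q.addr(), p.addr() - Simd::splat(core::mem::size_of::<u32>()));
    /// ```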
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}

impl<T, const N: usize> Sealed for Simd<*const T, N> where LaneCount<N>: SupportedLaneCount {}

impl<T, const N: usize> SimdConstPtr for Simd<*const T, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    type Usize = Simd<usize, N>;
    type Isize = Simd<isize, N>;
    type CastPtr<U> = Simd<*const U, N>;
    type MutPtr = Simd<*mut T, N>;
    type Mask = Mask<isize, N>;

    #[inline]
    fn is_null(self) -> Self::Mask {
        Simd::splat(core::ptr::null()).simd_eq(self)
    }

    #[inline]
    fn cast<U>(self) -> Self::CastPtr<U> {
        // SimdElement currently requires zero-sized metadata, so this should never fail.
        // If this ever changes, `simd_cast_ptr` should produce a post-mono error.
        use core::{mem::size_of, ptr::Pointee};
        assert_eq!(size_of::<<T as Pointee>::Metadata>(), 0);
        assert_eq!(size_of::<<U as Pointee>::Metadata>(), 0);

        // Safety: pointers can be cast
        unsafe { intrinsics::simd_cast_ptr(self) }
    }

    #[inline]
    fn cast_mut(self) -> Self::MutPtr {
        // Safety: pointers can be cast
        unsafe { intrinsics::simd_cast_ptr(self) }
    }

    #[inline]
    fn addr(self) -> Self::Usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { core::mem::transmute_copy(&self) }
    }

    #[inline]
    fn with_addr(self, addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the meantime, this operation is defined to be "as if" it were
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        self.cast::<u8>()
            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
            .cast()
    }

    #[inline]
    fn expose_addr(self) -> Self::Usize {
        // Safety: `self` is a pointer vector
        unsafe { intrinsics::simd_expose_addr(self) }
    }

    #[inline]
    fn from_exposed_addr(addr: Self::Usize) -> Self {
        // Safety: `addr` is a vector of addresses, producing a pointer vector
        // of the same element count
        unsafe { intrinsics::simd_from_exposed_addr(addr) }
    }

    #[inline]
    fn wrapping_offset(self, count: Self::Isize) -> Self {
        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
        unsafe { intrinsics::simd_arith_offset(self, count) }
    }

    #[inline]
    fn wrapping_add(self, count: Self::Usize) -> Self {
        self.wrapping_offset(count.cast())
    }

    #[inline]
    fn wrapping_sub(self, count: Self::Usize) -> Self {
        self.wrapping_offset(-count.cast::<isize>())
    }
}