use super::sealed::Sealed;
use crate::simd::{cmp::SimdPartialEq, num::SimdUint, LaneCount, Mask, Simd, SupportedLaneCount};

/// Operations on SIMD vectors of constant pointers.
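///
/// # Examples
///
/// A minimal usage sketch; it assumes the nightly `portable_simd` feature and
/// that this trait is importable as `core::simd::ptr::SimdConstPtr`:
///
/// ```
/// # #![feature(portable_simd)]
/// use core::simd::{ptr::SimdConstPtr, Simd};
///
/// let x = 5u32;
/// let ptrs = Simd::<*const u32, 4>::splat(&x);
/// assert!(!ptrs.is_null().any());
/// ```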
pub trait SimdConstPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of elements.
    type Usize;

    /// Vector of `isize` with the same number of elements.
    type Isize;

    /// Vector of const pointers with the same number of elements.
    type CastPtr<T>;

    /// Vector of mutable pointers to the same type.
    type MutPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each element that is null.
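    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 2>::from_array([core::ptr::null(), &x]);
    /// assert_eq!(ptrs.is_null().to_array(), [true, false]);
    /// ```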
    fn is_null(self) -> Self::Mask;

    /// Casts to a pointer of another type.
    ///
    /// Equivalent to calling [`pointer::cast`] on each element.
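    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// // Casting changes the pointee type but leaves every address unchanged.
    /// let bytes = ptrs.cast::<u8>();
    /// assert_eq!(ptrs.addr(), bytes.addr());
    /// ```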
    fn cast<T>(self) -> Self::CastPtr<T>;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_mut`] on each element.
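    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature and the
    /// sibling `SimdMutPtr` trait for calling `addr` on the result):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::{SimdConstPtr, SimdMutPtr}, Simd};
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// let muts: Simd<*mut u32, 4> = ptrs.cast_mut();
    /// assert_eq!(ptrs.addr(), muts.addr());
    /// ```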
    fn cast_mut(self) -> Self::MutPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// This method semantically discards *provenance* and *address-space*
    /// information. To properly restore that information, use [`Self::with_addr`].
    ///
    /// Equivalent to calling [`pointer::addr`] on each element.
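    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// // Each element holds the same address as the scalar `pointer::addr`.
    /// assert_eq!(ptrs.addr(), Simd::splat((&x as *const u32).addr()));
    /// ```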
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each element.
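    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let v = [1u32, 2, 3, 4];
    /// let ptrs = Simd::<*const u32, 4>::splat(v.as_ptr());
    /// // Move each pointer forward by one `u32` (4 bytes), keeping provenance.
    /// let next = ptrs.with_addr(ptrs.addr() + Simd::splat(4));
    /// assert_eq!(next.addr(), ptrs.addr() + Simd::splat(4));
    /// ```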
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Exposes the "provenance" part of the pointer for future use in
    /// [`Self::with_exposed_provenance`] and returns the "address" portion.
    fn expose_provenance(self) -> Self::Usize;

    /// Converts an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::with_exposed_provenance`] on each element.
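    ///
    /// # Examples
    ///
    /// A round-trip doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let x = 5u32;
    /// let ptrs = Simd::<*const u32, 4>::splat(&x);
    /// let addrs = ptrs.expose_provenance();
    /// let restored = Simd::<*const u32, 4>::with_exposed_provenance(addrs);
    /// assert_eq!(ptrs.addr(), restored.addr());
    /// ```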
    fn with_exposed_provenance(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
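    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let v = [0u8, 1, 2, 3];
    /// let last = Simd::<*const u8, 2>::splat(&v[3]);
    /// // Negative offsets step backwards through the array.
    /// let ptrs = last.wrapping_offset(Simd::from_array([-3, -1]));
    /// assert_eq!(ptrs.addr(), last.addr() - Simd::from_array([3, 1]));
    /// ```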
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each element.
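    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let v = [0u8, 1, 2, 3];
    /// let base = Simd::<*const u8, 4>::splat(v.as_ptr());
    /// // Point each element at a successive element of `v`.
    /// let ptrs = base.wrapping_add(Simd::from_array([0, 1, 2, 3]));
    /// assert_eq!(ptrs.addr(), base.addr() + Simd::from_array([0, 1, 2, 3]));
    /// ```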
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
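    ///
    /// # Examples
    ///
    /// A doctest sketch (assumes the nightly `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::{ptr::SimdConstPtr, Simd};
    /// let v = [0u8, 1, 2, 3];
    /// let end = Simd::<*const u8, 4>::splat(&v[3]);
    /// let ptrs = end.wrapping_sub(Simd::from_array([3, 2, 1, 0]));
    /// assert_eq!(ptrs.addr(), end.addr() - Simd::from_array([3, 2, 1, 0]));
    /// ```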
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}

impl<T, const N: usize> Sealed for Simd<*const T, N> where LaneCount<N>: SupportedLaneCount {}

impl<T, const N: usize> SimdConstPtr for Simd<*const T, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    type Usize = Simd<usize, N>;
    type Isize = Simd<isize, N>;
    type CastPtr<U> = Simd<*const U, N>;
    type MutPtr = Simd<*mut T, N>;
    type Mask = Mask<isize, N>;

    #[inline]
    fn is_null(self) -> Self::Mask {
        Simd::splat(core::ptr::null()).simd_eq(self)
    }

    #[inline]
    fn cast<U>(self) -> Self::CastPtr<U> {
        // SimdElement currently requires zero-sized metadata, so this should never fail.
        // If this ever changes, `simd_cast_ptr` should produce a post-mono error.
        use core::{mem::size_of, ptr::Pointee};
        assert_eq!(size_of::<<T as Pointee>::Metadata>(), 0);
        assert_eq!(size_of::<<U as Pointee>::Metadata>(), 0);

        // Safety: pointers can be cast
        unsafe { core::intrinsics::simd::simd_cast_ptr(self) }
    }

    #[inline]
    fn cast_mut(self) -> Self::MutPtr {
        // Safety: pointers can be cast
        unsafe { core::intrinsics::simd::simd_cast_ptr(self) }
    }

    #[inline]
    fn addr(self) -> Self::Usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { core::mem::transmute_copy(&self) }
    }

    #[inline]
    fn with_addr(self, addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the meantime, this operation is defined to be "as if" it were
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        self.cast::<u8>()
            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
            .cast()
    }

    #[inline]
    fn expose_provenance(self) -> Self::Usize {
        // Safety: `self` is a pointer vector
        unsafe { core::intrinsics::simd::simd_expose_provenance(self) }
    }

    #[inline]
    fn with_exposed_provenance(addr: Self::Usize) -> Self {
        // Safety: `addr` contains previously exposed addresses; the intrinsic
        // reconstitutes a pointer vector from them
        unsafe { core::intrinsics::simd::simd_with_exposed_provenance(addr) }
    }

    #[inline]
    fn wrapping_offset(self, count: Self::Isize) -> Self {
        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
        unsafe { core::intrinsics::simd::simd_arith_offset(self, count) }
    }

    #[inline]
    fn wrapping_add(self, count: Self::Usize) -> Self {
        self.wrapping_offset(count.cast())
    }

    #[inline]
    fn wrapping_sub(self, count: Self::Usize) -> Self {
        self.wrapping_offset(-count.cast::<isize>())
    }
}