1 | use super::sealed::Sealed; |
2 | use crate::simd::{ |
3 | cmp::SimdPartialEq, intrinsics, num::SimdUint, LaneCount, Mask, Simd, SupportedLaneCount, |
4 | }; |
5 | |
6 | /// Operations on SIMD vectors of mutable pointers. |
/// Operations on SIMD vectors of mutable pointers.
pub trait SimdMutPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of elements.
    type Usize;

    /// Vector of `isize` with the same number of elements.
    type Isize;

    /// Vector of mutable pointers to another type, with the same number of elements.
    type CastPtr<T>;

    /// Vector of constant pointers to the same type.
    type ConstPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each element that is null.
    fn is_null(self) -> Self::Mask;

    /// Casts to a pointer of another type.
    ///
    /// Equivalent to calling [`pointer::cast`] on each element.
    fn cast<T>(self) -> Self::CastPtr<T>;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_const`] on each element.
    fn cast_const(self) -> Self::ConstPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// Equivalent to calling [`pointer::addr`] on each element.
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each element.
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
    /// in [`Self::from_exposed_addr`].
    ///
    /// Equivalent to calling [`pointer::expose_addr`] on each element.
    fn expose_addr(self) -> Self::Usize;

    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each element.
    fn from_exposed_addr(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each element.
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}
76 | |
77 | impl<T, const N: usize> Sealed for Simd<*mut T, N> where LaneCount<N>: SupportedLaneCount {} |
78 | |
79 | impl<T, const N: usize> SimdMutPtr for Simd<*mut T, N> |
80 | where |
81 | LaneCount<N>: SupportedLaneCount, |
82 | { |
83 | type Usize = Simd<usize, N>; |
84 | type Isize = Simd<isize, N>; |
85 | type CastPtr<U> = Simd<*mut U, N>; |
86 | type ConstPtr = Simd<*const T, N>; |
87 | type Mask = Mask<isize, N>; |
88 | |
89 | #[inline ] |
90 | fn is_null(self) -> Self::Mask { |
91 | Simd::splat(core::ptr::null_mut()).simd_eq(self) |
92 | } |
93 | |
94 | #[inline ] |
95 | fn cast<U>(self) -> Self::CastPtr<U> { |
96 | // SimdElement currently requires zero-sized metadata, so this should never fail. |
97 | // If this ever changes, `simd_cast_ptr` should produce a post-mono error. |
98 | use core::{mem::size_of, ptr::Pointee}; |
99 | assert_eq!(size_of::<<T as Pointee>::Metadata>(), 0); |
100 | assert_eq!(size_of::<<U as Pointee>::Metadata>(), 0); |
101 | |
102 | // Safety: pointers can be cast |
103 | unsafe { intrinsics::simd_cast_ptr(self) } |
104 | } |
105 | |
106 | #[inline ] |
107 | fn cast_const(self) -> Self::ConstPtr { |
108 | // Safety: pointers can be cast |
109 | unsafe { intrinsics::simd_cast_ptr(self) } |
110 | } |
111 | |
112 | #[inline ] |
113 | fn addr(self) -> Self::Usize { |
114 | // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. |
115 | // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the |
116 | // provenance). |
117 | unsafe { core::mem::transmute_copy(&self) } |
118 | } |
119 | |
120 | #[inline ] |
121 | fn with_addr(self, addr: Self::Usize) -> Self { |
122 | // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. |
123 | // |
124 | // In the mean-time, this operation is defined to be "as if" it was |
125 | // a wrapping_offset, so we can emulate it as such. This should properly |
126 | // restore pointer provenance even under today's compiler. |
127 | self.cast::<u8>() |
128 | .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>()) |
129 | .cast() |
130 | } |
131 | |
132 | #[inline ] |
133 | fn expose_addr(self) -> Self::Usize { |
134 | // Safety: `self` is a pointer vector |
135 | unsafe { intrinsics::simd_expose_addr(self) } |
136 | } |
137 | |
138 | #[inline ] |
139 | fn from_exposed_addr(addr: Self::Usize) -> Self { |
140 | // Safety: `self` is a pointer vector |
141 | unsafe { intrinsics::simd_from_exposed_addr(addr) } |
142 | } |
143 | |
144 | #[inline ] |
145 | fn wrapping_offset(self, count: Self::Isize) -> Self { |
146 | // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets |
147 | unsafe { intrinsics::simd_arith_offset(self, count) } |
148 | } |
149 | |
150 | #[inline ] |
151 | fn wrapping_add(self, count: Self::Usize) -> Self { |
152 | self.wrapping_offset(count.cast()) |
153 | } |
154 | |
155 | #[inline ] |
156 | fn wrapping_sub(self, count: Self::Usize) -> Self { |
157 | self.wrapping_offset(-count.cast::<isize>()) |
158 | } |
159 | } |
160 | |