use super::sealed::Sealed;
use crate::simd::{LaneCount, Mask, Simd, SupportedLaneCount, cmp::SimdPartialEq, num::SimdUint};

/// Operations on SIMD vectors of constant pointers.
pub trait SimdConstPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of elements.
    type Usize;

    /// Vector of `isize` with the same number of elements.
    type Isize;

    /// Vector of const pointers with the same number of elements.
    type CastPtr<T>;

    /// Vector of mutable pointers to the same type.
    type MutPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each element that is null.
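    ///
    /// A minimal usage sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = 5u32;
    /// let ptrs = Simd::from_array([core::ptr::null(), &x as *const u32]);
    /// assert_eq!(ptrs.is_null().to_array(), [true, false]);
    /// ```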
    fn is_null(self) -> Self::Mask;

    /// Casts to a pointer of another type.
    ///
    /// Equivalent to calling [`pointer::cast`] on each element.
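    ///
    /// A short sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = [1u32, 2];
    /// let words = Simd::from_array([&x[0] as *const u32, &x[1] as *const u32]);
    /// // Reinterpret the pointee type; addresses and provenance are unchanged.
    /// let bytes = words.cast::<u8>();
    /// assert_eq!(bytes.addr(), words.addr());
    /// ```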
    fn cast<T>(self) -> Self::CastPtr<T>;

    /// Changes constness without changing the type.
    ///
    /// Equivalent to calling [`pointer::cast_mut`] on each element.
    fn cast_mut(self) -> Self::MutPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// This method semantically discards *provenance* and
    /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
    ///
    /// Equivalent to calling [`pointer::addr`] on each element.
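    ///
    /// A brief sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = 0u8;
    /// let p = &x as *const u8;
    /// let ptrs = Simd::<_, 4>::splat(p);
    /// // Each element carries the same address as the scalar pointer.
    /// assert_eq!(ptrs.addr(), Simd::splat(p.addr()));
    /// ```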
    fn addr(self) -> Self::Usize;

    /// Converts an address to a pointer without giving it any provenance.
    ///
    /// Without provenance, this pointer is not associated with any actual allocation. Such a
    /// no-provenance pointer may be used for zero-sized memory accesses (if suitably aligned), but
    /// non-zero-sized memory accesses with a no-provenance pointer are UB. No-provenance pointers
    /// are little more than a usize address in disguise.
    ///
    /// This is different from [`Self::with_exposed_provenance`], which creates a pointer that picks up a
    /// previously exposed provenance.
    ///
    /// Equivalent to calling [`core::ptr::without_provenance`] on each element.
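    ///
    /// A minimal sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let addrs = Simd::from_array([1usize, 2, 3, 4]);
    /// // Addresses only; these pointers must not be dereferenced.
    /// let ptrs = Simd::<*const u8, 4>::without_provenance(addrs);
    /// assert_eq!(ptrs.addr(), addrs);
    /// ```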
    fn without_provenance(addr: Self::Usize) -> Self;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each element.
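    ///
    /// A small sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = [10u8, 20];
    /// let base = Simd::<_, 2>::splat(&x[0] as *const u8);
    /// // Keep the provenance of `base`, but move each element one byte forward.
    /// let bumped = base.with_addr(base.addr() + Simd::splat(1));
    /// assert_eq!(bumped.addr(), base.addr() + Simd::splat(1));
    /// ```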
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Exposes the "provenance" part of the pointer for future use in
    /// [`Self::with_exposed_provenance`] and returns the "address" portion.
    fn expose_provenance(self) -> Self::Usize;

    /// Converts an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::with_exposed_provenance`] on each element.
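    ///
    /// A round-trip sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = 7u32;
    /// let ptrs = Simd::<_, 2>::splat(&x as *const u32);
    /// // Expose the provenance, then rebuild pointers from the bare addresses.
    /// let addrs = ptrs.expose_provenance();
    /// let again = Simd::<*const u32, 2>::with_exposed_provenance(addrs);
    /// assert_eq!(again.addr(), ptrs.addr());
    /// ```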
    fn with_exposed_provenance(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each element.
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each element.
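    ///
    /// A brief sketch (assumes a nightly toolchain with the `portable_simd` feature):
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// use core::simd::Simd;
    /// use core::simd::ptr::SimdConstPtr;
    ///
    /// let x = [1u16, 2, 3, 4];
    /// let base = Simd::<_, 4>::splat(x.as_ptr());
    /// // Advance each element by a different number of `u16` elements (2 bytes each).
    /// let ptrs = base.wrapping_add(Simd::from_array([0, 1, 2, 3]));
    /// assert_eq!(ptrs.addr() - base.addr(), Simd::from_array([0, 2, 4, 6]));
    /// ```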
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each element.
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}

impl<T, const N: usize> Sealed for Simd<*const T, N> where LaneCount<N>: SupportedLaneCount {}

impl<T, const N: usize> SimdConstPtr for Simd<*const T, N>
where
    LaneCount<N>: SupportedLaneCount,
{
    type Usize = Simd<usize, N>;
    type Isize = Simd<isize, N>;
    type CastPtr<U> = Simd<*const U, N>;
    type MutPtr = Simd<*mut T, N>;
    type Mask = Mask<isize, N>;

    #[inline]
    fn is_null(self) -> Self::Mask {
        Simd::splat(core::ptr::null()).simd_eq(self)
    }

    #[inline]
    fn cast<U>(self) -> Self::CastPtr<U> {
        // SimdElement currently requires zero-sized metadata, so this should never fail.
        // If this ever changes, `simd_cast_ptr` should produce a post-mono error.
        use core::ptr::Pointee;
        assert_eq!(size_of::<<T as Pointee>::Metadata>(), 0);
        assert_eq!(size_of::<<U as Pointee>::Metadata>(), 0);

        // Safety: pointers can be cast
        unsafe { core::intrinsics::simd::simd_cast_ptr(self) }
    }

    #[inline]
    fn cast_mut(self) -> Self::MutPtr {
        // Safety: pointers can be cast
        unsafe { core::intrinsics::simd::simd_cast_ptr(self) }
    }

    #[inline]
    fn addr(self) -> Self::Usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { core::mem::transmute_copy(&self) }
    }

    #[inline]
    fn without_provenance(addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Integer-to-pointer transmutes are valid (if you are okay with not getting any
        // provenance).
        unsafe { core::mem::transmute_copy(&addr) }
    }

    #[inline]
    fn with_addr(self, addr: Self::Usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        self.cast::<u8>()
            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
            .cast()
    }

    #[inline]
    fn expose_provenance(self) -> Self::Usize {
        // Safety: `self` is a pointer vector
        unsafe { core::intrinsics::simd::simd_expose_provenance(self) }
    }

    #[inline]
    fn with_exposed_provenance(addr: Self::Usize) -> Self {
        // Safety: `addr` is a usize vector and `Self` is a pointer vector
        unsafe { core::intrinsics::simd::simd_with_exposed_provenance(addr) }
    }

    #[inline]
    fn wrapping_offset(self, count: Self::Isize) -> Self {
        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
        unsafe { core::intrinsics::simd::simd_arith_offset(self, count) }
    }

    #[inline]
    fn wrapping_add(self, count: Self::Usize) -> Self {
        self.wrapping_offset(count.cast())
    }

    #[inline]
    fn wrapping_sub(self, count: Self::Usize) -> Self {
        self.wrapping_offset(-count.cast::<isize>())
    }
}