#![unstable(
    feature = "core_intrinsics_fallbacks",
    reason = "The fallbacks will never be stable, as they exist only to be called \
              by the fallback MIR, but they're exported so they can be tested on \
              platforms where the fallback MIR isn't actually used",
    issue = "none"
)]
#![allow(missing_docs)]

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait CarryingMulAdd: Copy + 'static {
    type Unsigned: Copy + 'static;
    fn carrying_mul_add(
        self,
        multiplicand: Self,
        addend: Self,
        carry: Self,
    ) -> (Self::Unsigned, Self);
}
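// For each implementation of this trait, `x.carrying_mul_add(m, b, c)` returns a
// pair `(low, high)` such that `x * m + b + c == high * 2^BITS + low`, with the
// low half always given as the unsigned type. The sum cannot overflow the
// double-width result; for the unsigned types the largest possible value is
// `(2^BITS - 1)^2 + 2 * (2^BITS - 1) == 2^(2*BITS) - 1`.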

macro_rules! impl_carrying_mul_add_by_widening {
    ($($t:ident $u:ident $w:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarryingMulAdd for $t {
            type Unsigned = $u;
            #[inline]
            fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
                let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
                (wide as _, (wide >> Self::BITS) as _)
            }
        }
    )+};
}
impl_carrying_mul_add_by_widening! {
    u8 u8 u16,
    u16 u16 u32,
    u32 u32 u64,
    u64 u64 u128,
    usize usize UDoubleSize,
    i8 u8 i16,
    i16 u16 i32,
    i32 u32 i64,
    i64 u64 i128,
    isize usize UDoubleSize,
}
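// Worked example for the widening fallback (u8): `200u8.carrying_mul_add(200, 50, 10)`
// computes 200 * 200 + 50 + 10 == 40060 == 0x9C7C in `u16`, so it returns
// `(low, high) == (0x7C, 0x9C)`, i.e. `(124, 156)`, and 156 * 256 + 124 == 40060.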

#[cfg(target_pointer_width = "16")]
type UDoubleSize = u32;
#[cfg(target_pointer_width = "32")]
type UDoubleSize = u64;
#[cfg(target_pointer_width = "64")]
type UDoubleSize = u128;

#[inline]
const fn wide_mul_u128(a: u128, b: u128) -> (u128, u128) {
    #[inline]
    const fn to_low_high(x: u128) -> [u128; 2] {
        const MASK: u128 = u64::MAX as _;
        [x & MASK, x >> 64]
    }
    #[inline]
    const fn from_low_high(x: [u128; 2]) -> u128 {
        x[0] | (x[1] << 64)
    }
    #[inline]
    const fn scalar_mul(low_high: [u128; 2], k: u128) -> [u128; 3] {
        let [x, c] = to_low_high(k * low_high[0]);
        let [y, z] = to_low_high(k * low_high[1] + c);
        [x, y, z]
    }
    let a = to_low_high(a);
    let b = to_low_high(b);
    let low = scalar_mul(a, b[0]);
    let high = scalar_mul(a, b[1]);
    let r0 = low[0];
    let [r1, c] = to_low_high(low[1] + high[0]);
    let [r2, c] = to_low_high(low[2] + high[1] + c);
    let r3 = high[2] + c;
    (from_low_high([r0, r1]), from_low_high([r2, r3]))
}
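// `wide_mul_u128` above is schoolbook multiplication on 64-bit limbs: writing
// `a = a1 * 2^64 + a0` and `b = b1 * 2^64 + b0`,
//   `a * b = a1*b1 * 2^128 + (a1*b0 + a0*b1) * 2^64 + a0*b0`,
// and every limb product fits in `u128` since both factors are below `2^64`.
// Even `k * low_high[1] + c` cannot overflow, because
// `(2^64 - 1)^2 + (2^64 - 1) == 2^128 - 2^64 < 2^128`.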

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for u128 {
    type Unsigned = u128;
    #[inline]
    fn carrying_mul_add(self, b: u128, c: u128, d: u128) -> (u128, u128) {
        let (low, mut high) = wide_mul_u128(self, b);
        // `self * b + c + d` fits in 256 bits, so these carries cannot overflow `high`.
        let (low, carry) = u128::overflowing_add(low, c);
        high += carry as u128;
        let (low, carry) = u128::overflowing_add(low, d);
        high += carry as u128;
        (low, high)
    }
}

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for i128 {
    type Unsigned = u128;
    #[inline]
    fn carrying_mul_add(self, b: i128, c: i128, d: i128) -> (u128, i128) {
        let (low, high) = wide_mul_u128(self as u128, b as u128);
        let mut high = high as i128;
        high = high.wrapping_add(i128::wrapping_mul(self >> 127, b));
        high = high.wrapping_add(i128::wrapping_mul(self, b >> 127));
        let (low, carry) = u128::overflowing_add(low, c as u128);
        high = high.wrapping_add((carry as i128) + (c >> 127));
        let (low, carry) = u128::overflowing_add(low, d as u128);
        high = high.wrapping_add((carry as i128) + (d >> 127));
        (low, high)
    }
}
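// The signed 128-bit case above multiplies the operands as unsigned and then
// sign-corrects the high half: reinterpreting `x: i128` as `u128` adds `2^128`
// exactly when `x < 0`, and `x >> 127` is an arithmetic shift yielding `0` or
// `-1`, so each `wrapping_add` of `(x >> 127) * y` (and of `c >> 127` / `d >> 127`
// for the two addends) removes that excess from `high` again.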

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait DisjointBitOr: Copy + 'static {
    /// See [`super::disjoint_bitor`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn disjoint_bitor(self, other: Self) -> Self;
}
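// "Disjoint" here means the operands share no set bits (`self & other == 0`),
// e.g. `0b0101` and `0b1010` are disjoint while `0b0110` and `0b0011` are not;
// violating that precondition is UB, which the `assume` in the implementations
// below lets Miri detect.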
macro_rules! zero {
    (bool) => {
        false
    };
    ($t:ident) => {
        0
    };
}
macro_rules! impl_disjoint_bitor {
    ($($t:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const DisjointBitOr for $t {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn disjoint_bitor(self, other: Self) -> Self {
                // Note that the assume here is required for UB detection in Miri!

                // SAFETY: our precondition is that there are no bits in common,
                // so this is just telling that to the backend.
                unsafe { super::assume((self & other) == zero!($t)) };
                self | other
            }
        }
    )+};
}
impl_disjoint_bitor! {
    bool,
    u8, u16, u32, u64, u128, usize,
    i8, i16, i32, i64, i128, isize,
}

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait FunnelShift: Copy + 'static {
    /// See [`super::unchecked_funnel_shl`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self;

    /// See [`super::unchecked_funnel_shr`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self;
}
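// Conceptually, a funnel shift concatenates the operands into a `2 * BITS`-bit
// value (`self` as the high half, `rhs` as the low half), shifts it, and keeps
// one half: `unchecked_funnel_shl` keeps the high half of the left shift, i.e.
// `(self << shift) | (rhs >> (BITS - shift))`, while `unchecked_funnel_shr`
// keeps the low half of the right shift, i.e.
// `(self << (BITS - shift)) | (rhs >> shift)`. The `shift == 0` case is handled
// separately below because shifting by `BITS - 0` would be out of range.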

macro_rules! impl_funnel_shifts {
    ($($type:ident),*) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const FunnelShift for $type {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
                // This implementation is also used by Miri so we have to check the precondition.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $type::BITS) };
                if shift == 0 {
                    self
                } else {
                    // SAFETY:
                    // - `shift < T::BITS`, which satisfies `unchecked_shl`
                    // - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
                    //   above), which satisfies `unchecked_shr`
                    // - because the types are unsigned, the two shifted parts have disjoint
                    //   bits (this is not true for signed types, since SHR fills the vacated
                    //   bits with the sign bit, not zero)
                    unsafe {
                        super::disjoint_bitor(
                            super::unchecked_shl(self, shift),
                            super::unchecked_shr(rhs, $type::BITS - shift),
                        )
                    }
                }
            }

            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
                // This implementation is also used by Miri so we have to check the precondition.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $type::BITS) };
                if shift == 0 {
                    rhs
                } else {
                    // SAFETY:
                    // - `shift < T::BITS`, which satisfies `unchecked_shr`
                    // - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
                    //   above), which satisfies `unchecked_shl`
                    // - because the types are unsigned, the two shifted parts have disjoint
                    //   bits (this is not true for signed types, since SHR fills the vacated
                    //   bits with the sign bit, not zero)
                    unsafe {
                        super::disjoint_bitor(
                            super::unchecked_shl(self, $type::BITS - shift),
                            super::unchecked_shr(rhs, shift),
                        )
                    }
                }
            }
        }
    )*};
}

impl_funnel_shifts! {
    u8, u16, u32, u64, u128, usize
}
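// Worked example on `u8`: with `self == 0xAB` and `rhs == 0xCD`, the concatenation
// is `0xABCD`; shifting that 16-bit value left by 2 gives `0xAF34`, so the funnel
// shift left by 2 returns its high byte `0xAF`, while shifting it right by 2 gives
// `0x2AF3`, so the funnel shift right by 2 returns its low byte `0xF3`.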