#![unstable(
    feature = "core_intrinsics_fallbacks",
    reason = "The fallbacks will never be stable, as they exist only to be called \
              by the fallback MIR, but they're exported so they can be tested on \
              platforms where the fallback MIR isn't actually used",
    issue = "none"
)]
#![allow(missing_docs)]

#[const_trait]
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub trait CarryingMulAdd: Copy + 'static {
    type Unsigned: Copy + 'static;
    fn carrying_mul_add(
        self,
        multiplicand: Self,
        addend: Self,
        carry: Self,
    ) -> (Self::Unsigned, Self);
}
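
// For example, on `u8`: `200_u8.carrying_mul_add(3, 10, 4)` computes
// 200 * 3 + 10 + 4 = 614 = 2 * 256 + 102, so it returns `(102, 2)`,
// with the low half as `Self::Unsigned` and the high half as `Self`.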

macro_rules! impl_carrying_mul_add_by_widening {
    ($($t:ident $u:ident $w:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarryingMulAdd for $t {
            type Unsigned = $u;
            #[inline]
            fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
                let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
                (wide as _, (wide >> Self::BITS) as _)
            }
        }
    )+};
}
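
// The `wide` expression never overflows: for an N-bit unsigned type, the
// largest possible value is
//     (2^N - 1) * (2^N - 1) + (2^N - 1) + (2^N - 1) = 2^(2N) - 1,
// which exactly fills the double-width type. The signed rows below pair each
// type with a *signed* double-width type, where the extreme products and
// addends likewise stay in range.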
impl_carrying_mul_add_by_widening! {
    u8 u8 u16,
    u16 u16 u32,
    u32 u32 u64,
    u64 u64 u128,
    usize usize UDoubleSize,
    i8 u8 i16,
    i16 u16 i32,
    i32 u32 i64,
    i64 u64 i128,
    isize usize SDoubleSize,
}

#[cfg(target_pointer_width = "16")]
type UDoubleSize = u32;
#[cfg(target_pointer_width = "32")]
type UDoubleSize = u64;
#[cfg(target_pointer_width = "64")]
type UDoubleSize = u128;

// `isize` needs a signed double-width type: sign-extending a negative `isize`
// into `UDoubleSize` and multiplying would overflow (and panic in debug builds).
#[cfg(target_pointer_width = "16")]
type SDoubleSize = i32;
#[cfg(target_pointer_width = "32")]
type SDoubleSize = i64;
#[cfg(target_pointer_width = "64")]
type SDoubleSize = i128;

#[inline]
const fn wide_mul_u128(a: u128, b: u128) -> (u128, u128) {
    // Split a `u128` into two 64-bit limbs, each kept in the low half of a `u128`.
    #[inline]
    const fn to_low_high(x: u128) -> [u128; 2] {
        const MASK: u128 = u64::MAX as _;
        [x & MASK, x >> 64]
    }
    #[inline]
    const fn from_low_high(x: [u128; 2]) -> u128 {
        x[0] | (x[1] << 64)
    }
    // Multiply a two-limb number by a single limb, carrying between limbs,
    // to get a three-limb result.
    #[inline]
    const fn scalar_mul(low_high: [u128; 2], k: u128) -> [u128; 3] {
        let [x, c] = to_low_high(k * low_high[0]);
        let [y, z] = to_low_high(k * low_high[1] + c);
        [x, y, z]
    }
    // Schoolbook multiplication on 64-bit limbs: form the partial product of
    // `a` with each limb of `b` (the second one shifted up by a limb), then
    // add them with carry propagation.
    let a = to_low_high(a);
    let b = to_low_high(b);
    let low = scalar_mul(a, b[0]);
    let high = scalar_mul(a, b[1]);
    let r0 = low[0];
    let [r1, c] = to_low_high(low[1] + high[0]);
    let [r2, c] = to_low_high(low[2] + high[1] + c);
    let r3 = high[2] + c;
    (from_low_high([r0, r1]), from_low_high([r2, r3]))
}
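
// Sanity check of the limb arithmetic: (2^64) * (2^64) = 2^128, and indeed
// `wide_mul_u128(1 << 64, 1 << 64)` returns `(0, 1)`: low half 0, high half 1.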

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for u128 {
    type Unsigned = u128;
    #[inline]
    fn carrying_mul_add(self, b: u128, c: u128, d: u128) -> (u128, u128) {
        let (low, mut high) = wide_mul_u128(self, b);
        let (low, carry) = u128::overflowing_add(low, c);
        high += carry as u128;
        let (low, carry) = u128::overflowing_add(low, d);
        high += carry as u128;
        (low, high)
    }
}
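
// `high` cannot wrap here: the largest possible result is
// u128::MAX * u128::MAX + u128::MAX + u128::MAX = 2^256 - 1, which still fits
// in the 256 bits of `(low, high)`.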

#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for i128 {
    type Unsigned = u128;
    #[inline]
    fn carrying_mul_add(self, b: i128, c: i128, d: i128) -> (u128, i128) {
        let (low, high) = wide_mul_u128(self as u128, b as u128);
        let mut high = high as i128;
        high = high.wrapping_add(i128::wrapping_mul(self >> 127, b));
        high = high.wrapping_add(i128::wrapping_mul(self, b >> 127));
        let (low, carry) = u128::overflowing_add(low, c as u128);
        high = high.wrapping_add((carry as i128) + (c >> 127));
        let (low, carry) = u128::overflowing_add(low, d as u128);
        high = high.wrapping_add((carry as i128) + (d >> 127));
        (low, high)
    }
}
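
// The fixups above convert the unsigned double-wide product into a signed one:
// reading the same bits as signed, the unsigned high half overstates the true
// high half by `b` when `self < 0` and by `self` when `b < 0`. Because `>> 127`
// on `i128` is an arithmetic shift (0 for non-negative, -1 for negative),
// `wrapping_mul(self >> 127, b)` subtracts `b` exactly when `self < 0`.
// Similarly, adding `c >> 127` (and `d >> 127`) sign-extends each addend into
// the high half.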

#[const_trait]
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub trait DisjointBitOr: Copy + 'static {
    /// See [`super::disjoint_bitor`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn disjoint_bitor(self, other: Self) -> Self;
}
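
// For example, `0b0101_u8` and `0b1010_u8` share no set bits, so or-ing them is
// allowed; `0b0110_u8` and `0b0010_u8` share bit 1, so passing them would
// violate the safety precondition.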
macro_rules! zero {
    (bool) => {
        false
    };
    ($t:ident) => {
        0
    };
}
macro_rules! impl_disjoint_bitor {
    ($($t:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const DisjointBitOr for $t {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn disjoint_bitor(self, other: Self) -> Self {
                // Note that the assume here is required for UB detection in Miri!

                // SAFETY: our precondition is that there are no bits in common,
                // so this is just telling that to the backend.
                unsafe { super::assume((self & other) == zero!($t)) };
                self | other
            }
        }
    )+};
}
impl_disjoint_bitor! {
    bool,
    u8, u16, u32, u64, u128, usize,
    i8, i16, i32, i64, i128, isize,
}
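
// When the bits really are disjoint, `self | other`, `self ^ other`, and (for
// the integer types) `self + other` all produce the same value, which is what
// the `assume` lets the backend exploit when selecting instructions.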