use crate::int::{DInt, HInt, Int};

trait Mul: DInt
where
    Self::H: DInt,
{
    fn mul(self, rhs: Self) -> Self {
        // In order to prevent infinite recursion, we cannot use `widen_mul` here:
        //self.lo().widen_mul(rhs.lo())
        //    .wrapping_add(self.lo().wrapping_mul(rhs.hi()).widen_hi())
        //    .wrapping_add(self.hi().wrapping_mul(rhs.lo()).widen_hi())
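        //
        // `widen_mul` on the `Self::H` halves produces a `Self`-sized multiplication,
        // which on targets without a native wide multiply can lower back to this very
        // intrinsic.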

        let lhs_lo = self.lo();
        let rhs_lo = rhs.lo();
        // construct the widening multiplication using only `Self::H` sized multiplications
        let tmp_0 = lhs_lo.lo().zero_widen_mul(rhs_lo.lo());
        let tmp_1 = lhs_lo.lo().zero_widen_mul(rhs_lo.hi());
        let tmp_2 = lhs_lo.hi().zero_widen_mul(rhs_lo.lo());
        let tmp_3 = lhs_lo.hi().zero_widen_mul(rhs_lo.hi());
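        // With `q = Self::BITS / 4` (the quarter-word width), the partials sit at:
        // `tmp_0` at bit 0, `tmp_1` and `tmp_2` at bit `q`, and `tmp_3` at bit `2 * q`.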
        // sum up all widening partials
        let mul: Self = Self::from_lo_hi(tmp_0, tmp_3)
            .wrapping_add(tmp_1.zero_widen() << (Self::BITS / 4))
            .wrapping_add(tmp_2.zero_widen() << (Self::BITS / 4));
        // add the higher partials
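        // (the cross terms `lhs_lo * rhs_hi` and `lhs_hi * rhs_lo` only land in the
        // upper half of the result, and the `lhs_hi * rhs_hi` term carries weight
        // `2^BITS` and vanishes in wrapping arithmetic, so plain wrapping
        // half-multiplications suffice)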
        mul.wrapping_add(lhs_lo.wrapping_mul(rhs.hi()).widen_hi())
            .wrapping_add(self.hi().wrapping_mul(rhs_lo).widen_hi())
    }
}

impl Mul for u64 {}
impl Mul for i128 {}

pub(crate) trait UMulo: Int + DInt {
    fn mulo(self, rhs: Self) -> (Self, bool) {
        match (self.hi().is_zero(), rhs.hi().is_zero()) {
            // overflow is guaranteed
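            // (both operands are at least `1 << (Self::BITS / 2)`, so the true
            // product is at least `1 << Self::BITS`)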
            (false, false) => (self.wrapping_mul(rhs), true),
            (true, false) => {
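                // `self` fits in `Self::H`, so
                // `self * rhs == self * rhs.lo() + ((self * rhs.hi()) << (Self::BITS / 2))`;
                // overflow occurs iff that addition overflows or `self * rhs.hi()`
                // has bits in its upper half.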
                let mul_lo: Self = self.lo().widen_mul(rhs.lo());
                let mul_hi: Self = self.lo().widen_mul(rhs.hi());
                let (mul, o) = mul_lo.overflowing_add(mul_hi.lo().widen_hi());
                (mul, o || !mul_hi.hi().is_zero())
            }
            (false, true) => {
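                // mirror image of the previous arm with the operands swapped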
                let mul_lo: Self = rhs.lo().widen_mul(self.lo());
                let mul_hi: Self = rhs.lo().widen_mul(self.hi());
                let (mul, o) = mul_lo.overflowing_add(mul_hi.lo().widen_hi());
                (mul, o || !mul_hi.hi().is_zero())
            }
            // overflow is guaranteed not to happen, so use a smaller widening multiplication
            (true, true) => (self.lo().widen_mul(rhs.lo()), false),
        }
    }
}

impl UMulo for u32 {}
impl UMulo for u64 {}
impl UMulo for u128 {}

macro_rules! impl_signed_mulo {
    ($fn:ident, $iD:ident, $uD:ident) => {
        fn $fn(lhs: $iD, rhs: $iD) -> ($iD, bool) {
            let mut lhs = lhs;
            let mut rhs = rhs;
            // the test against `mul_neg` below fails without this early return
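            // (a zero product is never negative, so a negative expected sign would
            // be misreported as overflow)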
            if lhs == 0 || rhs == 0 {
                return (0, false);
            }

            let lhs_neg = lhs < 0;
            let rhs_neg = rhs < 0;
            if lhs_neg {
                lhs = lhs.wrapping_neg();
            }
            if rhs_neg {
                rhs = rhs.wrapping_neg();
            }
            let mul_neg = lhs_neg != rhs_neg;

            let (mul, o) = (lhs as $uD).mulo(rhs as $uD);
            let mut mul = mul as $iD;

            if mul_neg {
                mul = mul.wrapping_neg();
            }
            if (mul < 0) != mul_neg {
                // this one check happens to catch all edge cases related to `$iD::MIN`
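                // (e.g. for `i32::MIN * -1`: `i32::MIN.wrapping_neg()` is still
                // `i32::MIN`, the unsigned product is `1 << 31`, and casting back
                // yields `i32::MIN`, whose sign disagrees with the expected positive
                // result, so the overflow is caught here)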
                (mul, true)
            } else {
                (mul, o)
            }
        }
    };
}

impl_signed_mulo!(i32_overflowing_mul, i32, u32);
impl_signed_mulo!(i64_overflowing_mul, i64, u64);
impl_signed_mulo!(i128_overflowing_mul, i128, u128);

intrinsics! {
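    // `__muldi3`: `a * b` for 64-bit integers, wrapping on overflow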
    #[maybe_use_optimized_c_shim]
    #[arm_aeabi_alias = __aeabi_lmul]
    #[cfg(any(not(any(target_arch = "riscv32", target_arch = "riscv64")), target_feature = "m"))]
    pub extern "C" fn __muldi3(a: u64, b: u64) -> u64 {
        a.mul(b)
    }

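    // `__multi3`: `a * b` for 128-bit integers, wrapping on overflow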
    pub extern "C" fn __multi3(a: i128, b: i128) -> i128 {
        a.mul(b)
    }

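    // `__mulo{s,d,t}i4`: signed multiplication for 32-, 64-, and 128-bit integers
    // that also reports overflow through the `oflow` out-parameter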
    pub extern "C" fn __mulosi4(a: i32, b: i32, oflow: &mut i32) -> i32 {
        let (mul, o) = i32_overflowing_mul(a, b);
        *oflow = o as i32;
        mul
    }

    pub extern "C" fn __mulodi4(a: i64, b: i64, oflow: &mut i32) -> i64 {
        let (mul, o) = i64_overflowing_mul(a, b);
        *oflow = o as i32;
        mul
    }

    #[unadjusted_on_win64]
    pub extern "C" fn __muloti4(a: i128, b: i128, oflow: &mut i32) -> i128 {
        let (mul, o) = i128_overflowing_mul(a, b);
        *oflow = o as i32;
        mul
    }

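    // entry points used by Rust's own 128-bit overflow checks; these return the
    // (wrapped product, overflow) pair directly instead of using an out-parameter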
    pub extern "C" fn __rust_i128_mulo(a: i128, b: i128) -> (i128, bool) {
        i128_overflowing_mul(a, b)
    }

    pub extern "C" fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool) {
        a.mulo(b)
    }
}