1 | #![allow (unreachable_code)] |
2 | |
3 | use crate::float::Float; |
4 | use crate::int::Int; |
5 | |
/// Outcome of a floating-point three-way comparison, in the style of the
/// libgcc/compiler-rt soft-float routines. `Unordered` is the fourth state
/// required by IEEE 754: it arises whenever at least one operand is NaN.
#[derive(Clone, Copy)]
enum Result {
    Less,
    Equal,
    Greater,
    // At least one operand was NaN; no ordering relation holds.
    Unordered,
}
13 | |
14 | impl Result { |
15 | fn to_le_abi(self) -> i32 { |
16 | match self { |
17 | Result::Less => -1, |
18 | Result::Equal => 0, |
19 | Result::Greater => 1, |
20 | Result::Unordered => 1, |
21 | } |
22 | } |
23 | |
24 | fn to_ge_abi(self) -> i32 { |
25 | match self { |
26 | Result::Less => -1, |
27 | Result::Equal => 0, |
28 | Result::Greater => 1, |
29 | Result::Unordered => -1, |
30 | } |
31 | } |
32 | } |
33 | |
34 | fn cmp<F: Float>(a: F, b: F) -> Result { |
35 | let one = F::Int::ONE; |
36 | let zero = F::Int::ZERO; |
37 | let szero = F::SignedInt::ZERO; |
38 | |
39 | let sign_bit = F::SIGN_MASK as F::Int; |
40 | let abs_mask = sign_bit - one; |
41 | let exponent_mask = F::EXPONENT_MASK; |
42 | let inf_rep = exponent_mask; |
43 | |
44 | let a_rep = a.repr(); |
45 | let b_rep = b.repr(); |
46 | let a_abs = a_rep & abs_mask; |
47 | let b_abs = b_rep & abs_mask; |
48 | |
49 | // If either a or b is NaN, they are unordered. |
50 | if a_abs > inf_rep || b_abs > inf_rep { |
51 | return Result::Unordered; |
52 | } |
53 | |
54 | // If a and b are both zeros, they are equal. |
55 | if a_abs | b_abs == zero { |
56 | return Result::Equal; |
57 | } |
58 | |
59 | let a_srep = a.signed_repr(); |
60 | let b_srep = b.signed_repr(); |
61 | |
62 | // If at least one of a and b is positive, we get the same result comparing |
63 | // a and b as signed integers as we would with a fp_ting-point compare. |
64 | if a_srep & b_srep >= szero { |
65 | if a_srep < b_srep { |
66 | Result::Less |
67 | } else if a_srep == b_srep { |
68 | Result::Equal |
69 | } else { |
70 | Result::Greater |
71 | } |
72 | // Otherwise, both are negative, so we need to flip the sense of the |
73 | // comparison to get the correct result. (This assumes a twos- or ones- |
74 | // complement integer representation; if integers are represented in a |
75 | // sign-magnitude representation, then this flip is incorrect). |
76 | } else if a_srep > b_srep { |
77 | Result::Less |
78 | } else if a_srep == b_srep { |
79 | Result::Equal |
80 | } else { |
81 | Result::Greater |
82 | } |
83 | } |
84 | |
85 | fn unord<F: Float>(a: F, b: F) -> bool { |
86 | let one: ::Int = F::Int::ONE; |
87 | |
88 | let sign_bit: ::Int = F::SIGN_MASK as F::Int; |
89 | let abs_mask: <::Int as Sub<…>>::Output = sign_bit - one; |
90 | let exponent_mask: ::Int = F::EXPONENT_MASK; |
91 | let inf_rep: ::Int = exponent_mask; |
92 | |
93 | let a_rep: ::Int = a.repr(); |
94 | let b_rep: ::Int = b.repr(); |
95 | let a_abs: <::Int as BitAnd<…>>::Output = a_rep & abs_mask; |
96 | let b_abs: <::Int as BitAnd<…>>::Output = b_rep & abs_mask; |
97 | |
98 | a_abs > inf_rep || b_abs > inf_rep |
99 | } |
100 | |
// The `__*sf2`/`__*df2` symbols below implement the libgcc soft-float
// comparison ABI: each returns an i32 that the caller compares against 0.
// "le"-flavored routines map unordered (NaN) operands to 1, "ge"-flavored
// ones to -1, so that every ordered predicate evaluates to false when an
// operand is NaN. __eq*, __lt* and __ne* deliberately share the "le"
// lowering, and __gt* the "ge" lowering — the distinct symbol names exist
// only because compilers emit calls to the predicate-specific names.
intrinsics! {
    #[avr_skip]
    pub extern "C" fn __lesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __gesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    #[avr_skip]
    #[arm_aeabi_alias = __aeabi_fcmpun]
    pub extern "C" fn __unordsf2(a: f32, b: f32) -> i32 {
        // Boolean result: non-zero iff a and b compare unordered.
        unord(a, b) as i32
    }

    // Same lowering as __lesf2 (see the note above the block).
    #[avr_skip]
    pub extern "C" fn __eqsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __ltsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __nesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    // Same lowering as __gesf2 (see the note above the block).
    #[avr_skip]
    pub extern "C" fn __gtsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    // f64 variants, identical in structure to the f32 ones above.
    #[avr_skip]
    pub extern "C" fn __ledf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __gedf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    #[avr_skip]
    #[arm_aeabi_alias = __aeabi_dcmpun]
    pub extern "C" fn __unorddf2(a: f64, b: f64) -> i32 {
        unord(a, b) as i32
    }

    #[avr_skip]
    pub extern "C" fn __eqdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __ltdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __nedf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    #[avr_skip]
    pub extern "C" fn __gtdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_ge_abi()
    }
}
174 | |
// ARM-only helpers. Unlike the libgcc-style routines above, the AEABI
// `__aeabi_[fd]cmp*` helpers return a boolean result (1 if the predicate
// holds, 0 otherwise), so each one applies its predicate to the
// three-state result of the corresponding `__*2` routine.
#[cfg (target_arch = "arm" )]
intrinsics! {
    pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {
        (__lesf2(a, b) <= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpge(a: f32, b: f32) -> i32 {
        (__gesf2(a, b) >= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpeq(a: f32, b: f32) -> i32 {
        (__eqsf2(a, b) == 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmplt(a: f32, b: f32) -> i32 {
        (__ltsf2(a, b) < 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpgt(a: f32, b: f32) -> i32 {
        (__gtsf2(a, b) > 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmple(a: f64, b: f64) -> i32 {
        (__ledf2(a, b) <= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpge(a: f64, b: f64) -> i32 {
        (__gedf2(a, b) >= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpeq(a: f64, b: f64) -> i32 {
        (__eqdf2(a, b) == 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmplt(a: f64, b: f64) -> i32 {
        (__ltdf2(a, b) < 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpgt(a: f64, b: f64) -> i32 {
        (__gtdf2(a, b) > 0) as i32
    }

    // On hard-float targets LLVM will use native instructions
    // for all VFP intrinsics below

    pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
        (a >= b) as i32
    }

    pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
        (a >= b) as i32
    }

    pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
        (a > b) as i32
    }

    pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
        (a > b) as i32
    }

    pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
        (a < b) as i32
    }

    pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
        (a < b) as i32
    }

    pub extern "C" fn __lesf2vfp(a: f32, b: f32) -> i32 {
        (a <= b) as i32
    }

    pub extern "C" fn __ledf2vfp(a: f64, b: f64) -> i32 {
        (a <= b) as i32
    }

    pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
        (a != b) as i32
    }

    pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
        (a != b) as i32
    }

    pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
        (a == b) as i32
    }

    pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
        (a == b) as i32
    }
}
268 | |