use crate::float::Float;
use crate::int::{CastInto, Int, MinInt};

/// Generic conversion from a narrower to a wider IEEE-754 floating-point type
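///
/// Widening between these formats is always exact: every normal, subnormal,
/// zero, infinity, and NaN input maps losslessly into the wider type, so no
/// rounding is needed. The `intrinsics!` blocks below instantiate this for
/// the supported f16/f32/f64/f128 pairs.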
fn extend<F: Float, R: Float>(a: F) -> R
where
    F::Int: CastInto<u64>,
    u64: CastInto<F::Int>,
    u32: CastInto<R::Int>,
    R::Int: CastInto<u32>,
    R::Int: CastInto<u64>,
    u64: CastInto<R::Int>,
    F::Int: CastInto<R::Int>,
{
    let src_zero = F::Int::ZERO;
    let src_one = F::Int::ONE;
    let src_bits = F::BITS;
    let src_sig_bits = F::SIG_BITS;
    let src_exp_bias = F::EXP_BIAS;
    let src_min_normal = F::IMPLICIT_BIT;
    let src_infinity = F::EXP_MASK;
    let src_sign_mask = F::SIGN_MASK;
    let src_abs_mask = src_sign_mask - src_one;
    let src_qnan = F::SIG_MASK;
    let src_nan_code = src_qnan - src_one;

    let dst_bits = R::BITS;
    let dst_sig_bits = R::SIG_BITS;
    let dst_inf_exp = R::EXP_SAT;
    let dst_exp_bias = R::EXP_BIAS;
    let dst_min_normal = R::IMPLICIT_BIT;

    let sig_bits_delta = dst_sig_bits - src_sig_bits;
    let exp_bias_delta = dst_exp_bias - src_exp_bias;
    let a_abs = a.to_bits() & src_abs_mask;
    let mut abs_result = R::Int::ZERO;

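    // As a concrete instance: for the f32 -> f64 conversion below,
    // sig_bits_delta = 52 - 23 = 29 and exp_bias_delta = 1023 - 127 = 896.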
    if a_abs.wrapping_sub(src_min_normal) < src_infinity.wrapping_sub(src_min_normal) {
        // a is a normal number.
        // Extend to the destination type by shifting the significand and
        // exponent into the proper position and rebiasing the exponent.
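        // For example, 1.0_f32 (0x3F80_0000) becomes
        // (0x3F80_0000 << 29) + (896 << 52) = 0x3FF0_0000_0000_0000 = 1.0_f64.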
        let abs_dst: R::Int = a_abs.cast();
        let bias_dst: R::Int = exp_bias_delta.cast();
        abs_result = abs_dst.wrapping_shl(sig_bits_delta);
        abs_result += bias_dst.wrapping_shl(dst_sig_bits);
    } else if a_abs >= src_infinity {
        // a is NaN or infinity.
        // Conjure the result by beginning with infinity, then copying the
        // qNaN bit and the trailing NaN payload into the high bits of the
        // destination significand field.
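        // For example, the canonical f32 qNaN 0x7FC0_0000 becomes
        // 0x7FF0_0000_0000_0000 | (0x0040_0000 << 29) = 0x7FF8_0000_0000_0000,
        // the canonical f64 qNaN. A bare infinity carries an empty payload
        // and so stays a bare infinity.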
        let qnan_dst: R::Int = (a_abs & src_qnan).cast();
        let nan_code_dst: R::Int = (a_abs & src_nan_code).cast();
        let inf_exp_dst: R::Int = dst_inf_exp.cast();
        abs_result = inf_exp_dst.wrapping_shl(dst_sig_bits);
        abs_result |= qnan_dst.wrapping_shl(sig_bits_delta);
        abs_result |= nan_code_dst.wrapping_shl(sig_bits_delta);
    } else if a_abs != src_zero {
        // a is denormal.
        // Renormalize the significand and clear the leading bit, then insert
        // the correct adjusted exponent in the destination type.
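        // For example, the smallest positive f32 subnormal, 0x0000_0001
        // (2^-149): scale = 31 - 8 = 23, the significand shifts left by
        // 29 + 23 = 52 and the XOR clears its leading bit, and the stored
        // exponent becomes 896 - 23 + 1 = 874, giving
        // 0x36A0_0000_0000_0000 = 2^-149, now a normal f64.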
        let scale = a_abs.leading_zeros() - src_min_normal.leading_zeros();
        let abs_dst: R::Int = a_abs.cast();
        let bias_dst: R::Int = (exp_bias_delta - scale + 1).cast();
        abs_result = abs_dst.wrapping_shl(sig_bits_delta + scale);
        abs_result = (abs_result ^ dst_min_normal) | (bias_dst.wrapping_shl(dst_sig_bits));
    }

    let sign_result: R::Int = (a.to_bits() & src_sign_mask).cast();
    R::from_bits(abs_result | (sign_result.wrapping_shl(dst_bits - src_bits)))
}
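
// Illustrative spot checks for the three branches above, instantiated for
// f32 -> f64. This is a sketch rather than real coverage (the crate's test
// suite lives elsewhere); the expected bit patterns are the ones derived in
// the comments inside `extend`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extend_f32_to_f64_spot_checks() {
        // Normal number: 1.0_f32 -> 1.0_f64.
        let r: f64 = extend(1.0f32);
        assert_eq!(r.to_bits(), 0x3FF0_0000_0000_0000);

        // Smallest positive subnormal: 2^-149, which is a normal f64.
        let r: f64 = extend(f32::from_bits(1));
        assert_eq!(r.to_bits(), 0x36A0_0000_0000_0000);

        // Infinity and NaN keep their class; the sign bit carries over.
        let r: f64 = extend(f32::NEG_INFINITY);
        assert_eq!(r, f64::NEG_INFINITY);
        let r: f64 = extend(f32::NAN);
        assert!(r.is_nan());
    }
}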

intrinsics! {
    #[avr_skip]
    #[aapcs_on_arm]
    #[arm_aeabi_alias = __aeabi_f2d]
    pub extern "C" fn __extendsfdf2(a: f32) -> f64 {
        extend(a)
    }
}
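
// The attributes above and below are ABI plumbing expanded by the
// `intrinsics!` macro: `arm_aeabi_alias` additionally exports the `__aeabi_*`
// name required by the ARM EABI, `apple_f16_arg_abi` adjusts how `f16`
// arguments are passed on Apple targets, and `ppc_alias` substitutes the
// `*kf*` names PowerPC uses for IEEE binary128.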
intrinsics! {
    #[avr_skip]
    #[aapcs_on_arm]
    #[apple_f16_arg_abi]
    #[arm_aeabi_alias = __aeabi_h2f]
    #[cfg(f16_enabled)]
    pub extern "C" fn __extendhfsf2(a: f16) -> f32 {
        extend(a)
    }

    #[avr_skip]
    #[aapcs_on_arm]
    #[apple_f16_arg_abi]
    #[cfg(f16_enabled)]
    pub extern "C" fn __gnu_h2f_ieee(a: f16) -> f32 {
        extend(a)
    }

    #[avr_skip]
    #[aapcs_on_arm]
    #[apple_f16_arg_abi]
    #[cfg(f16_enabled)]
    pub extern "C" fn __extendhfdf2(a: f16) -> f64 {
        extend(a)
    }

    #[avr_skip]
    #[aapcs_on_arm]
    #[ppc_alias = __extendhfkf2]
    #[cfg(all(f16_enabled, f128_enabled))]
    pub extern "C" fn __extendhftf2(a: f16) -> f128 {
        extend(a)
    }

    #[avr_skip]
    #[aapcs_on_arm]
    #[ppc_alias = __extendsfkf2]
    #[cfg(f128_enabled)]
    pub extern "C" fn __extendsftf2(a: f32) -> f128 {
        extend(a)
    }

    #[avr_skip]
    #[aapcs_on_arm]
    #[ppc_alias = __extenddfkf2]
    #[cfg(f128_enabled)]
    pub extern "C" fn __extenddftf2(a: f64) -> f128 {
        extend(a)
    }
}
| 131 | |