use core::ops;

/// Minimal integer implementations needed on all integer types, including wide integers.
#[allow(dead_code)]
pub trait MinInt:
    Copy
    + core::fmt::Debug
    + ops::BitOr<Output = Self>
    + ops::Not<Output = Self>
    + ops::Shl<u32, Output = Self>
{
    /// Type with the same width but opposite signedness
    type OtherSign: MinInt;
    /// Unsigned version of `Self`
    type UnsignedInt: MinInt;

    /// Whether `Self` is a signed integer
    const SIGNED: bool;

    /// The bit width of the integer type
    const BITS: u32;

    const ZERO: Self;
    const ONE: Self;
    const MIN: Self;
    const MAX: Self;
}
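
// Illustrative sketch (not part of the original API): the bounds on `MinInt` alone are
// enough for simple bit manipulation. `mask_low_bits` is a hypothetical helper that
// builds a mask of the `n` lowest bits; callers are assumed to pass `n < I::BITS`.
#[allow(dead_code)]
fn mask_low_bits<I: MinInt>(n: u32) -> I {
    // `!ZERO` is all ones; shifting left by `n` clears the low `n` bits, and the outer
    // negation keeps exactly those bits.
    !(!I::ZERO << n)
}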

/// Trait for some basic operations on integers
#[allow(dead_code)]
pub trait Int:
    MinInt
    + PartialEq
    + PartialOrd
    + ops::AddAssign
    + ops::SubAssign
    + ops::BitAndAssign
    + ops::BitOrAssign
    + ops::BitXorAssign
    + ops::ShlAssign<i32>
    + ops::ShrAssign<u32>
    + ops::Add<Output = Self>
    + ops::Sub<Output = Self>
    + ops::Mul<Output = Self>
    + ops::Div<Output = Self>
    + ops::Shr<u32, Output = Self>
    + ops::BitXor<Output = Self>
    + ops::BitAnd<Output = Self>
{
    /// LUT used for maximizing the space covered and minimizing the computational cost of fuzzing
    /// in `testcrate`. For example, `Self = u128` produces
    /// [0, 1, 2, 7, 8, 15, 16, 31, 32, 63, 64, 95, 96, 111, 112, 119, 120, 125, 126, 127].
    const FUZZ_LENGTHS: [u8; 20] = make_fuzz_lengths(<Self as MinInt>::BITS);

    /// The number of entries of `FUZZ_LENGTHS` actually used. The maximum is 20 for `u128`.
    const FUZZ_NUM: usize = {
        let log2 = (<Self as MinInt>::BITS - 1).count_ones() as usize;
        if log2 == 3 {
            // case for u8
            6
        } else {
            // 3 entries on each extreme, 2 in the middle, and 4 for each scale of intermediate
            // boundaries.
            8 + (4 * (log2 - 4))
        }
    };
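
    // Worked values of the formula above (added for clarity; derived from the code):
    // `u8` -> 6, `u16` -> 8, `u32` -> 12, `u64` -> 16, `u128` -> 20.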

    fn unsigned(self) -> Self::UnsignedInt;
    fn from_unsigned(unsigned: Self::UnsignedInt) -> Self;
    fn unsigned_abs(self) -> Self::UnsignedInt;

    fn from_bool(b: bool) -> Self;

    /// Prevents the need for excessive conversions between signed and unsigned
    fn logical_shr(self, other: u32) -> Self;

    /// Absolute difference between two integers.
    fn abs_diff(self, other: Self) -> Self::UnsignedInt;

    // copied from primitive integers, but put in a trait
    fn is_zero(self) -> bool;
    fn wrapping_neg(self) -> Self;
    fn wrapping_add(self, other: Self) -> Self;
    fn wrapping_mul(self, other: Self) -> Self;
    fn wrapping_sub(self, other: Self) -> Self;
    fn wrapping_shl(self, other: u32) -> Self;
    fn wrapping_shr(self, other: u32) -> Self;
    fn rotate_left(self, other: u32) -> Self;
    fn overflowing_add(self, other: Self) -> (Self, bool);
    fn leading_zeros(self) -> u32;
    fn ilog2(self) -> u32;
}
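
// Illustrative sketch (not part of the original API): the `Int` bounds are enough to
// write width-generic helpers. `wrapping_average` is a hypothetical example; for
// unsigned operands it computes the floor of `(a + b) / 2` without overflowing.
#[allow(dead_code)]
fn wrapping_average<I: Int>(a: I, b: I) -> I {
    // `(a & b) + ((a ^ b) >> 1)` splits the sum into its carry and non-carry parts.
    (a & b).wrapping_add((a ^ b).wrapping_shr(1))
}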

pub(crate) const fn make_fuzz_lengths(bits: u32) -> [u8; 20] {
    let mut v = [0u8; 20];
    v[0] = 0;
    v[1] = 1;
    v[2] = 2; // important for parity and the iX::MIN case when reversed
    let mut i = 3;

    // No need for any more until the byte boundary, because there should be no algorithms
    // that are sensitive to anything not next to byte boundaries after 2. We also scale
    // in powers of two, which is important to prevent u128 corner tests from getting too
    // big.
    let mut l = 8;
    loop {
        if l >= ((bits / 2) as u8) {
            break;
        }
        // get both sides of the byte boundary
        v[i] = l - 1;
        i += 1;
        v[i] = l;
        i += 1;
        l *= 2;
    }

    if bits != 8 {
        // add the lower side of the middle boundary
        v[i] = ((bits / 2) - 1) as u8;
        i += 1;
    }

    // We do not want to jump directly from the Self::BITS/2 boundary to the Self::BITS
    // boundary because of algorithms that split the high part up. We reverse the scaling
    // as we go to Self::BITS.
    let mid = i;
    let mut j = 1;
    loop {
        v[i] = (bits as u8) - (v[mid - j]) - 1;
        if j == mid {
            break;
        }
        i += 1;
        j += 1;
    }
    v
}
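
// Illustrative compile-time check (a sketch, not part of the original file): for 128-bit
// integers the generated table matches the example given in the `FUZZ_LENGTHS` docs.
const _: () = {
    let expected = [
        0u8, 1, 2, 7, 8, 15, 16, 31, 32, 63, 64, 95, 96, 111, 112, 119, 120, 125, 126, 127,
    ];
    let got = make_fuzz_lengths(128);
    let mut i = 0;
    while i < expected.len() {
        assert!(got[i] == expected[i]);
        i += 1;
    }
};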

macro_rules! int_impl_common {
    ($ty:ty) => {
        fn from_bool(b: bool) -> Self {
            b as $ty
        }

        fn logical_shr(self, other: u32) -> Self {
            Self::from_unsigned(self.unsigned().wrapping_shr(other))
        }

        fn is_zero(self) -> bool {
            self == Self::ZERO
        }

        fn wrapping_neg(self) -> Self {
            <Self>::wrapping_neg(self)
        }

        fn wrapping_add(self, other: Self) -> Self {
            <Self>::wrapping_add(self, other)
        }

        fn wrapping_mul(self, other: Self) -> Self {
            <Self>::wrapping_mul(self, other)
        }

        fn wrapping_sub(self, other: Self) -> Self {
            <Self>::wrapping_sub(self, other)
        }

        fn wrapping_shl(self, other: u32) -> Self {
            <Self>::wrapping_shl(self, other)
        }

        fn wrapping_shr(self, other: u32) -> Self {
            <Self>::wrapping_shr(self, other)
        }

        fn rotate_left(self, other: u32) -> Self {
            <Self>::rotate_left(self, other)
        }

        fn overflowing_add(self, other: Self) -> (Self, bool) {
            <Self>::overflowing_add(self, other)
        }

        fn leading_zeros(self) -> u32 {
            <Self>::leading_zeros(self)
        }

        fn ilog2(self) -> u32 {
            <Self>::ilog2(self)
        }
    };
}

macro_rules! int_impl {
    ($ity:ty, $uty:ty) => {
        impl MinInt for $uty {
            type OtherSign = $ity;
            type UnsignedInt = $uty;

            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
            const SIGNED: bool = Self::MIN != Self::ZERO;

            const ZERO: Self = 0;
            const ONE: Self = 1;
            const MIN: Self = <Self>::MIN;
            const MAX: Self = <Self>::MAX;
        }

        impl Int for $uty {
            fn unsigned(self) -> $uty {
                self
            }

            // It makes writing macros easier if this is implemented for both signed and unsigned
            #[allow(clippy::wrong_self_convention)]
            fn from_unsigned(me: $uty) -> Self {
                me
            }

            fn unsigned_abs(self) -> Self {
                self
            }

            fn abs_diff(self, other: Self) -> Self {
                self.abs_diff(other)
            }

            int_impl_common!($uty);
        }

        impl MinInt for $ity {
            type OtherSign = $uty;
            type UnsignedInt = $uty;

            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
            const SIGNED: bool = Self::MIN != Self::ZERO;

            const ZERO: Self = 0;
            const ONE: Self = 1;
            const MIN: Self = <Self>::MIN;
            const MAX: Self = <Self>::MAX;
        }

        impl Int for $ity {
            fn unsigned(self) -> $uty {
                self as $uty
            }

            fn from_unsigned(me: $uty) -> Self {
                me as $ity
            }

            fn unsigned_abs(self) -> Self::UnsignedInt {
                self.unsigned_abs()
            }

            fn abs_diff(self, other: Self) -> $uty {
                self.abs_diff(other)
            }

            int_impl_common!($ity);
        }
    };
}

int_impl!(isize, usize);
int_impl!(i8, u8);
int_impl!(i16, u16);
int_impl!(i32, u32);
int_impl!(i64, u64);
int_impl!(i128, u128);
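
// Illustrative sketch (not part of the original API): with the impls above in place,
// generic code can query width and sign information without naming a concrete type.
// `sign_bit` is a hypothetical helper that reports whether the most significant bit is
// set (the sign bit for signed types); it works for signed and unsigned primitives alike.
#[allow(dead_code)]
fn sign_bit<I: Int>(x: I) -> bool {
    // A logical shift avoids sign extension on signed types, leaving only the top bit.
    x.logical_shr(<I as MinInt>::BITS - 1) != I::ZERO
}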

/// Trait for integers twice the bit width of another integer. This is implemented for all
/// primitives except for `u8`, because there is no smaller primitive.
pub trait DInt: MinInt {
    /// Integer that is half the bit width of the integer this trait is implemented for
    type H: HInt<D = Self>;

    /// Returns the low half of `self`
    fn lo(self) -> Self::H;
    /// Returns the high half of `self`
    fn hi(self) -> Self::H;
    /// Returns the low and high halves of `self` as a tuple
    fn lo_hi(self) -> (Self::H, Self::H) {
        (self.lo(), self.hi())
    }
    /// Constructs an integer using lower and higher half parts
    fn from_lo_hi(lo: Self::H, hi: Self::H) -> Self {
        lo.zero_widen() | hi.widen_hi()
    }
}

/// Trait for integers half the bit width of another integer. This is implemented for all
/// primitives except for `u128`, because there is no larger primitive.
pub trait HInt: Int {
    /// Integer that is double the bit width of the integer this trait is implemented for
    type D: DInt<H = Self> + MinInt;

    // NB: some of the below methods could have default implementations (e.g. `widen_hi`), but for
    // unknown reasons this can cause infinite recursion when optimizations are disabled. See
    // <https://github.com/rust-lang/compiler-builtins/pull/707> for context.

    /// Widens (using the default `as`-cast extension) the integer to have double bit width
    fn widen(self) -> Self::D;
    /// Widens (zero extension only) the integer to have double bit width. This is needed to get
    /// around problems with associated type bounds (such as `Int<OtherSign: DInt>`) being unstable
    fn zero_widen(self) -> Self::D;
    /// Widens the integer to have double bit width and shifts the integer into the higher bits
    fn widen_hi(self) -> Self::D;
    /// Widening multiplication with zero widening. This cannot overflow.
    fn zero_widen_mul(self, rhs: Self) -> Self::D;
    /// Widening multiplication. This cannot overflow.
    fn widen_mul(self, rhs: Self) -> Self::D;
}

macro_rules! impl_d_int {
    ($($X:ident $D:ident),*) => {
        $(
            impl DInt for $D {
                type H = $X;

                fn lo(self) -> Self::H {
                    self as $X
                }
                fn hi(self) -> Self::H {
                    (self >> <$X as MinInt>::BITS) as $X
                }
            }
        )*
    };
}

macro_rules! impl_h_int {
    ($($H:ident $uH:ident $X:ident),*) => {
        $(
            impl HInt for $H {
                type D = $X;

                fn widen(self) -> Self::D {
                    self as $X
                }
                fn zero_widen(self) -> Self::D {
                    (self as $uH) as $X
                }
                fn zero_widen_mul(self, rhs: Self) -> Self::D {
                    self.zero_widen().wrapping_mul(rhs.zero_widen())
                }
                fn widen_mul(self, rhs: Self) -> Self::D {
                    self.widen().wrapping_mul(rhs.widen())
                }
                fn widen_hi(self) -> Self::D {
                    (self as $X) << <Self as MinInt>::BITS
                }
            }
        )*
    };
}

impl_d_int!(u8 u16, u16 u32, u32 u64, u64 u128, i8 i16, i16 i32, i32 i64, i64 i128);
impl_h_int!(
    u8 u8 u16,
    u16 u16 u32,
    u32 u32 u64,
    u64 u64 u128,
    i8 u8 i16,
    i16 u16 i32,
    i32 u32 i64,
    i64 u64 i128
);
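
// Illustrative sketch (not part of the original API): the `DInt`/`HInt` pair is what lets
// wide arithmetic be written in terms of half-width parts. `mul_u64_via_halves` is a
// hypothetical helper equivalent to `a.widen_mul(b)`, spelled out as a schoolbook product.
#[allow(dead_code)]
fn mul_u64_via_halves(a: u64, b: u64) -> u128 {
    let (al, ah) = a.lo_hi();
    let (bl, bh) = b.lo_hi();
    // Four 32x32 -> 64 partial products, none of which can overflow.
    let ll = al.zero_widen_mul(bl);
    let lh = al.zero_widen_mul(bh);
    let hl = ah.zero_widen_mul(bl);
    let hh = ah.zero_widen_mul(bh);
    // Recombine the partials at their 32- and 64-bit offsets; the total equals `a * b`,
    // which always fits in 128 bits.
    (ll as u128) + ((lh as u128) << 32) + ((hl as u128) << 32) + ((hh as u128) << 64)
}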

/// Trait to express (possibly lossy) casting of integers
pub trait CastInto<T: Copy>: Copy {
    fn cast(self) -> T;
}

pub trait CastFrom<T: Copy>: Copy {
    fn cast_from(value: T) -> Self;
}

impl<T: Copy, U: CastInto<T> + Copy> CastFrom<U> for T {
    fn cast_from(value: U) -> Self {
        value.cast()
    }
}

macro_rules! cast_into {
    ($ty:ty) => {
        cast_into!($ty; usize, isize, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128);
    };
    ($ty:ty; $($into:ty),*) => {$(
        impl CastInto<$into> for $ty {
            fn cast(self) -> $into {
                self as $into
            }
        }
    )*};
}

cast_into!(usize);
cast_into!(isize);
cast_into!(u8);
cast_into!(i8);
cast_into!(u16);
cast_into!(i16);
cast_into!(u32);
cast_into!(i32);
cast_into!(u64);
cast_into!(i64);
cast_into!(u128);
cast_into!(i128);
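
// Illustrative sketch (not part of the original API): `CastInto` stands in for `as` casts
// in generic code. `low_byte` is a hypothetical helper using the impls generated above.
#[allow(dead_code)]
fn low_byte<I: CastInto<u8>>(x: I) -> u8 {
    // Equivalent to `x as u8` (truncating) for the primitive integer types.
    x.cast()
}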