Diffstat (limited to 'library/compiler-builtins/libm/src/math/support')
8 files changed, 2791 insertions, 0 deletions
diff --git a/library/compiler-builtins/libm/src/math/support/big.rs b/library/compiler-builtins/libm/src/math/support/big.rs
new file mode 100644
index 00000000000..eae08238e09
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/big.rs
@@ -0,0 +1,239 @@
//! Integers used for wide operations, larger than `u128`.

#[cfg(test)]
mod tests;

use core::ops;

use super::{DInt, HInt, Int, MinInt};

const U128_LO_MASK: u128 = u64::MAX as u128;

/// A 256-bit unsigned integer represented as two 128-bit native-endian limbs.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
pub struct u256 {
    pub lo: u128,
    pub hi: u128,
}

impl u256 {
    #[cfg(any(test, feature = "unstable-public-internals"))]
    pub const MAX: Self = Self { lo: u128::MAX, hi: u128::MAX };

    /// Reinterpret as a signed integer
    pub fn signed(self) -> i256 {
        i256 { lo: self.lo, hi: self.hi }
    }
}

/// A 256-bit signed integer represented as two 128-bit native-endian limbs.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
pub struct i256 {
    pub lo: u128,
    pub hi: u128,
}

impl i256 {
    /// Reinterpret as an unsigned integer
    #[cfg(any(test, feature = "unstable-public-internals"))]
    pub fn unsigned(self) -> u256 {
        u256 { lo: self.lo, hi: self.hi }
    }
}

impl MinInt for u256 {
    type OtherSign = i256;

    type Unsigned = u256;

    const SIGNED: bool = false;
    const BITS: u32 = 256;
    const ZERO: Self = Self { lo: 0, hi: 0 };
    const ONE: Self = Self { lo: 1, hi: 0 };
    const MIN: Self = Self { lo: 0, hi: 0 };
    const MAX: Self = Self { lo: u128::MAX, hi: u128::MAX };
}

impl MinInt for i256 {
    type OtherSign = u256;

    type Unsigned = u256;

    const SIGNED: bool = true;
    const BITS: u32 = 256;
    const ZERO: Self = Self { lo: 0, hi: 0 };
    const ONE: Self = Self { lo: 1, hi: 0 };
    // Two's complement bounds: MIN = -2^255, MAX = 2^255 - 1.
    const MIN: Self = Self { lo: 0, hi: 1 << 127 };
    const MAX: Self = Self { lo: u128::MAX, hi: u128::MAX >> 1 };
}
macro_rules! impl_common {
    ($ty:ty) => {
        impl ops::BitOr for $ty {
            type Output = Self;

            fn bitor(mut self, rhs: Self) -> Self::Output {
                self.lo |= rhs.lo;
                self.hi |= rhs.hi;
                self
            }
        }

        impl ops::Not for $ty {
            type Output = Self;

            fn not(mut self) -> Self::Output {
                self.lo = !self.lo;
                self.hi = !self.hi;
                self
            }
        }

        impl ops::Shl<u32> for $ty {
            type Output = Self;

            fn shl(self, _rhs: u32) -> Self::Output {
                unimplemented!("only used to meet trait bounds")
            }
        }
    };
}

impl_common!(i256);
impl_common!(u256);

impl ops::Add<Self> for u256 {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let (lo, carry) = self.lo.overflowing_add(rhs.lo);
        let hi = self.hi.wrapping_add(carry as u128).wrapping_add(rhs.hi);

        Self { lo, hi }
    }
}

impl ops::Shr<u32> for u256 {
    type Output = Self;

    fn shr(mut self, rhs: u32) -> Self::Output {
        debug_assert!(rhs < Self::BITS, "attempted to shift right with overflow");
        if rhs >= Self::BITS {
            return Self::ZERO;
        }

        if rhs == 0 {
            return self;
        }

        if rhs < 128 {
            self.lo >>= rhs;
            self.lo |= self.hi << (128 - rhs);
        } else {
            self.lo = self.hi >> (rhs - 128);
        }

        if rhs < 128 {
            self.hi >>= rhs;
        } else {
            self.hi = 0;
        }

        self
    }
}

impl HInt for u128 {
    type D = u256;

    fn widen(self) -> Self::D {
        u256 { lo: self, hi: 0 }
    }

    fn zero_widen(self) -> Self::D {
        self.widen()
    }

    fn zero_widen_mul(self, rhs: Self) -> Self::D {
        let l0 = self & U128_LO_MASK;
        let l1 = rhs & U128_LO_MASK;
        let h0 = self >> 64;
        let h1 = rhs >> 64;

        // Partial products of 64-bit halves; each fits in 128 bits, so the
        // wrapping multiplications can never actually wrap.
        let p_ll: u128 = l0.overflowing_mul(l1).0;
        let p_lh: u128 = l0.overflowing_mul(h1).0;
        let p_hl: u128 = h0.overflowing_mul(l1).0;
        let p_hh: u128 = h0.overflowing_mul(h1).0;

        let s0 = p_hl + (p_ll >> 64);
        let s1 = (p_ll & U128_LO_MASK) + (s0 << 64);
        let s2 = p_lh + (s1 >> 64);

        let lo = (p_ll & U128_LO_MASK) + (s2 << 64);
        let hi = p_hh + (s0 >> 64) + (s2 >> 64);

        u256 { lo, hi }
    }

    fn widen_mul(self, rhs: Self) -> Self::D {
        self.zero_widen_mul(rhs)
    }

    fn widen_hi(self) -> Self::D {
        // Construct directly rather than via `self.widen() << BITS`: `Shl` for
        // `u256` is `unimplemented!` and would panic at runtime.
        u256 { lo: 0, hi: self }
    }
}

impl HInt for i128 {
    type D = i256;

    fn widen(self) -> Self::D {
        let mut ret = self.unsigned().zero_widen().signed();
        if self.is_negative() {
            ret.hi = u128::MAX;
        }
        ret
    }

    fn zero_widen(self) -> Self::D {
        self.unsigned().zero_widen().signed()
    }

    fn zero_widen_mul(self, rhs: Self) -> Self::D {
        self.unsigned().zero_widen_mul(rhs.unsigned()).signed()
    }

    fn widen_mul(self, _rhs: Self) -> Self::D {
        unimplemented!("signed i128 widening multiply is not used")
    }

    fn widen_hi(self) -> Self::D {
        // As for `u128`: construct directly to avoid the unimplemented `Shl`.
        i256 { lo: 0, hi: self as u128 }
    }
}

impl DInt for u256 {
    type H = u128;

    fn lo(self) -> Self::H {
        self.lo
    }

    fn hi(self) -> Self::H {
        self.hi
    }
}

impl DInt for i256 {
    type H = i128;

    fn lo(self) -> Self::H {
        self.lo as i128
    }

    fn hi(self) -> Self::H {
        self.hi as i128
    }
}
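For reference, the limb scheme in `zero_widen_mul` above is easier to audit at half width, where native arithmetic covers the full product. The following standalone sketch is editorial (not part of the patch) and applies the same schoolbook decomposition to `u64`, checked against a native `u128` multiply:

const U64_LO_MASK: u64 = u32::MAX as u64;

/// Same shape as `zero_widen_mul` above, scaled down to u64 -> (lo, hi).
fn widen_mul_u64(a: u64, b: u64) -> (u64, u64) {
    // Split each operand into 32-bit halves.
    let (l0, h0) = (a & U64_LO_MASK, a >> 32);
    let (l1, h1) = (b & U64_LO_MASK, b >> 32);

    // Partial products of the halves; each fits in 64 bits.
    let p_ll = l0 * l1;
    let p_lh = l0 * h1;
    let p_hl = h0 * l1;
    let p_hh = h0 * h1;

    // Carry propagation, mirroring the u128 implementation line for line.
    let s0 = p_hl + (p_ll >> 32);
    let s1 = (p_ll & U64_LO_MASK) + (s0 << 32);
    let s2 = p_lh + (s1 >> 32);

    let lo = (p_ll & U64_LO_MASK) + (s2 << 32);
    let hi = p_hh + (s0 >> 32) + (s2 >> 32);
    (lo, hi)
}

#[test]
fn limb_mul_matches_native() {
    let (a, b) = (0xdead_beef_cafe_f00d_u64, u64::MAX);
    let (lo, hi) = widen_mul_u64(a, b);
    assert_eq!(((hi as u128) << 64) | lo as u128, (a as u128) * (b as u128));
}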
diff --git a/library/compiler-builtins/libm/src/math/support/big/tests.rs b/library/compiler-builtins/libm/src/math/support/big/tests.rs
new file mode 100644
index 00000000000..2c71191ba53
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/big/tests.rs
@@ -0,0 +1,149 @@
extern crate std;
use std::string::String;
use std::{eprintln, format};

use super::{HInt, MinInt, i256, u256};

const LOHI_SPLIT: u128 = 0xaaaaaaaaaaaaaaaaffffffffffffffff;

/// Print a `u256` as hex since we can't add format implementations
fn hexu(v: u256) -> String {
    format!("0x{:032x}{:032x}", v.hi, v.lo)
}

#[test]
fn widen_u128() {
    assert_eq!(u128::MAX.widen(), u256 { lo: u128::MAX, hi: 0 });
    assert_eq!(LOHI_SPLIT.widen(), u256 { lo: LOHI_SPLIT, hi: 0 });
}

#[test]
fn widen_i128() {
    assert_eq!((-1i128).widen(), u256::MAX.signed());
    assert_eq!((LOHI_SPLIT as i128).widen(), i256 { lo: LOHI_SPLIT, hi: u128::MAX });
    assert_eq!((-1i128).zero_widen().unsigned(), (u128::MAX).widen());
}

#[test]
fn widen_mul_u128() {
    let tests = [
        (u128::MAX / 2, 2_u128, u256 { lo: u128::MAX - 1, hi: 0 }),
        (u128::MAX, 2_u128, u256 { lo: u128::MAX - 1, hi: 1 }),
        (u128::MAX, u128::MAX, u256 { lo: 1, hi: u128::MAX - 1 }),
        (0, 0, u256::ZERO),
        (1234u128, 0, u256::ZERO),
        (0, 1234, u256::ZERO),
    ];

    let mut has_errors = false;
    let mut add_error = |i, a, b, expected, actual| {
        has_errors = true;
        eprintln!(
            "\
            FAILURE ({i}): {a:#034x} * {b:#034x}\n\
            expected: {}\n\
            got:      {}\
            ",
            hexu(expected),
            hexu(actual)
        );
    };

    for (i, (a, b, exp)) in tests.iter().copied().enumerate() {
        let res = a.widen_mul(b);
        let res_z = a.zero_widen_mul(b);
        assert_eq!(res, res_z);
        if res != exp {
            add_error(i, a, b, exp, res);
        }
    }

    assert!(!has_errors);
}

#[test]
fn not_u256() {
    assert_eq!(!u256::ZERO, u256::MAX);
}

#[test]
fn shr_u256() {
    let only_low = [1, u16::MAX.into(), u32::MAX.into(), u64::MAX.into(), u128::MAX];
    let mut has_errors = false;

    let mut add_error = |a, b, expected, actual| {
        has_errors = true;
        eprintln!(
            "\
            FAILURE: {} >> {b}\n\
            expected: {}\n\
            actual:   {}\
            ",
            hexu(a),
            hexu(expected),
            hexu(actual),
        );
    };

    for a in only_low {
        for perturb in 0..10 {
            let a = a.saturating_add(perturb);
            for shift in 0..128 {
                let res = a.widen() >> shift;
                let expected = (a >> shift).widen();
                if res != expected {
                    add_error(a.widen(), shift, expected, res);
                }
            }
        }
    }

    let check = [
        (u256::MAX, 1, u256 { lo: u128::MAX, hi: u128::MAX >> 1 }),
        (u256::MAX, 5, u256 { lo: u128::MAX, hi: u128::MAX >> 5 }),
        (u256::MAX, 63, u256 { lo: u128::MAX, hi: u64::MAX as u128 | (1 << 64) }),
        (u256::MAX, 64, u256 { lo: u128::MAX, hi: u64::MAX as u128 }),
        (u256::MAX, 65, u256 { lo: u128::MAX, hi: (u64::MAX >> 1) as u128 }),
        (u256::MAX, 127, u256 { lo: u128::MAX, hi: 1 }),
        (u256::MAX, 128, u256 { lo: u128::MAX, hi: 0 }),
        (u256::MAX, 129, u256 { lo: u128::MAX >> 1, hi: 0 }),
        (u256::MAX, 191, u256 { lo: u64::MAX as u128 | 1 << 64, hi: 0 }),
        (u256::MAX, 192, u256 { lo: u64::MAX as u128, hi: 0 }),
        (u256::MAX, 193, u256 { lo: u64::MAX as u128 >> 1, hi: 0 }),
        (u256::MAX, 254, u256 { lo: 0b11, hi: 0 }),
        (u256::MAX, 255, u256 { lo: 1, hi: 0 }),
        (
            u256 { hi: LOHI_SPLIT, lo: 0 },
            64,
            u256 { lo: 0xffffffffffffffff0000000000000000, hi: 0xaaaaaaaaaaaaaaaa },
        ),
    ];

    for (input, shift, expected) in check {
        let res = input >> shift;
        if res != expected {
            add_error(input, shift, expected, res);
        }
    }

    assert!(!has_errors);
}
#[test]
#[should_panic]
#[cfg(debug_assertions)]
// FIXME(ppc): ppc64le seems to have issues with `should_panic` tests.
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
fn shr_u256_overflow() {
    // Like regular shr, panic on overflow with debug assertions
    let _ = u256::MAX >> 256;
}

#[test]
#[cfg(not(debug_assertions))]
fn shr_u256_overflow() {
    // No panic without debug assertions
    assert_eq!(u256::MAX >> 256, u256::ZERO);
    assert_eq!(u256::MAX >> 257, u256::ZERO);
    assert_eq!(u256::MAX >> u32::MAX, u256::ZERO);
}
diff --git a/library/compiler-builtins/libm/src/math/support/env.rs b/library/compiler-builtins/libm/src/math/support/env.rs
new file mode 100644
index 00000000000..796309372a5
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/env.rs
@@ -0,0 +1,127 @@
//! Support for rounding directions and status flags as specified by IEEE 754.
//!
//! Rust does not support the floating point environment so rounding mode is passed as an argument
//! and status flags are returned as part of the result. There is currently not much support for
//! this; most existing ports from musl use a form of `force_eval!` to raise exceptions, but this
//! has no side effects in Rust. Further, correct behavior relies on elementary operations making
//! use of the correct rounding and raising relevant exceptions, which is not the case for Rust.
//!
//! This module exists so no functionality is lost when porting algorithms that respect floating
//! point environment, and so that some functionality may be tested (that which does not rely on
//! side effects from elementary operations). Full support would require wrappers around basic
//! operations, but there is no plan to add this at the current time.

/// A value combined with a floating point status.
pub struct FpResult<T> {
    pub val: T,
    #[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
    pub status: Status,
}

impl<T> FpResult<T> {
    pub fn new(val: T, status: Status) -> Self {
        Self { val, status }
    }

    /// Return `val` with `Status::OK`.
    pub fn ok(val: T) -> Self {
        Self { val, status: Status::OK }
    }
}

/// IEEE 754 rounding mode, excluding the optional `roundTiesToAway` version of nearest.
///
/// Integer representation comes from what CORE-MATH uses for indexing.
#[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Round {
    /// IEEE 754 nearest, `roundTiesToEven`.
    Nearest = 0,
    /// IEEE 754 `roundTowardNegative`.
    Negative = 1,
    /// IEEE 754 `roundTowardPositive`.
    Positive = 2,
    /// IEEE 754 `roundTowardZero`.
    Zero = 3,
}
/// IEEE 754 exception status flags.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Status(u8);

impl Status {
    /// Default status indicating no errors.
    pub const OK: Self = Self(0);

    /// No definable result.
    ///
    /// Includes:
    /// - Any ops on sNaN, with a few exceptions.
    /// - `0 * inf`, `inf * 0`.
    /// - `fma(0, inf, c)` or `fma(inf, 0, c)`, possibly excluding `c = qNaN`.
    /// - `+inf + -inf` and similar (includes subtraction and fma).
    /// - `0.0 / 0.0`, `inf / inf`
    /// - `remainder(x, y)` if `y == 0.0` or `x == inf`, and neither is NaN.
    /// - `sqrt(x)` with `x < 0.0`.
    pub const INVALID: Self = Self(1);

    /// Division by zero.
    ///
    /// The default result for division is +/-inf based on operand sign. For `logB`, the default
    /// result is -inf.
    ///
    /// Includes:
    /// - `x / y` when `x != 0.0` and `y == 0.0`.
    #[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
    pub const DIVIDE_BY_ZERO: Self = Self(1 << 2);

    /// The result exceeds the maximum finite value.
    ///
    /// The default result depends on rounding mode. `Nearest*` rounds to +/- infinity, sign based
    /// on the intermediate result. `Zero` rounds to the signed maximum finite. `Positive` and
    /// `Negative` round to signed maximum finite in one direction, signed infinity in the other.
    #[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
    pub const OVERFLOW: Self = Self(1 << 3);

    /// The result is subnormal and lost precision.
    pub const UNDERFLOW: Self = Self(1 << 4);

    /// The finite-precision result does not match that of infinite precision, and the reason
    /// is not represented by one of the other flags.
    pub const INEXACT: Self = Self(1 << 5);

    /// True if `UNDERFLOW` is set.
    #[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
    pub const fn underflow(self) -> bool {
        self.0 & Self::UNDERFLOW.0 != 0
    }

    /// True if `OVERFLOW` is set.
    #[cfg_attr(not(feature = "unstable-public-internals"), allow(dead_code))]
    pub const fn overflow(self) -> bool {
        self.0 & Self::OVERFLOW.0 != 0
    }

    pub fn set_underflow(&mut self, val: bool) {
        self.set_flag(val, Self::UNDERFLOW);
    }

    /// True if `INEXACT` is set.
    pub const fn inexact(self) -> bool {
        self.0 & Self::INEXACT.0 != 0
    }

    pub fn set_inexact(&mut self, val: bool) {
        self.set_flag(val, Self::INEXACT);
    }

    fn set_flag(&mut self, val: bool, mask: Self) {
        if val {
            self.0 |= mask.0;
        } else {
            self.0 &= !mask.0;
        }
    }

    pub(crate) const fn with(self, rhs: Self) -> Self {
        Self(self.0 | rhs.0)
    }
}
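For reference, this is the calling convention the module enables: a routine returns its status alongside the value instead of mutating a (nonexistent in Rust) global floating point environment. The sketch below is editorial, not part of the patch, and `div2` is a hypothetical example rather than a libm routine:

fn div2(x: f64) -> FpResult<f64> {
    let y = x * 0.5;
    let mut status = Status::OK;
    // Halving is exact unless the low significand bit is shifted out of the
    // subnormal range; doubling back detects that case.
    if !x.is_nan() && y * 2.0 != x {
        status.set_inexact(true);
        status.set_underflow(true);
    }
    FpResult::new(y, status)
}

An exact call comes back with `Status::OK`, while `div2(f64::from_bits(1))` (the smallest subnormal) would report both underflow and inexact.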
diff --git a/library/compiler-builtins/libm/src/math/support/float_traits.rs b/library/compiler-builtins/libm/src/math/support/float_traits.rs
new file mode 100644
index 00000000000..fac10483237
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/float_traits.rs
@@ -0,0 +1,484 @@
use core::{fmt, mem, ops};

use super::int_traits::{CastFrom, Int, MinInt};

/// Trait for some basic operations on floats
// #[allow(dead_code)]
pub trait Float:
    Copy
    + fmt::Debug
    + PartialEq
    + PartialOrd
    + ops::AddAssign
    + ops::MulAssign
    + ops::Add<Output = Self>
    + ops::Sub<Output = Self>
    + ops::Mul<Output = Self>
    + ops::Div<Output = Self>
    + ops::Rem<Output = Self>
    + ops::Neg<Output = Self>
    + 'static
{
    /// A uint of the same width as the float
    type Int: Int<OtherSign = Self::SignedInt, Unsigned = Self::Int>;

    /// An int of the same width as the float
    type SignedInt: Int
        + MinInt<OtherSign = Self::Int, Unsigned = Self::Int>
        + ops::Neg<Output = Self::SignedInt>;

    const ZERO: Self;
    const NEG_ZERO: Self;
    const ONE: Self;
    const NEG_ONE: Self;
    const INFINITY: Self;
    const NEG_INFINITY: Self;
    const NAN: Self;
    const NEG_NAN: Self;
    const MAX: Self;
    const MIN: Self;
    const EPSILON: Self;
    const PI: Self;
    const NEG_PI: Self;
    const FRAC_PI_2: Self;

    const MIN_POSITIVE_NORMAL: Self;

    /// The bitwidth of the float type
    const BITS: u32;

    /// The bitwidth of the significand
    const SIG_BITS: u32;

    /// The bitwidth of the exponent
    const EXP_BITS: u32 = Self::BITS - Self::SIG_BITS - 1;

    /// The saturated (maximum bitpattern) value of the exponent, i.e. the infinite
    /// representation.
    ///
    /// This is the value shifted fully right; use `EXP_MASK` for the mask in place.
    const EXP_SAT: u32 = (1 << Self::EXP_BITS) - 1;

    /// The exponent bias value
    const EXP_BIAS: u32 = Self::EXP_SAT >> 1;

    /// Maximum unbiased exponent value.
    const EXP_MAX: i32 = Self::EXP_BIAS as i32;

    /// Minimum *NORMAL* unbiased exponent value.
    const EXP_MIN: i32 = -(Self::EXP_MAX - 1);

    /// Minimum subnormal exponent value.
    const EXP_MIN_SUBNORM: i32 = Self::EXP_MIN - Self::SIG_BITS as i32;

    /// A mask for the sign bit
    const SIGN_MASK: Self::Int;

    /// A mask for the significand
    const SIG_MASK: Self::Int;

    /// A mask for the exponent
    const EXP_MASK: Self::Int;

    /// The implicit bit of the float format
    const IMPLICIT_BIT: Self::Int;

    /// Returns `self` transmuted to `Self::Int`
    fn to_bits(self) -> Self::Int;

    /// Returns `self` transmuted to `Self::SignedInt`
    #[allow(dead_code)]
    fn to_bits_signed(self) -> Self::SignedInt {
        self.to_bits().signed()
    }

    /// Check bitwise equality.
    #[allow(dead_code)]
    fn biteq(self, rhs: Self) -> bool {
        self.to_bits() == rhs.to_bits()
    }

    /// Checks if two floats have the same bit representation. *Except* for NaNs! NaN can be
    /// represented in multiple different ways.
    ///
    /// This method returns `true` if two NaNs are compared. Use [`biteq`](Self::biteq) instead
    /// if `NaN` should not be treated separately.
    #[allow(dead_code)]
    fn eq_repr(self, rhs: Self) -> bool {
        if self.is_nan() && rhs.is_nan() { true } else { self.biteq(rhs) }
    }

    /// Returns true if the value is NaN.
    fn is_nan(self) -> bool;

    /// Returns true if the value is +inf or -inf.
    fn is_infinite(self) -> bool;

    /// Returns true if the sign is negative. Extracts the sign bit regardless of zero or NaN.
    fn is_sign_negative(self) -> bool;

    /// Returns true if the sign is positive. Extracts the sign bit regardless of zero or NaN.
    fn is_sign_positive(self) -> bool {
        !self.is_sign_negative()
    }

    /// Returns if `self` is subnormal.
    #[allow(dead_code)]
    fn is_subnormal(self) -> bool {
        (self.to_bits() & Self::EXP_MASK) == Self::Int::ZERO
    }

    /// Returns the exponent, not adjusting for bias, not accounting for subnormals or zero.
    fn ex(self) -> u32 {
        u32::cast_from(self.to_bits() >> Self::SIG_BITS) & Self::EXP_SAT
    }

    /// Extract the exponent and adjust it for bias, not accounting for subnormals or zero.
    fn exp_unbiased(self) -> i32 {
        self.ex().signed() - (Self::EXP_BIAS as i32)
    }

    /// Returns the significand with no implicit bit (or the "fractional" part)
    #[allow(dead_code)]
    fn frac(self) -> Self::Int {
        self.to_bits() & Self::SIG_MASK
    }

    /// Returns a `Self::Int` transmuted back to `Self`
    fn from_bits(a: Self::Int) -> Self;

    /// Constructs a `Self` from its parts. Inputs are treated as bits and shifted into position.
    fn from_parts(negative: bool, exponent: u32, significand: Self::Int) -> Self {
        let sign = if negative { Self::Int::ONE } else { Self::Int::ZERO };
        Self::from_bits(
            (sign << (Self::BITS - 1))
                | (Self::Int::cast_from(exponent & Self::EXP_SAT) << Self::SIG_BITS)
                | (significand & Self::SIG_MASK),
        )
    }

    #[allow(dead_code)]
    fn abs(self) -> Self;

    /// Returns a number composed of the magnitude of `self` and the sign of `other`.
    fn copysign(self, other: Self) -> Self;

    /// Fused multiply add, rounding once.
    fn fma(self, y: Self, z: Self) -> Self;

    /// Returns (normalized exponent, normalized significand)
    #[allow(dead_code)]
    fn normalize(significand: Self::Int) -> (i32, Self::Int);

    /// Returns a number that represents the sign of self.
    #[allow(dead_code)]
    fn signum(self) -> Self {
        if self.is_nan() { self } else { Self::ONE.copysign(self) }
    }
}

/// Access the associated `Int` type from a float (helper to avoid ambiguous associated types).
pub type IntTy<F> = <F as Float>::Int;

macro_rules! float_impl {
    (
        $ty:ident,
        $ity:ident,
        $sity:ident,
        $bits:expr,
        $significand_bits:expr,
        $from_bits:path,
        $to_bits:path,
        $fma_fn:ident,
        $fma_intrinsic:ident
    ) => {
        impl Float for $ty {
            type Int = $ity;
            type SignedInt = $sity;

            const ZERO: Self = 0.0;
            const NEG_ZERO: Self = -0.0;
            const ONE: Self = 1.0;
            const NEG_ONE: Self = -1.0;
            const INFINITY: Self = Self::INFINITY;
            const NEG_INFINITY: Self = Self::NEG_INFINITY;
            const NAN: Self = Self::NAN;
            // NAN isn't guaranteed to be positive but it usually is. We only use this for
            // tests.
            const NEG_NAN: Self = $from_bits($to_bits(Self::NAN) | Self::SIGN_MASK);
            const MAX: Self = -Self::MIN;
            // Sign bit set, saturated mantissa, saturated exponent with last bit zeroed
            const MIN: Self = $from_bits(Self::Int::MAX & !(1 << Self::SIG_BITS));
            const EPSILON: Self = <$ty>::EPSILON;

            // Exponent is a 1 in the LSB
            const MIN_POSITIVE_NORMAL: Self = $from_bits(1 << Self::SIG_BITS);

            const PI: Self = core::$ty::consts::PI;
            const NEG_PI: Self = -Self::PI;
            const FRAC_PI_2: Self = core::$ty::consts::FRAC_PI_2;

            const BITS: u32 = $bits;
            const SIG_BITS: u32 = $significand_bits;

            const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1);
            const SIG_MASK: Self::Int = (1 << Self::SIG_BITS) - 1;
            const EXP_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIG_MASK);
            const IMPLICIT_BIT: Self::Int = 1 << Self::SIG_BITS;

            fn to_bits(self) -> Self::Int {
                self.to_bits()
            }
            fn is_nan(self) -> bool {
                self.is_nan()
            }
            fn is_infinite(self) -> bool {
                self.is_infinite()
            }
            fn is_sign_negative(self) -> bool {
                self.is_sign_negative()
            }
            fn from_bits(a: Self::Int) -> Self {
                Self::from_bits(a)
            }
            fn abs(self) -> Self {
                cfg_if! {
                    // FIXME(msrv): `abs` is available in `core` starting with 1.85.
                    if #[cfg(intrinsics_enabled)] {
                        self.abs()
                    } else {
                        super::super::generic::fabs(self)
                    }
                }
            }
            fn copysign(self, other: Self) -> Self {
                cfg_if! {
                    // FIXME(msrv): `copysign` is available in `core` starting with 1.85.
                    if #[cfg(intrinsics_enabled)] {
                        self.copysign(other)
                    } else {
                        super::super::generic::copysign(self, other)
                    }
                }
            }
            fn fma(self, y: Self, z: Self) -> Self {
                cfg_if! {
                    // fma is not yet available in `core`
                    if #[cfg(intrinsics_enabled)] {
                        unsafe { core::intrinsics::$fma_intrinsic(self, y, z) }
                    } else {
                        super::super::$fma_fn(self, y, z)
                    }
                }
            }
            fn normalize(significand: Self::Int) -> (i32, Self::Int) {
                let shift = significand.leading_zeros().wrapping_sub(Self::EXP_BITS);
                (1i32.wrapping_sub(shift as i32), significand << shift)
            }
        }
    };
}

#[cfg(f16_enabled)]
float_impl!(f16, u16, i16, 16, 10, f16::from_bits, f16::to_bits, fmaf16, fmaf16);
float_impl!(f32, u32, i32, 32, 23, f32_from_bits, f32_to_bits, fmaf, fmaf32);
float_impl!(f64, u64, i64, 64, 52, f64_from_bits, f64_to_bits, fma, fmaf64);
#[cfg(f128_enabled)]
float_impl!(f128, u128, i128, 128, 112, f128::from_bits, f128::to_bits, fmaf128, fmaf128);

/* FIXME(msrv): vendor some things that are not const stable at our MSRV */

/// `f32::from_bits`
pub const fn f32_from_bits(bits: u32) -> f32 {
    // SAFETY: POD cast with no preconditions
    unsafe { mem::transmute::<u32, f32>(bits) }
}

/// `f32::to_bits`
pub const fn f32_to_bits(x: f32) -> u32 {
    // SAFETY: POD cast with no preconditions
    unsafe { mem::transmute::<f32, u32>(x) }
}

/// `f64::from_bits`
pub const fn f64_from_bits(bits: u64) -> f64 {
    // SAFETY: POD cast with no preconditions
    unsafe { mem::transmute::<u64, f64>(bits) }
}

/// `f64::to_bits`
pub const fn f64_to_bits(x: f64) -> u64 {
    // SAFETY: POD cast with no preconditions
    unsafe { mem::transmute::<f64, u64>(x) }
}

/// Trait for floats twice the bit width of another float.
pub trait DFloat: Float {
    /// Float that is half the bit width of the float this trait is implemented for.
    type H: HFloat<D = Self>;

    /// Narrow the float type.
    fn narrow(self) -> Self::H;
}

/// Trait for floats half the bit width of another float.
pub trait HFloat: Float {
    /// Float that is double the bit width of the float this trait is implemented for.
    type D: DFloat<H = Self>;

    /// Widen the float type.
    fn widen(self) -> Self::D;
}

macro_rules! impl_d_float {
    ($($X:ident $D:ident),*) => {
        $(
            impl DFloat for $D {
                type H = $X;

                fn narrow(self) -> Self::H {
                    self as $X
                }
            }
        )*
    };
}

macro_rules! impl_h_float {
    ($($H:ident $X:ident),*) => {
        $(
            impl HFloat for $H {
                type D = $X;

                fn widen(self) -> Self::D {
                    self as $X
                }
            }
        )*
    };
}

impl_d_float!(f32 f64);
#[cfg(f16_enabled)]
impl_d_float!(f16 f32);
#[cfg(f128_enabled)]
impl_d_float!(f64 f128);

impl_h_float!(f32 f64);
#[cfg(f16_enabled)]
impl_h_float!(f16 f32);
#[cfg(f128_enabled)]
impl_h_float!(f64 f128);
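// Editorial sketch, not part of the original patch: the narrow/widen pairs are
// intended for "compute wide, round once" patterns. The helper below is a
// hypothetical illustration, not a libm API.
#[allow(dead_code)]
fn mul_via_wide<F: HFloat>(a: F, b: F) -> F {
    // The product is exact in the wide type whenever the wide significand can
    // hold the full double-width product (true for f32 -> f64), so only the
    // final narrowing rounds.
    (a.widen() * b.widen()).narrow()
}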
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[cfg(f16_enabled)]
    fn check_f16() {
        // Constants
        assert_eq!(f16::EXP_SAT, 0b11111);
        assert_eq!(f16::EXP_BIAS, 15);
        assert_eq!(f16::EXP_MAX, 15);
        assert_eq!(f16::EXP_MIN, -14);
        assert_eq!(f16::EXP_MIN_SUBNORM, -24);

        // `exp_unbiased`
        assert_eq!(f16::FRAC_PI_2.exp_unbiased(), 0);
        assert_eq!((1.0f16 / 2.0).exp_unbiased(), -1);
        assert_eq!(f16::MAX.exp_unbiased(), 15);
        assert_eq!(f16::MIN.exp_unbiased(), 15);
        assert_eq!(f16::MIN_POSITIVE.exp_unbiased(), -14);
        // This is a convenience method and not ldexp, `exp_unbiased` does not return correct
        // results for zero and subnormals.
        assert_eq!(f16::ZERO.exp_unbiased(), -15);
        assert_eq!(f16::from_bits(0x1).exp_unbiased(), -15);
        assert_eq!(f16::MIN_POSITIVE, f16::MIN_POSITIVE_NORMAL);

        // `from_parts`
        assert_biteq!(f16::from_parts(true, f16::EXP_BIAS, 0), -1.0f16);
        assert_biteq!(f16::from_parts(false, 0, 1), f16::from_bits(0x1));
    }

    #[test]
    fn check_f32() {
        // Constants
        assert_eq!(f32::EXP_SAT, 0b11111111);
        assert_eq!(f32::EXP_BIAS, 127);
        assert_eq!(f32::EXP_MAX, 127);
        assert_eq!(f32::EXP_MIN, -126);
        assert_eq!(f32::EXP_MIN_SUBNORM, -149);

        // `exp_unbiased`
        assert_eq!(f32::FRAC_PI_2.exp_unbiased(), 0);
        assert_eq!((1.0f32 / 2.0).exp_unbiased(), -1);
        assert_eq!(f32::MAX.exp_unbiased(), 127);
        assert_eq!(f32::MIN.exp_unbiased(), 127);
        assert_eq!(f32::MIN_POSITIVE.exp_unbiased(), -126);
        // This is a convenience method and not ldexp, `exp_unbiased` does not return correct
        // results for zero and subnormals.
        assert_eq!(f32::ZERO.exp_unbiased(), -127);
        assert_eq!(f32::from_bits(0x1).exp_unbiased(), -127);
        assert_eq!(f32::MIN_POSITIVE, f32::MIN_POSITIVE_NORMAL);

        // `from_parts`
        assert_biteq!(f32::from_parts(true, f32::EXP_BIAS, 0), -1.0f32);
        assert_biteq!(f32::from_parts(false, 10 + f32::EXP_BIAS, 0), hf32!("0x1p10"));
        assert_biteq!(f32::from_parts(false, 0, 1), f32::from_bits(0x1));
    }

    #[test]
    fn check_f64() {
        // Constants
        assert_eq!(f64::EXP_SAT, 0b11111111111);
        assert_eq!(f64::EXP_BIAS, 1023);
        assert_eq!(f64::EXP_MAX, 1023);
        assert_eq!(f64::EXP_MIN, -1022);
        assert_eq!(f64::EXP_MIN_SUBNORM, -1074);

        // `exp_unbiased`
        assert_eq!(f64::FRAC_PI_2.exp_unbiased(), 0);
        assert_eq!((1.0f64 / 2.0).exp_unbiased(), -1);
        assert_eq!(f64::MAX.exp_unbiased(), 1023);
        assert_eq!(f64::MIN.exp_unbiased(), 1023);
        assert_eq!(f64::MIN_POSITIVE.exp_unbiased(), -1022);
        // This is a convenience method and not ldexp, `exp_unbiased` does not return correct
        // results for zero and subnormals.
        assert_eq!(f64::ZERO.exp_unbiased(), -1023);
        assert_eq!(f64::from_bits(0x1).exp_unbiased(), -1023);
        assert_eq!(f64::MIN_POSITIVE, f64::MIN_POSITIVE_NORMAL);

        // `from_parts`
        assert_biteq!(f64::from_parts(true, f64::EXP_BIAS, 0), -1.0f64);
        assert_biteq!(f64::from_parts(false, 10 + f64::EXP_BIAS, 0), hf64!("0x1p10"));
        assert_biteq!(f64::from_parts(false, 0, 1), f64::from_bits(0x1));
    }

    #[test]
    #[cfg(f128_enabled)]
    fn check_f128() {
        // Constants
        assert_eq!(f128::EXP_SAT, 0b111111111111111);
        assert_eq!(f128::EXP_BIAS, 16383);
        assert_eq!(f128::EXP_MAX, 16383);
        assert_eq!(f128::EXP_MIN, -16382);
        assert_eq!(f128::EXP_MIN_SUBNORM, -16494);

        // `exp_unbiased`
        assert_eq!(f128::FRAC_PI_2.exp_unbiased(), 0);
        assert_eq!((1.0f128 / 2.0).exp_unbiased(), -1);
        assert_eq!(f128::MAX.exp_unbiased(), 16383);
        assert_eq!(f128::MIN.exp_unbiased(), 16383);
        assert_eq!(f128::MIN_POSITIVE.exp_unbiased(), -16382);
        // This is a convenience method and not ldexp, `exp_unbiased` does not return correct
        // results for zero and subnormals.
        assert_eq!(f128::ZERO.exp_unbiased(), -16383);
        assert_eq!(f128::from_bits(0x1).exp_unbiased(), -16383);
        assert_eq!(f128::MIN_POSITIVE, f128::MIN_POSITIVE_NORMAL);

        // `from_parts`
        assert_biteq!(f128::from_parts(true, f128::EXP_BIAS, 0), -1.0f128);
        assert_biteq!(f128::from_parts(false, 0, 1), f128::from_bits(0x1));
    }
}
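For reference, an editorial sketch (not part of the patch) of the kind of width-generic code the `Float` trait supports, assuming the trait and its `Int` bound are in scope. It classifies a value from the raw exponent and fraction fields alone:

fn classify<F: Float>(x: F) -> &'static str {
    if x.ex() == F::EXP_SAT {
        // Saturated exponent: infinity or NaN depending on the fraction.
        if x.frac() == F::Int::ZERO { "infinite" } else { "nan" }
    } else if x.ex() == 0 {
        // Zero exponent field: zero or subnormal.
        if x.frac() == F::Int::ZERO { "zero" } else { "subnormal" }
    } else {
        "normal"
    }
}

// e.g. classify(f64::NAN) == "nan", classify(4.2f32) == "normal"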
diff --git a/library/compiler-builtins/libm/src/math/support/hex_float.rs b/library/compiler-builtins/libm/src/math/support/hex_float.rs
new file mode 100644
index 00000000000..819e2f56e36
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/hex_float.rs
@@ -0,0 +1,1155 @@
//! Utilities for working with hex float formats.

use core::fmt;

use super::{Float, Round, Status, f32_from_bits, f64_from_bits};

/// Construct a 16-bit float from hex float representation (C-style)
#[cfg(f16_enabled)]
pub const fn hf16(s: &str) -> f16 {
    match parse_hex_exact(s, 16, 10) {
        Ok(bits) => f16::from_bits(bits as u16),
        Err(HexFloatParseError(s)) => panic!("{}", s),
    }
}

/// Construct a 32-bit float from hex float representation (C-style)
#[allow(unused)]
pub const fn hf32(s: &str) -> f32 {
    match parse_hex_exact(s, 32, 23) {
        Ok(bits) => f32_from_bits(bits as u32),
        Err(HexFloatParseError(s)) => panic!("{}", s),
    }
}

/// Construct a 64-bit float from hex float representation (C-style)
pub const fn hf64(s: &str) -> f64 {
    match parse_hex_exact(s, 64, 52) {
        Ok(bits) => f64_from_bits(bits as u64),
        Err(HexFloatParseError(s)) => panic!("{}", s),
    }
}

/// Construct a 128-bit float from hex float representation (C-style)
#[cfg(f128_enabled)]
pub const fn hf128(s: &str) -> f128 {
    match parse_hex_exact(s, 128, 112) {
        Ok(bits) => f128::from_bits(bits),
        Err(HexFloatParseError(s)) => panic!("{}", s),
    }
}

#[derive(Copy, Clone, Debug)]
pub struct HexFloatParseError(&'static str);

/// Parses any float to its bitwise representation, returning an error if it cannot be
/// represented exactly.
pub const fn parse_hex_exact(
    s: &str,
    bits: u32,
    sig_bits: u32,
) -> Result<u128, HexFloatParseError> {
    match parse_any(s, bits, sig_bits, Round::Nearest) {
        Err(e) => Err(e),
        Ok((bits, Status::OK)) => Ok(bits),
        Ok((_, status)) if status.overflow() => Err(HexFloatParseError("the value is too huge")),
        Ok((_, status)) if status.underflow() => Err(HexFloatParseError("the value is too tiny")),
        Ok((_, status)) if status.inexact() => Err(HexFloatParseError("the value is too precise")),
        Ok(_) => unreachable!(),
    }
}

/// Parse any float from hex to its bitwise representation.
pub const fn parse_any(
    s: &str,
    bits: u32,
    sig_bits: u32,
    round: Round,
) -> Result<(u128, Status), HexFloatParseError> {
    let mut b = s.as_bytes();

    if sig_bits > 119 || bits > 128 || bits < sig_bits + 3 || bits > sig_bits + 30 {
        return Err(HexFloatParseError("unsupported target float configuration"));
    }

    let neg = matches!(b, [b'-', ..]);
    if let &[b'-' | b'+', ref rest @ ..] = b {
        b = rest;
    }

    let sign_bit = 1 << (bits - 1);
    let quiet_bit = 1 << (sig_bits - 1);
    let nan = sign_bit - quiet_bit;
    let inf = nan - quiet_bit;

    let (mut x, status) = match *b {
        [b'i' | b'I', b'n' | b'N', b'f' | b'F'] => (inf, Status::OK),
        [b'n' | b'N', b'a' | b'A', b'n' | b'N'] => (nan, Status::OK),
        [b'0', b'x' | b'X', ref rest @ ..] => {
            let round = match (neg, round) {
                // parse("-x", Round::Positive) == -parse("x", Round::Negative)
                (true, Round::Positive) => Round::Negative,
                (true, Round::Negative) => Round::Positive,
                // rounding toward nearest or zero are symmetric
                (true, Round::Nearest | Round::Zero) | (false, _) => round,
            };
            match parse_finite(rest, bits, sig_bits, round) {
                Err(e) => return Err(e),
                Ok(res) => res,
            }
        }
        _ => return Err(HexFloatParseError("no hex indicator")),
    };

    if neg {
        x ^= sign_bit;
    }

    Ok((x, status))
}

const fn parse_finite(
    b: &[u8],
    bits: u32,
    sig_bits: u32,
    rounding_mode: Round,
) -> Result<(u128, Status), HexFloatParseError> {
    let exp_bits: u32 = bits - sig_bits - 1;
    let max_msb: i32 = (1 << (exp_bits - 1)) - 1;
    // The exponent of one ULP in the subnormals
    let min_lsb: i32 = 1 - max_msb - sig_bits as i32;

    let (mut sig, mut exp) = match parse_hex(b) {
        Err(e) => return Err(e),
        Ok(Parsed { sig: 0, .. }) => return Ok((0, Status::OK)),
        Ok(Parsed { sig, exp }) => (sig, exp),
    };

    let mut round_bits = u128_ilog2(sig) as i32 - sig_bits as i32;

    // Round at least up to min_lsb
    if exp < min_lsb - round_bits {
        round_bits = min_lsb - exp;
    }

    let mut status = Status::OK;

    exp += round_bits;

    if round_bits > 0 {
        // first, prepare for rounding exactly two bits
        if round_bits == 1 {
            sig <<= 1;
        } else if round_bits > 2 {
            sig = shr_odd_rounding(sig, (round_bits - 2) as u32);
        }

        if sig & 0b11 != 0 {
            status = Status::INEXACT;
        }

        sig = shr2_round(sig, rounding_mode);
    } else if round_bits < 0 {
        sig <<= -round_bits;
    }

    // The parsed value is X = sig * 2^exp
    // Expressed as a multiple U of the smallest subnormal value:
    // X = U * 2^min_lsb, so U = sig * 2^(exp-min_lsb)
    let uexp = (exp - min_lsb) as u128;
    let uexp = uexp << sig_bits;

    // Note that it is possible for the exponent bits to equal 2 here
    // if the value rounded up, but that means the mantissa is all zeroes
    // so the value is still correct
    debug_assert!(sig <= 2 << sig_bits);

    let inf = ((1 << exp_bits) - 1) << sig_bits;

    let bits = match sig.checked_add(uexp) {
        Some(bits) if bits < inf => {
            // inexact subnormal or zero?
            if status.inexact() && bits < (1 << sig_bits) {
                status = status.with(Status::UNDERFLOW);
            }
            bits
        }
        _ => {
            // overflow to infinity
            status = status.with(Status::OVERFLOW).with(Status::INEXACT);
            match rounding_mode {
                Round::Positive | Round::Nearest => inf,
                Round::Negative | Round::Zero => inf - 1,
            }
        }
    };
    Ok((bits, status))
}

/// Shift right, rounding all inexact divisions to the nearest odd number
/// E.g. (0 >> 4) -> 0, (1..=31 >> 4) -> 1, (32 >> 4) -> 2, ...
///
/// Useful for reducing a number before rounding the last two bits, since
/// the result of the final rounding is preserved for all rounding modes.
const fn shr_odd_rounding(x: u128, k: u32) -> u128 {
    if k < 128 {
        let inexact = x.trailing_zeros() < k;
        (x >> k) | (inexact as u128)
    } else {
        (x != 0) as u128
    }
}

/// Divide by 4, rounding with the given mode
const fn shr2_round(mut x: u128, round: Round) -> u128 {
    let t = (x as u32) & 0b111;
    x >>= 2;
    match round {
        // Look-up-table on the last three bits for when to round up
        Round::Nearest => x + ((0b11001000_u8 >> t) & 1) as u128,

        Round::Negative => x,
        Round::Zero => x,
        Round::Positive => x + (t & 0b11 != 0) as u128,
    }
}

/// A parsed finite and unsigned floating point number.
struct Parsed {
    /// Absolute value sig * 2^exp
    sig: u128,
    exp: i32,
}

/// Parse a hexadecimal float x
const fn parse_hex(mut b: &[u8]) -> Result<Parsed, HexFloatParseError> {
    let mut sig: u128 = 0;
    let mut exp: i32 = 0;

    let mut seen_point = false;
    let mut some_digits = false;
    let mut inexact = false;

    while let &[c, ref rest @ ..] = b {
        b = rest;

        match c {
            b'.' => {
                if seen_point {
                    return Err(HexFloatParseError("unexpected '.' parsing fractional digits"));
                }
                seen_point = true;
                continue;
            }
            b'p' | b'P' => break,
            c => {
                let digit = match hex_digit(c) {
                    Some(d) => d,
                    None => return Err(HexFloatParseError("expected hexadecimal digit")),
                };
                some_digits = true;

                if (sig >> 124) == 0 {
                    sig <<= 4;
                    sig |= digit as u128;
                } else {
                    // FIXME: it is technically possible for exp to overflow if parsing a string
                    // with >500M digits
                    exp += 4;
                    inexact |= digit != 0;
                }
                // Up until the fractional point, the value grows
                // with more digits, but after it the exponent is
                // compensated to match.
                if seen_point {
                    exp -= 4;
                }
            }
        }
    }
    // If we've set inexact, the exact value has more than 125
    // significant bits, and lies somewhere between sig and sig + 1.
    // Because we'll round off at least two of the trailing bits,
    // setting the last bit gives correct rounding for inexact values.
    sig |= inexact as u128;

    if !some_digits {
        return Err(HexFloatParseError("at least one digit is required"));
    };

    some_digits = false;

    let negate_exp = matches!(b, [b'-', ..]);
    if let &[b'-' | b'+', ref rest @ ..] = b {
        b = rest;
    }

    let mut pexp: u32 = 0;
    while let &[c, ref rest @ ..] = b {
        b = rest;
        let digit = match dec_digit(c) {
            Some(d) => d,
            None => return Err(HexFloatParseError("expected decimal digit")),
        };
        some_digits = true;
        pexp = pexp.saturating_mul(10);
        pexp += digit as u32;
    }

    if !some_digits {
        return Err(HexFloatParseError("at least one exponent digit is required"));
    };

    {
        let e;
        if negate_exp {
            e = (exp as i64) - (pexp as i64);
        } else {
            e = (exp as i64) + (pexp as i64);
        };

        exp = if e < i32::MIN as i64 {
            i32::MIN
        } else if e > i32::MAX as i64 {
            i32::MAX
        } else {
            e as i32
        };
    }
    /* FIXME(msrv): once MSRV >= 1.66, replace the above workaround block with:
    if negate_exp {
        exp = exp.saturating_sub_unsigned(pexp);
    } else {
        exp = exp.saturating_add_unsigned(pexp);
    };
    */

    Ok(Parsed { sig, exp })
}

const fn dec_digit(c: u8) -> Option<u8> {
    match c {
        b'0'..=b'9' => Some(c - b'0'),
        _ => None,
    }
}

const fn hex_digit(c: u8) -> Option<u8> {
    match c {
        b'0'..=b'9' => Some(c - b'0'),
        b'a'..=b'f' => Some(c - b'a' + 10),
        b'A'..=b'F' => Some(c - b'A' + 10),
        _ => None,
    }
}

/* FIXME(msrv): vendor some things that are not const stable at our MSRV */

/// `u128::ilog2`
const fn u128_ilog2(v: u128) -> u32 {
    assert!(v != 0);
    u128::BITS - 1 - v.leading_zeros()
}
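// Editorial sketch, not part of the original patch: driving `parse_any`
// directly shows the rounding-mode dependence for a value one bit finer than
// f32 precision. `0x1.000001p+0` is 1 + 2^-24, exactly halfway between 1.0
// and its f32 successor.
#[cfg(test)]
#[test]
fn parse_any_rounding_demo() {
    let (down, s_down) = parse_any("0x1.000001p+0", 32, 23, Round::Negative).unwrap();
    let (up, s_up) = parse_any("0x1.000001p+0", 32, 23, Round::Positive).unwrap();
    // Both directions report the dropped bit as inexact.
    assert!(s_down.inexact() && s_up.inexact());
    assert_eq!(f32::from_bits(down as u32), 1.0);
    assert_eq!(f32::from_bits(up as u32), 1.0 + f32::EPSILON);
}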
/// Format a floating point number as its IEEE hex (`%a`) representation.
pub struct Hexf<F>(pub F);

// Adapted from https://github.com/ericseppanen/hexfloat2/blob/a5c27932f0ff/src/format.rs
#[cfg(not(feature = "compiler-builtins"))]
fn fmt_any_hex<F: Float>(x: &F, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    if x.is_sign_negative() {
        write!(f, "-")?;
    }

    if x.is_nan() {
        return write!(f, "NaN");
    } else if x.is_infinite() {
        return write!(f, "inf");
    } else if *x == F::ZERO {
        return write!(f, "0x0p+0");
    }

    let mut exponent = x.exp_unbiased();
    let sig = x.to_bits() & F::SIG_MASK;

    let bias = F::EXP_BIAS as i32;
    // The mantissa MSB needs to be shifted up to the nearest nibble.
    let mshift = (4 - (F::SIG_BITS % 4)) % 4;
    let sig = sig << mshift;
    // The width is rounded up to the nearest char (4 bits)
    let mwidth = (F::SIG_BITS as usize + 3) / 4;
    let leading = if exponent == -bias {
        // subnormal number means we shift our output by 1 bit.
        exponent += 1;
        "0."
    } else {
        "1."
    };

    write!(f, "0x{leading}{sig:0mwidth$x}p{exponent:+}")
}

#[cfg(feature = "compiler-builtins")]
fn fmt_any_hex<F: Float>(_x: &F, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
    unimplemented!()
}

impl<F: Float> fmt::LowerHex for Hexf<F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                fmt_any_hex(&self.0, f)
            }
        }
    }
}

impl<F: Float> fmt::LowerHex for Hexf<(F, F)> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                write!(f, "({:x}, {:x})", Hexf(self.0.0), Hexf(self.0.1))
            }
        }
    }
}

impl<F: Float> fmt::LowerHex for Hexf<(F, i32)> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                write!(f, "({:x}, {:x})", Hexf(self.0.0), Hexf(self.0.1))
            }
        }
    }
}

impl fmt::LowerHex for Hexf<i32> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                fmt::LowerHex::fmt(&self.0, f)
            }
        }
    }
}

impl<T> fmt::Debug for Hexf<T>
where
    Hexf<T>: fmt::LowerHex,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                fmt::LowerHex::fmt(self, f)
            }
        }
    }
}

impl<T> fmt::Display for Hexf<T>
where
    Hexf<T>: fmt::LowerHex,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        cfg_if! {
            if #[cfg(feature = "compiler-builtins")] {
                let _ = f;
                unimplemented!()
            } else {
                fmt::LowerHex::fmt(self, f)
            }
        }
    }
}
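// Editorial note, not part of the original patch: unlike typical C `%a`
// output, `Hexf` zero-pads the significand to its full nibble width, e.g.:
//
//     format!("{}", Hexf(0.5f64))  == "0x1.0000000000000p-1"
//     format!("{}", Hexf(-1.5f32)) == "-0x1.800000p+0"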
#[cfg(test)]
mod parse_tests {
    extern crate std;
    use std::{format, println};

    use super::*;

    #[cfg(f16_enabled)]
    fn rounding_properties(s: &str) -> Result<(), HexFloatParseError> {
        let (xd, s0) = parse_any(s, 16, 10, Round::Negative)?;
        let (xu, s1) = parse_any(s, 16, 10, Round::Positive)?;
        let (xz, s2) = parse_any(s, 16, 10, Round::Zero)?;
        let (xn, s3) = parse_any(s, 16, 10, Round::Nearest)?;

        // FIXME: A value between the least normal and largest subnormal
        // could have underflow status depend on rounding mode.

        if let Status::OK = s0 {
            // an exact result is the same for all rounding modes
            assert_eq!(s0, s1);
            assert_eq!(s0, s2);
            assert_eq!(s0, s3);

            assert_eq!(xd, xu);
            assert_eq!(xd, xz);
            assert_eq!(xd, xn);
        } else {
            assert!([s0, s1, s2, s3].into_iter().all(Status::inexact));

            let xd = f16::from_bits(xd as u16);
            let xu = f16::from_bits(xu as u16);
            let xz = f16::from_bits(xz as u16);
            let xn = f16::from_bits(xn as u16);

            assert_biteq!(xd.next_up(), xu, "s={s}, xd={xd:?}, xu={xu:?}");

            let signs = [xd, xu, xz, xn].map(f16::is_sign_negative);

            if signs == [true; 4] {
                assert_biteq!(xz, xu);
            } else {
                assert_eq!(signs, [false; 4]);
                assert_biteq!(xz, xd);
            }

            if xn.to_bits() != xd.to_bits() {
                assert_biteq!(xn, xu);
            }
        }
        Ok(())
    }

    #[test]
    #[cfg(f16_enabled)]
    fn test_rounding() {
        let n = 1_i32 << 14;
        for i in -n..n {
            let u = i.rotate_right(11) as u32;
            let s = format!("{}", Hexf(f32::from_bits(u)));
            assert!(rounding_properties(&s).is_ok());
        }
    }

    #[test]
    fn test_parse_any() {
        for k in -149..=127 {
            let s = format!("0x1p{k}");
            let x = hf32(&s);
            let y = if k < 0 { 0.5f32.powi(-k) } else { 2.0f32.powi(k) };
            assert_eq!(x, y);
        }

        let mut s = *b"0x.0000000p-121";
        for e in 0..40 {
            for k in 0..(1 << 15) {
                let expected = f32::from_bits(k) * 2.0f32.powi(e);
                let x = hf32(std::str::from_utf8(&s).unwrap());
                assert_eq!(
                    x.to_bits(),
                    expected.to_bits(),
                    "\
                    e={e}\n\
                    k={k}\n\
                    x={x}\n\
                    expected={expected}\n\
                    s={}\n\
                    f32::from_bits(k)={}\n\
                    2.0f32.powi(e)={}\
                    ",
                    std::str::from_utf8(&s).unwrap(),
                    f32::from_bits(k),
                    2.0f32.powi(e),
                );
                for i in (3..10).rev() {
                    if s[i] == b'f' {
                        s[i] = b'0';
                    } else if s[i] == b'9' {
                        s[i] = b'a';
                        break;
                    } else {
                        s[i] += 1;
                        break;
                    }
                }
            }
            for i in (12..15).rev() {
                if s[i] == b'0' {
                    s[i] = b'9';
                } else {
                    s[i] -= 1;
                    break;
                }
            }
            for i in (3..10).rev() {
                s[i] = b'0';
            }
        }
    }

    // FIXME: this test is causing failures that are likely UB on various platforms
    #[cfg(all(target_arch = "x86_64", target_os = "linux"))]
    #[test]
    #[cfg(f128_enabled)]
    fn rounding() {
        let pi = std::f128::consts::PI;
        let s = format!("{}", Hexf(pi));

        for k in 0..=111 {
            let (bits, status) = parse_any(&s, 128 - k, 112 - k, Round::Nearest).unwrap();
            let scale = (1u128 << (112 - k - 1)) as f128;
            let expected = (pi * scale).round_ties_even() / scale;
            assert_eq!(bits << k, expected.to_bits(), "k = {k}, s = {s}");
            assert_eq!(expected != pi, status.inexact());
        }
    }

    #[test]
    fn rounding_extreme_underflow() {
        for k in 1..1000 {
            let s = format!("0x1p{}", -149 - k);
            let Ok((bits, status)) = parse_any(&s, 32, 23, Round::Nearest) else { unreachable!() };
            assert_eq!(bits, 0, "{s} should round to zero, got bits={bits}");
            assert!(status.underflow(), "should indicate underflow when parsing {s}");
            assert!(status.inexact(), "should indicate inexact when parsing {s}");
        }
    }

    #[test]
    fn long_tail() {
        for k in 1..1000 {
            let s = format!("0x1.{}p0", "0".repeat(k));
            let Ok(bits) = parse_hex_exact(&s, 32, 23) else { panic!("parsing {s} failed") };
            assert_eq!(f32::from_bits(bits as u32), 1.0);

            let s = format!("0x1.{}1p0", "0".repeat(k));
            let Ok((bits, status)) = parse_any(&s, 32, 23, Round::Nearest) else { unreachable!() };
            if status.inexact() {
                assert!(1.0 == f32::from_bits(bits as u32));
            } else {
                assert!(1.0 < f32::from_bits(bits as u32));
            }
        }
    }
    // HACK(msrv): 1.63 rejects unknown width float literals at an AST level, so use a macro to
    // hide them from the AST.
    #[cfg(f16_enabled)]
    macro_rules! f16_tests {
        () => {
            #[test]
            fn test_f16() {
                let checks = [
                    ("0x.1234p+16", (0x1234 as f16).to_bits()),
                    ("0x1.234p+12", (0x1234 as f16).to_bits()),
                    ("0x12.34p+8", (0x1234 as f16).to_bits()),
                    ("0x123.4p+4", (0x1234 as f16).to_bits()),
                    ("0x1234p+0", (0x1234 as f16).to_bits()),
                    ("0x1234.p+0", (0x1234 as f16).to_bits()),
                    ("0x1234.0p+0", (0x1234 as f16).to_bits()),
                    ("0x1.ffcp+15", f16::MAX.to_bits()),
                    ("0x1.0p+1", 2.0f16.to_bits()),
                    ("0x1.0p+0", 1.0f16.to_bits()),
                    ("0x1.ffp+8", 0x5ffc),
                    ("+0x1.ffp+8", 0x5ffc),
                    ("0x1p+0", 0x3c00),
                    ("0x1.998p-4", 0x2e66),
                    ("0x1.9p+6", 0x5640),
                    ("0x0.0p0", 0.0f16.to_bits()),
                    ("-0x0.0p0", (-0.0f16).to_bits()),
                    ("0x1.0p0", 1.0f16.to_bits()),
                    ("0x1.998p-4", (0.1f16).to_bits()),
                    ("-0x1.998p-4", (-0.1f16).to_bits()),
                    ("0x0.123p-12", 0x0123),
                    ("0x1p-24", 0x0001),
                    ("nan", f16::NAN.to_bits()),
                    ("-nan", (-f16::NAN).to_bits()),
                    ("inf", f16::INFINITY.to_bits()),
                    ("-inf", f16::NEG_INFINITY.to_bits()),
                ];
                for (s, exp) in checks {
                    println!("parsing {s}");
                    assert!(rounding_properties(s).is_ok());
                    let act = hf16(s).to_bits();
                    assert_eq!(
                        act, exp,
                        "parsing {s}: {act:#06x} != {exp:#06x}\nact: {act:#018b}\nexp: {exp:#018b}"
                    );
                }
            }

            #[test]
            fn test_macros_f16() {
                assert_eq!(hf16!("0x1.ffp+8").to_bits(), 0x5ffc_u16);
            }
        };
    }

    #[cfg(f16_enabled)]
    f16_tests!();

    #[test]
    fn test_f32() {
        let checks = [
            ("0x.1234p+16", (0x1234 as f32).to_bits()),
            ("0x1.234p+12", (0x1234 as f32).to_bits()),
            ("0x12.34p+8", (0x1234 as f32).to_bits()),
            ("0x123.4p+4", (0x1234 as f32).to_bits()),
            ("0x1234p+0", (0x1234 as f32).to_bits()),
            ("0x1234.p+0", (0x1234 as f32).to_bits()),
            ("0x1234.0p+0", (0x1234 as f32).to_bits()),
            ("0x1.fffffep+127", f32::MAX.to_bits()),
            ("0x1.0p+1", 2.0f32.to_bits()),
            ("0x1.0p+0", 1.0f32.to_bits()),
            ("0x1.ffep+8", 0x43fff000),
            ("+0x1.ffep+8", 0x43fff000),
            ("0x1p+0", 0x3f800000),
            ("0x1.99999ap-4", 0x3dcccccd),
            ("0x1.9p+6", 0x42c80000),
            ("0x1.2d5ed2p+20", 0x4996af69),
            ("-0x1.348eb8p+10", 0xc49a475c),
            ("-0x1.33dcfep-33", 0xaf19ee7f),
            ("0x0.0p0", 0.0f32.to_bits()),
            ("-0x0.0p0", (-0.0f32).to_bits()),
            ("0x1.0p0", 1.0f32.to_bits()),
            ("0x1.99999ap-4", (0.1f32).to_bits()),
            ("-0x1.99999ap-4", (-0.1f32).to_bits()),
            ("0x1.111114p-127", 0x00444445),
            ("0x1.23456p-130", 0x00091a2b),
            ("0x1p-149", 0x00000001),
            ("nan", f32::NAN.to_bits()),
            ("-nan", (-f32::NAN).to_bits()),
            ("inf", f32::INFINITY.to_bits()),
            ("-inf", f32::NEG_INFINITY.to_bits()),
        ];
        for (s, exp) in checks {
            println!("parsing {s}");
            let act = hf32(s).to_bits();
            assert_eq!(
                act, exp,
                "parsing {s}: {act:#010x} != {exp:#010x}\nact: {act:#034b}\nexp: {exp:#034b}"
            );
        }
    }

    #[test]
    fn test_f64() {
        let checks = [
            ("0x.1234p+16", (0x1234 as f64).to_bits()),
            ("0x1.234p+12", (0x1234 as f64).to_bits()),
            ("0x12.34p+8", (0x1234 as f64).to_bits()),
            ("0x123.4p+4", (0x1234 as f64).to_bits()),
            ("0x1234p+0", (0x1234 as f64).to_bits()),
            ("0x1234.p+0", (0x1234 as f64).to_bits()),
            ("0x1234.0p+0", (0x1234 as f64).to_bits()),
            ("0x1.ffep+8", 0x407ffe0000000000),
            ("0x1p+0", 0x3ff0000000000000),
            ("0x1.999999999999ap-4", 0x3fb999999999999a),
            ("0x1.9p+6", 0x4059000000000000),
            ("0x1.2d5ed1fe1da7bp+20", 0x4132d5ed1fe1da7b),
            ("-0x1.348eb851eb852p+10", 0xc09348eb851eb852),
            ("-0x1.33dcfe54a3803p-33", 0xbde33dcfe54a3803),
            ("0x1.0p0", 1.0f64.to_bits()),
            ("0x0.0p0", 0.0f64.to_bits()),
            ("-0x0.0p0", (-0.0f64).to_bits()),
            ("0x1.999999999999ap-4", 0.1f64.to_bits()),
            ("0x1.999999999998ap-4", (0.1f64 - f64::EPSILON).to_bits()),
            ("-0x1.999999999999ap-4", (-0.1f64).to_bits()),
            ("-0x1.999999999998ap-4", (-0.1f64 + f64::EPSILON).to_bits()),
            ("0x0.8000000000001p-1022", 0x0008000000000001),
            ("0x0.123456789abcdp-1022", 0x000123456789abcd),
            ("0x0.0000000000002p-1022", 0x0000000000000002),
            ("nan", f64::NAN.to_bits()),
            ("-nan", (-f64::NAN).to_bits()),
            ("inf", f64::INFINITY.to_bits()),
            ("-inf", f64::NEG_INFINITY.to_bits()),
        ];
        for (s, exp) in checks {
            println!("parsing {s}");
            let act = hf64(s).to_bits();
            assert_eq!(
                act, exp,
                "parsing {s}: {act:#018x} != {exp:#018x}\nact: {act:#066b}\nexp: {exp:#066b}"
            );
        }
    }

    // HACK(msrv): 1.63 rejects unknown width float literals at an AST level, so use a macro to
    // hide them from the AST.
    #[cfg(f128_enabled)]
    macro_rules! f128_tests {
        () => {
            #[test]
            fn test_f128() {
                let checks = [
                    ("0x.1234p+16", (0x1234 as f128).to_bits()),
                    ("0x1.234p+12", (0x1234 as f128).to_bits()),
                    ("0x12.34p+8", (0x1234 as f128).to_bits()),
                    ("0x123.4p+4", (0x1234 as f128).to_bits()),
                    ("0x1234p+0", (0x1234 as f128).to_bits()),
                    ("0x1234.p+0", (0x1234 as f128).to_bits()),
                    ("0x1234.0p+0", (0x1234 as f128).to_bits()),
                    ("0x1.ffffffffffffffffffffffffffffp+16383", f128::MAX.to_bits()),
                    ("0x1.0p+1", 2.0f128.to_bits()),
                    ("0x1.0p+0", 1.0f128.to_bits()),
                    ("0x1.ffep+8", 0x4007ffe0000000000000000000000000),
                    ("+0x1.ffep+8", 0x4007ffe0000000000000000000000000),
                    ("0x1p+0", 0x3fff0000000000000000000000000000),
                    ("0x1.999999999999999999999999999ap-4", 0x3ffb999999999999999999999999999a),
                    ("0x1.9p+6", 0x40059000000000000000000000000000),
                    ("0x0.0p0", 0.0f128.to_bits()),
                    ("-0x0.0p0", (-0.0f128).to_bits()),
                    ("0x1.0p0", 1.0f128.to_bits()),
                    ("0x1.999999999999999999999999999ap-4", (0.1f128).to_bits()),
                    ("-0x1.999999999999999999999999999ap-4", (-0.1f128).to_bits()),
                    ("0x0.abcdef0123456789abcdef012345p-16382", 0x0000abcdef0123456789abcdef012345),
                    ("0x1p-16494", 0x00000000000000000000000000000001),
                    ("nan", f128::NAN.to_bits()),
                    ("-nan", (-f128::NAN).to_bits()),
                    ("inf", f128::INFINITY.to_bits()),
                    ("-inf", f128::NEG_INFINITY.to_bits()),
                ];
                for (s, exp) in checks {
                    println!("parsing {s}");
                    let act = hf128(s).to_bits();
                    assert_eq!(
                        act, exp,
                        "parsing {s}: {act:#034x} != {exp:#034x}\nact: {act:#0130b}\nexp: {exp:#0130b}"
                    );
                }
            }

            #[test]
            fn test_macros_f128() {
                assert_eq!(hf128!("0x1.ffep+8").to_bits(), 0x4007ffe0000000000000000000000000_u128);
            }
        };
    }

    #[cfg(f128_enabled)]
    f128_tests!();

    #[test]
    fn test_macros() {
        #[cfg(f16_enabled)]
        assert_eq!(hf16!("0x1.ffp+8").to_bits(), 0x5ffc_u16);
        assert_eq!(hf32!("0x1.ffep+8").to_bits(), 0x43fff000_u32);
        assert_eq!(hf64!("0x1.ffep+8").to_bits(), 0x407ffe0000000000_u64);
        #[cfg(f128_enabled)]
        assert_eq!(hf128!("0x1.ffep+8").to_bits(), 0x4007ffe0000000000000000000000000_u128);
    }
}

#[cfg(test)]
// FIXME(ppc): something with `should_panic` tests cause a SIGILL with ppc64le
#[cfg(not(all(target_arch = "powerpc64", target_endian = "little")))]
mod tests_panicking {
    extern crate std;
    use super::*;

    // HACK(msrv): 1.63 rejects unknown width float literals at an AST level, so use a macro to
    // hide them from the AST.
    #[cfg(f16_enabled)]
    macro_rules! f16_tests {
        () => {
            #[test]
            fn test_f16_almost_extra_precision() {
                // Exact maximum precision allowed
                hf16("0x1.ffcp+0");
            }

            #[test]
            #[should_panic(expected = "the value is too precise")]
            fn test_f16_extra_precision() {
                // One bit more than the above.
                hf16("0x1.ffdp+0");
            }

            #[test]
            #[should_panic(expected = "the value is too huge")]
            fn test_f16_overflow() {
                // Just above the maximum finite value.
                hf16("0x1p+16");
            }

            #[test]
            fn test_f16_tiniest() {
                let x = hf16("0x1.p-24");
                let y = hf16("0x0.001p-12");
                let z = hf16("0x0.8p-23");
                assert_eq!(x, y);
                assert_eq!(x, z);
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f16_too_tiny() {
                hf16("0x1.p-25");
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f16_also_too_tiny() {
                hf16("0x0.8p-24");
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f16_again_too_tiny() {
                hf16("0x0.001p-13");
            }
        };
    }

    #[cfg(f16_enabled)]
    f16_tests!();

    #[test]
    fn test_f32_almost_extra_precision() {
        // Exact maximum precision allowed
        hf32("0x1.abcdeep+0");
    }

    #[test]
    #[should_panic]
    fn test_f32_extra_precision2() {
        // One more bit of precision than `f32` can hold near its maximum.
        hf32("0x1.ffffffp+127");
    }

    #[test]
    #[should_panic(expected = "the value is too huge")]
    fn test_f32_overflow() {
        // Just above the maximum finite value.
        hf32("0x1p+128");
    }

    #[test]
    #[should_panic(expected = "the value is too precise")]
    fn test_f32_extra_precision() {
        // One bit more than `test_f32_almost_extra_precision` above.
        hf32("0x1.abcdefp+0");
    }

    #[test]
    fn test_f32_tiniest() {
        let x = hf32("0x1.p-149");
        let y = hf32("0x0.0000000000000001p-85");
        let z = hf32("0x0.8p-148");
        assert_eq!(x, y);
        assert_eq!(x, z);
    }

    #[test]
    #[should_panic(expected = "the value is too tiny")]
    fn test_f32_too_tiny() {
        hf32("0x1.p-150");
    }

    #[test]
    #[should_panic(expected = "the value is too tiny")]
    fn test_f32_also_too_tiny() {
        hf32("0x0.8p-149");
    }

    #[test]
    #[should_panic(expected = "the value is too tiny")]
    fn test_f32_again_too_tiny() {
        hf32("0x0.0000000000000001p-86");
    }

    #[test]
    fn test_f64_almost_extra_precision() {
        // Exact maximum precision allowed
        hf64("0x1.abcdabcdabcdfp+0");
    }

    #[test]
    #[should_panic(expected = "the value is too precise")]
    fn test_f64_extra_precision() {
        // One bit more than the above.
        hf64("0x1.abcdabcdabcdf8p+0");
    }

    // HACK(msrv): 1.63 rejects unknown width float literals at an AST level, so use a macro to
    // hide them from the AST.
    #[cfg(f128_enabled)]
    macro_rules! f128_tests {
        () => {
            #[test]
            fn test_f128_almost_extra_precision() {
                // Exact maximum precision allowed
                hf128("0x1.ffffffffffffffffffffffffffffp+16383");
            }

            #[test]
            #[should_panic(expected = "the value is too precise")]
            fn test_f128_extra_precision() {
                // Just below the maximum finite.
                hf128("0x1.fffffffffffffffffffffffffffe8p+16383");
            }

            #[test]
            #[should_panic(expected = "the value is too huge")]
            fn test_f128_extra_precision_overflow() {
                // One bit more than the above. Should overflow.
                hf128("0x1.ffffffffffffffffffffffffffff8p+16383");
            }

            #[test]
            #[should_panic(expected = "the value is too huge")]
            fn test_f128_overflow() {
                // Just above the maximum finite value.
                hf128("0x1p+16384");
            }

            #[test]
            fn test_f128_tiniest() {
                let x = hf128("0x1.p-16494");
                let y = hf128("0x0.0000000000000001p-16430");
                let z = hf128("0x0.8p-16493");
                assert_eq!(x, y);
                assert_eq!(x, z);
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f128_too_tiny() {
                hf128("0x1.p-16495");
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f128_again_too_tiny() {
                hf128("0x0.0000000000000001p-16431");
            }

            #[test]
            #[should_panic(expected = "the value is too tiny")]
            fn test_f128_also_too_tiny() {
                hf128("0x0.8p-16494");
            }
        };
    }

    #[cfg(f128_enabled)]
    f128_tests!();
}

#[cfg(test)]
mod print_tests {
    extern crate std;
    use std::string::ToString;

    use super::*;

    #[test]
    #[cfg(f16_enabled)]
    fn test_f16() {
        use std::format;
        // Exhaustively check that `f16` roundtrips.
        for x in 0..=u16::MAX {
            let f = f16::from_bits(x);
            let s = format!("{}", Hexf(f));
            let from_s = hf16(&s);

            if f.is_nan() && from_s.is_nan() {
                continue;
            }

            assert_eq!(
                f.to_bits(),
                from_s.to_bits(),
                "{f:?} formatted as {s} but parsed as {from_s:?}"
            );
        }
    }

    #[test]
    #[cfg(f16_enabled)]
    fn test_f16_to_f32() {
        use std::format;
        // Exhaustively check that these are equivalent for all `f16`:
        // - `f16 -> f32`
        // - `f16 -> str -> f32`
        // - `f16 -> f32 -> str -> f32`
        // - `f16 -> f32 -> str -> f16 -> f32`
        for x in 0..=u16::MAX {
            let f16 = f16::from_bits(x);
            let s16 = format!("{}", Hexf(f16));
            let f32 = f16 as f32;
            let s32 = format!("{}", Hexf(f32));

            let a = hf32(&s16);
            let b = hf32(&s32);
            let c = hf16(&s32);

            if f32.is_nan() && a.is_nan() && b.is_nan() && c.is_nan() {
                continue;
            }

            assert_eq!(
                f32.to_bits(),
                a.to_bits(),
                "{f16:?} : f16 formatted as {s16} which parsed as {a:?} : f16"
            );
            assert_eq!(
                f32.to_bits(),
                b.to_bits(),
                "{f32:?} : f32 formatted as {s32} which parsed as {b:?} : f32"
            );
            assert_eq!(
                f32.to_bits(),
                (c as f32).to_bits(),
                "{f32:?} : f32 formatted as {s32} which parsed as {c:?} : f16"
            );
        }
    }

    #[test]
    fn spot_checks() {
        assert_eq!(Hexf(f32::MAX).to_string(), "0x1.fffffep+127");
        assert_eq!(Hexf(f64::MAX).to_string(), "0x1.fffffffffffffp+1023");

        assert_eq!(Hexf(f32::MIN).to_string(), "-0x1.fffffep+127");
        assert_eq!(Hexf(f64::MIN).to_string(), "-0x1.fffffffffffffp+1023");

        assert_eq!(Hexf(f32::ZERO).to_string(), "0x0p+0");
        assert_eq!(Hexf(f64::ZERO).to_string(), "0x0p+0");

        assert_eq!(Hexf(f32::NEG_ZERO).to_string(), "-0x0p+0");
        assert_eq!(Hexf(f64::NEG_ZERO).to_string(), "-0x0p+0");

        assert_eq!(Hexf(f32::NAN).to_string(), "NaN");
        assert_eq!(Hexf(f64::NAN).to_string(), "NaN");

        assert_eq!(Hexf(f32::INFINITY).to_string(), "inf");
        assert_eq!(Hexf(f64::INFINITY).to_string(), "inf");

        assert_eq!(Hexf(f32::NEG_INFINITY).to_string(), "-inf");
        assert_eq!(Hexf(f64::NEG_INFINITY).to_string(), "-inf");

        #[cfg(f16_enabled)]
        {
            assert_eq!(Hexf(f16::MAX).to_string(), "0x1.ffcp+15");
            assert_eq!(Hexf(f16::MIN).to_string(), "-0x1.ffcp+15");
            assert_eq!(Hexf(f16::ZERO).to_string(), "0x0p+0");
            assert_eq!(Hexf(f16::NEG_ZERO).to_string(), "-0x0p+0");
            assert_eq!(Hexf(f16::NAN).to_string(), "NaN");
            assert_eq!(Hexf(f16::INFINITY).to_string(), "inf");
            assert_eq!(Hexf(f16::NEG_INFINITY).to_string(), "-inf");
        }

        #[cfg(f128_enabled)]
        {
            assert_eq!(Hexf(f128::MAX).to_string(), "0x1.ffffffffffffffffffffffffffffp+16383");
"-0x1.ffffffffffffffffffffffffffffp+16383"); + assert_eq!(Hexf(f128::ZERO).to_string(), "0x0p+0"); + assert_eq!(Hexf(f128::NEG_ZERO).to_string(), "-0x0p+0"); + assert_eq!(Hexf(f128::NAN).to_string(), "NaN"); + assert_eq!(Hexf(f128::INFINITY).to_string(), "inf"); + assert_eq!(Hexf(f128::NEG_INFINITY).to_string(), "-inf"); + } + } +} diff --git a/library/compiler-builtins/libm/src/math/support/int_traits.rs b/library/compiler-builtins/libm/src/math/support/int_traits.rs new file mode 100644 index 00000000000..491adb1f22c --- /dev/null +++ b/library/compiler-builtins/libm/src/math/support/int_traits.rs @@ -0,0 +1,451 @@ +use core::{cmp, fmt, ops}; + +/// Minimal integer implementations needed on all integer types, including wide integers. +pub trait MinInt: + Copy + + fmt::Debug + + ops::BitOr<Output = Self> + + ops::Not<Output = Self> + + ops::Shl<u32, Output = Self> +{ + /// Type with the same width but other signedness + type OtherSign: MinInt; + /// Unsigned version of Self + type Unsigned: MinInt; + + /// If `Self` is a signed integer + const SIGNED: bool; + + /// The bitwidth of the int type + const BITS: u32; + + const ZERO: Self; + const ONE: Self; + const MIN: Self; + const MAX: Self; +} + +/// Access the associated `OtherSign` type from an int (helper to avoid ambiguous associated +/// types). +pub type OtherSign<I> = <I as MinInt>::OtherSign; + +/// Trait for some basic operations on integers +#[allow(dead_code)] +pub trait Int: + MinInt + + fmt::Display + + fmt::Binary + + fmt::LowerHex + + PartialEq + + PartialOrd + + ops::AddAssign + + ops::SubAssign + + ops::BitAndAssign + + ops::BitOrAssign + + ops::BitXorAssign + + ops::ShlAssign<i32> + + ops::ShlAssign<u32> + + ops::ShrAssign<u32> + + ops::ShrAssign<i32> + + ops::Add<Output = Self> + + ops::Sub<Output = Self> + + ops::Mul<Output = Self> + + ops::Div<Output = Self> + + ops::Shl<i32, Output = Self> + + ops::Shl<u32, Output = Self> + + ops::Shr<i32, Output = Self> + + ops::Shr<u32, Output = Self> + + ops::BitXor<Output = Self> + + ops::BitAnd<Output = Self> + + cmp::Ord + + From<bool> + + CastFrom<i32> + + CastFrom<u16> + + CastFrom<u32> + + CastFrom<u8> + + CastFrom<usize> + + CastInto<i32> + + CastInto<u16> + + CastInto<u32> + + CastInto<u8> + + CastInto<usize> +{ + fn signed(self) -> OtherSign<Self::Unsigned>; + fn unsigned(self) -> Self::Unsigned; + fn from_unsigned(unsigned: Self::Unsigned) -> Self; + fn abs(self) -> Self; + + fn from_bool(b: bool) -> Self; + + /// Prevents the need for excessive conversions between signed and unsigned + fn logical_shr(self, other: u32) -> Self; + + /// Absolute difference between two integers. + fn abs_diff(self, other: Self) -> Self::Unsigned; + + // copied from primitive integers, but put in a trait + fn is_zero(self) -> bool; + fn checked_add(self, other: Self) -> Option<Self>; + fn checked_sub(self, other: Self) -> Option<Self>; + fn wrapping_neg(self) -> Self; + fn wrapping_add(self, other: Self) -> Self; + fn wrapping_mul(self, other: Self) -> Self; + fn wrapping_sub(self, other: Self) -> Self; + fn wrapping_shl(self, other: u32) -> Self; + fn wrapping_shr(self, other: u32) -> Self; + fn rotate_left(self, other: u32) -> Self; + fn overflowing_add(self, other: Self) -> (Self, bool); + fn overflowing_sub(self, other: Self) -> (Self, bool); + fn leading_zeros(self) -> u32; + fn ilog2(self) -> u32; +} + +macro_rules! 
+
+macro_rules! int_impl_common {
+    ($ty:ty) => {
+        fn from_bool(b: bool) -> Self {
+            b as $ty
+        }
+
+        fn logical_shr(self, other: u32) -> Self {
+            Self::from_unsigned(self.unsigned().wrapping_shr(other))
+        }
+
+        fn is_zero(self) -> bool {
+            self == Self::ZERO
+        }
+
+        fn checked_add(self, other: Self) -> Option<Self> {
+            self.checked_add(other)
+        }
+
+        fn checked_sub(self, other: Self) -> Option<Self> {
+            self.checked_sub(other)
+        }
+
+        fn wrapping_neg(self) -> Self {
+            <Self>::wrapping_neg(self)
+        }
+
+        fn wrapping_add(self, other: Self) -> Self {
+            <Self>::wrapping_add(self, other)
+        }
+
+        fn wrapping_mul(self, other: Self) -> Self {
+            <Self>::wrapping_mul(self, other)
+        }
+
+        fn wrapping_sub(self, other: Self) -> Self {
+            <Self>::wrapping_sub(self, other)
+        }
+
+        fn wrapping_shl(self, other: u32) -> Self {
+            <Self>::wrapping_shl(self, other)
+        }
+
+        fn wrapping_shr(self, other: u32) -> Self {
+            <Self>::wrapping_shr(self, other)
+        }
+
+        fn rotate_left(self, other: u32) -> Self {
+            <Self>::rotate_left(self, other)
+        }
+
+        fn overflowing_add(self, other: Self) -> (Self, bool) {
+            <Self>::overflowing_add(self, other)
+        }
+
+        fn overflowing_sub(self, other: Self) -> (Self, bool) {
+            <Self>::overflowing_sub(self, other)
+        }
+
+        fn leading_zeros(self) -> u32 {
+            <Self>::leading_zeros(self)
+        }
+
+        fn ilog2(self) -> u32 {
+            // On our older MSRV, this resolves to the trait method, which won't actually
+            // work; however, it is only called behind other gates.
+            #[allow(clippy::incompatible_msrv)]
+            <Self>::ilog2(self)
+        }
+    };
+}
+
+macro_rules! int_impl {
+    ($ity:ty, $uty:ty) => {
+        impl MinInt for $uty {
+            type OtherSign = $ity;
+            type Unsigned = $uty;
+
+            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
+            const SIGNED: bool = Self::MIN != Self::ZERO;
+
+            const ZERO: Self = 0;
+            const ONE: Self = 1;
+            const MIN: Self = <Self>::MIN;
+            const MAX: Self = <Self>::MAX;
+        }
+
+        impl Int for $uty {
+            fn signed(self) -> $ity {
+                self as $ity
+            }
+
+            fn unsigned(self) -> Self {
+                self
+            }
+
+            fn abs(self) -> Self {
+                unimplemented!()
+            }
+
+            // It makes writing macros easier if this is implemented for both signed and unsigned
+            #[allow(clippy::wrong_self_convention)]
+            fn from_unsigned(me: $uty) -> Self {
+                me
+            }
+
+            fn abs_diff(self, other: Self) -> Self {
+                self.abs_diff(other)
+            }
+
+            int_impl_common!($uty);
+        }
+
+        impl MinInt for $ity {
+            type OtherSign = $uty;
+            type Unsigned = $uty;
+
+            const BITS: u32 = <Self as MinInt>::ZERO.count_zeros();
+            const SIGNED: bool = Self::MIN != Self::ZERO;
+
+            const ZERO: Self = 0;
+            const ONE: Self = 1;
+            const MIN: Self = <Self>::MIN;
+            const MAX: Self = <Self>::MAX;
+        }
+
+        impl Int for $ity {
+            fn signed(self) -> Self {
+                self
+            }
+
+            fn unsigned(self) -> $uty {
+                self as $uty
+            }
+
+            fn abs(self) -> Self {
+                self.abs()
+            }
+
+            fn from_unsigned(me: $uty) -> Self {
+                me as $ity
+            }
+
+            fn abs_diff(self, other: Self) -> $uty {
+                self.abs_diff(other)
+            }
+
+            int_impl_common!($ity);
+        }
+    };
+}
+
+int_impl!(isize, usize);
+int_impl!(i8, u8);
+int_impl!(i16, u16);
+int_impl!(i32, u32);
+int_impl!(i64, u64);
+int_impl!(i128, u128);
+
+/// Trait for integers twice the bit width of another integer. This is implemented for all
+/// primitives except for `u8`, because there is no smaller primitive.
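+///
+/// For example, splitting a `u64` into its `u32` halves (illustrative values):
+///
+/// ```ignore
+/// let x: u64 = 0x1122_3344_5566_7788;
+/// assert_eq!(x.hi(), 0x1122_3344_u32);
+/// assert_eq!(x.lo(), 0x5566_7788_u32);
+/// ```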
+pub trait DInt: MinInt {
+    /// Integer that is half the bit width of the integer this trait is implemented for
+    type H: HInt<D = Self>;
+
+    /// Returns the low half of `self`
+    fn lo(self) -> Self::H;
+    /// Returns the high half of `self`
+    fn hi(self) -> Self::H;
+    /// Returns the low and high halves of `self` as a tuple
+    fn lo_hi(self) -> (Self::H, Self::H) {
+        (self.lo(), self.hi())
+    }
+    /// Constructs an integer using lower and higher half parts
+    #[allow(unused)]
+    fn from_lo_hi(lo: Self::H, hi: Self::H) -> Self {
+        lo.zero_widen() | hi.widen_hi()
+    }
+}
+
+/// Trait for integers half the bit width of another integer. This is implemented for all
+/// primitives except for `u128`, because there is no larger primitive.
+pub trait HInt: Int {
+    /// Integer that is double the bit width of the integer this trait is implemented for
+    type D: DInt<H = Self> + MinInt;
+
+    // NB: some of the below methods could have default implementations (e.g. `widen_hi`), but for
+    // unknown reasons this can cause infinite recursion when optimizations are disabled. See
+    // <https://github.com/rust-lang/compiler-builtins/pull/707> for context.
+
+    /// Widens (using default extension) the integer to have double bit width
+    fn widen(self) -> Self::D;
+    /// Widens (zero extension only) the integer to have double bit width. This is needed to get
+    /// around problems with associated type bounds (such as `Int<OtherSign: DInt>`) being unstable
+    fn zero_widen(self) -> Self::D;
+    /// Widens the integer to have double bit width and shifts the integer into the higher bits
+    #[allow(unused)]
+    fn widen_hi(self) -> Self::D;
+    /// Widening multiplication with zero widening. This cannot overflow.
+    fn zero_widen_mul(self, rhs: Self) -> Self::D;
+    /// Widening multiplication. This cannot overflow.
+    fn widen_mul(self, rhs: Self) -> Self::D;
+}
+
+macro_rules! impl_d_int {
+    ($($X:ident $D:ident),*) => {
+        $(
+            impl DInt for $D {
+                type H = $X;
+
+                fn lo(self) -> Self::H {
+                    self as $X
+                }
+                fn hi(self) -> Self::H {
+                    (self >> <$X as MinInt>::BITS) as $X
+                }
+            }
+        )*
+    };
+}
+
+macro_rules! impl_h_int {
+    ($($H:ident $uH:ident $X:ident),*) => {
+        $(
+            impl HInt for $H {
+                type D = $X;
+
+                fn widen(self) -> Self::D {
+                    self as $X
+                }
+                fn zero_widen(self) -> Self::D {
+                    (self as $uH) as $X
+                }
+                fn zero_widen_mul(self, rhs: Self) -> Self::D {
+                    self.zero_widen().wrapping_mul(rhs.zero_widen())
+                }
+                fn widen_mul(self, rhs: Self) -> Self::D {
+                    self.widen().wrapping_mul(rhs.widen())
+                }
+                fn widen_hi(self) -> Self::D {
+                    (self as $X) << <Self as MinInt>::BITS
+                }
+            }
+        )*
+    };
+}
+
+impl_d_int!(u8 u16, u16 u32, u32 u64, u64 u128, i8 i16, i16 i32, i32 i64, i64 i128);
+impl_h_int!(
+    u8 u8 u16,
+    u16 u16 u32,
+    u32 u32 u64,
+    u64 u64 u128,
+    i8 u8 i16,
+    i16 u16 i32,
+    i32 u32 i64,
+    i64 u64 i128
+);
+
+/// Trait to express (possibly lossy) casting of integers
+pub trait CastInto<T: Copy>: Copy {
+    /// By default, casts should be exact.
+    fn cast(self) -> T;
+
+    /// Call for casts that are expected to truncate.
+    fn cast_lossy(self) -> T;
+}
+
+pub trait CastFrom<T: Copy>: Copy {
+    /// By default, casts should be exact.
+    fn cast_from(value: T) -> Self;
+
+    /// Call for casts that are expected to truncate.
+    fn cast_from_lossy(value: T) -> Self;
+}
+
+impl<T: Copy, U: CastInto<T> + Copy> CastFrom<U> for T {
+    fn cast_from(value: U) -> Self {
+        value.cast()
+    }
+
+    fn cast_from_lossy(value: U) -> Self {
+        value.cast_lossy()
+    }
+}
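+
+// Illustrative: `let b: u8 = 300u32.cast_lossy();` truncates to `44`, while
+// `let b: u8 = 300u32.cast();` would trip the debug assertion in the macro
+// below, since the value does not fit in the target type.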
+
+macro_rules! cast_into {
+    ($ty:ty) => {
+        cast_into!($ty; usize, isize, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128);
+    };
+    ($ty:ty; $($into:ty),*) => {$(
+        impl CastInto<$into> for $ty {
+            fn cast(self) -> $into {
+                // All we can really do to enforce casting rules is check the rules when in
+                // debug mode.
+                #[cfg(not(feature = "compiler-builtins"))]
+                debug_assert!(<$into>::try_from(self).is_ok(), "failed cast from {self}");
+                self as $into
+            }
+
+            fn cast_lossy(self) -> $into {
+                self as $into
+            }
+        }
+    )*};
+}
+
+macro_rules! cast_into_float {
+    ($ty:ty) => {
+        #[cfg(f16_enabled)]
+        cast_into_float!($ty; f16);
+
+        cast_into_float!($ty; f32, f64);
+
+        #[cfg(f128_enabled)]
+        cast_into_float!($ty; f128);
+    };
+    ($ty:ty; $($into:ty),*) => {$(
+        impl CastInto<$into> for $ty {
+            fn cast(self) -> $into {
+                #[cfg(not(feature = "compiler-builtins"))]
+                debug_assert_eq!(self as $into as $ty, self, "inexact float cast");
+                self as $into
+            }
+
+            fn cast_lossy(self) -> $into {
+                self as $into
+            }
+        }
+    )*};
+}
+
+cast_into!(usize);
+cast_into!(isize);
+cast_into!(u8);
+cast_into!(i8);
+cast_into!(u16);
+cast_into!(i16);
+cast_into!(u32);
+cast_into!(i32);
+cast_into!(u64);
+cast_into!(i64);
+cast_into!(u128);
+cast_into!(i128);
+
+cast_into_float!(i8);
+cast_into_float!(i16);
+cast_into_float!(i32);
+cast_into_float!(i64);
+cast_into_float!(i128);
diff --git a/library/compiler-builtins/libm/src/math/support/macros.rs b/library/compiler-builtins/libm/src/math/support/macros.rs
new file mode 100644
index 00000000000..0b72db0e46e
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/macros.rs
@@ -0,0 +1,157 @@
+/// `libm` cannot have dependencies, so this is vendored directly from the `cfg-if` crate
+/// (with some comments stripped for compactness).
+macro_rules! cfg_if {
+    // match if/else chains with a final `else`
+    ($(
+        if #[cfg($meta:meta)] { $($tokens:tt)* }
+    ) else * else {
+        $($tokens2:tt)*
+    }) => {
+        cfg_if! { @__items () ; $( ( ($meta) ($($tokens)*) ), )* ( () ($($tokens2)*) ), }
+    };
+
+    // match if/else chains lacking a final `else`
+    (
+        if #[cfg($i_met:meta)] { $($i_tokens:tt)* }
+        $( else if #[cfg($e_met:meta)] { $($e_tokens:tt)* } )*
+    ) => {
+        cfg_if! {
+            @__items
+            () ;
+            ( ($i_met) ($($i_tokens)*) ),
+            $( ( ($e_met) ($($e_tokens)*) ), )*
+            ( () () ),
+        }
+    };
+
+    // Internal and recursive macro to emit all the items
+    //
+    // Collects all the negated cfgs in a list at the beginning and after the
+    // semicolon is all the remaining items
+    (@__items ($($not:meta,)*) ; ) => {};
+    (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($tokens:tt)*) ), $($rest:tt)*) => {
+        #[cfg(all($($m,)* not(any($($not),*))))] cfg_if! { @__identity $($tokens)* }
+        cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* }
+    };
+
+    // Internal macro to make __apply work out right for different match types,
+    // because of how macro matching/expansion works.
+    (@__identity $($tokens:tt)*) => { $($tokens)* };
+}
+
+/// Choose between using an arch-specific implementation and the function body. Returns directly
+/// if the arch implementation is used, otherwise continues with the rest of the function.
+///
+/// Specify a `use_arch` meta field if an architecture-specific implementation is provided.
+/// These live in the `math::arch::some_target_arch` module.
+///
+/// Specify a `use_arch_required` meta field if something architecture-specific must be used
+/// regardless of feature configuration (`force-soft-floats`).
+///
+/// The passed meta options do not need to account for the `arch` target feature.
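+///
+/// A sketch of intended usage (the function and cfg names are illustrative):
+///
+/// ```ignore
+/// pub fn sqrt(x: f64) -> f64 {
+///     select_implementation! {
+///         name: sqrt,
+///         use_arch: all(target_arch = "aarch64", target_feature = "neon"),
+///         args: x,
+///     }
+///
+///     // Generic software implementation continues here.
+///     sqrt_soft(x)
+/// }
+/// ```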
+macro_rules! select_implementation {
+    (
+        name: $fn_name:ident,
+        // Configuration meta for when to use an arch-specific implementation that requires
+        // hard float ops
+        $( use_arch: $use_arch:meta, )?
+        // Configuration meta for when to use the arch module regardless of whether softfloats
+        // have been requested.
+        $( use_arch_required: $use_arch_required:meta, )?
+        args: $($arg:ident),+ ,
+    ) => {
+        // FIXME: these use paths that are pretty fragile (`super`). We should figure out
+        // something better w.r.t. how this is vendored into compiler-builtins.

+        // However, we do need a few things from `arch` that are used even with soft floats.
+        select_implementation! {
+            @cfg $($use_arch_required)?;
+            if true {
+                return super::arch::$fn_name( $($arg),+ );
+            }
+        }
+
+        // By default, never use arch-specific implementations if we have force-soft-floats
+        #[cfg(arch_enabled)]
+        select_implementation! {
+            @cfg $($use_arch)?;
+            // Wrap in `if true` to avoid unused warnings
+            if true {
+                return super::arch::$fn_name( $($arg),+ );
+            }
+        }
+    };
+
+    // Coalesce helper to construct an expression only if a config is provided
+    (@cfg ; $ex:expr) => { };
+    (@cfg $provided:meta; $ex:expr) => { #[cfg($provided)] $ex };
+}
+
+/// Construct a 16-bit float from hex float representation (C-style), guaranteed to
+/// evaluate at compile time.
+#[cfg(f16_enabled)]
+#[cfg_attr(feature = "unstable-public-internals", macro_export)]
+#[allow(unused_macros)]
+macro_rules! hf16 {
+    ($s:literal) => {{
+        const X: f16 = $crate::support::hf16($s);
+        X
+    }};
+}
+
+/// Construct a 32-bit float from hex float representation (C-style), guaranteed to
+/// evaluate at compile time.
+#[allow(unused_macros)]
+#[cfg_attr(feature = "unstable-public-internals", macro_export)]
+macro_rules! hf32 {
+    ($s:literal) => {{
+        const X: f32 = $crate::support::hf32($s);
+        X
+    }};
+}
+
+/// Construct a 64-bit float from hex float representation (C-style), guaranteed to
+/// evaluate at compile time.
+#[allow(unused_macros)]
+#[cfg_attr(feature = "unstable-public-internals", macro_export)]
+macro_rules! hf64 {
+    ($s:literal) => {{
+        const X: f64 = $crate::support::hf64($s);
+        X
+    }};
+}
+
+/// Construct a 128-bit float from hex float representation (C-style), guaranteed to
+/// evaluate at compile time.
+#[cfg(f128_enabled)]
+#[allow(unused_macros)]
+#[cfg_attr(feature = "unstable-public-internals", macro_export)]
macro_rules! hf128 {
+    ($s:literal) => {{
+        const X: f128 = $crate::support::hf128($s);
+        X
+    }};
+}
+
+/// Assert `F::biteq` with better messages.
+#[cfg(test)]
+macro_rules! assert_biteq {
+    ($left:expr, $right:expr, $($tt:tt)*) => {{
+        use $crate::support::Int;
+        let l = $left;
+        let r = $right;
+        // Hack to get the width from the value: `x - x` is zero, and the leading
+        // zeros of zero equal the type's bit width.
+        let bits = Int::leading_zeros(l.to_bits() - l.to_bits());
+        assert!(
+            l.biteq(r),
+            "{}\nl: {l:?} ({lb:#0width$x})\nr: {r:?} ({rb:#0width$x})",
+            format_args!($($tt)*),
+            lb = l.to_bits(),
+            rb = r.to_bits(),
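+            // One hex digit per four bits, plus two columns for the `0x` prefix.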
+            width = ((bits / 4) + 2) as usize,
+        );
+    }};
+    ($left:expr, $right:expr $(,)?) => {
+        assert_biteq!($left, $right, "")
+    };
+}
diff --git a/library/compiler-builtins/libm/src/math/support/mod.rs b/library/compiler-builtins/libm/src/math/support/mod.rs
new file mode 100644
index 00000000000..ee3f2bbdf02
--- /dev/null
+++ b/library/compiler-builtins/libm/src/math/support/mod.rs
@@ -0,0 +1,29 @@
+#[macro_use]
+pub mod macros;
+mod big;
+mod env;
+mod float_traits;
+pub mod hex_float;
+mod int_traits;
+
+#[allow(unused_imports)]
+pub use big::{i256, u256};
+pub use env::{FpResult, Round, Status};
+#[allow(unused_imports)]
+pub use float_traits::{DFloat, Float, HFloat, IntTy};
+pub(crate) use float_traits::{f32_from_bits, f64_from_bits};
+#[cfg(f16_enabled)]
+#[allow(unused_imports)]
+pub use hex_float::hf16;
+#[cfg(f128_enabled)]
+#[allow(unused_imports)]
+pub use hex_float::hf128;
+#[allow(unused_imports)]
+pub use hex_float::{Hexf, hf32, hf64};
+pub use int_traits::{CastFrom, CastInto, DInt, HInt, Int, MinInt};
+
+/// Hint to the compiler that the current path is cold.
+pub fn cold_path() {
+    #[cfg(intrinsics_enabled)]
+    core::intrinsics::cold_path();
+}
