about summary refs log tree commit diff
path: root/library/compiler-builtins/libm-test/src
diff options
context:
space:
mode:
authorTrevor Gross <tmgross@umich.edu>2025-04-19 20:58:25 +0000
committerTrevor Gross <t.gross35@gmail.com>2025-04-19 17:20:24 -0400
commit911a70381a9e7c84400b156e3cbcd805f3e64034 (patch)
treeab74d6098fd4f1ecfe965c95080f0248ea514268 /library/compiler-builtins/libm-test/src
parent806bb4fa6e35f65e63e1b96953fab68bfe5a67b4 (diff)
downloadrust-911a70381a9e7c84400b156e3cbcd805f3e64034.tar.gz
rust-911a70381a9e7c84400b156e3cbcd805f3e64034.zip
libm: Reorganize into compiler-builtins
Distribute everything from `libm/` to better locations in the repo.
`libm/libm/*` has not moved yet to avoid Git seeing the move as an edit
to `Cargo.toml`.

Files that remain to be merged somehow are in `etc/libm`.
Diffstat (limited to 'library/compiler-builtins/libm-test/src')
-rw-r--r--library/compiler-builtins/libm-test/src/domain.rs265
-rw-r--r--library/compiler-builtins/libm-test/src/f8_impl.rs503
-rw-r--r--library/compiler-builtins/libm-test/src/generate.rs43
-rw-r--r--library/compiler-builtins/libm-test/src/generate/case_list.rs853
-rw-r--r--library/compiler-builtins/libm-test/src/generate/edge_cases.rs310
-rw-r--r--library/compiler-builtins/libm-test/src/generate/random.rs125
-rw-r--r--library/compiler-builtins/libm-test/src/generate/spaced.rs253
-rw-r--r--library/compiler-builtins/libm-test/src/lib.rs105
-rw-r--r--library/compiler-builtins/libm-test/src/mpfloat.rs603
-rw-r--r--library/compiler-builtins/libm-test/src/num.rs529
-rw-r--r--library/compiler-builtins/libm-test/src/op.rs151
-rw-r--r--library/compiler-builtins/libm-test/src/precision.rs573
-rw-r--r--library/compiler-builtins/libm-test/src/run_cfg.rs370
-rw-r--r--library/compiler-builtins/libm-test/src/test_traits.rs447
14 files changed, 5130 insertions, 0 deletions
diff --git a/library/compiler-builtins/libm-test/src/domain.rs b/library/compiler-builtins/libm-test/src/domain.rs
new file mode 100644
index 00000000000..41e94846163
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/domain.rs
@@ -0,0 +1,265 @@
+//! Traits and operations related to bounds of a function.
+
+use std::fmt;
+use std::ops::Bound;
+
+use libm::support::Int;
+
+use crate::{BaseName, Float, FloatExt, Identifier};
+
+/// Representation of a single dimension of a function's domain.
+#[derive(Clone, Debug)]
+pub struct Domain<T> {
+    /// Start of the region for which a function is defined (ignoring poles).
+    pub start: Bound<T>,
+    /// End of the region for which a function is defined (ignoring poles).
+    pub end: Bound<T>,
+    /// Additional points to check closer around. These can be e.g. undefined asymptotes or
+    /// inflection points.
+    pub check_points: Option<fn() -> BoxIter<T>>,
+}
+
+/// Boxed iterator type used so `check_points` can be stored as a plain function pointer.
+type BoxIter<T> = Box<dyn Iterator<Item = T>>;
+
+impl<F: FloatExt> Domain<F> {
+    /// The start of this domain, saturating at negative infinity.
+    pub fn range_start(&self) -> F {
+        match self.start {
+            Bound::Included(v) => v,
+            // Excluded bound: step to the first representable value inside the domain.
+            Bound::Excluded(v) => v.next_up(),
+            Bound::Unbounded => F::NEG_INFINITY,
+        }
+    }
+
+    /// The end of this domain, saturating at infinity.
+    pub fn range_end(&self) -> F {
+        match self.end {
+            Bound::Included(v) => v,
+            // Excluded bound: step to the last representable value inside the domain.
+            Bound::Excluded(v) => v.next_down(),
+            Bound::Unbounded => F::INFINITY,
+        }
+    }
+}
+
+/// A value that may be any float type or any integer type.
+#[derive(Clone, Debug)]
+pub enum EitherPrim<F, I> {
+    Float(F),
+    Int(I),
+}
+
+impl<F: fmt::Debug, I: fmt::Debug> EitherPrim<F, I> {
+    /// Extract the float variant; panics if this is an `Int`.
+    pub fn unwrap_float(self) -> F {
+        match self {
+            EitherPrim::Float(f) => f,
+            EitherPrim::Int(_) => panic!("expected float; got {self:?}"),
+        }
+    }
+
+    /// Extract the int variant; panics if this is a `Float`.
+    pub fn unwrap_int(self) -> I {
+        match self {
+            EitherPrim::Float(_) => panic!("expected int; got {self:?}"),
+            EitherPrim::Int(i) => i,
+        }
+    }
+}
+
+/// Convenience 1-dimensional float domains.
+impl<F: Float> Domain<F> {
+    /// x ∈ ℝ
+    const UNBOUNDED: Self =
+        Self { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None };
+
+    /// x ∈ ℝ >= 0
+    const POSITIVE: Self =
+        Self { start: Bound::Included(F::ZERO), end: Bound::Unbounded, check_points: None };
+
+    /// x ∈ ℝ > 0
+    const STRICTLY_POSITIVE: Self =
+        Self { start: Bound::Excluded(F::ZERO), end: Bound::Unbounded, check_points: None };
+
+    /// Wrap in the float variant of [`EitherPrim`].
+    const fn into_prim_float<I>(self) -> EitherPrim<Self, Domain<I>> {
+        EitherPrim::Float(self)
+    }
+}
+
+/// Convenience 1-dimensional integer domains.
+impl<I: Int> Domain<I> {
+    /// x ∈ ℤ (any integer value)
+    const UNBOUNDED_INT: Self =
+        Self { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None };
+
+    /// Wrap in the int variant of [`EitherPrim`].
+    const fn into_prim_int<F>(self) -> EitherPrim<Domain<F>, Self> {
+        EitherPrim::Int(self)
+    }
+}
+
+/// Multidimensional domains, represented as an array of 1-D domains.
+impl<F: Float, I: Int> EitherPrim<Domain<F>, Domain<I>> {
+    /// x ∈ ℝ
+    const UNBOUNDED1: [Self; 1] =
+        [Domain { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None }
+            .into_prim_float()];
+
+    /// {x1, x2} ∈ ℝ
+    const UNBOUNDED2: [Self; 2] =
+        [Domain::UNBOUNDED.into_prim_float(), Domain::UNBOUNDED.into_prim_float()];
+
+    /// {x1, x2, x3} ∈ ℝ
+    const UNBOUNDED3: [Self; 3] = [
+        Domain::UNBOUNDED.into_prim_float(),
+        Domain::UNBOUNDED.into_prim_float(),
+        Domain::UNBOUNDED.into_prim_float(),
+    ];
+
+    /// {x1} ∈ ℝ, {x2} ∈ ℤ — one float and one int argument
+    const UNBOUNDED_F_I: [Self; 2] =
+        [Domain::UNBOUNDED.into_prim_float(), Domain::UNBOUNDED_INT.into_prim_int()];
+
+    /// x ∈ ℝ >= 0
+    const POSITIVE: [Self; 1] = [Domain::POSITIVE.into_prim_float()];
+
+    /// x ∈ ℝ > 0
+    const STRICTLY_POSITIVE: [Self; 1] = [Domain::STRICTLY_POSITIVE.into_prim_float()];
+
+    /// Used for versions of `asin` and `acos`: x ∈ [-1, 1].
+    const INVERSE_TRIG_PERIODIC: [Self; 1] = [Domain {
+        start: Bound::Included(F::NEG_ONE),
+        end: Bound::Included(F::ONE),
+        check_points: None,
+    }
+    .into_prim_float()];
+
+    /// Domain for `acosh`: x ∈ [1, ∞).
+    const ACOSH: [Self; 1] =
+        [Domain { start: Bound::Included(F::ONE), end: Bound::Unbounded, check_points: None }
+            .into_prim_float()];
+
+    /// Domain for `atanh`: x ∈ (-1, 1).
+    const ATANH: [Self; 1] = [Domain {
+        start: Bound::Excluded(F::NEG_ONE),
+        end: Bound::Excluded(F::ONE),
+        check_points: None,
+    }
+    .into_prim_float()];
+
+    /// Domain for `sin`, `cos`, and `tan`
+    const TRIG: [Self; 1] = [Domain {
+        // Trig functions have special behavior at fractions of π.
+        check_points: Some(|| Box::new([-F::PI, -F::FRAC_PI_2, F::FRAC_PI_2, F::PI].into_iter())),
+        ..Domain::UNBOUNDED
+    }
+    .into_prim_float()];
+
+    /// Domain for `log` in various bases
+    const LOG: [Self; 1] = Self::STRICTLY_POSITIVE;
+
+    /// Domain for `log1p` i.e. `log(1 + x)`: x ∈ (-1, ∞).
+    const LOG1P: [Self; 1] =
+        [Domain { start: Bound::Excluded(F::NEG_ONE), end: Bound::Unbounded, check_points: None }
+            .into_prim_float()];
+
+    /// Domain for `sqrt`
+    const SQRT: [Self; 1] = Self::POSITIVE;
+
+    /// Domain for `gamma`
+    const GAMMA: [Self; 1] = [Domain {
+        check_points: Some(|| {
+            // Negative integers are asymptotes
+            Box::new((0..u8::MAX).map(|scale| {
+                // Build -scale by repeated subtraction, since `Float` has no int conversion.
+                let mut base = F::ZERO;
+                for _ in 0..scale {
+                    base = base - F::ONE;
+                }
+                base
+            }))
+        }),
+        // Whether or not gamma is defined for negative numbers is implementation dependent
+        ..Domain::UNBOUNDED
+    }
+    .into_prim_float()];
+
+    /// Domain for `loggamma`
+    const LGAMMA: [Self; 1] = Self::STRICTLY_POSITIVE;
+
+    /// Domain for `jn` and `yn`: an integer order followed by a float argument.
+    // FIXME: the domain should provide some sort of "reasonable range" so we don't actually test
+    // the entire system unbounded.
+    const BESSEL_N: [Self; 2] =
+        [Domain::UNBOUNDED_INT.into_prim_int(), Domain::UNBOUNDED.into_prim_float()];
+}
+
+/// Get the domain for a given function.
+///
+/// `argnum` is the zero-based index of the argument whose 1-D domain is returned.
+///
+/// # Panics
+///
+/// Panics if `argnum` is not less than the number of arguments the function takes.
+pub fn get_domain<F: Float, I: Int>(
+    id: Identifier,
+    argnum: usize,
+) -> EitherPrim<Domain<F>, Domain<I>> {
+    // Map each base name to a slice of per-argument domains.
+    let x = match id.base_name() {
+        BaseName::Acos => &EitherPrim::INVERSE_TRIG_PERIODIC[..],
+        BaseName::Acosh => &EitherPrim::ACOSH[..],
+        BaseName::Asin => &EitherPrim::INVERSE_TRIG_PERIODIC[..],
+        BaseName::Asinh => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Atan => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Atan2 => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Cbrt => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Atanh => &EitherPrim::ATANH[..],
+        BaseName::Ceil => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Cosh => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Copysign => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Cos => &EitherPrim::TRIG[..],
+        BaseName::Exp => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Erf => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Erfc => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Expm1 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Exp10 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Exp2 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Frexp => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Fabs => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Fdim => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Floor => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Fma => &EitherPrim::UNBOUNDED3[..],
+        BaseName::Fmax => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Fmaximum => &EitherPrim::UNBOUNDED2[..],
+        BaseName::FmaximumNum => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Fmin => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Fminimum => &EitherPrim::UNBOUNDED2[..],
+        BaseName::FminimumNum => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Fmod => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Hypot => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Ilogb => &EitherPrim::UNBOUNDED1[..],
+        BaseName::J0 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::J1 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Jn => &EitherPrim::BESSEL_N[..],
+        BaseName::Ldexp => &EitherPrim::UNBOUNDED_F_I[..],
+        BaseName::Lgamma => &EitherPrim::LGAMMA[..],
+        BaseName::LgammaR => &EitherPrim::LGAMMA[..],
+        BaseName::Log => &EitherPrim::LOG[..],
+        BaseName::Log10 => &EitherPrim::LOG[..],
+        BaseName::Log1p => &EitherPrim::LOG1P[..],
+        BaseName::Log2 => &EitherPrim::LOG[..],
+        BaseName::Modf => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Nextafter => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Pow => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Remainder => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Remquo => &EitherPrim::UNBOUNDED2[..],
+        BaseName::Rint => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Round => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Roundeven => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Scalbn => &EitherPrim::UNBOUNDED_F_I[..],
+        BaseName::Sin => &EitherPrim::TRIG[..],
+        BaseName::Sincos => &EitherPrim::TRIG[..],
+        BaseName::Sinh => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Sqrt => &EitherPrim::SQRT[..],
+        BaseName::Tan => &EitherPrim::TRIG[..],
+        BaseName::Tanh => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Tgamma => &EitherPrim::GAMMA[..],
+        BaseName::Trunc => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Y0 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Y1 => &EitherPrim::UNBOUNDED1[..],
+        BaseName::Yn => &EitherPrim::BESSEL_N[..],
+    };
+
+    // Panics here if `argnum` exceeds the arity implied by the table above.
+    x[argnum].clone()
+}
diff --git a/library/compiler-builtins/libm-test/src/f8_impl.rs b/library/compiler-builtins/libm-test/src/f8_impl.rs
new file mode 100644
index 00000000000..ddb7bf90e7f
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/f8_impl.rs
@@ -0,0 +1,503 @@
+//! An IEEE-compliant 8-bit float type for testing purposes.
+
+use std::cmp::{self, Ordering};
+use std::{fmt, ops};
+
+use crate::Float;
+
+/// Sometimes verifying float logic is easiest when all values can quickly be checked exhaustively
+/// or by hand.
+///
+/// IEEE-754 compliant type that includes a 1 bit sign, 4 bit exponent, and 3 bit significand.
+/// Bias is -7.
+///
+/// Based on <https://en.wikipedia.org/wiki/Minifloat#Example_8-bit_float_(1.4.3)>.
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+#[allow(non_camel_case_types)]
+pub struct f8(u8); // raw bit pattern: sign(1) | exponent(4) | significand(3)
+
+impl Float for f8 {
+    type Int = u8;
+    type SignedInt = i8;
+
+    const ZERO: Self = Self(0b0_0000_000);
+    const NEG_ZERO: Self = Self(0b1_0000_000);
+    const ONE: Self = Self(0b0_0111_000);
+    const NEG_ONE: Self = Self(0b1_0111_000);
+    const MAX: Self = Self(0b0_1110_111);
+    const MIN: Self = Self(0b1_1110_111);
+    const INFINITY: Self = Self(0b0_1111_000);
+    const NEG_INFINITY: Self = Self(0b1_1111_000);
+    const NAN: Self = Self(0b0_1111_100);
+    const NEG_NAN: Self = Self(0b1_1111_100);
+    // Smallest normal: exponent field 1, significand 0.
+    const MIN_POSITIVE_NORMAL: Self = Self(1 << Self::SIG_BITS);
+    // FIXME: incorrect values
+    const EPSILON: Self = Self::ZERO;
+    const PI: Self = Self::ZERO;
+    const NEG_PI: Self = Self::ZERO;
+    const FRAC_PI_2: Self = Self::ZERO;
+
+    const BITS: u32 = 8;
+    const SIG_BITS: u32 = 3;
+    const SIGN_MASK: Self::Int = 0b1_0000_000;
+    const SIG_MASK: Self::Int = 0b0_0000_111;
+    const EXP_MASK: Self::Int = 0b0_1111_000;
+    const IMPLICIT_BIT: Self::Int = 0b0_0001_000;
+
+    fn to_bits(self) -> Self::Int {
+        self.0
+    }
+
+    fn to_bits_signed(self) -> Self::SignedInt {
+        self.0 as i8
+    }
+
+    fn is_nan(self) -> bool {
+        // NaN: all-ones exponent with a nonzero significand.
+        self.0 & Self::EXP_MASK == Self::EXP_MASK && self.0 & Self::SIG_MASK != 0
+    }
+
+    fn is_infinite(self) -> bool {
+        // Infinity: all-ones exponent with a zero significand.
+        self.0 & Self::EXP_MASK == Self::EXP_MASK && self.0 & Self::SIG_MASK == 0
+    }
+
+    fn is_sign_negative(self) -> bool {
+        self.0 & Self::SIGN_MASK != 0
+    }
+
+    fn from_bits(a: Self::Int) -> Self {
+        Self(a)
+    }
+
+    fn abs(self) -> Self {
+        libm::generic::fabs(self)
+    }
+
+    fn copysign(self, other: Self) -> Self {
+        libm::generic::copysign(self, other)
+    }
+
+    // Intentionally unimplemented for this test-only type.
+    fn fma(self, _y: Self, _z: Self) -> Self {
+        unimplemented!()
+    }
+
+    // Intentionally unimplemented for this test-only type.
+    fn normalize(_significand: Self::Int) -> (i32, Self::Int) {
+        unimplemented!()
+    }
+}
+
+impl f8 {
+    pub const ALL_LEN: usize = 240;
+
+    /// All non-infinite non-NaN values of `f8`
+    ///
+    /// Listed in ascending numerical order, from -240 up through 240.
+    pub const ALL: [Self; Self::ALL_LEN] = [
+        // -m*2^7
+        Self(0b1_1110_111), // -240
+        Self(0b1_1110_110),
+        Self(0b1_1110_101),
+        Self(0b1_1110_100),
+        Self(0b1_1110_011),
+        Self(0b1_1110_010),
+        Self(0b1_1110_001),
+        Self(0b1_1110_000), // -128
+        // -m*2^6
+        Self(0b1_1101_111), // -120
+        Self(0b1_1101_110),
+        Self(0b1_1101_101),
+        Self(0b1_1101_100),
+        Self(0b1_1101_011),
+        Self(0b1_1101_010),
+        Self(0b1_1101_001),
+        Self(0b1_1101_000), // -64
+        // -m*2^5
+        Self(0b1_1100_111), // -60
+        Self(0b1_1100_110),
+        Self(0b1_1100_101),
+        Self(0b1_1100_100),
+        Self(0b1_1100_011),
+        Self(0b1_1100_010),
+        Self(0b1_1100_001),
+        Self(0b1_1100_000), // -32
+        // -m*2^4
+        Self(0b1_1011_111), // -30
+        Self(0b1_1011_110),
+        Self(0b1_1011_101),
+        Self(0b1_1011_100),
+        Self(0b1_1011_011),
+        Self(0b1_1011_010),
+        Self(0b1_1011_001),
+        Self(0b1_1011_000), // -16
+        // -m*2^3
+        Self(0b1_1010_111), // -15
+        Self(0b1_1010_110),
+        Self(0b1_1010_101),
+        Self(0b1_1010_100),
+        Self(0b1_1010_011),
+        Self(0b1_1010_010),
+        Self(0b1_1010_001),
+        Self(0b1_1010_000), // -8
+        // -m*2^2
+        Self(0b1_1001_111), // -7.5
+        Self(0b1_1001_110),
+        Self(0b1_1001_101),
+        Self(0b1_1001_100),
+        Self(0b1_1001_011),
+        Self(0b1_1001_010),
+        Self(0b1_1001_001),
+        Self(0b1_1001_000), // -4
+        // -m*2^1
+        Self(0b1_1000_111), // -3.75
+        Self(0b1_1000_110),
+        Self(0b1_1000_101),
+        Self(0b1_1000_100),
+        Self(0b1_1000_011),
+        Self(0b1_1000_010),
+        Self(0b1_1000_001),
+        Self(0b1_1000_000), // -2
+        // -m*2^0
+        Self(0b1_0111_111), // -1.875
+        Self(0b1_0111_110),
+        Self(0b1_0111_101),
+        Self(0b1_0111_100),
+        Self(0b1_0111_011),
+        Self(0b1_0111_010),
+        Self(0b1_0111_001),
+        Self(0b1_0111_000), // -1
+        // -m*2^-1
+        Self(0b1_0110_111), // −0.9375
+        Self(0b1_0110_110),
+        Self(0b1_0110_101),
+        Self(0b1_0110_100),
+        Self(0b1_0110_011),
+        Self(0b1_0110_010),
+        Self(0b1_0110_001),
+        Self(0b1_0110_000), // -0.5
+        // -m*2^-2
+        Self(0b1_0101_111), // −0.46875
+        Self(0b1_0101_110),
+        Self(0b1_0101_101),
+        Self(0b1_0101_100),
+        Self(0b1_0101_011),
+        Self(0b1_0101_010),
+        Self(0b1_0101_001),
+        Self(0b1_0101_000), // -0.25
+        // -m*2^-3
+        Self(0b1_0100_111), // −0.234375
+        Self(0b1_0100_110),
+        Self(0b1_0100_101),
+        Self(0b1_0100_100),
+        Self(0b1_0100_011),
+        Self(0b1_0100_010),
+        Self(0b1_0100_001),
+        Self(0b1_0100_000), // -0.125
+        // -m*2^-4
+        Self(0b1_0011_111), // −0.1171875
+        Self(0b1_0011_110),
+        Self(0b1_0011_101),
+        Self(0b1_0011_100),
+        Self(0b1_0011_011),
+        Self(0b1_0011_010),
+        Self(0b1_0011_001),
+        Self(0b1_0011_000), // −0.0625
+        // -m*2^-5
+        Self(0b1_0010_111), // −0.05859375
+        Self(0b1_0010_110),
+        Self(0b1_0010_101),
+        Self(0b1_0010_100),
+        Self(0b1_0010_011),
+        Self(0b1_0010_010),
+        Self(0b1_0010_001),
+        Self(0b1_0010_000), // −0.03125
+        // -m*2^-6
+        Self(0b1_0001_111), // −0.029296875
+        Self(0b1_0001_110),
+        Self(0b1_0001_101),
+        Self(0b1_0001_100),
+        Self(0b1_0001_011),
+        Self(0b1_0001_010),
+        Self(0b1_0001_001),
+        Self(0b1_0001_000), // −0.015625
+        // -m*2^-7 subnormal numbers
+        Self(0b1_0000_111), // −0.013671875
+        Self(0b1_0000_110),
+        Self(0b1_0000_101),
+        Self(0b1_0000_100),
+        Self(0b1_0000_011),
+        Self(0b1_0000_010),
+        Self(0b1_0000_001), // −0.001953125
+        // Zeroes
+        Self(0b1_0000_000), // -0.0
+        Self(0b0_0000_000), // 0.0
+        // m*2^-7 // subnormal numbers
+        Self(0b0_0000_001),
+        Self(0b0_0000_010),
+        Self(0b0_0000_011),
+        Self(0b0_0000_100),
+        Self(0b0_0000_101),
+        Self(0b0_0000_110),
+        Self(0b0_0000_111), // 0.013671875
+        // m*2^-6
+        Self(0b0_0001_000), // 0.015625
+        Self(0b0_0001_001),
+        Self(0b0_0001_010),
+        Self(0b0_0001_011),
+        Self(0b0_0001_100),
+        Self(0b0_0001_101),
+        Self(0b0_0001_110),
+        Self(0b0_0001_111), // 0.029296875
+        // m*2^-5
+        Self(0b0_0010_000), // 0.03125
+        Self(0b0_0010_001),
+        Self(0b0_0010_010),
+        Self(0b0_0010_011),
+        Self(0b0_0010_100),
+        Self(0b0_0010_101),
+        Self(0b0_0010_110),
+        Self(0b0_0010_111), // 0.05859375
+        // m*2^-4
+        Self(0b0_0011_000), // 0.0625
+        Self(0b0_0011_001),
+        Self(0b0_0011_010),
+        Self(0b0_0011_011),
+        Self(0b0_0011_100),
+        Self(0b0_0011_101),
+        Self(0b0_0011_110),
+        Self(0b0_0011_111), // 0.1171875
+        // m*2^-3
+        Self(0b0_0100_000), // 0.125
+        Self(0b0_0100_001),
+        Self(0b0_0100_010),
+        Self(0b0_0100_011),
+        Self(0b0_0100_100),
+        Self(0b0_0100_101),
+        Self(0b0_0100_110),
+        Self(0b0_0100_111), // 0.234375
+        // m*2^-2
+        Self(0b0_0101_000), // 0.25
+        Self(0b0_0101_001),
+        Self(0b0_0101_010),
+        Self(0b0_0101_011),
+        Self(0b0_0101_100),
+        Self(0b0_0101_101),
+        Self(0b0_0101_110),
+        Self(0b0_0101_111), // 0.46875
+        // m*2^-1
+        Self(0b0_0110_000), // 0.5
+        Self(0b0_0110_001),
+        Self(0b0_0110_010),
+        Self(0b0_0110_011),
+        Self(0b0_0110_100),
+        Self(0b0_0110_101),
+        Self(0b0_0110_110),
+        Self(0b0_0110_111), // 0.9375
+        // m*2^0
+        Self(0b0_0111_000), // 1
+        Self(0b0_0111_001),
+        Self(0b0_0111_010),
+        Self(0b0_0111_011),
+        Self(0b0_0111_100),
+        Self(0b0_0111_101),
+        Self(0b0_0111_110),
+        Self(0b0_0111_111), // 1.875
+        // m*2^1
+        Self(0b0_1000_000), // 2
+        Self(0b0_1000_001),
+        Self(0b0_1000_010),
+        Self(0b0_1000_011),
+        Self(0b0_1000_100),
+        Self(0b0_1000_101),
+        Self(0b0_1000_110),
+        Self(0b0_1000_111), // 3.75
+        // m*2^2
+        Self(0b0_1001_000), // 4
+        Self(0b0_1001_001),
+        Self(0b0_1001_010),
+        Self(0b0_1001_011),
+        Self(0b0_1001_100),
+        Self(0b0_1001_101),
+        Self(0b0_1001_110),
+        Self(0b0_1001_111), // 7.5
+        // m*2^3
+        Self(0b0_1010_000), // 8
+        Self(0b0_1010_001),
+        Self(0b0_1010_010),
+        Self(0b0_1010_011),
+        Self(0b0_1010_100),
+        Self(0b0_1010_101),
+        Self(0b0_1010_110),
+        Self(0b0_1010_111), // 15
+        // m*2^4
+        Self(0b0_1011_000), // 16
+        Self(0b0_1011_001),
+        Self(0b0_1011_010),
+        Self(0b0_1011_011),
+        Self(0b0_1011_100),
+        Self(0b0_1011_101),
+        Self(0b0_1011_110),
+        Self(0b0_1011_111), // 30
+        // m*2^5
+        Self(0b0_1100_000), // 32
+        Self(0b0_1100_001),
+        Self(0b0_1100_010),
+        Self(0b0_1100_011),
+        Self(0b0_1100_100),
+        Self(0b0_1100_101),
+        Self(0b0_1100_110),
+        Self(0b0_1100_111), // 60
+        // m*2^6
+        Self(0b0_1101_000), // 64
+        Self(0b0_1101_001),
+        Self(0b0_1101_010),
+        Self(0b0_1101_011),
+        Self(0b0_1101_100),
+        Self(0b0_1101_101),
+        Self(0b0_1101_110),
+        Self(0b0_1101_111), // 120
+        // m*2^7
+        Self(0b0_1110_000), // 128
+        Self(0b0_1110_001),
+        Self(0b0_1110_010),
+        Self(0b0_1110_011),
+        Self(0b0_1110_100),
+        Self(0b0_1110_101),
+        Self(0b0_1110_110),
+        Self(0b0_1110_111), // 240
+    ];
+}
+
+// Arithmetic trait impls. Only negation is implemented; the rest are stubs for trait
+// completeness on this test-only type.
+impl ops::Add for f8 {
+    type Output = Self;
+    fn add(self, _rhs: Self) -> Self::Output {
+        unimplemented!()
+    }
+}
+
+impl ops::Sub for f8 {
+    type Output = Self;
+    fn sub(self, _rhs: Self) -> Self::Output {
+        unimplemented!()
+    }
+}
+impl ops::Mul for f8 {
+    type Output = Self;
+    fn mul(self, _rhs: Self) -> Self::Output {
+        unimplemented!()
+    }
+}
+impl ops::Div for f8 {
+    type Output = Self;
+    fn div(self, _rhs: Self) -> Self::Output {
+        unimplemented!()
+    }
+}
+
+impl ops::Neg for f8 {
+    type Output = Self;
+    fn neg(self) -> Self::Output {
+        // Negation flips only the sign bit.
+        Self(self.0 ^ Self::SIGN_MASK)
+    }
+}
+
+impl ops::Rem for f8 {
+    type Output = Self;
+    fn rem(self, _rhs: Self) -> Self::Output {
+        unimplemented!()
+    }
+}
+
+impl ops::AddAssign for f8 {
+    fn add_assign(&mut self, _rhs: Self) {
+        unimplemented!()
+    }
+}
+
+impl ops::SubAssign for f8 {
+    fn sub_assign(&mut self, _rhs: Self) {
+        unimplemented!()
+    }
+}
+
+impl ops::MulAssign for f8 {
+    fn mul_assign(&mut self, _rhs: Self) {
+        unimplemented!()
+    }
+}
+
+impl cmp::PartialEq for f8 {
+    fn eq(&self, other: &Self) -> bool {
+        if self.is_nan() || other.is_nan() {
+            // IEEE 754: NaN is unequal to everything, including itself.
+            false
+        } else if self.abs().to_bits() | other.abs().to_bits() == 0 {
+            // +0.0 and -0.0 compare equal despite differing bit patterns.
+            true
+        } else {
+            self.0 == other.0
+        }
+    }
+}
+impl cmp::PartialOrd for f8 {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        let inf_rep = f8::EXP_MASK;
+
+        let a_abs = self.abs().to_bits();
+        let b_abs = other.abs().to_bits();
+
+        // If either a or b is NaN, they are unordered.
+        if a_abs > inf_rep || b_abs > inf_rep {
+            return None;
+        }
+
+        // If a and b are both zeros, they are equal.
+        if a_abs | b_abs == 0 {
+            return Some(Ordering::Equal);
+        }
+
+        let a_srep = self.to_bits_signed();
+        let b_srep = other.to_bits_signed();
+        let res = a_srep.cmp(&b_srep);
+
+        if a_srep & b_srep >= 0 {
+            // If at least one of a and b is positive, we get the same result comparing
+            // a and b as signed integers as we would with a floating-point compare.
+            Some(res)
+        } else {
+            // Otherwise, both are negative, so we need to flip the sense of the
+            // comparison to get the correct result.
+            Some(res.reverse())
+        }
+    }
+}
+impl fmt::Display for f8 {
+    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        unimplemented!()
+    }
+}
+
+impl fmt::Debug for f8 {
+    // Delegate to the binary representation; the bit pattern is the useful view here.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Binary::fmt(self, f)
+    }
+}
+
+impl fmt::Binary for f8 {
+    // Prints as `0bS_EEEE_MMM` (sign, exponent, significand).
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let v = self.0;
+        write!(
+            f,
+            "0b{:b}_{:04b}_{:03b}",
+            v >> 7,
+            (v & Self::EXP_MASK) >> Self::SIG_BITS,
+            v & Self::SIG_MASK
+        )
+    }
+}
+
+impl fmt::LowerHex for f8 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+/// Parse a hexadecimal float literal into an `f8` (8 total bits, 3 significand bits).
+///
+/// Panics if `s` is not a valid hex float for this format.
+pub const fn hf8(s: &str) -> f8 {
+    let Ok(bits) = libm::support::hex_float::parse_hex_exact(s, 8, 3) else { panic!() };
+    f8(bits as u8)
+}
diff --git a/library/compiler-builtins/libm-test/src/generate.rs b/library/compiler-builtins/libm-test/src/generate.rs
new file mode 100644
index 00000000000..89ca09a7a0b
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/generate.rs
@@ -0,0 +1,43 @@
+//! Different generators that can create random or systematic bit patterns.
+
+pub mod case_list;
+pub mod edge_cases;
+pub mod random;
+pub mod spaced;
+
+/// A wrapper to turn any iterator into an `ExactSizeIterator`. Asserts the final result to ensure
+/// the provided size was correct.
+#[derive(Debug)]
+pub struct KnownSize<I> {
+    // Expected total number of items the iterator will yield.
+    total: u64,
+    // Number of items yielded so far.
+    current: u64,
+    // The wrapped iterator.
+    iter: I,
+}
+
+impl<I> KnownSize<I> {
+    /// Wrap `iter`, asserting it will yield exactly `total` items.
+    pub fn new(iter: I, total: u64) -> Self {
+        Self { total, current: 0, iter }
+    }
+}
+
+impl<I: Iterator> Iterator for KnownSize<I> {
+    type Item = I::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let next = self.iter.next();
+        if next.is_some() {
+            self.current += 1;
+            return next;
+        }
+
+        // Iterator exhausted: verify the caller-provided total was accurate.
+        assert_eq!(self.current, self.total, "total items did not match expected");
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Exact lower and upper bounds, as required by `ExactSizeIterator`.
+        let remaining = usize::try_from(self.total - self.current).unwrap();
+        (remaining, Some(remaining))
+    }
+}
+
+impl<I: Iterator> ExactSizeIterator for KnownSize<I> {}
diff --git a/library/compiler-builtins/libm-test/src/generate/case_list.rs b/library/compiler-builtins/libm-test/src/generate/case_list.rs
new file mode 100644
index 00000000000..e3628d51c9a
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/generate/case_list.rs
@@ -0,0 +1,853 @@
+//! Test cases to verify specific values.
+//!
+//! Each routine can have a set of inputs and, optionally, outputs. If an output is provided, it
+//! will be used to check against. If only inputs are provided, the case will be checked against
+//! a basis.
+//!
+//! This is useful for adding regression tests or expected failures.
+
+use libm::hf64;
+#[cfg(f128_enabled)]
+use libm::hf128;
+
+use crate::{CheckBasis, CheckCtx, GeneratorKind, MathOp, op};
+
+/// A single test case: one set of inputs for `Op` and, optionally, the exact
+/// expected output.
+pub struct TestCase<Op: MathOp> {
+    /// Arguments to pass to the routine under test.
+    pub input: Op::RustArgs,
+    /// If `Some`, the exact value to check against; if `None`, the case is
+    /// checked against a basis instead (see the module docs).
+    pub output: Option<Op::RustRet>,
+}
+
+impl<Op: MathOp> TestCase<Op> {
+    /// Append input-only cases (no expected output) to `v`.
+    #[expect(dead_code)]
+    fn append_inputs(v: &mut Vec<Self>, l: &[Op::RustArgs]) {
+        v.extend(l.iter().copied().map(|input| Self { input, output: None }));
+    }
+
+    /// Append `(input, optional expected output)` pairs to `v`.
+    fn append_pairs(v: &mut Vec<Self>, l: &[(Op::RustArgs, Option<Op::RustRet>)])
+    where
+        Op::RustRet: Copy,
+    {
+        v.extend(l.iter().copied().map(|(input, output)| Self { input, output }));
+    }
+}
+
+fn acos_cases() -> Vec<TestCase<op::acos::Routine>> {
+    vec![]
+}
+
+fn acosf_cases() -> Vec<TestCase<op::acosf::Routine>> {
+    vec![]
+}
+
+fn acosh_cases() -> Vec<TestCase<op::acosh::Routine>> {
+    vec![]
+}
+
+fn acoshf_cases() -> Vec<TestCase<op::acoshf::Routine>> {
+    vec![]
+}
+
+fn asin_cases() -> Vec<TestCase<op::asin::Routine>> {
+    vec![]
+}
+
+fn asinf_cases() -> Vec<TestCase<op::asinf::Routine>> {
+    vec![]
+}
+
+fn asinh_cases() -> Vec<TestCase<op::asinh::Routine>> {
+    vec![]
+}
+
+fn asinhf_cases() -> Vec<TestCase<op::asinhf::Routine>> {
+    vec![]
+}
+
+fn atan_cases() -> Vec<TestCase<op::atan::Routine>> {
+    vec![]
+}
+
+fn atan2_cases() -> Vec<TestCase<op::atan2::Routine>> {
+    vec![]
+}
+
+fn atan2f_cases() -> Vec<TestCase<op::atan2f::Routine>> {
+    vec![]
+}
+
+fn atanf_cases() -> Vec<TestCase<op::atanf::Routine>> {
+    vec![]
+}
+
+fn atanh_cases() -> Vec<TestCase<op::atanh::Routine>> {
+    vec![]
+}
+
+fn atanhf_cases() -> Vec<TestCase<op::atanhf::Routine>> {
+    vec![]
+}
+
+fn cbrt_cases() -> Vec<TestCase<op::cbrt::Routine>> {
+    vec![]
+}
+
+fn cbrtf_cases() -> Vec<TestCase<op::cbrtf::Routine>> {
+    vec![]
+}
+
+fn ceil_cases() -> Vec<TestCase<op::ceil::Routine>> {
+    vec![]
+}
+
+fn ceilf_cases() -> Vec<TestCase<op::ceilf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn ceilf128_cases() -> Vec<TestCase<op::ceilf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn ceilf16_cases() -> Vec<TestCase<op::ceilf16::Routine>> {
+    vec![]
+}
+
+fn copysign_cases() -> Vec<TestCase<op::copysign::Routine>> {
+    vec![]
+}
+
+fn copysignf_cases() -> Vec<TestCase<op::copysignf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn copysignf128_cases() -> Vec<TestCase<op::copysignf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn copysignf16_cases() -> Vec<TestCase<op::copysignf16::Routine>> {
+    vec![]
+}
+
+fn cos_cases() -> Vec<TestCase<op::cos::Routine>> {
+    vec![]
+}
+
+fn cosf_cases() -> Vec<TestCase<op::cosf::Routine>> {
+    vec![]
+}
+
+fn cosh_cases() -> Vec<TestCase<op::cosh::Routine>> {
+    vec![]
+}
+
+fn coshf_cases() -> Vec<TestCase<op::coshf::Routine>> {
+    vec![]
+}
+
+fn erf_cases() -> Vec<TestCase<op::erf::Routine>> {
+    vec![]
+}
+
+fn erfc_cases() -> Vec<TestCase<op::erfc::Routine>> {
+    vec![]
+}
+
+fn erfcf_cases() -> Vec<TestCase<op::erfcf::Routine>> {
+    vec![]
+}
+
+fn erff_cases() -> Vec<TestCase<op::erff::Routine>> {
+    vec![]
+}
+
+fn exp_cases() -> Vec<TestCase<op::exp::Routine>> {
+    vec![]
+}
+
+fn exp10_cases() -> Vec<TestCase<op::exp10::Routine>> {
+    vec![]
+}
+
+fn exp10f_cases() -> Vec<TestCase<op::exp10f::Routine>> {
+    vec![]
+}
+
+fn exp2_cases() -> Vec<TestCase<op::exp2::Routine>> {
+    vec![]
+}
+
+fn exp2f_cases() -> Vec<TestCase<op::exp2f::Routine>> {
+    vec![]
+}
+
+fn expf_cases() -> Vec<TestCase<op::expf::Routine>> {
+    vec![]
+}
+
+fn expm1_cases() -> Vec<TestCase<op::expm1::Routine>> {
+    vec![]
+}
+
+fn expm1f_cases() -> Vec<TestCase<op::expm1f::Routine>> {
+    vec![]
+}
+
+fn fabs_cases() -> Vec<TestCase<op::fabs::Routine>> {
+    vec![]
+}
+
+fn fabsf_cases() -> Vec<TestCase<op::fabsf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fabsf128_cases() -> Vec<TestCase<op::fabsf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fabsf16_cases() -> Vec<TestCase<op::fabsf16::Routine>> {
+    vec![]
+}
+
+fn fdim_cases() -> Vec<TestCase<op::fdim::Routine>> {
+    vec![]
+}
+
+fn fdimf_cases() -> Vec<TestCase<op::fdimf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fdimf128_cases() -> Vec<TestCase<op::fdimf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fdimf16_cases() -> Vec<TestCase<op::fdimf16::Routine>> {
+    vec![]
+}
+
+fn floor_cases() -> Vec<TestCase<op::floor::Routine>> {
+    vec![]
+}
+
+fn floorf_cases() -> Vec<TestCase<op::floorf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn floorf128_cases() -> Vec<TestCase<op::floorf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn floorf16_cases() -> Vec<TestCase<op::floorf16::Routine>> {
+    vec![]
+}
+
+fn fma_cases() -> Vec<TestCase<op::fma::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            // Previous failure with incorrect sign
+            ((5e-324, -5e-324, 0.0), Some(-0.0)),
+        ],
+    );
+    v
+}
+
+fn fmaf_cases() -> Vec<TestCase<op::fmaf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fmaf128_cases() -> Vec<TestCase<op::fmaf128::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            (
+                // Tricky rounding case that previously failed in extensive tests
+                (
+                    hf128!("-0x1.1966cc01966cc01966cc01966f06p-25"),
+                    hf128!("-0x1.669933fe69933fe69933fe6997c9p-16358"),
+                    hf128!("-0x0.000000000000000000000000048ap-16382"),
+                ),
+                Some(hf128!("0x0.c5171470a3ff5e0f68d751491b18p-16382")),
+            ),
+            (
+                // Subnormal edge case that caused a failure
+                (
+                    hf128!("0x0.7ffffffffffffffffffffffffff7p-16382"),
+                    hf128!("0x1.ffffffffffffffffffffffffffffp-1"),
+                    hf128!("0x0.8000000000000000000000000009p-16382"),
+                ),
+                Some(hf128!("0x1.0000000000000000000000000000p-16382")),
+            ),
+        ],
+    );
+    v
+}
+
+#[cfg(f16_enabled)]
+fn fmaxf16_cases() -> Vec<TestCase<op::fmaxf16::Routine>> {
+    vec![]
+}
+
+fn fmaxf_cases() -> Vec<TestCase<op::fmaxf::Routine>> {
+    vec![]
+}
+
+fn fmax_cases() -> Vec<TestCase<op::fmax::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fmaxf128_cases() -> Vec<TestCase<op::fmaxf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fmaximumf16_cases() -> Vec<TestCase<op::fmaximumf16::Routine>> {
+    vec![]
+}
+
+fn fmaximumf_cases() -> Vec<TestCase<op::fmaximumf::Routine>> {
+    vec![]
+}
+
+fn fmaximum_cases() -> Vec<TestCase<op::fmaximum::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fmaximumf128_cases() -> Vec<TestCase<op::fmaximumf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fmaximum_numf16_cases() -> Vec<TestCase<op::fmaximum_numf16::Routine>> {
+    vec![]
+}
+
+fn fmaximum_numf_cases() -> Vec<TestCase<op::fmaximum_numf::Routine>> {
+    vec![]
+}
+
+fn fmaximum_num_cases() -> Vec<TestCase<op::fmaximum_num::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fmaximum_numf128_cases() -> Vec<TestCase<op::fmaximum_numf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fminf16_cases() -> Vec<TestCase<op::fminf16::Routine>> {
+    vec![]
+}
+
+fn fminf_cases() -> Vec<TestCase<op::fminf::Routine>> {
+    vec![]
+}
+
+fn fmin_cases() -> Vec<TestCase<op::fmin::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fminf128_cases() -> Vec<TestCase<op::fminf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fminimumf16_cases() -> Vec<TestCase<op::fminimumf16::Routine>> {
+    vec![]
+}
+
+fn fminimumf_cases() -> Vec<TestCase<op::fminimumf::Routine>> {
+    vec![]
+}
+
+fn fminimum_cases() -> Vec<TestCase<op::fminimum::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fminimumf128_cases() -> Vec<TestCase<op::fminimumf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fminimum_numf16_cases() -> Vec<TestCase<op::fminimum_numf16::Routine>> {
+    vec![]
+}
+
+fn fminimum_numf_cases() -> Vec<TestCase<op::fminimum_numf::Routine>> {
+    vec![]
+}
+
+fn fminimum_num_cases() -> Vec<TestCase<op::fminimum_num::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn fminimum_numf128_cases() -> Vec<TestCase<op::fminimum_numf128::Routine>> {
+    vec![]
+}
+
+fn fmod_cases() -> Vec<TestCase<op::fmod::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            // Previous failure with incorrect loop iteration
+            // <https://github.com/rust-lang/libm/pull/469#discussion_r2022337272>
+            ((2.1, 3.123e-320), Some(2.0696e-320)),
+            ((2.1, 2.253547e-318), Some(1.772535e-318)),
+        ],
+    );
+    v
+}
+
+fn fmodf_cases() -> Vec<TestCase<op::fmodf::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            // Previous failure with incorrect loop iteration
+            // <https://github.com/rust-lang/libm/pull/469#discussion_r2022337272>
+            ((2.1, 8.858e-42), Some(8.085e-42)),
+            ((2.1, 6.39164e-40), Some(6.1636e-40)),
+            ((5.5, 6.39164e-40), Some(4.77036e-40)),
+            ((-151.189, 6.39164e-40), Some(-5.64734e-40)),
+        ],
+    );
+    v
+}
+
+#[cfg(f128_enabled)]
+fn fmodf128_cases() -> Vec<TestCase<op::fmodf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn fmodf16_cases() -> Vec<TestCase<op::fmodf16::Routine>> {
+    vec![]
+}
+
+fn frexp_cases() -> Vec<TestCase<op::frexp::Routine>> {
+    vec![]
+}
+
+fn frexpf_cases() -> Vec<TestCase<op::frexpf::Routine>> {
+    vec![]
+}
+
+fn hypot_cases() -> Vec<TestCase<op::hypot::Routine>> {
+    vec![]
+}
+
+fn hypotf_cases() -> Vec<TestCase<op::hypotf::Routine>> {
+    vec![]
+}
+
+fn ilogb_cases() -> Vec<TestCase<op::ilogb::Routine>> {
+    vec![]
+}
+
+fn ilogbf_cases() -> Vec<TestCase<op::ilogbf::Routine>> {
+    vec![]
+}
+
+fn j0_cases() -> Vec<TestCase<op::j0::Routine>> {
+    vec![]
+}
+
+fn j0f_cases() -> Vec<TestCase<op::j0f::Routine>> {
+    vec![]
+}
+
+fn j1_cases() -> Vec<TestCase<op::j1::Routine>> {
+    vec![]
+}
+
+fn j1f_cases() -> Vec<TestCase<op::j1f::Routine>> {
+    vec![]
+}
+
+fn jn_cases() -> Vec<TestCase<op::jn::Routine>> {
+    vec![]
+}
+
+fn jnf_cases() -> Vec<TestCase<op::jnf::Routine>> {
+    vec![]
+}
+
+fn ldexp_cases() -> Vec<TestCase<op::ldexp::Routine>> {
+    vec![]
+}
+
+fn ldexpf_cases() -> Vec<TestCase<op::ldexpf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn ldexpf128_cases() -> Vec<TestCase<op::ldexpf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn ldexpf16_cases() -> Vec<TestCase<op::ldexpf16::Routine>> {
+    vec![]
+}
+
+fn lgamma_cases() -> Vec<TestCase<op::lgamma::Routine>> {
+    vec![]
+}
+
+fn lgamma_r_cases() -> Vec<TestCase<op::lgamma_r::Routine>> {
+    vec![]
+}
+
+fn lgammaf_cases() -> Vec<TestCase<op::lgammaf::Routine>> {
+    vec![]
+}
+
+fn lgammaf_r_cases() -> Vec<TestCase<op::lgammaf_r::Routine>> {
+    vec![]
+}
+
+fn log_cases() -> Vec<TestCase<op::log::Routine>> {
+    vec![]
+}
+
+fn log10_cases() -> Vec<TestCase<op::log10::Routine>> {
+    vec![]
+}
+
+fn log10f_cases() -> Vec<TestCase<op::log10f::Routine>> {
+    vec![]
+}
+
+fn log1p_cases() -> Vec<TestCase<op::log1p::Routine>> {
+    vec![]
+}
+
+fn log1pf_cases() -> Vec<TestCase<op::log1pf::Routine>> {
+    vec![]
+}
+
+fn log2_cases() -> Vec<TestCase<op::log2::Routine>> {
+    vec![]
+}
+
+fn log2f_cases() -> Vec<TestCase<op::log2f::Routine>> {
+    vec![]
+}
+
+fn logf_cases() -> Vec<TestCase<op::logf::Routine>> {
+    vec![]
+}
+
+fn modf_cases() -> Vec<TestCase<op::modf::Routine>> {
+    vec![]
+}
+
+fn modff_cases() -> Vec<TestCase<op::modff::Routine>> {
+    vec![]
+}
+
+fn nextafter_cases() -> Vec<TestCase<op::nextafter::Routine>> {
+    vec![]
+}
+
+fn nextafterf_cases() -> Vec<TestCase<op::nextafterf::Routine>> {
+    vec![]
+}
+
+fn pow_cases() -> Vec<TestCase<op::pow::Routine>> {
+    vec![]
+}
+
+fn powf_cases() -> Vec<TestCase<op::powf::Routine>> {
+    vec![]
+}
+
+fn remainder_cases() -> Vec<TestCase<op::remainder::Routine>> {
+    vec![]
+}
+
+fn remainderf_cases() -> Vec<TestCase<op::remainderf::Routine>> {
+    vec![]
+}
+
+fn remquo_cases() -> Vec<TestCase<op::remquo::Routine>> {
+    vec![]
+}
+
+fn remquof_cases() -> Vec<TestCase<op::remquof::Routine>> {
+    vec![]
+}
+
+fn rint_cases() -> Vec<TestCase<op::rint::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            // Known failure on i586
+            #[cfg(not(x86_no_sse))]
+            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff994000p+38"))),
+            #[cfg(x86_no_sse)]
+            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff998000p+38"))),
+        ],
+    );
+    v
+}
+
+fn rintf_cases() -> Vec<TestCase<op::rintf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn rintf128_cases() -> Vec<TestCase<op::rintf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn rintf16_cases() -> Vec<TestCase<op::rintf16::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn roundf16_cases() -> Vec<TestCase<op::roundf16::Routine>> {
+    vec![]
+}
+
+fn round_cases() -> Vec<TestCase<op::round::Routine>> {
+    vec![]
+}
+
+fn roundf_cases() -> Vec<TestCase<op::roundf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn roundf128_cases() -> Vec<TestCase<op::roundf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn roundevenf16_cases() -> Vec<TestCase<op::roundevenf16::Routine>> {
+    vec![]
+}
+
+fn roundeven_cases() -> Vec<TestCase<op::roundeven::Routine>> {
+    let mut v = vec![];
+    TestCase::append_pairs(
+        &mut v,
+        &[
+            // Known failure on i586
+            #[cfg(not(x86_no_sse))]
+            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff994000p+38"))),
+            #[cfg(x86_no_sse)]
+            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff998000p+38"))),
+        ],
+    );
+    v
+}
+
+fn roundevenf_cases() -> Vec<TestCase<op::roundevenf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn roundevenf128_cases() -> Vec<TestCase<op::roundevenf128::Routine>> {
+    vec![]
+}
+
+fn scalbn_cases() -> Vec<TestCase<op::scalbn::Routine>> {
+    vec![]
+}
+
+fn scalbnf_cases() -> Vec<TestCase<op::scalbnf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn scalbnf128_cases() -> Vec<TestCase<op::scalbnf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn scalbnf16_cases() -> Vec<TestCase<op::scalbnf16::Routine>> {
+    vec![]
+}
+
+fn sin_cases() -> Vec<TestCase<op::sin::Routine>> {
+    vec![]
+}
+
+fn sincos_cases() -> Vec<TestCase<op::sincos::Routine>> {
+    vec![]
+}
+
+fn sincosf_cases() -> Vec<TestCase<op::sincosf::Routine>> {
+    vec![]
+}
+
+fn sinf_cases() -> Vec<TestCase<op::sinf::Routine>> {
+    vec![]
+}
+
+fn sinh_cases() -> Vec<TestCase<op::sinh::Routine>> {
+    vec![]
+}
+
+fn sinhf_cases() -> Vec<TestCase<op::sinhf::Routine>> {
+    vec![]
+}
+
+fn sqrt_cases() -> Vec<TestCase<op::sqrt::Routine>> {
+    vec![]
+}
+
+fn sqrtf_cases() -> Vec<TestCase<op::sqrtf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn sqrtf128_cases() -> Vec<TestCase<op::sqrtf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn sqrtf16_cases() -> Vec<TestCase<op::sqrtf16::Routine>> {
+    vec![]
+}
+
+fn tan_cases() -> Vec<TestCase<op::tan::Routine>> {
+    vec![]
+}
+
+fn tanf_cases() -> Vec<TestCase<op::tanf::Routine>> {
+    vec![]
+}
+
+fn tanh_cases() -> Vec<TestCase<op::tanh::Routine>> {
+    vec![]
+}
+
+fn tanhf_cases() -> Vec<TestCase<op::tanhf::Routine>> {
+    vec![]
+}
+
+fn tgamma_cases() -> Vec<TestCase<op::tgamma::Routine>> {
+    vec![]
+}
+
+fn tgammaf_cases() -> Vec<TestCase<op::tgammaf::Routine>> {
+    vec![]
+}
+
+fn trunc_cases() -> Vec<TestCase<op::trunc::Routine>> {
+    vec![]
+}
+
+fn truncf_cases() -> Vec<TestCase<op::truncf::Routine>> {
+    vec![]
+}
+
+#[cfg(f128_enabled)]
+fn truncf128_cases() -> Vec<TestCase<op::truncf128::Routine>> {
+    vec![]
+}
+
+#[cfg(f16_enabled)]
+fn truncf16_cases() -> Vec<TestCase<op::truncf16::Routine>> {
+    vec![]
+}
+
+fn y0_cases() -> Vec<TestCase<op::y0::Routine>> {
+    vec![]
+}
+
+fn y0f_cases() -> Vec<TestCase<op::y0f::Routine>> {
+    vec![]
+}
+
+fn y1_cases() -> Vec<TestCase<op::y1::Routine>> {
+    vec![]
+}
+
+fn y1f_cases() -> Vec<TestCase<op::y1f::Routine>> {
+    vec![]
+}
+
+fn yn_cases() -> Vec<TestCase<op::yn::Routine>> {
+    vec![]
+}
+
+fn ynf_cases() -> Vec<TestCase<op::ynf::Routine>> {
+    vec![]
+}
+
+pub trait CaseListInput: MathOp + Sized {
+    fn get_cases() -> Vec<TestCase<Self>>;
+}
+
+macro_rules! impl_case_list {
+    (
+        fn_name: $fn_name:ident,
+        attrs: [$($attr:meta),*],
+    ) => {
+        paste::paste! {
+            $(#[$attr])*
+            impl CaseListInput for crate::op::$fn_name::Routine {
+                fn get_cases() -> Vec<TestCase<Self>> {
+                    [< $fn_name _cases >]()
+                }
+            }
+        }
+    };
+}
+
+libm_macros::for_each_function! {
+    callback: impl_case_list,
+}
+
+/// This is the test generator for standalone tests, i.e. those with no basis. For this, it
+/// only extracts tests with a known output.
+pub fn get_test_cases_standalone<Op>(
+    ctx: &CheckCtx,
+) -> impl Iterator<Item = (Op::RustArgs, Op::RustRet)> + use<'_, Op>
+where
+    Op: MathOp + CaseListInput,
+{
+    assert_eq!(ctx.basis, CheckBasis::None);
+    assert_eq!(ctx.gen_kind, GeneratorKind::List);
+    Op::get_cases().into_iter().filter_map(|x| x.output.map(|o| (x.input, o)))
+}
+
+/// Opposite of the above; extract only test cases that don't have a known output, to be run
+/// against a basis.
+pub fn get_test_cases_basis<Op>(
+    ctx: &CheckCtx,
+) -> (impl Iterator<Item = Op::RustArgs> + use<'_, Op>, u64)
+where
+    Op: MathOp + CaseListInput,
+{
+    assert_ne!(ctx.basis, CheckBasis::None);
+    assert_eq!(ctx.gen_kind, GeneratorKind::List);
+
+    let cases = Op::get_cases();
+    let count: u64 = cases.iter().filter(|case| case.output.is_none()).count().try_into().unwrap();
+
+    (cases.into_iter().filter(|x| x.output.is_none()).map(|x| x.input), count)
+}
diff --git a/library/compiler-builtins/libm-test/src/generate/edge_cases.rs b/library/compiler-builtins/libm-test/src/generate/edge_cases.rs
new file mode 100644
index 00000000000..56cc9fa9a70
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/generate/edge_cases.rs
@@ -0,0 +1,310 @@
+//! A generator that checks a handful of cases near infinities, zeros, asymptotes, and NaNs.
+
+use libm::support::{CastInto, Float, Int, MinInt};
+
+use crate::domain::get_domain;
+use crate::generate::KnownSize;
+use crate::op::OpITy;
+use crate::run_cfg::{check_near_count, check_point_count};
+use crate::{BaseName, CheckCtx, FloatExt, FloatTy, MathOp, test_log};
+
+/// Generate a sequence of edge cases, e.g. numbers near zeroes and infinities.
+pub trait EdgeCaseInput<Op> {
+    /// Return the iterator of input tuples plus the exact number of items it yields.
+    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
+}
+
+/// Create a list of values around interesting points (infinities, zeroes, NaNs).
+///
+/// `argnum` selects which argument of `ctx.fn_ident` the values are generated
+/// for (the domain is looked up per argument). Returns the iterator plus the
+/// exact number of values it yields; the list is sorted by bit pattern and
+/// deduplicated before being returned.
+fn float_edge_cases<Op>(
+    ctx: &CheckCtx,
+    argnum: usize,
+) -> (impl Iterator<Item = Op::FTy> + Clone, u64)
+where
+    Op: MathOp,
+{
+    let mut ret = Vec::new();
+    let one = OpITy::<Op>::ONE;
+    let values = &mut ret;
+    let domain = get_domain::<_, i8>(ctx.fn_ident, argnum).unwrap_float();
+    let domain_start = domain.range_start();
+    let domain_end = domain.range_end();
+
+    let check_points = check_point_count(ctx);
+    let near_points = check_near_count(ctx);
+
+    // Check near some notable constants
+    count_up(Op::FTy::ONE, near_points, values);
+    count_up(Op::FTy::ZERO, near_points, values);
+    count_up(Op::FTy::NEG_ONE, near_points, values);
+    count_down(Op::FTy::ONE, near_points, values);
+    count_down(Op::FTy::ZERO, near_points, values);
+    count_down(Op::FTy::NEG_ONE, near_points, values);
+    values.push(Op::FTy::NEG_ZERO);
+
+    // Check values near the extremes and the domain boundaries. (A redundant
+    // duplicate `count_down(domain_end, ...)` call was removed; duplicates are
+    // harmless because of the dedup below, but add no coverage.)
+    count_up(Op::FTy::NEG_INFINITY, near_points, values);
+    count_down(Op::FTy::INFINITY, near_points, values);
+    count_up(domain_start, near_points, values);
+    count_down(domain_start, near_points, values);
+    count_up(domain_end, near_points, values);
+    count_down(domain_end, near_points, values);
+
+    // Check some special values that aren't included in the above ranges
+    values.push(Op::FTy::NAN);
+    values.extend(Op::FTy::consts().iter());
+
+    // Check around the maximum subnormal value
+    let sub_max = Op::FTy::from_bits(Op::FTy::SIG_MASK);
+    count_up(sub_max, near_points, values);
+    count_down(sub_max, near_points, values);
+    count_up(-sub_max, near_points, values);
+    count_down(-sub_max, near_points, values);
+
+    // Check a few values around the subnormal range
+    for shift in (0..Op::FTy::SIG_BITS).step_by(Op::FTy::SIG_BITS as usize / 5) {
+        let v = Op::FTy::from_bits(one << shift);
+        count_up(v, 2, values);
+        count_down(v, 2, values);
+        count_up(-v, 2, values);
+        count_down(-v, 2, values);
+    }
+
+    // Check around asymptotes
+    if let Some(f) = domain.check_points {
+        let iter = f();
+        for x in iter.take(check_points) {
+            count_up(x, near_points, values);
+            count_down(x, near_points, values);
+        }
+    }
+
+    // Some results may overlap so deduplicate the vector to save test cycles.
+    values.sort_by_key(|x| x.to_bits());
+    values.dedup_by_key(|x| x.to_bits());
+
+    let count = ret.len().try_into().unwrap();
+
+    test_log(&format!(
+        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {count} edge cases",
+        gen_kind = ctx.gen_kind,
+        basis = ctx.basis,
+        fn_ident = ctx.fn_ident,
+        arg = argnum + 1,
+        args = ctx.input_count(),
+    ));
+
+    (ret.into_iter(), count)
+}
+
+/// Add `points` values starting at and including `x` and counting up. Uses the smallest possible
+/// increments (1 ULP).
+fn count_up<F: Float>(mut x: F, points: u64, values: &mut Vec<F>) {
+    assert!(!x.is_nan());
+
+    let mut count = 0;
+    while x < F::INFINITY && count < points {
+        values.push(x);
+        x = x.next_up();
+        count += 1;
+    }
+}
+
+/// Add `points` values starting at and including `x` and counting down. Uses the smallest possible
+/// increments (1 ULP).
+fn count_down<F: Float>(mut x: F, points: u64, values: &mut Vec<F>) {
+    assert!(!x.is_nan());
+
+    let mut count = 0;
+    while x > F::NEG_INFINITY && count < points {
+        values.push(x);
+        x = x.next_down();
+        count += 1;
+    }
+}
+
+/// Create a list of values around interesting integer points (min, zero, max).
+///
+/// For `scalbn`/`ldexp`, the integer argument additionally gets values around
+/// the exponent limits of the operation's float type, where those routines
+/// have their trickiest behavior. The list is sorted and deduplicated before
+/// being returned along with its exact length.
+pub fn int_edge_cases<I: Int>(
+    ctx: &CheckCtx,
+    argnum: usize,
+) -> (impl Iterator<Item = I> + Clone, u64)
+where
+    i32: CastInto<I>,
+{
+    let mut values = Vec::new();
+    let near_points = check_near_count(ctx);
+
+    // Check around max/min and zero. (A redundant duplicate `I::ZERO` call was
+    // removed here; duplicates are harmless because of the dedup below, but
+    // add no coverage.)
+    int_count_around(I::MIN, near_points, &mut values);
+    int_count_around(I::MAX, near_points, &mut values);
+    int_count_around(I::ZERO, near_points, &mut values);
+
+    if matches!(ctx.base_name, BaseName::Scalbn | BaseName::Ldexp) {
+        assert_eq!(argnum, 1, "scalbn integer argument should be arg1");
+        let (emax, emin, emin_sn) = match ctx.fn_ident.math_op().float_ty {
+            FloatTy::F16 => {
+                #[cfg(not(f16_enabled))]
+                unreachable!();
+                #[cfg(f16_enabled)]
+                (f16::EXP_MAX, f16::EXP_MIN, f16::EXP_MIN_SUBNORM)
+            }
+            FloatTy::F32 => (f32::EXP_MAX, f32::EXP_MIN, f32::EXP_MIN_SUBNORM),
+            FloatTy::F64 => (f64::EXP_MAX, f64::EXP_MIN, f64::EXP_MIN_SUBNORM),
+            FloatTy::F128 => {
+                #[cfg(not(f128_enabled))]
+                unreachable!();
+                #[cfg(f128_enabled)]
+                (f128::EXP_MAX, f128::EXP_MIN, f128::EXP_MIN_SUBNORM)
+            }
+        };
+
+        // `scalbn`/`ldexp` have their trickiest behavior around exponent limits
+        int_count_around(emax.cast(), near_points, &mut values);
+        int_count_around(emin.cast(), near_points, &mut values);
+        int_count_around(emin_sn.cast(), near_points, &mut values);
+        int_count_around((-emin_sn).cast(), near_points, &mut values);
+
+        // Also check values that cause the maximum possible difference in exponents
+        int_count_around((emax - emin).cast(), near_points, &mut values);
+        int_count_around((emin - emax).cast(), near_points, &mut values);
+        int_count_around((emax - emin_sn).cast(), near_points, &mut values);
+        int_count_around((emin_sn - emax).cast(), near_points, &mut values);
+    }
+
+    values.sort();
+    values.dedup();
+    let count = values.len().try_into().unwrap();
+
+    test_log(&format!(
+        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {count} edge cases",
+        gen_kind = ctx.gen_kind,
+        basis = ctx.basis,
+        fn_ident = ctx.fn_ident,
+        arg = argnum + 1,
+        args = ctx.input_count(),
+    ));
+
+    (values.into_iter(), count)
+}
+
+/// Add `points` values both up and down, starting at and including `x`.
+fn int_count_around<I: Int>(x: I, points: u64, values: &mut Vec<I>) {
+    let mut current = x;
+    for _ in 0..points {
+        values.push(current);
+        current = match current.checked_add(I::ONE) {
+            Some(v) => v,
+            None => break,
+        };
+    }
+
+    current = x;
+    for _ in 0..points {
+        values.push(current);
+        current = match current.checked_sub(I::ONE) {
+            Some(v) => v,
+            None => break,
+        };
+    }
+}
+
+macro_rules! impl_edge_case_input {
+    ($fty:ty) => {
+        impl<Op> EdgeCaseInput<Op> for ($fty,)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
+                let iter0 = iter0.map(|v| (v,));
+                (iter0, steps0)
+            }
+        }
+
+        impl<Op> EdgeCaseInput<Op> for ($fty, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
+                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
+                let iter =
+                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
+                let count = steps0.checked_mul(steps1).unwrap();
+                (iter, count)
+            }
+        }
+
+        impl<Op> EdgeCaseInput<Op> for ($fty, $fty, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
+                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
+                let (iter2, steps2) = float_edge_cases::<Op>(ctx, 2);
+
+                let iter = iter0
+                    .flat_map(move |first| iter1.clone().map(move |second| (first, second)))
+                    .flat_map(move |(first, second)| {
+                        iter2.clone().map(move |third| (first, second, third))
+                    });
+                let count = steps0.checked_mul(steps1).unwrap().checked_mul(steps2).unwrap();
+
+                (iter, count)
+            }
+        }
+
+        impl<Op> EdgeCaseInput<Op> for (i32, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let (iter0, steps0) = int_edge_cases(ctx, 0);
+                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
+
+                let iter =
+                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
+                let count = steps0.checked_mul(steps1).unwrap();
+
+                (iter, count)
+            }
+        }
+
+        impl<Op> EdgeCaseInput<Op> for ($fty, i32)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
+                let (iter1, steps1) = int_edge_cases(ctx, 1);
+
+                let iter =
+                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
+                let count = steps0.checked_mul(steps1).unwrap();
+
+                (iter, count)
+            }
+        }
+    };
+}
+
+#[cfg(f16_enabled)]
+impl_edge_case_input!(f16);
+impl_edge_case_input!(f32);
+impl_edge_case_input!(f64);
+#[cfg(f128_enabled)]
+impl_edge_case_input!(f128);
+
+/// Assemble the edge-case inputs for `Op` from its per-argument generators.
+///
+/// Returns the iterator (wrapped so the claimed length is asserted) and the
+/// exact number of cases it will yield.
+pub fn get_test_cases<Op>(
+    ctx: &CheckCtx,
+) -> (impl Iterator<Item = Op::RustArgs> + Send + use<'_, Op>, u64)
+where
+    Op: MathOp,
+    Op::RustArgs: EdgeCaseInput<Op>,
+{
+    let (iter, count) = Op::RustArgs::get_cases(ctx);
+
+    // Wrap in `KnownSize` so we get an assertion if the count is wrong.
+    (KnownSize::new(iter, count), count)
+}
diff --git a/library/compiler-builtins/libm-test/src/generate/random.rs b/library/compiler-builtins/libm-test/src/generate/random.rs
new file mode 100644
index 00000000000..e8a7ee9057e
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/generate/random.rs
@@ -0,0 +1,125 @@
+use std::env;
+use std::ops::RangeInclusive;
+use std::sync::LazyLock;
+
+use libm::support::Float;
+use rand::distr::{Alphanumeric, StandardUniform};
+use rand::prelude::Distribution;
+use rand::{Rng, SeedableRng};
+use rand_chacha::ChaCha8Rng;
+
+use super::KnownSize;
+use crate::CheckCtx;
+use crate::run_cfg::{int_range, iteration_count};
+
+/// Environment variable used to override the random seed (see [`SEED`]).
+pub(crate) const SEED_ENV: &str = "LIBM_SEED";
+
+/// The seed used for all deterministic random generation in this module. Taken from the
+/// `LIBM_SEED` environment variable if set (must be exactly 32 bytes), otherwise generated
+/// randomly once per process so failures can be reproduced by re-exporting it.
+pub static SEED: LazyLock<[u8; 32]> = LazyLock::new(|| {
+    let s = env::var(SEED_ENV).unwrap_or_else(|_| {
+        let mut rng = rand::rng();
+        (0..32).map(|_| rng.sample(Alphanumeric) as char).collect()
+    });
+
+    s.as_bytes().try_into().unwrap_or_else(|_| {
+        panic!("Seed must be 32 characters, got `{s}`");
+    })
+});
+
+/// Generate a sequence of random values of this type.
+pub trait RandomInput: Sized {
+    /// Return the random test-case iterator along with the exact number of items it will yield.
+    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
+}
+
+/// Generate a sequence of deterministically random floats.
+///
+/// Determinism comes from seeding the RNG with the fixed [`SEED`]; every call restarts the
+/// same stream.
+fn random_floats<F: Float>(count: u64) -> impl Iterator<Item = F>
+where
+    StandardUniform: Distribution<F::Int>,
+{
+    let mut rng = ChaCha8Rng::from_seed(*SEED);
+
+    // Generate integers to get a full range of bitpatterns (including NaNs), then convert back
+    // to the float type.
+    (0..count).map(move |_| F::from_bits(rng.random::<F::Int>()))
+}
+
+/// Generate a sequence of deterministically random `i32`s within a specified range.
+///
+/// Seeded with the same fixed [`SEED`] as `random_floats`, so each call restarts the stream.
+fn random_ints(count: u64, range: RangeInclusive<i32>) -> impl Iterator<Item = i32> {
+    let mut rng = ChaCha8Rng::from_seed(*SEED);
+    (0..count).map(move |_| rng.random_range::<i32, _>(range.clone()))
+}
+
+/// Implement `RandomInput` for argument tuples of a concrete float type: one to three floats,
+/// plus the mixed `(i32, $fty)` and `($fty, i32)` patterns. Multi-argument cases form the
+/// Cartesian product of per-argument streams, so the total is the product of the counts.
+macro_rules! impl_random_input {
+    ($fty:ty) => {
+        impl RandomInput for ($fty,) {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let count = iteration_count(ctx, 0);
+                let iter = random_floats(count).map(|f: $fty| (f,));
+                (iter, count)
+            }
+        }
+
+        impl RandomInput for ($fty, $fty) {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let count0 = iteration_count(ctx, 0);
+                let count1 = iteration_count(ctx, 1);
+                // Cartesian product: the second stream restarts for every item of the first.
+                let iter = random_floats(count0)
+                    .flat_map(move |f1: $fty| random_floats(count1).map(move |f2: $fty| (f1, f2)));
+                (iter, count0 * count1)
+            }
+        }
+
+        impl RandomInput for ($fty, $fty, $fty) {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let count0 = iteration_count(ctx, 0);
+                let count1 = iteration_count(ctx, 1);
+                let count2 = iteration_count(ctx, 2);
+                let iter = random_floats(count0).flat_map(move |f1: $fty| {
+                    random_floats(count1).flat_map(move |f2: $fty| {
+                        random_floats(count2).map(move |f3: $fty| (f1, f2, f3))
+                    })
+                });
+                (iter, count0 * count1 * count2)
+            }
+        }
+
+        impl RandomInput for (i32, $fty) {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let count0 = iteration_count(ctx, 0);
+                let count1 = iteration_count(ctx, 1);
+                // Integer arguments are restricted to a per-function range rather than all bits.
+                let range0 = int_range(ctx, 0);
+                let iter = random_ints(count0, range0)
+                    .flat_map(move |f1: i32| random_floats(count1).map(move |f2: $fty| (f1, f2)));
+                (iter, count0 * count1)
+            }
+        }
+
+        impl RandomInput for ($fty, i32) {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let count0 = iteration_count(ctx, 0);
+                let count1 = iteration_count(ctx, 1);
+                let range1 = int_range(ctx, 1);
+                let iter = random_floats(count0).flat_map(move |f1: $fty| {
+                    random_ints(count1, range1.clone()).map(move |f2: i32| (f1, f2))
+                });
+                (iter, count0 * count1)
+            }
+        }
+    };
+}
+
+// Instantiate `RandomInput` for every supported float width.
+#[cfg(f16_enabled)]
+impl_random_input!(f16);
+impl_random_input!(f32);
+impl_random_input!(f64);
+#[cfg(f128_enabled)]
+impl_random_input!(f128);
+
+/// Create a test case iterator. Also returns the exact number of cases it yields.
+pub fn get_test_cases<RustArgs: RandomInput>(
+    ctx: &CheckCtx,
+) -> (impl Iterator<Item = RustArgs> + Send + use<'_, RustArgs>, u64) {
+    let (iter, count) = RustArgs::get_cases(ctx);
+
+    // Wrap in `KnownSize` so we get an assertion if the count is wrong.
+    (KnownSize::new(iter, count), count)
+}
diff --git a/library/compiler-builtins/libm-test/src/generate/spaced.rs b/library/compiler-builtins/libm-test/src/generate/spaced.rs
new file mode 100644
index 00000000000..bea3f4c7e1b
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/generate/spaced.rs
@@ -0,0 +1,253 @@
+use std::fmt;
+use std::ops::RangeInclusive;
+
+use libm::support::{Float, MinInt};
+
+use crate::domain::get_domain;
+use crate::op::OpITy;
+use crate::run_cfg::{int_range, iteration_count};
+use crate::{CheckCtx, MathOp, linear_ints, logspace};
+
+/// Generate a sequence of inputs that either cover the domain in completeness (for smaller float
+/// types and single argument functions) or provide evenly spaced inputs across the domain with
+/// approximately `u32::MAX` total iterations.
+pub trait SpacedInput<Op> {
+    /// Return the test-case iterator along with the exact number of items it will yield.
+    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
+}
+
+/// Construct an iterator from `logspace` and also calculate the total number of steps expected
+/// for that iterator.
+///
+/// `argnum` selects which argument's domain to sample; `max_steps` caps the number of samples.
+fn logspace_steps<Op>(
+    ctx: &CheckCtx,
+    argnum: usize,
+    max_steps: u64,
+) -> (impl Iterator<Item = Op::FTy> + Clone, u64)
+where
+    Op: MathOp,
+    OpITy<Op>: TryFrom<u64, Error: fmt::Debug>,
+    u64: TryFrom<OpITy<Op>, Error: fmt::Debug>,
+    RangeInclusive<OpITy<Op>>: Iterator,
+{
+    // i8 is a dummy type here, it can be any integer.
+    let domain = get_domain::<Op::FTy, i8>(ctx.fn_ident, argnum).unwrap_float();
+    let start = domain.range_start();
+    let end = domain.range_end();
+
+    // Clamp the requested step count to what the float's integer type can represent.
+    let max_steps = OpITy::<Op>::try_from(max_steps).unwrap_or(OpITy::<Op>::MAX);
+    let (iter, steps) = logspace(start, end, max_steps);
+
+    // `steps` will be <= the original `max_steps`, which is a `u64`.
+    (iter, steps.try_into().unwrap())
+}
+
+/// Dispatch to one of two possible iterator types, as long as both yield the same item type.
+/// (Used below to select between exhaustive and logspace iteration at runtime.)
+enum EitherIter<A, B> {
+    A(A),
+    B(B),
+}
+
+impl<T, A: Iterator<Item = T>, B: Iterator<Item = T>> Iterator for EitherIter<A, B> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            Self::A(iter) => iter.next(),
+            Self::B(iter) => iter.next(),
+        }
+    }
+
+    // Delegate `size_hint` to the active iterator rather than returning the default `(0, None)`.
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        match self {
+            Self::A(iter) => iter.size_hint(),
+            Self::B(iter) => iter.size_hint(),
+        }
+    }
+}
+
+/// Gets the total number of possible values, returning `None` if that number doesn't fit in a
+/// `u64`.
+fn value_count<F: Float>() -> Option<u64>
+where
+    u64: TryFrom<F::Int>,
+{
+    // Bit patterns run from 0 through `F::Int::MAX`, so the count is `MAX + 1` when that fits.
+    let max_bits = u64::try_from(F::Int::MAX).ok()?;
+    max_bits.checked_add(1)
+}
+
+/// Returns an iterator of every possible value of type `F`, produced by walking all of its
+/// bit patterns.
+fn all_values<F: Float>() -> impl Iterator<Item = F>
+where
+    RangeInclusive<F::Int>: Iterator<Item = F::Int>,
+{
+    // Pass the constructor directly instead of through a redundant closure.
+    (F::Int::MIN..=F::Int::MAX).map(F::from_bits)
+}
+
+/// Implement `SpacedInput` for argument tuples of a concrete float type. Each impl picks
+/// exhaustive bit-pattern iteration (`EitherIter::A`) when the type's total value count fits
+/// within the configured iteration budget, and evenly spaced `logspace` sampling
+/// (`EitherIter::B`) otherwise.
+macro_rules! impl_spaced_input {
+    ($fty:ty) => {
+        impl<Op> SpacedInput<Op> for ($fty,)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let max_steps0 = iteration_count(ctx, 0);
+                // `f16` and `f32` can have exhaustive tests.
+                match value_count::<Op::FTy>() {
+                    Some(steps0) if steps0 <= max_steps0 => {
+                        let iter0 = all_values();
+                        let iter0 = iter0.map(|v| (v,));
+                        (EitherIter::A(iter0), steps0)
+                    }
+                    _ => {
+                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
+                        let iter0 = iter0.map(|v| (v,));
+                        (EitherIter::B(iter0), steps0)
+                    }
+                }
+            }
+        }
+
+        impl<Op> SpacedInput<Op> for ($fty, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let max_steps0 = iteration_count(ctx, 0);
+                let max_steps1 = iteration_count(ctx, 1);
+                // `f16` can have exhaustive tests.
+                match value_count::<Op::FTy>() {
+                    Some(count) if count <= max_steps0 && count <= max_steps1 => {
+                        let iter = all_values()
+                            .flat_map(|first| all_values().map(move |second| (first, second)));
+                        (EitherIter::A(iter), count.checked_mul(count).unwrap())
+                    }
+                    _ => {
+                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
+                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
+                        let iter = iter0.flat_map(move |first| {
+                            iter1.clone().map(move |second| (first, second))
+                        });
+                        let count = steps0.checked_mul(steps1).unwrap();
+                        (EitherIter::B(iter), count)
+                    }
+                }
+            }
+        }
+
+        impl<Op> SpacedInput<Op> for ($fty, $fty, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let max_steps0 = iteration_count(ctx, 0);
+                let max_steps1 = iteration_count(ctx, 1);
+                let max_steps2 = iteration_count(ctx, 2);
+                // `f16` can be exhaustively tested if `LIBM_EXTENSIVE_TESTS` is increased.
+                match value_count::<Op::FTy>() {
+                    Some(count)
+                        if count <= max_steps0 && count <= max_steps1 && count <= max_steps2 =>
+                    {
+                        let iter = all_values().flat_map(|first| {
+                            all_values().flat_map(move |second| {
+                                all_values().map(move |third| (first, second, third))
+                            })
+                        });
+                        (EitherIter::A(iter), count.checked_pow(3).unwrap())
+                    }
+                    _ => {
+                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
+                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
+                        let (iter2, steps2) = logspace_steps::<Op>(ctx, 2, max_steps2);
+
+                        let iter = iter0
+                            .flat_map(move |first| iter1.clone().map(move |second| (first, second)))
+                            .flat_map(move |(first, second)| {
+                                iter2.clone().map(move |third| (first, second, third))
+                            });
+                        let count =
+                            steps0.checked_mul(steps1).unwrap().checked_mul(steps2).unwrap();
+
+                        (EitherIter::B(iter), count)
+                    }
+                }
+            }
+        }
+
+        impl<Op> SpacedInput<Op> for (i32, $fty)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let range0 = int_range(ctx, 0);
+                let max_steps0 = iteration_count(ctx, 0);
+                let max_steps1 = iteration_count(ctx, 1);
+                // Only the float argument can be exhaustive; ints always use `linear_ints`.
+                match value_count::<Op::FTy>() {
+                    Some(count1) if count1 <= max_steps1 => {
+                        let (iter0, steps0) = linear_ints(range0, max_steps0);
+                        let iter = iter0
+                            .flat_map(move |first| all_values().map(move |second| (first, second)));
+                        (EitherIter::A(iter), steps0.checked_mul(count1).unwrap())
+                    }
+                    _ => {
+                        let (iter0, steps0) = linear_ints(range0, max_steps0);
+                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
+
+                        let iter = iter0.flat_map(move |first| {
+                            iter1.clone().map(move |second| (first, second))
+                        });
+                        let count = steps0.checked_mul(steps1).unwrap();
+
+                        (EitherIter::B(iter), count)
+                    }
+                }
+            }
+        }
+
+        impl<Op> SpacedInput<Op> for ($fty, i32)
+        where
+            Op: MathOp<RustArgs = Self, FTy = $fty>,
+        {
+            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
+                let max_steps0 = iteration_count(ctx, 0);
+                let range1 = int_range(ctx, 1);
+                let max_steps1 = iteration_count(ctx, 1);
+                match value_count::<Op::FTy>() {
+                    Some(count0) if count0 <= max_steps0 => {
+                        let (iter1, steps1) = linear_ints(range1, max_steps1);
+                        let iter = all_values().flat_map(move |first| {
+                            iter1.clone().map(move |second| (first, second))
+                        });
+                        (EitherIter::A(iter), count0.checked_mul(steps1).unwrap())
+                    }
+                    _ => {
+                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
+                        let (iter1, steps1) = linear_ints(range1, max_steps1);
+
+                        let iter = iter0.flat_map(move |first| {
+                            iter1.clone().map(move |second| (first, second))
+                        });
+                        let count = steps0.checked_mul(steps1).unwrap();
+
+                        (EitherIter::B(iter), count)
+                    }
+                }
+            }
+        }
+    };
+}
+
+// Instantiate `SpacedInput` for every supported float width.
+#[cfg(f16_enabled)]
+impl_spaced_input!(f16);
+impl_spaced_input!(f32);
+impl_spaced_input!(f64);
+#[cfg(f128_enabled)]
+impl_spaced_input!(f128);
+
+/// Create a test case iterator for extensive inputs. Also returns the total test case count.
+///
+/// Note: unlike the `random` and `edge_cases` generators, the iterator is returned as-is
+/// rather than wrapped in `KnownSize`.
+pub fn get_test_cases<Op>(
+    ctx: &CheckCtx,
+) -> (impl Iterator<Item = Op::RustArgs> + Send + use<'_, Op>, u64)
+where
+    Op: MathOp,
+    Op::RustArgs: SpacedInput<Op>,
+{
+    Op::RustArgs::get_cases(ctx)
+}
diff --git a/library/compiler-builtins/libm-test/src/lib.rs b/library/compiler-builtins/libm-test/src/lib.rs
new file mode 100644
index 00000000000..485c01a4782
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/lib.rs
@@ -0,0 +1,105 @@
+#![cfg_attr(f16_enabled, feature(f16))]
+#![cfg_attr(f128_enabled, feature(f128))]
+#![allow(clippy::unusual_byte_groupings)] // sometimes we group by sign_exp_sig
+
+pub mod domain;
+mod f8_impl;
+pub mod generate;
+#[cfg(feature = "build-mpfr")]
+pub mod mpfloat;
+mod num;
+pub mod op;
+mod precision;
+mod run_cfg;
+mod test_traits;
+
+use std::env;
+use std::fs::File;
+use std::io::Write;
+use std::path::PathBuf;
+use std::sync::LazyLock;
+use std::time::SystemTime;
+
+pub use f8_impl::{f8, hf8};
+pub use libm::support::{Float, Int, IntTy, MinInt};
+pub use num::{FloatExt, linear_ints, logspace};
+pub use op::{
+    BaseName, FloatTy, Identifier, MathOp, OpCFn, OpCRet, OpFTy, OpRustArgs, OpRustFn, OpRustRet,
+    Ty,
+};
+pub use precision::{MaybeOverride, SpecialCase, default_ulp};
+use run_cfg::extensive_max_iterations;
+pub use run_cfg::{
+    CheckBasis, CheckCtx, EXTENSIVE_ENV, GeneratorKind, bigint_fuzz_iteration_count,
+    skip_extensive_test,
+};
+pub use test_traits::{CheckOutput, Hex, TupleCall};
+
+/// Result type for tests is usually from `anyhow`. Most times there is no success value to
+/// propagate, so `T` defaults to `()`.
+pub type TestResult<T = (), E = anyhow::Error> = Result<T, E>;
+
+/// True if `EMULATED` is set and nonempty. Used to determine how many iterations to run.
+pub const fn emulated() -> bool {
+    // Unset and set-but-empty are both treated as "not emulated".
+    matches!(option_env!("EMULATED"), Some(s) if !s.is_empty())
+}
+
+/// True if `CI` is set and nonempty.
+pub const fn ci() -> bool {
+    match option_env!("CI") {
+        // A variable that is set but empty does not count as enabled.
+        Some(s) => !s.is_empty(),
+        None => false,
+    }
+}
+
+/// Print to stderr and additionally log it to `target/test-log.txt`. This is useful for saving
+/// output that would otherwise be consumed by the test harness.
+pub fn test_log(s: &str) {
+    // Handle to a file opened in append mode, unless a suitable path can't be determined.
+    static OUTFILE: LazyLock<Option<File>> = LazyLock::new(|| {
+        // If the target directory is overridden, use that environment variable. Otherwise, save
+        // at the default path `{workspace_root}/target`.
+        let target_dir = match env::var("CARGO_TARGET_DIR") {
+            Ok(s) => PathBuf::from(s),
+            Err(_) => {
+                let Ok(x) = env::var("CARGO_MANIFEST_DIR") else {
+                    return None;
+                };
+
+                PathBuf::from(x).parent().unwrap().parent().unwrap().join("target")
+            }
+        };
+        let outfile = target_dir.join("test-log.txt");
+
+        let mut f = File::options()
+            .create(true)
+            .append(true)
+            .open(outfile)
+            .expect("failed to open logfile");
+        let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
+
+        // Record a header with build and host configuration once per test run.
+        writeln!(f, "\n\nTest run at {}", now.as_secs()).unwrap();
+        writeln!(f, "arch: {}", env::consts::ARCH).unwrap();
+        writeln!(f, "os: {}", env::consts::OS).unwrap();
+        writeln!(f, "bits: {}", usize::BITS).unwrap();
+        writeln!(f, "emulated: {}", emulated()).unwrap();
+        writeln!(f, "ci: {}", ci()).unwrap();
+        writeln!(f, "cargo features: {}", env!("CFG_CARGO_FEATURES")).unwrap();
+        writeln!(f, "opt level: {}", env!("CFG_OPT_LEVEL")).unwrap();
+        writeln!(f, "target features: {}", env!("CFG_TARGET_FEATURES")).unwrap();
+        writeln!(f, "extensive iterations {}", extensive_max_iterations()).unwrap();
+
+        Some(f)
+    });
+
+    eprintln!("{s}");
+
+    // `&File` implements `Write`, so writing through the shared reference is fine.
+    if let Some(mut f) = OUTFILE.as_ref() {
+        writeln!(f, "{s}").unwrap();
+    }
+}
diff --git a/library/compiler-builtins/libm-test/src/mpfloat.rs b/library/compiler-builtins/libm-test/src/mpfloat.rs
new file mode 100644
index 00000000000..9b51dc6051d
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/mpfloat.rs
@@ -0,0 +1,603 @@
+//! Interfaces needed to support testing with multi-precision floating point numbers.
+//!
+//! Within this module, the macros implement [`MpOp`] for the `Routine` struct found in each
+//! `libm` function's submodule (`crate::op::<fn_name>::Routine`).
+
+use std::cmp::Ordering;
+
+use rug::Assign;
+pub use rug::Float as MpFloat;
+use rug::az::{self, Az};
+use rug::float::Round::Nearest;
+use rug::ops::{PowAssignRound, RemAssignRound};
+
+use crate::{Float, MathOp};
+
+/// Create a multiple-precision float with the correct number of bits for a concrete float type.
+///
+/// `SIG_BITS + 1` accounts for the implicit (hidden) leading significand bit.
+fn new_mpfloat<F: Float>() -> MpFloat {
+    MpFloat::new(F::SIG_BITS + 1)
+}
+
+/// Set subnormal emulation and convert to a concrete float type.
+///
+/// `ord` is the rounding ordering returned by the preceding MPFR operation; it is fed to
+/// `subnormalize_ieee_round` so the subnormal result rounds correctly.
+fn prep_retval<F: Float>(mp: &mut MpFloat, ord: Ordering) -> F
+where
+    for<'a> &'a MpFloat: az::Cast<F>,
+{
+    mp.subnormalize_ieee_round(ord, Nearest);
+    (&*mp).az::<F>()
+}
+
+/// Structures that represent a float operation.
+pub trait MpOp: MathOp {
+    /// The struct itself should hold any context that can be reused among calls to `run` (allocated
+    /// `MpFloat`s).
+    type MpTy;
+
+    /// Create a new instance.
+    fn new_mp() -> Self::MpTy;
+
+    /// Perform the operation.
+    ///
+    /// Usually this means assigning inputs to cached floats, performing the operation, applying
+    /// subnormal approximation, and converting the result back to concrete values.
+    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet;
+}
+
+/// Implement `MpOp` for functions with a single return value.
+///
+/// The `paste!` blocks concatenate the normalized function name with `_round` to call rug's
+/// rounding-aware method for that operation.
+macro_rules! impl_mp_op {
+    // Matcher for unary functions
+    (
+        fn_name: $fn_name:ident,
+        RustFn: fn($_fty:ty,) -> $_ret:ty,
+        attrs: [$($attr:meta),*],
+        fn_extra: $fn_name_normalized:expr,
+    ) => {
+        paste::paste! {
+            $(#[$attr])*
+            impl MpOp for crate::op::$fn_name::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.assign(input.0);
+                    let ord = this.[< $fn_name_normalized _round >](Nearest);
+                    prep_retval::<Self::RustRet>(this, ord)
+                }
+            }
+        }
+    };
+    // Matcher for binary functions
+    (
+        fn_name: $fn_name:ident,
+        RustFn: fn($_fty:ty, $_fty2:ty,) -> $_ret:ty,
+        attrs: [$($attr:meta),*],
+        fn_extra: $fn_name_normalized:expr,
+    ) => {
+        paste::paste! {
+            $(#[$attr])*
+            impl MpOp for crate::op::$fn_name::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    // The operation is performed in place on the first operand.
+                    let ord = this.0.[< $fn_name_normalized _round >](&this.1, Nearest);
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+                }
+            }
+        }
+    };
+    // Matcher for ternary functions
+    (
+        fn_name: $fn_name:ident,
+        RustFn: fn($_fty:ty, $_fty2:ty, $_fty3:ty,) -> $_ret:ty,
+        attrs: [$($attr:meta),*],
+        fn_extra: $fn_name_normalized:expr,
+    ) => {
+        paste::paste! {
+            $(#[$attr])*
+            impl MpOp for crate::op::$fn_name::Routine {
+                type MpTy = (MpFloat, MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (
+                        new_mpfloat::<Self::FTy>(),
+                        new_mpfloat::<Self::FTy>(),
+                        new_mpfloat::<Self::FTy>(),
+                    )
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    this.2.assign(input.2);
+                    let ord = this.0.[< $fn_name_normalized _round >](&this.1, &this.2, Nearest);
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+                }
+            }
+        }
+    };
+}
+
+// Blanket-implement `MpOp` for everything except the functions listed in `skip`, which need
+// manual implementations below. The skip list lives in a `verify-sorted` region, so keep it
+// sorted with exactly one entry per line.
+libm_macros::for_each_function! {
+    callback: impl_mp_op,
+    emit_types: [RustFn],
+    skip: [
+        // Most of these need a manual implementation
+        // verify-sorted-start
+        ceil,
+        ceilf,
+        ceilf128,
+        ceilf16,
+        copysign,
+        copysignf,
+        copysignf128,
+        copysignf16,
+        fabs,
+        fabsf,
+        fabsf128,
+        fabsf16,
+        floor,
+        floorf,
+        floorf128,
+        floorf16,
+        fmaximum,
+        fmaximumf,
+        fmaximumf128,
+        fmaximumf16,
+        fminimum,
+        fminimumf,
+        fminimumf128,
+        fminimumf16,
+        fmod,
+        fmodf,
+        fmodf128,
+        fmodf16,
+        frexp,
+        frexpf,
+        ilogb,
+        ilogbf,
+        jn,
+        jnf,
+        ldexp,
+        ldexpf,
+        ldexpf128,
+        ldexpf16,
+        lgamma_r,
+        lgammaf_r,
+        modf,
+        modff,
+        nextafter,
+        nextafterf,
+        pow,
+        powf,
+        remquo,
+        remquof,
+        rint,
+        rintf,
+        rintf128,
+        rintf16,
+        round,
+        roundeven,
+        roundevenf,
+        roundevenf128,
+        roundevenf16,
+        roundf,
+        roundf128,
+        roundf16,
+        scalbn,
+        scalbnf,
+        scalbnf128,
+        scalbnf16,
+        sincos,
+        sincosf,
+        trunc,
+        truncf,
+        truncf128,
+        truncf16,
+        yn,
+        ynf,
+        // verify-sorted-end
+    ],
+    fn_extra: match MACRO_FN_NAME {
+        // Remap function names that are different between mpfr and libm
+        expm1 | expm1f => exp_m1,
+        fabs | fabsf => abs,
+        fdim | fdimf | fdimf16 | fdimf128 => positive_diff,
+        fma | fmaf | fmaf128 => mul_add,
+        fmax | fmaxf | fmaxf16 | fmaxf128 |
+        fmaximum_num | fmaximum_numf | fmaximum_numf16 | fmaximum_numf128 => max,
+        fmin | fminf | fminf16 | fminf128 |
+        fminimum_num | fminimum_numf | fminimum_numf16 | fminimum_numf128 => min,
+        lgamma | lgammaf => ln_gamma,
+        log | logf => ln,
+        log1p | log1pf => ln_1p,
+        tgamma | tgammaf => gamma,
+        _ => MACRO_FN_NAME_NORMALIZED
+    }
+}
+
+/// Implement unary functions that don't have a `_round` version
+macro_rules! impl_no_round {
+    // Unary matcher
+    ($($fn_name:ident => $rug_name:ident;)*) => {
+        paste::paste! {
+            $( impl_no_round!{ @inner_unary $fn_name, $rug_name } )*
+        }
+    };
+
+    (@inner_unary $fn_name:ident, $rug_name:ident) => {
+        impl MpOp for crate::op::$fn_name::Routine {
+            type MpTy = MpFloat;
+
+            fn new_mp() -> Self::MpTy {
+                new_mpfloat::<Self::FTy>()
+            }
+
+            fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                this.assign(input.0);
+                this.$rug_name();
+                // No rounding ordering is produced by these in-place methods, so pass
+                // `Ordering::Equal` to the subnormalization step.
+                prep_retval::<Self::RustRet>(this, Ordering::Equal)
+            }
+        }
+    };
+}
+
+// Map libm unary functions to rug's in-place equivalents.
+impl_no_round! {
+    ceil => ceil_mut;
+    ceilf => ceil_mut;
+    fabs => abs_mut;
+    fabsf => abs_mut;
+    floor => floor_mut;
+    floorf => floor_mut;
+    rint => round_even_mut; // FIXME: respect rounding mode
+    rintf => round_even_mut; // FIXME: respect rounding mode
+    round => round_mut;
+    roundeven => round_even_mut;
+    roundevenf => round_even_mut;
+    roundf => round_mut;
+    trunc => trunc_mut;
+    truncf => trunc_mut;
+}
+
+#[cfg(f16_enabled)]
+impl_no_round! {
+    ceilf16 => ceil_mut;
+    fabsf16 => abs_mut;
+    floorf16 => floor_mut;
+    rintf16 => round_even_mut; // FIXME: respect rounding mode
+    roundf16 => round_mut;
+    roundevenf16 => round_even_mut;
+    truncf16 => trunc_mut;
+}
+
+#[cfg(f128_enabled)]
+impl_no_round! {
+    ceilf128 => ceil_mut;
+    fabsf128 => abs_mut;
+    floorf128 => floor_mut;
+    rintf128 => round_even_mut; // FIXME: respect rounding mode
+    roundf128 => round_mut;
+    roundevenf128 => round_even_mut;
+    truncf128 => trunc_mut;
+}
+
+/// Some functions are difficult to do in a generic way. Implement them here.
+/// Covers `modf`, `pow`, `frexp`, `ilogb`, `jn`, `sincos`, `remquo`, and `yn` for one
+/// concrete float type, selected by `$suffix` (e.g. `""` for `f64`, `"f"` for `f32`).
+macro_rules! impl_op_for_ty {
+    ($fty:ty, $suffix:literal) => {
+        paste::paste! {
+            impl MpOp for crate::op::[<modf $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(&this.0);
+                    let (ord0, ord1) = this.0.trunc_fract_round(&mut this.1, Nearest);
+                    // NOTE(review): returns `(this.1, this.0)` paired with `(ord0, ord1)`;
+                    // assumed to match rug's output placement for `trunc_fract_round` — verify.
+                    (
+                        prep_retval::<Self::FTy>(&mut this.1, ord0),
+                        prep_retval::<Self::FTy>(&mut this.0, ord1),
+                    )
+                }
+            }
+
+            impl MpOp for crate::op::[<pow $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    let ord = this.0.pow_assign_round(&this.1, Nearest);
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+                }
+            }
+
+            impl MpOp for crate::op::[<frexp $suffix>]::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.assign(input.0);
+                    let exp = this.frexp_mut();
+                    (prep_retval::<Self::FTy>(this, Ordering::Equal), exp)
+                }
+            }
+
+            impl MpOp for crate::op::[<ilogb $suffix>]::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.assign(input.0);
+
+                    // `get_exp` follows `frexp` for `0.5 <= |m| < 1.0`. Adjust the exponent by
+                    // one to scale the significand to `1.0 <= |m| < 2.0`.
+                    this.get_exp().map(|v| v - 1).unwrap_or_else(|| {
+                        if this.is_infinite() {
+                            i32::MAX
+                        } else {
+                            // Zero or NaN
+                            i32::MIN
+                        }
+                    })
+                }
+            }
+
+            impl MpOp for crate::op::[<jn $suffix>]::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    let (n, x) = input;
+                    this.assign(x);
+                    let ord = this.jn_round(n, Nearest);
+                    prep_retval::<Self::FTy>(this, ord)
+                }
+            }
+
+            impl MpOp for crate::op::[<sincos $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    // `this.1` is an output slot for `sin_cos_round`; clear it first.
+                    this.1.assign(0.0);
+                    let (sord, cord) = this.0.sin_cos_round(&mut this.1, Nearest);
+                    (
+                        prep_retval::<Self::FTy>(&mut this.0, sord),
+                        prep_retval::<Self::FTy>(&mut this.1, cord)
+                    )
+                }
+            }
+
+            impl MpOp for crate::op::[<remquo $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (
+                        new_mpfloat::<Self::FTy>(),
+                        new_mpfloat::<Self::FTy>(),
+                    )
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    let (ord, q) = this.0.remainder_quo31_round(&this.1, Nearest);
+                    (prep_retval::<Self::FTy>(&mut this.0, ord), q)
+                }
+            }
+
+            impl MpOp for crate::op::[<yn $suffix>]::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    let (n, x) = input;
+                    this.assign(x);
+                    let ord = this.yn_round(n, Nearest);
+                    prep_retval::<Self::FTy>(this, ord)
+                }
+            }
+        }
+    };
+}
+
+/// Version of `impl_op_for_ty` with only functions that have `f16` and `f128` implementations.
+macro_rules! impl_op_for_ty_all {
+    ($fty:ty, $suffix:literal) => {
+        paste::paste! {
+            // `[<name $suffix>]` pastes the suffix onto the base name, e.g.
+            // `copysign` + `"f"` -> `copysignf`.
+            impl MpOp for crate::op::[<copysign $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    this.0.copysign_mut(&this.1);
+                    // `copysign` is exact, so there is no rounding direction to report.
+                    prep_retval::<Self::RustRet>(&mut this.0, Ordering::Equal)
+                }
+            }
+
+            impl MpOp for crate::op::[<fmod $suffix>]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    // NOTE(review): relies on rug's `%` (`rem_assign_round`) following C
+                    // `fmod` (truncated) semantics rather than IEEE `remainder` — confirm
+                    // against the rug/MPFR docs.
+                    let ord = this.0.rem_assign_round(&this.1, Nearest);
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+
+                }
+            }
+
+            impl MpOp for crate::op::[< fmaximum $suffix >]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    // `fmaximum` must propagate NaN; MPFR's `max` presumably returns the
+                    // non-NaN operand, so handle the NaN case explicitly first.
+                    let ord = if this.0.is_nan() || this.1.is_nan() {
+                        this.0.assign($fty::NAN);
+                        Ordering::Equal
+                    } else {
+                        this.0.max_round(&this.1, Nearest)
+                    };
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+                }
+            }
+
+            impl MpOp for crate::op::[< fminimum $suffix >]::Routine {
+                type MpTy = (MpFloat, MpFloat);
+
+                fn new_mp() -> Self::MpTy {
+                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.0.assign(input.0);
+                    this.1.assign(input.1);
+                    // Same NaN-propagation adjustment as `fmaximum` above.
+                    let ord = if this.0.is_nan() || this.1.is_nan() {
+                        this.0.assign($fty::NAN);
+                        Ordering::Equal
+                    } else {
+                        this.0.min_round(&this.1, Nearest)
+                    };
+                    prep_retval::<Self::RustRet>(&mut this.0, ord)
+                }
+            }
+
+            // `ldexp` and `scalbn` are the same for binary floating point, so just forward all
+            // methods.
+            impl MpOp for crate::op::[<ldexp $suffix>]::Routine {
+                type MpTy = <crate::op::[<scalbn $suffix>]::Routine as MpOp>::MpTy;
+
+                fn new_mp() -> Self::MpTy {
+                    <crate::op::[<scalbn $suffix>]::Routine as MpOp>::new_mp()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    <crate::op::[<scalbn $suffix>]::Routine as MpOp>::run(this, input)
+                }
+            }
+
+            impl MpOp for crate::op::[<scalbn $suffix>]::Routine {
+                type MpTy = MpFloat;
+
+                fn new_mp() -> Self::MpTy {
+                    new_mpfloat::<Self::FTy>()
+                }
+
+                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+                    this.assign(input.0);
+                    // Shifting an MPFR float left by `n` multiplies by `2^n`, which is
+                    // exactly `scalbn` for binary formats.
+                    *this <<= input.1;
+                    prep_retval::<Self::FTy>(this, Ordering::Equal)
+                }
+            }
+        }
+    };
+}
+
+// Ops that only have `f32`/`f64` variants.
+impl_op_for_ty!(f32, "f");
+impl_op_for_ty!(f64, "");
+
+// Ops that also have `f16`/`f128` variants; those types are gated on build-time cfgs.
+#[cfg(f16_enabled)]
+impl_op_for_ty_all!(f16, "f16");
+impl_op_for_ty_all!(f32, "f");
+impl_op_for_ty_all!(f64, "");
+#[cfg(f128_enabled)]
+impl_op_for_ty_all!(f128, "f128");
+
+// `lgamma_r` is not a simple suffix so we can't use the above macro.
+impl MpOp for crate::op::lgamma_r::Routine {
+    type MpTy = MpFloat;
+
+    fn new_mp() -> Self::MpTy {
+        new_mpfloat::<Self::FTy>()
+    }
+
+    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+        this.assign(input.0);
+        // `ln_abs_gamma_round` computes ln|Γ(x)| in place and returns the sign of Γ(x)
+        // separately, matching the C `lgamma_r` contract.
+        let (sign, ord) = this.ln_abs_gamma_round(Nearest);
+        let ret = prep_retval::<Self::FTy>(this, ord);
+        // The sign is reported as an `int` alongside the result, as in C's out-parameter.
+        (ret, sign as i32)
+    }
+}
+
+// Duplicate of the `lgamma_r` impl above for the `f32` routine; the `_r` suffix
+// does not fit the suffix-pasting macro.
+impl MpOp for crate::op::lgammaf_r::Routine {
+    type MpTy = MpFloat;
+
+    fn new_mp() -> Self::MpTy {
+        new_mpfloat::<Self::FTy>()
+    }
+
+    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
+        this.assign(input.0);
+        // ln|Γ(x)| plus the separately-returned sign of Γ(x).
+        let (sign, ord) = this.ln_abs_gamma_round(Nearest);
+        let ret = prep_retval::<Self::FTy>(this, ord);
+        (ret, sign as i32)
+    }
+}
+
+/* stub implementations so we don't need to special case them */
+
+// Panics if ever invoked; `nextafter` must not be routed through the MPFR basis.
+impl MpOp for crate::op::nextafter::Routine {
+    type MpTy = MpFloat;
+
+    fn new_mp() -> Self::MpTy {
+        unimplemented!("nextafter does not yet have a MPFR operation");
+    }
+
+    fn run(_this: &mut Self::MpTy, _input: Self::RustArgs) -> Self::RustRet {
+        unimplemented!("nextafter does not yet have a MPFR operation");
+    }
+}
+
+// Same panicking stub as `nextafter` above, for the `f32` routine.
+impl MpOp for crate::op::nextafterf::Routine {
+    type MpTy = MpFloat;
+
+    fn new_mp() -> Self::MpTy {
+        unimplemented!("nextafter does not yet have a MPFR operation");
+    }
+
+    fn run(_this: &mut Self::MpTy, _input: Self::RustArgs) -> Self::RustRet {
+        unimplemented!("nextafter does not yet have a MPFR operation");
+    }
+}
diff --git a/library/compiler-builtins/libm-test/src/num.rs b/library/compiler-builtins/libm-test/src/num.rs
new file mode 100644
index 00000000000..eed941423d3
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/num.rs
@@ -0,0 +1,529 @@
+//! Helpful numeric operations.
+
+use std::cmp::min;
+use std::ops::RangeInclusive;
+
+use libm::support::Float;
+
+use crate::{Int, MinInt};
+
+/// Extension to `libm`'s `Float` trait with methods that are useful for tests but not
+/// needed in `libm` itself.
+pub trait FloatExt: Float {
+    /// The minimum subnormal number.
+    const TINY_BITS: Self::Int = Self::Int::ONE;
+
+    /// Retrieve additional constants for this float type.
+    fn consts() -> Consts<Self> {
+        Consts::new()
+    }
+
+    /// Increment by one ULP, saturating at infinity.
+    fn next_up(self) -> Self {
+        let bits = self.to_bits();
+        // NaN and +inf are fixed points.
+        if self.is_nan() || bits == Self::INFINITY.to_bits() {
+            return self;
+        }
+
+        let abs = self.abs().to_bits();
+        let next_bits = if abs == Self::Int::ZERO {
+            // Next up from 0 is the smallest subnormal
+            Self::TINY_BITS
+        } else if bits == abs {
+            // Positive: counting up is more positive
+            bits + Self::Int::ONE
+        } else {
+            // Negative: counting down is more positive
+            bits - Self::Int::ONE
+        };
+        Self::from_bits(next_bits)
+    }
+
+    /// A faster way to effectively call `next_up` `n` times.
+    fn n_up(self, n: Self::Int) -> Self {
+        let bits = self.to_bits();
+        if self.is_nan() || bits == Self::INFINITY.to_bits() || n == Self::Int::ZERO {
+            return self;
+        }
+
+        let abs = self.abs().to_bits();
+        // Sign bit clear <=> the raw bits equal the absolute-value bits.
+        let is_positive = bits == abs;
+        // Moving up from a negative value by more than its magnitude passes through zero.
+        let crosses_zero = !is_positive && n > abs;
+        let inf_bits = Self::INFINITY.to_bits();
+
+        let next_bits = if abs == Self::Int::ZERO {
+            // From ±0: land directly on the n-th positive value, saturating at +inf.
+            min(n, inf_bits)
+        } else if crosses_zero {
+            // Spend `abs` steps reaching zero; the remainder continue into positives.
+            min(n - abs, inf_bits)
+        } else if is_positive {
+            // Positive, counting up is more positive but this may overflow
+            match bits.checked_add(n) {
+                Some(v) if v >= inf_bits => inf_bits,
+                Some(v) => v,
+                None => inf_bits,
+            }
+        } else {
+            // Negative, counting down is more positive
+            bits - n
+        };
+        Self::from_bits(next_bits)
+    }
+
+    /// Decrement by one ULP, saturating at negative infinity.
+    fn next_down(self) -> Self {
+        let bits = self.to_bits();
+        // NaN and -inf are fixed points.
+        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
+            return self;
+        }
+
+        let abs = self.abs().to_bits();
+        let next_bits = if abs == Self::Int::ZERO {
+            // Next up from 0 is the smallest negative subnormal
+            Self::TINY_BITS | Self::SIGN_MASK
+        } else if bits == abs {
+            // Positive: counting down is more negative
+            bits - Self::Int::ONE
+        } else {
+            // Negative: counting up is more negative
+            bits + Self::Int::ONE
+        };
+        Self::from_bits(next_bits)
+    }
+
+    /// A faster way to effectively call `next_down` `n` times.
+    fn n_down(self, n: Self::Int) -> Self {
+        let bits = self.to_bits();
+        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() || n == Self::Int::ZERO {
+            return self;
+        }
+
+        let abs = self.abs().to_bits();
+        let is_positive = bits == abs;
+        // Mirror of `n_up`: descending from a positive value may pass through zero.
+        let crosses_zero = is_positive && n > abs;
+        let inf_bits = Self::INFINITY.to_bits();
+        let ninf_bits = Self::NEG_INFINITY.to_bits();
+
+        let next_bits = if abs == Self::Int::ZERO {
+            // From ±0: land on the n-th negative value, saturating at -inf.
+            min(n, inf_bits) | Self::SIGN_MASK
+        } else if crosses_zero {
+            min(n - abs, inf_bits) | Self::SIGN_MASK
+        } else if is_positive {
+            // Positive, counting down is more negative
+            bits - n
+        } else {
+            // Negative, counting up is more negative but this may overflow
+            match bits.checked_add(n) {
+                Some(v) if v > ninf_bits => ninf_bits,
+                Some(v) => v,
+                None => ninf_bits,
+            }
+        };
+        Self::from_bits(next_bits)
+    }
+}
+
+// Blanket impl: every `Float` automatically gets the test-only extension methods.
+impl<F> FloatExt for F where F: Float {}
+
+/// Extra constants that are useful for tests.
+#[derive(Debug, Clone, Copy)]
+pub struct Consts<F> {
+    /// The default quiet NaN, which is also the minimum quiet NaN.
+    pub pos_nan: F,
+    /// The default quiet NaN with negative sign.
+    pub neg_nan: F,
+    /// NaN with maximum (unsigned) significand to be a quiet NaN. The significand is saturated.
+    pub max_qnan: F,
+    /// NaN with minimum (unsigned) significand to be a signaling NaN.
+    pub min_snan: F,
+    /// NaN with maximum (unsigned) significand to be a signaling NaN.
+    pub max_snan: F,
+    /// `max_qnan` with negative sign.
+    pub neg_max_qnan: F,
+    /// `min_snan` with negative sign.
+    pub neg_min_snan: F,
+    /// `max_snan` with negative sign.
+    pub neg_max_snan: F,
+}
+
+impl<F: FloatExt> Consts<F> {
+    fn new() -> Self {
+        // The top significand bit is treated as the quiet bit, per the usual
+        // IEEE-754 binary-format convention: set = quiet NaN, clear = signaling.
+        let top_sigbit_mask = F::Int::ONE << (F::SIG_BITS - 1);
+        let pos_nan = F::EXP_MASK | top_sigbit_mask;
+        let max_qnan = F::EXP_MASK | F::SIG_MASK;
+        let min_snan = F::EXP_MASK | F::Int::ONE;
+        // All significand bits set except the quiet bit.
+        let max_snan = (F::EXP_MASK | F::SIG_MASK) ^ top_sigbit_mask;
+
+        let neg_nan = pos_nan | F::SIGN_MASK;
+        let neg_max_qnan = max_qnan | F::SIGN_MASK;
+        let neg_min_snan = min_snan | F::SIGN_MASK;
+        let neg_max_snan = max_snan | F::SIGN_MASK;
+
+        Self {
+            pos_nan: F::from_bits(pos_nan),
+            neg_nan: F::from_bits(neg_nan),
+            max_qnan: F::from_bits(max_qnan),
+            min_snan: F::from_bits(min_snan),
+            max_snan: F::from_bits(max_snan),
+            neg_max_qnan: F::from_bits(neg_max_qnan),
+            neg_min_snan: F::from_bits(neg_min_snan),
+            neg_max_snan: F::from_bits(neg_max_snan),
+        }
+    }
+
+    /// Iterate over all of the NaN constants.
+    pub fn iter(self) -> impl Iterator<Item = F> {
+        // Destructure so we get unused warnings if we forget a list entry.
+        let Self {
+            pos_nan,
+            neg_nan,
+            max_qnan,
+            min_snan,
+            max_snan,
+            neg_max_qnan,
+            neg_min_snan,
+            neg_max_snan,
+        } = self;
+
+        [pos_nan, neg_nan, max_qnan, min_snan, max_snan, neg_max_qnan, neg_min_snan, neg_max_snan]
+            .into_iter()
+    }
+}
+
+/// Return the number of steps between two floats, returning `None` if either input is NaN.
+///
+/// This is the number of steps needed for `n_up` or `n_down` to go between values. Infinities
+/// are treated the same as those functions (will return the nearest finite value), and only one
+/// of `-0` or `+0` is counted. It does not matter which value is greater.
+pub fn ulp_between<F: Float>(x: F, y: F) -> Option<F::Int> {
+    let a = as_ulp_steps(x)?;
+    let b = as_ulp_steps(y)?;
+    // Distance between the signed step counts; `abs_diff` gives the unsigned magnitude.
+    Some(a.abs_diff(b))
+}
+
+/// Return the (signed) number of steps from zero to `x`, or `None` if `x` is NaN.
+fn as_ulp_steps<F: Float>(x: F) -> Option<F::SignedInt> {
+    let s = x.to_bits_signed();
+    let val = if s >= F::SignedInt::ZERO {
+        // each increment from `s = 0` is one step up from `x = 0.0`
+        s
+    } else {
+        // each increment from `s = F::SignedInt::MIN` is one step down from `x = -0.0`
+        F::SignedInt::MIN - s
+    };
+
+    // If `x` is NaN, return `None`
+    (!x.is_nan()).then_some(val)
+}
+
+/// An iterator that returns floats with linearly spaced integer representations, which translates
+/// to logarithmic spacing of their values.
+///
+/// Note that this tends to skip negative zero, so that needs to be checked explicitly.
+///
+/// Returns `(iterator, iterator_length)`.
+///
+/// Panics if either bound is NaN, if `end < start`, or if `steps < 2`.
+pub fn logspace<F: FloatExt>(
+    start: F,
+    end: F,
+    steps: F::Int,
+) -> (impl Iterator<Item = F> + Clone, F::Int)
+where
+    RangeInclusive<F::Int>: Iterator,
+{
+    assert!(!start.is_nan());
+    assert!(!end.is_nan());
+    assert!(end >= start);
+
+    // From here on, `steps` counts intervals rather than points (one fewer).
+    let steps = steps.checked_sub(F::Int::ONE).expect("`steps` must be at least 2");
+    let between = ulp_between(start, end).expect("`start` or `end` is NaN");
+    // Integer division floors, so the final emitted value may undershoot `end`.
+    let spacing = (between / steps).max(F::Int::ONE);
+    let steps = steps.min(between); // At maximum, one step per ULP
+
+    let mut x = start;
+    (
+        (F::Int::ZERO..=steps).map(move |_| {
+            let ret = x;
+            x = x.n_up(spacing);
+            ret
+        }),
+        steps + F::Int::ONE,
+    )
+}
+
+/// Returns an iterator of up to `steps` integers evenly distributed.
+///
+/// Panics if `steps < 2` (the interval count would underflow or divide by zero).
+pub fn linear_ints(
+    range: RangeInclusive<i32>,
+    steps: u64,
+) -> (impl Iterator<Item = i32> + Clone, u64) {
+    // `steps` counts intervals from here on (one fewer than points).
+    let steps = steps.checked_sub(1).unwrap();
+    let between = u64::from(range.start().abs_diff(*range.end()));
+    // NOTE(review): `spacing` can exceed `i32::MAX` for very wide ranges with few
+    // steps (e.g. the full `i32` range with `steps == 2`), which panics here —
+    // confirm callers never request that combination.
+    let spacing = i32::try_from((between / steps).max(1)).unwrap();
+    let steps = steps.min(between);
+    let mut x: i32 = *range.start();
+    (
+        (0..=steps).map(move |_| {
+            let res = x;
+            // Wrapping add to avoid panic on last item (where `x` could overflow past i32::MAX as
+            // there is no next item).
+            x = x.wrapping_add(spacing);
+            res
+        }),
+        steps + 1,
+    )
+}
+
+#[cfg(test)]
+mod tests {
+    use std::cmp::max;
+
+    use super::*;
+    use crate::f8;
+
+    // `f8` has only 256 bit patterns, so these tests can exhaustively cover every value.
+
+    #[test]
+    fn test_next_up_down() {
+        for (i, v) in f8::ALL.into_iter().enumerate() {
+            let down = v.next_down().to_bits();
+            let up = v.next_up().to_bits();
+
+            if i == 0 {
+                assert_eq!(down, f8::NEG_INFINITY.to_bits(), "{i} next_down({v:#010b})");
+            } else {
+                // Stepping down from +0 skips -0 and lands on the smallest negative subnormal.
+                let expected =
+                    if v == f8::ZERO { 1 | f8::SIGN_MASK } else { f8::ALL[i - 1].to_bits() };
+                assert_eq!(down, expected, "{i} next_down({v:#010b})");
+            }
+
+            if i == f8::ALL_LEN - 1 {
+                assert_eq!(up, f8::INFINITY.to_bits(), "{i} next_up({v:#010b})");
+            } else {
+                // Stepping up from -0 skips +0 and lands on the smallest positive subnormal.
+                let expected = if v == f8::NEG_ZERO { 1 } else { f8::ALL[i + 1].to_bits() };
+                assert_eq!(up, expected, "{i} next_up({v:#010b})");
+            }
+        }
+    }
+
+    #[test]
+    fn test_next_up_down_inf_nan() {
+        assert_eq!(f8::NEG_INFINITY.next_up().to_bits(), f8::ALL[0].to_bits(),);
+        assert_eq!(f8::NEG_INFINITY.next_down().to_bits(), f8::NEG_INFINITY.to_bits(),);
+        assert_eq!(f8::INFINITY.next_down().to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits(),);
+        assert_eq!(f8::INFINITY.next_up().to_bits(), f8::INFINITY.to_bits(),);
+        assert_eq!(f8::NAN.next_up().to_bits(), f8::NAN.to_bits(),);
+        assert_eq!(f8::NAN.next_down().to_bits(), f8::NAN.to_bits(),);
+    }
+
+    #[test]
+    fn test_n_up_down_quick() {
+        assert_eq!(f8::ALL[0].n_up(4).to_bits(), f8::ALL[4].to_bits(),);
+        assert_eq!(
+            f8::ALL[f8::ALL_LEN - 1].n_down(4).to_bits(),
+            f8::ALL[f8::ALL_LEN - 5].to_bits(),
+        );
+
+        // Check around zero
+        assert_eq!(f8::from_bits(0b0).n_up(7).to_bits(), 0b0_0000_111);
+        assert_eq!(f8::from_bits(0b0).n_down(7).to_bits(), 0b1_0000_111);
+
+        // Check across zero
+        assert_eq!(f8::from_bits(0b1_0000_111).n_up(8).to_bits(), 0b0_0000_001);
+        assert_eq!(f8::from_bits(0b0_0000_111).n_down(8).to_bits(), 0b1_0000_001);
+    }
+
+    #[test]
+    fn test_n_up_down_one() {
+        // Verify that `n_up(1)` and `n_down(1)` are the same as `next_up()` and next_down()`.`
+        for i in 0..u8::MAX {
+            let v = f8::from_bits(i);
+            assert_eq!(v.next_up().to_bits(), v.n_up(1).to_bits());
+            assert_eq!(v.next_down().to_bits(), v.n_down(1).to_bits());
+        }
+    }
+
+    #[test]
+    fn test_n_up_down_inf_nan_zero() {
+        assert_eq!(f8::NEG_INFINITY.n_up(1).to_bits(), f8::ALL[0].to_bits());
+        assert_eq!(f8::NEG_INFINITY.n_up(239).to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits());
+        assert_eq!(f8::NEG_INFINITY.n_up(240).to_bits(), f8::INFINITY.to_bits());
+        assert_eq!(f8::NEG_INFINITY.n_down(u8::MAX).to_bits(), f8::NEG_INFINITY.to_bits());
+
+        assert_eq!(f8::INFINITY.n_down(1).to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits());
+        assert_eq!(f8::INFINITY.n_down(239).to_bits(), f8::ALL[0].to_bits());
+        assert_eq!(f8::INFINITY.n_down(240).to_bits(), f8::NEG_INFINITY.to_bits());
+        assert_eq!(f8::INFINITY.n_up(u8::MAX).to_bits(), f8::INFINITY.to_bits());
+
+        assert_eq!(f8::NAN.n_up(u8::MAX).to_bits(), f8::NAN.to_bits());
+        assert_eq!(f8::NAN.n_down(u8::MAX).to_bits(), f8::NAN.to_bits());
+
+        assert_eq!(f8::ZERO.n_down(1).to_bits(), f8::TINY_BITS | f8::SIGN_MASK);
+        assert_eq!(f8::NEG_ZERO.n_up(1).to_bits(), f8::TINY_BITS);
+    }
+
+    /// True if the specified range of `f8::ALL` includes both +0 and -0
+    fn crossed_zero(start: usize, end: usize) -> bool {
+        let crossed = &f8::ALL[start..=end];
+        crossed.iter().any(|f| f8::eq_repr(*f, f8::ZERO))
+            && crossed.iter().any(|f| f8::eq_repr(*f, f8::NEG_ZERO))
+    }
+
+    #[test]
+    fn test_n_up_down() {
+        for (i, v) in f8::ALL.into_iter().enumerate() {
+            for n in 0..f8::ALL_LEN {
+                let down = v.n_down(n as u8).to_bits();
+                let up = v.n_up(n as u8).to_bits();
+
+                if let Some(down_exp_idx) = i.checked_sub(n) {
+                    // No overflow
+                    let mut expected = f8::ALL[down_exp_idx].to_bits();
+                    if n >= 1 && crossed_zero(down_exp_idx, i) {
+                        // If both -0 and +0 are included, we need to adjust our expected value
+                        match down_exp_idx.checked_sub(1) {
+                            Some(v) => expected = f8::ALL[v].to_bits(),
+                            // Saturate to -inf if we are out of values
+                            None => expected = f8::NEG_INFINITY.to_bits(),
+                        }
+                    }
+                    assert_eq!(down, expected, "{i} {n} n_down({v:#010b})");
+                } else {
+                    // Overflow to -inf
+                    assert_eq!(down, f8::NEG_INFINITY.to_bits(), "{i} {n} n_down({v:#010b})");
+                }
+
+                let mut up_exp_idx = i + n;
+                if up_exp_idx < f8::ALL_LEN {
+                    // No overflow
+                    if n >= 1 && up_exp_idx < f8::ALL_LEN && crossed_zero(i, up_exp_idx) {
+                        // If both -0 and +0 are included, we need to adjust our expected value
+                        up_exp_idx += 1;
+                    }
+
+                    let expected = if up_exp_idx >= f8::ALL_LEN {
+                        f8::INFINITY.to_bits()
+                    } else {
+                        f8::ALL[up_exp_idx].to_bits()
+                    };
+
+                    assert_eq!(up, expected, "{i} {n} n_up({v:#010b})");
+                } else {
+                    // Overflow to +inf
+                    assert_eq!(up, f8::INFINITY.to_bits(), "{i} {n} n_up({v:#010b})");
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn test_ulp_between() {
+        for (i, x) in f8::ALL.into_iter().enumerate() {
+            for (j, y) in f8::ALL.into_iter().enumerate() {
+                let ulp = ulp_between(x, y).unwrap();
+                let make_msg = || format!("i: {i} j: {j} x: {x:b} y: {y:b} ulp {ulp}");
+
+                let i_low = min(i, j);
+                let i_hi = max(i, j);
+                let mut expected = u8::try_from(i_hi - i_low).unwrap();
+                // Only one of -0/+0 counts as a step.
+                if crossed_zero(i_low, i_hi) {
+                    expected -= 1;
+                }
+
+                assert_eq!(ulp, expected, "{}", make_msg());
+
+                // Skip if either are zero since `next_{up,down}` will count over it
+                let either_zero = x == f8::ZERO || y == f8::ZERO;
+                if x < y && !either_zero {
+                    assert_eq!(x.n_up(ulp).to_bits(), y.to_bits(), "{}", make_msg());
+                    assert_eq!(y.n_down(ulp).to_bits(), x.to_bits(), "{}", make_msg());
+                } else if !either_zero {
+                    assert_eq!(y.n_up(ulp).to_bits(), x.to_bits(), "{}", make_msg());
+                    assert_eq!(x.n_down(ulp).to_bits(), y.to_bits(), "{}", make_msg());
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn test_ulp_between_inf_nan_zero() {
+        assert_eq!(ulp_between(f8::NEG_INFINITY, f8::INFINITY).unwrap(), f8::ALL_LEN as u8);
+        assert_eq!(ulp_between(f8::INFINITY, f8::NEG_INFINITY).unwrap(), f8::ALL_LEN as u8);
+        assert_eq!(
+            ulp_between(f8::NEG_INFINITY, f8::ALL[f8::ALL_LEN - 1]).unwrap(),
+            f8::ALL_LEN as u8 - 1
+        );
+        assert_eq!(ulp_between(f8::INFINITY, f8::ALL[0]).unwrap(), f8::ALL_LEN as u8 - 1);
+
+        assert_eq!(ulp_between(f8::ZERO, f8::NEG_ZERO).unwrap(), 0);
+        assert_eq!(ulp_between(f8::NAN, f8::ZERO), None);
+        assert_eq!(ulp_between(f8::ZERO, f8::NAN), None);
+    }
+
+    #[test]
+    fn test_logspace() {
+        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x4), 2);
+        let ls: Vec<_> = ls.collect();
+        let exp = [f8::from_bits(0x0), f8::from_bits(0x4)];
+        assert_eq!(ls, exp);
+        assert_eq!(ls.len(), usize::from(count));
+
+        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x4), 3);
+        let ls: Vec<_> = ls.collect();
+        let exp = [f8::from_bits(0x0), f8::from_bits(0x2), f8::from_bits(0x4)];
+        assert_eq!(ls, exp);
+        assert_eq!(ls.len(), usize::from(count));
+
+        // Check that we include all values with no repeats if `steps` exceeds the maximum number
+        // of steps.
+        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x3), 10);
+        let ls: Vec<_> = ls.collect();
+        let exp = [f8::from_bits(0x0), f8::from_bits(0x1), f8::from_bits(0x2), f8::from_bits(0x3)];
+        assert_eq!(ls, exp);
+        assert_eq!(ls.len(), usize::from(count));
+    }
+
+    #[test]
+    fn test_linear_ints() {
+        let (ints, count) = linear_ints(0..=4, 2);
+        let ints: Vec<_> = ints.collect();
+        let exp = [0, 4];
+        assert_eq!(ints, exp);
+        assert_eq!(ints.len(), usize::try_from(count).unwrap());
+
+        let (ints, count) = linear_ints(0..=4, 3);
+        let ints: Vec<_> = ints.collect();
+        let exp = [0, 2, 4];
+        assert_eq!(ints, exp);
+        assert_eq!(ints.len(), usize::try_from(count).unwrap());
+
+        // Check that we include all values with no repeats if `steps` exceeds the maximum number
+        // of steps.
+        let (ints, count) = linear_ints(0x0..=0x3, 10);
+        let ints: Vec<_> = ints.collect();
+        let exp = [0, 1, 2, 3];
+        assert_eq!(ints, exp);
+        assert_eq!(ints.len(), usize::try_from(count).unwrap());
+
+        // Check that there are no panics around `i32::MAX`.
+        let (ints, count) = linear_ints(i32::MAX - 1..=i32::MAX, 5);
+        let ints: Vec<_> = ints.collect();
+        let exp = [i32::MAX - 1, i32::MAX];
+        assert_eq!(ints, exp);
+        assert_eq!(ints.len(), usize::try_from(count).unwrap());
+    }
+
+    #[test]
+    fn test_consts() {
+        let Consts {
+            pos_nan,
+            neg_nan,
+            max_qnan,
+            min_snan,
+            max_snan,
+            neg_max_qnan,
+            neg_min_snan,
+            neg_max_snan,
+        } = f8::consts();
+
+        assert_eq!(pos_nan.to_bits(), 0b0_1111_100);
+        assert_eq!(neg_nan.to_bits(), 0b1_1111_100);
+        assert_eq!(max_qnan.to_bits(), 0b0_1111_111);
+        assert_eq!(min_snan.to_bits(), 0b0_1111_001);
+        assert_eq!(max_snan.to_bits(), 0b0_1111_011);
+        assert_eq!(neg_max_qnan.to_bits(), 0b1_1111_111);
+        assert_eq!(neg_min_snan.to_bits(), 0b1_1111_001);
+        assert_eq!(neg_max_snan.to_bits(), 0b1_1111_011);
+    }
+}
diff --git a/library/compiler-builtins/libm-test/src/op.rs b/library/compiler-builtins/libm-test/src/op.rs
new file mode 100644
index 00000000000..47d72ae58b3
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/op.rs
@@ -0,0 +1,151 @@
+//! Types representing individual functions.
+//!
+//! Each routine gets a module with its name, e.g. `mod sinf { /* ... */ }`. The module
+//! contains a unit struct `Routine` which implements `MathOp`.
+//!
+//! Basically everything could be called a "function" here, so we loosely use the following
+//! terminology:
+//!
+//! - "Function": the math operation that does not have an associated precision. E.g. `f(x) = e^x`,
+//!   `f(x) = log(x)`.
+//! - "Routine": A code implementation of a math operation with a specific precision. E.g. `exp`,
+//!   `expf`, `expl`, `log`, `logf`.
+//! - "Operation" / "Op": Something that relates a routine to a function or is otherwise higher
+//!   level. `Op` is also used as the name for generic parameters since it is terse.
+
+use std::fmt;
+use std::panic::{RefUnwindSafe, UnwindSafe};
+
+pub use shared::{ALL_OPERATIONS, FloatTy, MathOpInfo, Ty};
+
+use crate::{CheckOutput, Float, TupleCall};
+
+// Include the macro crate's shared source directly (by relative path) rather than
+// depending on it as a library, so both crates read the same op tables.
+mod shared {
+    include!("../../libm-macros/src/shared.rs");
+}
+
+/// An enum representing each possible symbol name (`sin`, `sinf`, `sinl`, etc).
+// The variants are filled in by the `function_enum` proc macro; the enum body is
+// intentionally written empty here.
+#[libm_macros::function_enum(BaseName)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum Identifier {}
+
+impl fmt::Display for Identifier {
+    // Display the symbol name itself, delegating to the generated `as_str`.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+/// The name without any type specifier, e.g. `sin` and `sinf` both become `sin`.
+// Variants generated by the `base_name_enum` proc macro.
+#[libm_macros::base_name_enum]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum BaseName {}
+
+impl fmt::Display for BaseName {
+    // Display the base name, delegating to the generated `as_str`.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+/// Attributes ascribed to a `libm` routine including signature, type information,
+/// and naming.
+///
+/// Implemented by the per-function `Routine` unit structs generated below.
+pub trait MathOp {
+    /// The float type used for this operation.
+    type FTy: Float;
+
+    /// The function type representing the signature in a C library.
+    type CFn: Copy;
+
+    /// Arguments passed to the C library function as a tuple. These may include `&mut` return
+    /// values.
+    type CArgs<'a>
+    where
+        Self: 'a;
+
+    /// The type returned by C implementations.
+    type CRet;
+
+    /// The signature of the Rust function as a `fn(...) -> ...` type.
+    type RustFn: Copy + UnwindSafe;
+
+    /// Arguments passed to the Rust library function as a tuple.
+    ///
+    /// The required `TupleCall` bounds ensure this type can be passed either to the C function or
+    /// to the Rust function.
+    type RustArgs: Copy
+        + TupleCall<Self::RustFn, Output = Self::RustRet>
+        + TupleCall<Self::CFn, Output = Self::RustRet>
+        + RefUnwindSafe;
+
+    /// Type returned from the Rust function.
+    type RustRet: CheckOutput<Self::RustArgs>;
+
+    /// The name of this function, including suffix (e.g. `sin`, `sinf`).
+    const IDENTIFIER: Identifier;
+
+    /// The name as a string.
+    const NAME: &'static str = Self::IDENTIFIER.as_str();
+
+    /// The name of the function excluding the type suffix, e.g. `sin` and `sinf` are both `sin`.
+    const BASE_NAME: BaseName = Self::IDENTIFIER.base_name();
+
+    /// The function in `libm` which can be called.
+    const ROUTINE: Self::RustFn;
+}
+
+// Convenience aliases so `<Op as MathOp>::X` can be spelled `OpX<Op>` in bounds.
+/// Access the associated `FTy` type from an op (helper to avoid ambiguous associated types).
+pub type OpFTy<Op> = <Op as MathOp>::FTy;
+/// Access the associated `FTy::Int` type from an op (helper to avoid ambiguous associated types).
+pub type OpITy<Op> = <<Op as MathOp>::FTy as Float>::Int;
+/// Access the associated `CFn` type from an op (helper to avoid ambiguous associated types).
+pub type OpCFn<Op> = <Op as MathOp>::CFn;
+/// Access the associated `CRet` type from an op (helper to avoid ambiguous associated types).
+pub type OpCRet<Op> = <Op as MathOp>::CRet;
+/// Access the associated `RustFn` type from an op (helper to avoid ambiguous associated types).
+pub type OpRustFn<Op> = <Op as MathOp>::RustFn;
+/// Access the associated `RustArgs` type from an op (helper to avoid ambiguous associated types).
+pub type OpRustArgs<Op> = <Op as MathOp>::RustArgs;
+/// Access the associated `RustRet` type from an op (helper to avoid ambiguous associated types).
+pub type OpRustRet<Op> = <Op as MathOp>::RustRet;
+
+// Callback for `for_each_function!`: expands one module per routine containing a
+// `Routine` unit struct with a full `MathOp` impl.
+macro_rules! do_thing {
+    // Matcher for unary functions
+    (
+        fn_name: $fn_name:ident,
+        FTy: $FTy:ty,
+        CFn: $CFn:ty,
+        CArgs: $CArgs:ty,
+        CRet: $CRet:ty,
+        RustFn: $RustFn:ty,
+        RustArgs: $RustArgs:ty,
+        RustRet: $RustRet:ty,
+        attrs: [$($attr:meta),*],
+
+    ) => {
+        paste::paste! {
+            $(#[$attr])*
+            pub mod $fn_name {
+                use super::*;
+                pub struct Routine;
+
+                impl MathOp for Routine {
+                    type FTy = $FTy;
+                    // `for<'a>` binds the lifetime so the C fn pointer type is valid for
+                    // any borrow lifetime appearing in its arguments.
+                    type CFn = for<'a> $CFn;
+                    type CArgs<'a> = $CArgs where Self: 'a;
+                    type CRet = $CRet;
+                    type RustFn = $RustFn;
+                    type RustArgs = $RustArgs;
+                    type RustRet = $RustRet;
+
+                    // `paste` camel-cases the name to match the generated enum variant
+                    // (e.g. `sinf` -> `Sinf`).
+                    const IDENTIFIER: Identifier = Identifier::[< $fn_name:camel >];
+                    const ROUTINE: Self::RustFn = libm::$fn_name;
+                }
+            }
+
+        }
+    };
+}
+
+// Generate a module (with a `Routine` implementing `MathOp`) for every libm function.
+libm_macros::for_each_function! {
+    callback: do_thing,
+    emit_types: all,
+}
diff --git a/library/compiler-builtins/libm-test/src/precision.rs b/library/compiler-builtins/libm-test/src/precision.rs
new file mode 100644
index 00000000000..f5fb5f6707b
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/precision.rs
@@ -0,0 +1,573 @@
+//! Configuration for skipping or changing the result for individual test cases (inputs) rather
+//! than ignoring entire tests.
+
+use core::f32;
+
+use CheckBasis::{Mpfr, Musl};
+use libm::support::CastFrom;
+use {BaseName as Bn, Identifier as Id};
+
+use crate::{BaseName, CheckBasis, CheckCtx, Float, Identifier, Int, TestResult};
+
/// Type implementing [`MaybeOverride`], providing per-signature special-case handling.
pub struct SpecialCase;
+
/// ULP allowed to differ from the results returned by a test basis.
///
/// Starts from a per-operation baseline (relative to the infinite-precision MPFR result), then
/// widens the tolerance for bases, functions, or targets known to be less accurate.
#[allow(clippy::single_match)]
pub fn default_ulp(ctx: &CheckCtx) -> u32 {
    // ULP compared to the infinite (MPFR) result.
    let mut ulp = match ctx.base_name {
        // Operations that require exact results. This list should correlate with what we
        // have documented at <https://doc.rust-lang.org/std/primitive.f32.html>.
        Bn::Ceil
        | Bn::Copysign
        | Bn::Fabs
        | Bn::Fdim
        | Bn::Floor
        | Bn::Fma
        | Bn::Fmax
        | Bn::Fmaximum
        | Bn::FmaximumNum
        | Bn::Fmin
        | Bn::Fminimum
        | Bn::FminimumNum
        | Bn::Fmod
        | Bn::Frexp
        | Bn::Ilogb
        | Bn::Ldexp
        | Bn::Modf
        | Bn::Nextafter
        | Bn::Remainder
        | Bn::Remquo
        | Bn::Rint
        | Bn::Round
        | Bn::Roundeven
        | Bn::Scalbn
        | Bn::Sqrt
        | Bn::Trunc => 0,

        // Operations that aren't required to be exact, but our implementations are.
        Bn::Cbrt => 0,

        // Bessel functions have large inaccuracies.
        Bn::J0 | Bn::J1 | Bn::Y0 | Bn::Y1 | Bn::Jn | Bn::Yn => 8_000_000,

        // For all other operations, specify our implementation's worst case precision.
        Bn::Acos => 1,
        Bn::Acosh => 4,
        Bn::Asin => 1,
        Bn::Asinh => 2,
        Bn::Atan => 1,
        Bn::Atan2 => 2,
        Bn::Atanh => 2,
        Bn::Cos => 1,
        Bn::Cosh => 1,
        Bn::Erf => 1,
        Bn::Erfc => 4,
        Bn::Exp => 1,
        Bn::Exp10 => 6,
        Bn::Exp2 => 1,
        Bn::Expm1 => 1,
        Bn::Hypot => 1,
        Bn::Lgamma | Bn::LgammaR => 16,
        Bn::Log => 1,
        Bn::Log10 => 1,
        Bn::Log1p => 1,
        Bn::Log2 => 1,
        Bn::Pow => 1,
        Bn::Sin => 1,
        Bn::Sincos => 1,
        Bn::Sinh => 2,
        Bn::Tan => 1,
        Bn::Tanh => 2,
        // tgammaf has higher accuracy than tgamma.
        Bn::Tgamma if ctx.fn_ident != Id::Tgamma => 1,
        Bn::Tgamma => 20,
    };

    // There are some cases where musl's approximation is less accurate than ours. For these
    // cases, increase the ULP.
    if ctx.basis == Musl {
        match ctx.base_name {
            Bn::Cosh => ulp = 2,
            Bn::Exp10 if usize::BITS < 64 => ulp = 4,
            Bn::Lgamma | Bn::LgammaR => ulp = 400,
            Bn::Tanh => ulp = 4,
            _ => (),
        }

        // Per-identifier (suffixed) overrides take precedence over the base-name ones above.
        match ctx.fn_ident {
            Id::Cbrt => ulp = 2,
            // FIXME(#401): musl has an incorrect result here.
            Id::Fdim => ulp = 2,
            Id::Sincosf => ulp = 500,
            Id::Tgamma => ulp = 20,
            _ => (),
        }
    }

    if cfg!(target_arch = "x86") {
        match ctx.fn_ident {
            // Input `fma(0.999999999999999, 1.0000000000000013, 0.0) = 1.0000000000000002` is
            // incorrect on i586 and i686.
            Id::Fma => ulp = 1,
            _ => (),
        }
    }

    // In some cases, our implementation is less accurate than musl on i586.
    if cfg!(x86_no_sse) {
        match ctx.fn_ident {
            // FIXME(#401): these need to be correctly rounded but are not.
            Id::Fmaf => ulp = 1,
            Id::Fdim => ulp = 1,
            Id::Round => ulp = 1,

            Id::Asinh => ulp = 3,
            Id::Asinhf => ulp = 3,
            Id::Cbrt => ulp = 1,
            Id::Exp10 | Id::Exp10f => ulp = 1_000_000,
            Id::Exp2 | Id::Exp2f => ulp = 10_000_000,
            Id::Log1p | Id::Log1pf => ulp = 2,
            Id::Tan => ulp = 2,
            _ => (),
        }
    }

    ulp
}
+
/// Result of checking for possible overrides.
#[derive(Debug, Default)]
pub enum CheckAction {
    /// The check should pass. Default case.
    #[default]
    AssertSuccess,

    /// Override the ULP for this check.
    AssertWithUlp(u32),

    /// Failure is expected, ensure this is the case (xfail). Takes a context string to help trace
    /// back exactly why we expect this to fail.
    AssertFailure(&'static str),

    /// The override somehow validated the result, here it is.
    Custom(TestResult),

    /// Disregard the output.
    Skip,
}
+
/// Don't run further validation on this test case.
const SKIP: CheckAction = CheckAction::Skip;

/// Return this to skip checks on a test that currently fails but shouldn't. Takes a description
/// of context.
const XFAIL: fn(&'static str) -> CheckAction = CheckAction::AssertFailure;

/// Indicates that we expect a test to fail but we aren't asserting that it does (e.g. some results
/// within a range do actually pass).
///
/// Same as `SKIP`, just indicates we have something to eventually fix.
const XFAIL_NOCHECK: CheckAction = CheckAction::Skip;

/// By default, all tests should pass.
const DEFAULT: CheckAction = CheckAction::AssertSuccess;
+
/// Allow overriding the outputs of specific test cases.
///
/// There are some cases where we want to xfail specific cases or handle certain inputs
/// differently than the rest of calls to `validate`. This provides a hook to do that.
///
/// Each hook inspects the input, the actual and expected outputs, and the test context, and
/// returns a [`CheckAction`] telling the caller how to proceed. The default implementations
/// return [`CheckAction::AssertSuccess`], meaning checks run as usual.
///
/// This gets implemented once per input type, then the functions provide further filtering
/// based on function name and values.
pub trait MaybeOverride<Input> {
    /// Hook for operations whose (primary) result is a float.
    fn check_float<F: Float>(
        _input: Input,
        _actual: F,
        _expected: F,
        _ctx: &CheckCtx,
    ) -> CheckAction {
        DEFAULT
    }

    /// Hook for operations that (also) produce an integer result.
    fn check_int<I: Int>(_input: Input, _actual: I, _expected: I, _ctx: &CheckCtx) -> CheckAction {
        DEFAULT
    }
}
+
// No `f16` unary special cases; the default `AssertSuccess` hooks apply.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16,)> for SpecialCase {}
+
impl MaybeOverride<(f32,)> for SpecialCase {
    // Order matters here: each guard early-returns, and the shared `unop_common` checks run last.
    fn check_float<F: Float>(input: (f32,), actual: F, expected: F, ctx: &CheckCtx) -> CheckAction {
        if ctx.base_name == BaseName::Expm1
            && !input.0.is_infinite()
            && input.0 > 80.0
            && actual.is_infinite()
            && !expected.is_infinite()
        {
            // we return infinity but the number is representable
            if ctx.basis == CheckBasis::Musl {
                return XFAIL_NOCHECK;
            }
            return XFAIL("expm1 representable numbers");
        }

        if cfg!(x86_no_sse)
            && ctx.base_name == BaseName::Exp2
            && !expected.is_infinite()
            && actual.is_infinite()
        {
            // We return infinity when there is a representable value. Test input: 127.97238
            return XFAIL("586 exp2 representable numbers");
        }

        if ctx.base_name == BaseName::Sinh && input.0.abs() > 80.0 && actual.is_nan() {
            // we return some NaN that should be real values or infinite
            if ctx.basis == CheckBasis::Musl {
                return XFAIL_NOCHECK;
            }
            return XFAIL("sinh unexpected NaN");
        }

        if (ctx.base_name == BaseName::Lgamma || ctx.base_name == BaseName::LgammaR)
            && input.0 > 4e36
            && expected.is_infinite()
            && !actual.is_infinite()
        {
            // This result should saturate but we return a finite value.
            return XFAIL_NOCHECK;
        }

        if ctx.base_name == BaseName::J0 && input.0 < -1e34 {
            // Errors get huge close to -inf
            return XFAIL_NOCHECK;
        }

        unop_common(input, actual, expected, ctx)
    }

    fn check_int<I: Int>(input: (f32,), actual: I, expected: I, ctx: &CheckCtx) -> CheckAction {
        // On MPFR for lgammaf_r, we set -1 as the integer result for negative infinity but MPFR
        // sets +1
        if ctx.basis == CheckBasis::Mpfr
            && ctx.base_name == BaseName::LgammaR
            && input.0 == f32::NEG_INFINITY
            && actual.abs() == expected.abs()
        {
            return XFAIL("lgammar integer result");
        }

        DEFAULT
    }
}
+
impl MaybeOverride<(f64,)> for SpecialCase {
    // Order matters here: each guard early-returns, and the shared `unop_common` checks run last.
    fn check_float<F: Float>(input: (f64,), actual: F, expected: F, ctx: &CheckCtx) -> CheckAction {
        if cfg!(x86_no_sse)
            && ctx.base_name == BaseName::Ceil
            && ctx.basis == CheckBasis::Musl
            && input.0 < 0.0
            && input.0 > -1.0
            && expected == F::ZERO
            && actual == F::ZERO
        {
            // musl returns -0.0, we return +0.0
            return XFAIL("i586 ceil signed zero");
        }

        if cfg!(x86_no_sse)
            && (ctx.base_name == BaseName::Rint || ctx.base_name == BaseName::Roundeven)
            && (expected - actual).abs() <= F::ONE
            && (expected - actual).abs() > F::ZERO
        {
            // Our rounding mode is incorrect.
            return XFAIL("i586 rint rounding mode");
        }

        if cfg!(x86_no_sse)
            && (ctx.fn_ident == Identifier::Ceil || ctx.fn_ident == Identifier::Floor)
            && expected.eq_repr(F::NEG_ZERO)
            && actual.eq_repr(F::ZERO)
        {
            // FIXME: the x87 implementations do not keep the distinction between -0.0 and 0.0.
            // See https://github.com/rust-lang/libm/pull/404#issuecomment-2572399955
            return XFAIL("i586 ceil/floor signed zero");
        }

        if cfg!(x86_no_sse)
            && (ctx.fn_ident == Identifier::Exp10 || ctx.fn_ident == Identifier::Exp2)
        {
            // FIXME: i586 has very imprecise results with ULP > u32::MAX for these
            // operations so we can't reasonably provide a limit.
            return XFAIL_NOCHECK;
        }

        if ctx.base_name == BaseName::J0 && input.0 < -1e300 {
            // Errors get huge close to -inf
            return XFAIL_NOCHECK;
        }

        unop_common(input, actual, expected, ctx)
    }

    fn check_int<I: Int>(input: (f64,), actual: I, expected: I, ctx: &CheckCtx) -> CheckAction {
        // On MPFR for lgamma_r, we set -1 as the integer result for negative infinity but MPFR
        // sets +1
        if ctx.basis == CheckBasis::Mpfr
            && ctx.base_name == BaseName::LgammaR
            && input.0 == f64::NEG_INFINITY
            && actual.abs() == expected.abs()
        {
            return XFAIL("lgammar integer result");
        }

        DEFAULT
    }
}
+
// No `f128` unary special cases; the default `AssertSuccess` hooks apply.
#[cfg(f128_enabled)]
impl MaybeOverride<(f128,)> for SpecialCase {}
+
+// F1 and F2 are always the same type, this is just to please generics
+fn unop_common<F1: Float, F2: Float>(
+    input: (F1,),
+    actual: F2,
+    expected: F2,
+    ctx: &CheckCtx,
+) -> CheckAction {
+    if ctx.base_name == BaseName::Acosh
+        && input.0 < F1::NEG_ONE
+        && !(expected.is_nan() && actual.is_nan())
+    {
+        // acoshf is undefined for x <= 1.0, but we return a random result at lower values.
+
+        if ctx.basis == CheckBasis::Musl {
+            return XFAIL_NOCHECK;
+        }
+
+        return XFAIL("acoshf undefined");
+    }
+
+    if (ctx.base_name == BaseName::Lgamma || ctx.base_name == BaseName::LgammaR)
+        && input.0 < F1::ZERO
+        && !input.0.is_infinite()
+    {
+        // loggamma should not be defined for x < 0, yet we both return results
+        return XFAIL_NOCHECK;
+    }
+
+    // fabs and copysign must leave NaNs untouched.
+    if ctx.base_name == BaseName::Fabs && input.0.is_nan() {
+        // LLVM currently uses x87 instructions which quieten signalling NaNs to handle the i686
+        // `extern "C"` `f32`/`f64` return ABI.
+        // LLVM issue <https://github.com/llvm/llvm-project/issues/66803>
+        // Rust issue <https://github.com/rust-lang/rust/issues/115567>
+        if cfg!(target_arch = "x86") && ctx.basis == CheckBasis::Musl && actual.is_nan() {
+            return XFAIL_NOCHECK;
+        }
+
+        // MPFR only has one NaN bitpattern; allow the default `.is_nan()` checks to validate.
+        if ctx.basis == CheckBasis::Mpfr {
+            return DEFAULT;
+        }
+
+        // abs and copysign require signaling NaNs to be propagated, so verify bit equality.
+        if actual.to_bits() == expected.to_bits() {
+            return CheckAction::Custom(Ok(()));
+        } else {
+            return CheckAction::Custom(Err(anyhow::anyhow!("NaNs have different bitpatterns")));
+        }
+    }
+
+    DEFAULT
+}
+
// All binary same-width float signatures share the checks in `binop_common`.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16, f16)> for SpecialCase {
    fn check_float<F: Float>(
        input: (f16, f16),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        binop_common(input, actual, expected, ctx)
    }
}

impl MaybeOverride<(f32, f32)> for SpecialCase {
    fn check_float<F: Float>(
        input: (f32, f32),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        binop_common(input, actual, expected, ctx)
    }
}

impl MaybeOverride<(f64, f64)> for SpecialCase {
    fn check_float<F: Float>(
        input: (f64, f64),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        binop_common(input, actual, expected, ctx)
    }
}

#[cfg(f128_enabled)]
impl MaybeOverride<(f128, f128)> for SpecialCase {
    fn check_float<F: Float>(
        input: (f128, f128),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        binop_common(input, actual, expected, ctx)
    }
}
+
+// F1 and F2 are always the same type, this is just to please generics
+fn binop_common<F1: Float, F2: Float>(
+    input: (F1, F1),
+    actual: F2,
+    expected: F2,
+    ctx: &CheckCtx,
+) -> CheckAction {
+    // MPFR only has one NaN bitpattern; allow the default `.is_nan()` checks to validate. Skip if
+    // the first input (magnitude source) is NaN and the output is also a NaN, or if the second
+    // input (sign source) is NaN.
+    if ctx.basis == CheckBasis::Mpfr
+        && ((input.0.is_nan() && actual.is_nan() && expected.is_nan()) || input.1.is_nan())
+    {
+        return SKIP;
+    }
+
+    /* FIXME(#439): our fmin and fmax do not compare signed zeros */
+
+    if ctx.base_name == BaseName::Fmin
+        && input.0.biteq(F1::NEG_ZERO)
+        && input.1.biteq(F1::ZERO)
+        && expected.biteq(F2::NEG_ZERO)
+        && actual.biteq(F2::ZERO)
+    {
+        return XFAIL("fmin signed zeroes");
+    }
+
+    if ctx.base_name == BaseName::Fmax
+        && input.0.biteq(F1::NEG_ZERO)
+        && input.1.biteq(F1::ZERO)
+        && expected.biteq(F2::ZERO)
+        && actual.biteq(F2::NEG_ZERO)
+    {
+        return XFAIL("fmax signed zeroes");
+    }
+
+    // Musl propagates NaNs if one is provided as the input, but we return the other input.
+    if (ctx.base_name == BaseName::Fmax || ctx.base_name == BaseName::Fmin)
+        && ctx.basis == Musl
+        && (input.0.is_nan() ^ input.1.is_nan())
+        && expected.is_nan()
+    {
+        return XFAIL("fmax/fmin musl NaN");
+    }
+
+    DEFAULT
+}
+
impl MaybeOverride<(i32, f32)> for SpecialCase {
    // `(i32, f32)` signatures (`jnf`/`ynf`) get one extra check before the shared ones.
    fn check_float<F: Float>(
        input: (i32, f32),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        // `ynf(213, 109.15641) = -inf` with our library, should be finite.
        if ctx.basis == Mpfr
            && ctx.base_name == BaseName::Yn
            && input.0 > 200
            && !expected.is_infinite()
            && actual.is_infinite()
        {
            return XFAIL("ynf infinity mismatch");
        }

        int_float_common(input, actual, expected, ctx)
    }
}
+
impl MaybeOverride<(i32, f64)> for SpecialCase {
    // `(i32, f64)` signatures (`jn`/`yn`) only need the shared checks.
    fn check_float<F: Float>(
        input: (i32, f64),
        actual: F,
        expected: F,
        ctx: &CheckCtx,
    ) -> CheckAction {
        int_float_common(input, actual, expected, ctx)
    }
}
+
/// Shared overrides for `(i32, float)` signatures (the `jn`/`yn` Bessel family).
fn int_float_common<F1: Float, F2: Float>(
    input: (i32, F1),
    actual: F2,
    expected: F2,
    ctx: &CheckCtx,
) -> CheckAction {
    if ctx.basis == Mpfr
        && (ctx.base_name == BaseName::Jn || ctx.base_name == BaseName::Yn)
        && input.1 == F1::NEG_INFINITY
        && actual == F2::ZERO
        && expected == F2::ZERO
    {
        return XFAIL("we disagree with MPFR on the sign of zero");
    }

    // Values near infinity sometimes get cut off for us. `ynf(681, 509.90924) = -inf` but should
    // be -3.2161271e38.
    if ctx.basis == Musl
        && ctx.fn_ident == Identifier::Ynf
        && !expected.is_infinite()
        && actual.is_infinite()
        && (expected.abs().to_bits().abs_diff(actual.abs().to_bits())
            < F2::Int::cast_from(10_000_000u32))
    {
        return XFAIL_NOCHECK;
    }

    // Our bessel functions blow up with large N values
    if ctx.basis == Musl && (ctx.base_name == BaseName::Jn || ctx.base_name == BaseName::Yn) {
        if cfg!(x86_no_sse) {
            // Precision is especially bad on i586, not worth checking.
            return XFAIL_NOCHECK;
        }

        // Degrade gracefully as the iteration count grows: skip entirely above 4000,
        // loosen the ULP bound above 100.
        if input.0 > 4000 {
            return XFAIL_NOCHECK;
        } else if input.0 > 100 {
            return CheckAction::AssertWithUlp(1_000_000);
        }
    }
    DEFAULT
}
+
// The `(float, i32)` and ternary signatures have no special cases; the default
// `CheckAction::AssertSuccess` hooks apply.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16, i32)> for SpecialCase {}
impl MaybeOverride<(f32, i32)> for SpecialCase {}
impl MaybeOverride<(f64, i32)> for SpecialCase {}
#[cfg(f128_enabled)]
impl MaybeOverride<(f128, i32)> for SpecialCase {}

impl MaybeOverride<(f32, f32, f32)> for SpecialCase {}
impl MaybeOverride<(f64, f64, f64)> for SpecialCase {}
#[cfg(f128_enabled)]
impl MaybeOverride<(f128, f128, f128)> for SpecialCase {}
diff --git a/library/compiler-builtins/libm-test/src/run_cfg.rs b/library/compiler-builtins/libm-test/src/run_cfg.rs
new file mode 100644
index 00000000000..b36164b005f
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/run_cfg.rs
@@ -0,0 +1,370 @@
+//! Configuration for how tests get run.
+
+use std::ops::RangeInclusive;
+use std::sync::LazyLock;
+use std::{env, str};
+
+use crate::generate::random::{SEED, SEED_ENV};
+use crate::{BaseName, FloatTy, Identifier, test_log};
+
/// The environment variable indicating which extensive tests should be run.
pub const EXTENSIVE_ENV: &str = "LIBM_EXTENSIVE_TESTS";

/// Specify the number of iterations via this environment variable, rather than using the default.
pub const EXTENSIVE_ITER_ENV: &str = "LIBM_EXTENSIVE_ITERATIONS";

/// The override value, if set by the above environment.
///
/// Parsed lazily on first access; panics then if the variable is set but not a valid integer.
static EXTENSIVE_ITER_OVERRIDE: LazyLock<Option<u64>> = LazyLock::new(|| {
    env::var(EXTENSIVE_ITER_ENV).map(|v| v.parse().expect("failed to parse iteration count")).ok()
});
+
/// Specific tests that need to have a reduced amount of iterations to complete in a reasonable
/// amount of time.
///
/// Contains the identifier+generator combo to match on, plus the factor to reduce by.
///
/// NOTE(review): the constant name is missing an `R` (`EXTREMELY`); renaming would also touch
/// its use in `iteration_count`, so it is only flagged here.
const EXTEMELY_SLOW_TESTS: &[(Identifier, GeneratorKind, u64)] = &[
    (Identifier::Fmodf128, GeneratorKind::QuickSpaced, 50),
    (Identifier::Fmodf128, GeneratorKind::Extensive, 50),
];
+
+/// Maximum number of iterations to run for a single routine.
+///
+/// The default value of one greater than `u32::MAX` allows testing single-argument `f32` routines
+/// and single- or double-argument `f16` routines exhaustively. `f64` and `f128` can't feasibly
+/// be tested exhaustively; however, [`EXTENSIVE_ITER_ENV`] can be set to run tests for multiple
+/// hours.
+pub fn extensive_max_iterations() -> u64 {
+    let default = 1 << 32; // default value
+    EXTENSIVE_ITER_OVERRIDE.unwrap_or(default)
+}
+
/// Context passed to [`CheckOutput`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CheckCtx {
    /// Allowed ULP deviation
    pub ulp: u32,
    /// The specific (suffixed) function under test.
    pub fn_ident: Identifier,
    /// The unsuffixed operation name shared across float widths.
    pub base_name: BaseName,
    /// Function name.
    pub fn_name: &'static str,
    /// Return the unsuffixed version of the function name.
    pub base_name_str: &'static str,
    /// Source of truth for tests.
    pub basis: CheckBasis,
    /// How inputs are generated for this run.
    pub gen_kind: GeneratorKind,
    /// If specified, this value will override the value returned by [`iteration_count`].
    pub override_iterations: Option<u64>,
}
+
+impl CheckCtx {
+    /// Create a new check context, using the default ULP for the function.
+    pub fn new(fn_ident: Identifier, basis: CheckBasis, gen_kind: GeneratorKind) -> Self {
+        let mut ret = Self {
+            ulp: 0,
+            fn_ident,
+            fn_name: fn_ident.as_str(),
+            base_name: fn_ident.base_name(),
+            base_name_str: fn_ident.base_name().as_str(),
+            basis,
+            gen_kind,
+            override_iterations: None,
+        };
+        ret.ulp = crate::default_ulp(&ret);
+        ret
+    }
+
+    /// The number of input arguments for this function.
+    pub fn input_count(&self) -> usize {
+        self.fn_ident.math_op().rust_sig.args.len()
+    }
+
+    pub fn override_iterations(&mut self, count: u64) {
+        self.override_iterations = Some(count)
+    }
+}
+
/// Possible items to test against
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CheckBasis {
    /// Check against Musl's math sources.
    Musl,
    /// Check against infinite precision (MPFR).
    Mpfr,
    /// Benchmarks or other times when this is not relevant.
    None,
}
+
/// The different kinds of generators that provide test input, which account for input pattern
/// and quantity.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum GeneratorKind {
    /// Points of interest (asymptotes, inflection points, extremes) plus surrounding values.
    EdgeCases,
    /// Very high iteration counts, gated by [`EXTENSIVE_ENV`].
    Extensive,
    /// Spaced inputs at the regular (quick) iteration count.
    QuickSpaced,
    /// Randomly generated inputs, seeded via `SEED_ENV`.
    Random,
    /// A fixed list of test cases (no iteration count applies).
    List,
}
+
/// A list of all functions that should get extensive tests.
///
/// This also supports the special test name `all` to run all tests, as well as `all_f16`,
/// `all_f32`, `all_f64`, and `all_f128` to run all tests for a specific float type.
///
/// Parsed once from [`EXTENSIVE_ENV`] (comma-separated); panics on first access if a name is
/// not recognized.
static EXTENSIVE: LazyLock<Vec<Identifier>> = LazyLock::new(|| {
    let var = env::var(EXTENSIVE_ENV).unwrap_or_default();
    let list = var.split(",").filter(|s| !s.is_empty()).collect::<Vec<_>>();
    let mut ret = Vec::new();

    // Add every operation whose float type matches `fty`.
    let append_ty_ops = |ret: &mut Vec<_>, fty: FloatTy| {
        let iter = Identifier::ALL.iter().filter(move |id| id.math_op().float_ty == fty).copied();
        ret.extend(iter);
    };

    for item in list {
        match item {
            "all" => ret = Identifier::ALL.to_owned(),
            "all_f16" => append_ty_ops(&mut ret, FloatTy::F16),
            "all_f32" => append_ty_ops(&mut ret, FloatTy::F32),
            "all_f64" => append_ty_ops(&mut ret, FloatTy::F64),
            "all_f128" => append_ty_ops(&mut ret, FloatTy::F128),
            s => {
                let id = Identifier::from_str(s)
                    .unwrap_or_else(|| panic!("unrecognized test name `{s}`"));
                ret.push(id);
            }
        }
    }

    ret
});
+
/// Information about the function to be tested.
///
/// Derived per-function from the environment and build configuration; see [`TestEnv::from_env`].
#[derive(Debug)]
struct TestEnv {
    /// Tests should be reduced because the platform is slow. E.g. 32-bit or emulated.
    slow_platform: bool,
    /// The float cannot be tested exhaustively, `f64` or `f128`.
    large_float_ty: bool,
    /// Env indicates that an extensive test should be run.
    should_run_extensive: bool,
    /// Multiprecision tests will be run.
    mp_tests_enabled: bool,
    /// The number of inputs to the function.
    input_count: usize,
}
+
+impl TestEnv {
+    fn from_env(ctx: &CheckCtx) -> Self {
+        let id = ctx.fn_ident;
+        let op = id.math_op();
+
+        let will_run_mp = cfg!(feature = "build-mpfr");
+        let large_float_ty = match op.float_ty {
+            FloatTy::F16 | FloatTy::F32 => false,
+            FloatTy::F64 | FloatTy::F128 => true,
+        };
+
+        let will_run_extensive = EXTENSIVE.contains(&id);
+
+        let input_count = op.rust_sig.args.len();
+
+        Self {
+            slow_platform: slow_platform(),
+            large_float_ty,
+            should_run_extensive: will_run_extensive,
+            mp_tests_enabled: will_run_mp,
+            input_count,
+        }
+    }
+}
+
+/// Tests are pretty slow on non-64-bit targets, x86 MacOS, and targets that run in QEMU. Start
+/// with a reduced number on these platforms.
+fn slow_platform() -> bool {
+    let slow_on_ci = crate::emulated()
+        || usize::BITS < 64
+        || cfg!(all(target_arch = "x86_64", target_vendor = "apple"));
+
+    // If not running in CI, there is no need to reduce iteration count.
+    slow_on_ci && crate::ci()
+}
+
+/// The number of iterations to run for a given test.
+pub fn iteration_count(ctx: &CheckCtx, argnum: usize) -> u64 {
+    let t_env = TestEnv::from_env(ctx);
+
+    // Ideally run 5M tests
+    let mut domain_iter_count: u64 = 4_000_000;
+
+    // Start with a reduced number of tests on slow platforms.
+    if t_env.slow_platform {
+        domain_iter_count = 100_000;
+    }
+
+    // If we will be running tests against MPFR, we don't need to test as much against musl.
+    // However, there are some platforms where we have to test against musl since MPFR can't be
+    // built.
+    if t_env.mp_tests_enabled && ctx.basis == CheckBasis::Musl {
+        domain_iter_count /= 100;
+    }
+
+    // Run fewer random tests than domain tests.
+    let random_iter_count = domain_iter_count / 100;
+
+    let mut total_iterations = match ctx.gen_kind {
+        GeneratorKind::QuickSpaced => domain_iter_count,
+        GeneratorKind::Random => random_iter_count,
+        GeneratorKind::Extensive => extensive_max_iterations(),
+        GeneratorKind::EdgeCases | GeneratorKind::List => {
+            unimplemented!("shoudn't need `iteration_count` for {:?}", ctx.gen_kind)
+        }
+    };
+
+    // Larger float types get more iterations.
+    if t_env.large_float_ty && ctx.gen_kind != GeneratorKind::Extensive {
+        if ctx.gen_kind == GeneratorKind::Extensive {
+            // Extensive already has a pretty high test count.
+            total_iterations *= 2;
+        } else {
+            total_iterations *= 4;
+        }
+    }
+
+    // Functions with more arguments get more iterations.
+    let arg_multiplier = 1 << (t_env.input_count - 1);
+    total_iterations *= arg_multiplier;
+
+    // FMA has a huge domain but is reasonably fast to run, so increase another 1.5x.
+    if ctx.base_name == BaseName::Fma {
+        total_iterations = 3 * total_iterations / 2;
+    }
+
+    // Some tests are significantly slower than others and need to be further reduced.
+    if let Some((_id, _gen, scale)) = EXTEMELY_SLOW_TESTS
+        .iter()
+        .find(|(id, generator, _scale)| *id == ctx.fn_ident && *generator == ctx.gen_kind)
+    {
+        // However, do not override if the extensive iteration count has been manually set.
+        if !(ctx.gen_kind == GeneratorKind::Extensive && EXTENSIVE_ITER_OVERRIDE.is_some()) {
+            total_iterations /= scale;
+        }
+    }
+
+    if cfg!(optimizations_enabled) {
+        // Always run at least 10,000 tests.
+        total_iterations = total_iterations.max(10_000);
+    } else {
+        // Without optimizations, just run a quick check regardless of other parameters.
+        total_iterations = 800;
+    }
+
+    let mut overridden = false;
+    if let Some(count) = ctx.override_iterations {
+        total_iterations = count;
+        overridden = true;
+    }
+
+    // Adjust for the number of inputs
+    let ntests = match t_env.input_count {
+        1 => total_iterations,
+        2 => (total_iterations as f64).sqrt().ceil() as u64,
+        3 => (total_iterations as f64).cbrt().ceil() as u64,
+        _ => panic!("test has more than three arguments"),
+    };
+
+    let total = ntests.pow(t_env.input_count.try_into().unwrap());
+
+    let seed_msg = match ctx.gen_kind {
+        GeneratorKind::QuickSpaced | GeneratorKind::Extensive => String::new(),
+        GeneratorKind::Random => {
+            format!(" using `{SEED_ENV}={}`", str::from_utf8(SEED.as_slice()).unwrap())
+        }
+        GeneratorKind::EdgeCases | GeneratorKind::List => unimplemented!(),
+    };
+
+    test_log(&format!(
+        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {ntests} iterations \
+         ({total} total){seed_msg}{omsg}",
+        gen_kind = ctx.gen_kind,
+        basis = ctx.basis,
+        fn_ident = ctx.fn_ident,
+        arg = argnum + 1,
+        args = t_env.input_count,
+        omsg = if overridden { " (overridden)" } else { "" }
+    ));
+
+    ntests
+}
+
+/// Some tests require that an integer be kept within reasonable limits; generate that here.
+pub fn int_range(ctx: &CheckCtx, argnum: usize) -> RangeInclusive<i32> {
+    let t_env = TestEnv::from_env(ctx);
+
+    if !matches!(ctx.base_name, BaseName::Jn | BaseName::Yn) {
+        return i32::MIN..=i32::MAX;
+    }
+
+    assert_eq!(argnum, 0, "For `jn`/`yn`, only the first argument takes an integer");
+
+    // The integer argument to `jn` is an iteration count. Limit this to ensure tests can be
+    // completed in a reasonable amount of time.
+    let non_extensive_range = if t_env.slow_platform || !cfg!(optimizations_enabled) {
+        (-0xf)..=0xff
+    } else {
+        (-0xff)..=0xffff
+    };
+
+    let extensive_range = (-0xfff)..=0xfffff;
+
+    match ctx.gen_kind {
+        GeneratorKind::Extensive => extensive_range,
+        GeneratorKind::QuickSpaced | GeneratorKind::Random => non_extensive_range,
+        GeneratorKind::EdgeCases => extensive_range,
+        GeneratorKind::List => unimplemented!("shoudn't need range for {:?}", ctx.gen_kind),
+    }
+}
+
+/// For domain tests, limit how many asymptotes or specified check points we test.
+pub fn check_point_count(ctx: &CheckCtx) -> usize {
+    assert_eq!(
+        ctx.gen_kind,
+        GeneratorKind::EdgeCases,
+        "check_point_count is intended for edge case tests"
+    );
+    let t_env = TestEnv::from_env(ctx);
+    if t_env.slow_platform || !cfg!(optimizations_enabled) { 4 } else { 10 }
+}
+
+/// When validating points of interest (e.g. asymptotes, inflection points, extremes), also check
+/// this many surrounding values.
+pub fn check_near_count(ctx: &CheckCtx) -> u64 {
+    assert_eq!(
+        ctx.gen_kind,
+        GeneratorKind::EdgeCases,
+        "check_near_count is intended for edge case tests"
+    );
+    if cfg!(optimizations_enabled) {
+        // Taper based on the number of inputs.
+        match ctx.input_count() {
+            1 | 2 => 100,
+            3 => 50,
+            x => panic!("unexpected argument count {x}"),
+        }
+    } else {
+        8
+    }
+}
+
+/// Check whether extensive actions should be run or skipped.
+pub fn skip_extensive_test(ctx: &CheckCtx) -> bool {
+    let t_env = TestEnv::from_env(ctx);
+    !t_env.should_run_extensive
+}
+
+/// The number of iterations to run for `u256` fuzz tests.
+pub fn bigint_fuzz_iteration_count() -> u64 {
+    if !cfg!(optimizations_enabled) {
+        return 1000;
+    }
+
+    if slow_platform() { 100_000 } else { 5_000_000 }
+}
diff --git a/library/compiler-builtins/libm-test/src/test_traits.rs b/library/compiler-builtins/libm-test/src/test_traits.rs
new file mode 100644
index 00000000000..c560dade884
--- /dev/null
+++ b/library/compiler-builtins/libm-test/src/test_traits.rs
@@ -0,0 +1,447 @@
+//! Traits related to testing.
+//!
+//! There are two main traits in this module:
+//!
+//! - `TupleCall`: implemented on tuples to allow calling them as function arguments.
+//! - `CheckOutput`: implemented on anything that is an output type for validation against an
+//!   expected value.
+
+use std::panic::{RefUnwindSafe, UnwindSafe};
+use std::{fmt, panic};
+
+use anyhow::{Context, anyhow, bail, ensure};
+use libm::support::Hexf;
+
+use crate::precision::CheckAction;
+use crate::{
+    CheckBasis, CheckCtx, Float, GeneratorKind, Int, MaybeOverride, SpecialCase, TestResult,
+};
+
+/// Trait for calling a function with a tuple as arguments.
+///
+/// Implemented on the tuple with the function signature as the generic (so we can use the same
+/// tuple for multiple signatures).
+pub trait TupleCall<Func>: fmt::Debug {
+    type Output;
+    fn call(self, f: Func) -> Self::Output;
+
+    /// Intercept panics and print the input to stderr before continuing.
+    fn call_intercept_panics(self, f: Func) -> Self::Output
+    where
+        Self: RefUnwindSafe + Copy,
+        Func: UnwindSafe,
+    {
+        panic::catch_unwind(|| self.call(f)).unwrap_or_else(|payload| {
+            // Surface the offending input before re-raising the original panic.
+            eprintln!("panic with the following input: {self:?}");
+            panic::resume_unwind(payload)
+        })
+    }
+}
+
+/// A trait to implement on any output type so we can verify it in a generic way.
+pub trait CheckOutput<Input>: Sized {
+    /// Validate `self` (actual) and `expected` are the same.
+    ///
+    /// `input` is only used here for error messages.
+    ///
+    /// Returns `Ok(())` on success, or an error describing the mismatch otherwise.
+    fn validate(self, expected: Self, input: Input, ctx: &CheckCtx) -> TestResult;
+}
+
+/// A helper trait to print something as hex with the correct number of nibbles, e.g. a `u32`
+/// will always print with `0x` followed by 8 digits.
+///
+/// This is only used for printing errors so allocating is okay.
+pub trait Hex: Copy {
+    /// Hex integer syntax.
+    fn hex(self) -> String;
+    /// Hex float syntax (empty for integer types, which have none).
+    fn hexf(self) -> String;
+}
+
+/* implement `TupleCall` */
+
+/// Call a unary function with the tuple's single element.
+impl<T1, R> TupleCall<fn(T1) -> R> for (T1,)
+where
+    T1: fmt::Debug,
+{
+    type Output = R;
+
+    fn call(self, f: fn(T1) -> R) -> Self::Output {
+        let (x,) = self;
+        f(x)
+    }
+}
+
+/// Call a binary function with the tuple's two elements.
+impl<T1, T2, R> TupleCall<fn(T1, T2) -> R> for (T1, T2)
+where
+    T1: fmt::Debug,
+    T2: fmt::Debug,
+{
+    type Output = R;
+
+    fn call(self, f: fn(T1, T2) -> R) -> Self::Output {
+        let (a, b) = self;
+        f(a, b)
+    }
+}
+
+/// Call a function taking one input and one output reference; the value written through the
+/// reference is returned alongside the function's result.
+impl<T1, T2, R> TupleCall<fn(T1, &mut T2) -> R> for (T1,)
+where
+    T1: fmt::Debug,
+    T2: fmt::Debug + Default,
+{
+    type Output = (R, T2);
+
+    fn call(self, f: fn(T1, &mut T2) -> R) -> Self::Output {
+        let mut out = T2::default();
+        let ret = f(self.0, &mut out);
+        (ret, out)
+    }
+}
+
+/// Call a ternary function with the tuple's three elements.
+impl<T1, T2, T3, R> TupleCall<fn(T1, T2, T3) -> R> for (T1, T2, T3)
+where
+    T1: fmt::Debug,
+    T2: fmt::Debug,
+    T3: fmt::Debug,
+{
+    type Output = R;
+
+    fn call(self, f: fn(T1, T2, T3) -> R) -> Self::Output {
+        let (a, b, c) = self;
+        f(a, b, c)
+    }
+}
+
+/// Call a function taking two inputs and one output reference; the value written through the
+/// reference is returned alongside the function's result.
+impl<T1, T2, T3, R> TupleCall<fn(T1, T2, &mut T3) -> R> for (T1, T2)
+where
+    T1: fmt::Debug,
+    T2: fmt::Debug,
+    T3: fmt::Debug + Default,
+{
+    type Output = (R, T3);
+
+    fn call(self, f: fn(T1, T2, &mut T3) -> R) -> Self::Output {
+        let mut out = T3::default();
+        let ret = f(self.0, self.1, &mut out);
+        (ret, out)
+    }
+}
+
+/// Call a function taking one input and two output references; both written values are
+/// returned as a pair.
+impl<T1, T2, T3> TupleCall<for<'a> fn(T1, &'a mut T2, &'a mut T3)> for (T1,)
+where
+    T1: fmt::Debug,
+    T2: fmt::Debug + Default,
+    T3: fmt::Debug + Default,
+{
+    type Output = (T2, T3);
+
+    fn call(self, f: for<'a> fn(T1, &'a mut T2, &'a mut T3)) -> Self::Output {
+        let (mut a, mut b) = (T2::default(), T3::default());
+        f(self.0, &mut a, &mut b);
+        (a, b)
+    }
+}
+
+/* implement `Hex` */
+
+/// Render a one-tuple's element in hex, keeping the tuple syntax.
+impl<T1> Hex for (T1,)
+where
+    T1: Hex,
+{
+    fn hex(self) -> String {
+        let inner = self.0.hex();
+        format!("({},)", inner)
+    }
+
+    fn hexf(self) -> String {
+        let inner = self.0.hexf();
+        format!("({},)", inner)
+    }
+}
+
+/// Render both elements of a pair in hex, keeping the tuple syntax.
+impl<T1, T2> Hex for (T1, T2)
+where
+    T1: Hex,
+    T2: Hex,
+{
+    fn hex(self) -> String {
+        let (a, b) = (self.0.hex(), self.1.hex());
+        format!("({}, {})", a, b)
+    }
+
+    fn hexf(self) -> String {
+        let (a, b) = (self.0.hexf(), self.1.hexf());
+        format!("({}, {})", a, b)
+    }
+}
+
+/// Render all three elements of a triple in hex, keeping the tuple syntax.
+impl<T1, T2, T3> Hex for (T1, T2, T3)
+where
+    T1: Hex,
+    T2: Hex,
+    T3: Hex,
+{
+    fn hex(self) -> String {
+        let (a, b, c) = (self.0.hex(), self.1.hex(), self.2.hex());
+        format!("({}, {}, {})", a, b, c)
+    }
+
+    fn hexf(self) -> String {
+        let (a, b, c) = (self.0.hexf(), self.1.hexf(), self.2.hexf());
+        format!("({}, {}, {})", a, b, c)
+    }
+}
+
+/* trait implementations for ints */
+
+/// Implement `Hex` and `CheckOutput` for one or more integer types.
+macro_rules! impl_int {
+    ($($ty:ty),*) => {
+        $(
+            impl Hex for $ty {
+                fn hex(self) -> String {
+                    // Width is `Self::BITS / 4` nibbles plus two characters for the `0x` prefix.
+                    format!("{self:#0width$x}", width = ((Self::BITS / 4) + 2) as usize)
+                }
+
+                fn hexf(self) -> String {
+                    // Integers have no hex float syntax, so this is empty.
+                    String::new()
+                }
+            }
+
+            impl<Input> $crate::CheckOutput<Input> for $ty
+            where
+                Input: Hex + fmt::Debug,
+                SpecialCase: MaybeOverride<Input>,
+            {
+                fn validate<'a>(
+                    self,
+                    expected: Self,
+                    input: Input,
+                    ctx: &$crate::CheckCtx,
+                ) -> TestResult {
+                    // All integer types share the non-generic helper below.
+                    validate_int(self, expected, input, ctx)
+                }
+            }
+        )*
+    };
+}
+
+/// Validate an integer result against its expected value, honoring any override returned by
+/// `SpecialCase::check_int` (skip, custom result, or expected-failure).
+fn validate_int<I, Input>(actual: I, expected: I, input: Input, ctx: &CheckCtx) -> TestResult
+where
+    I: Int + Hex,
+    Input: Hex + fmt::Debug,
+    SpecialCase: MaybeOverride<Input>,
+{
+    let (result, xfail_msg) = match SpecialCase::check_int(input, actual, expected, ctx) {
+        // `require_biteq` forbids overrides.
+        _ if ctx.gen_kind == GeneratorKind::List => (actual == expected, None),
+        CheckAction::AssertSuccess => (actual == expected, None),
+        // An xfail: the check passes only if actual and expected *differ*.
+        CheckAction::AssertFailure(msg) => (actual != expected, Some(msg)),
+        CheckAction::Custom(res) => return res,
+        CheckAction::Skip => return Ok(()),
+        CheckAction::AssertWithUlp(_) => panic!("ulp has no meaning for integer checks"),
+    };
+
+    // Built lazily so the message is only formatted when the check fails.
+    let make_xfail_msg = || match xfail_msg {
+        Some(m) => format!(
+            "expected failure but test passed. Does an XFAIL need to be updated?\n\
+            failed at: {m}",
+        ),
+        None => String::new(),
+    };
+
+    anyhow::ensure!(
+        result,
+        "\
+        \n    input:    {input:?} {ibits}\
+        \n    expected: {expected:<22?} {expbits}\
+        \n    actual:   {actual:<22?} {actbits}\
+        \n    {msg}\
+        ",
+        actbits = actual.hex(),
+        expbits = expected.hex(),
+        ibits = input.hex(),
+        msg = make_xfail_msg()
+    );
+
+    Ok(())
+}
+
+impl_int!(u32, i32, u64, i64);
+
+/* trait implementations for floats */
+
+/// Implement `Hex` and `CheckOutput` for one or more float types.
+macro_rules! impl_float {
+    ($($ty:ty),*) => {
+        $(
+            impl Hex for $ty {
+                fn hex(self) -> String {
+                    // Print the bit representation: `Self::BITS / 4` nibbles plus the `0x` prefix.
+                    format!(
+                        "{:#0width$x}",
+                        self.to_bits(),
+                        width = ((Self::BITS / 4) + 2) as usize
+                    )
+                }
+
+                fn hexf(self) -> String {
+                    format!("{}", Hexf(self))
+                }
+            }
+
+            impl<Input> $crate::CheckOutput<Input> for $ty
+            where
+                Input: Hex + fmt::Debug,
+                SpecialCase: MaybeOverride<Input>,
+            {
+                fn validate<'a>(
+                    self,
+                    expected: Self,
+                    input: Input,
+                    ctx: &$crate::CheckCtx,
+                ) -> TestResult {
+                    // All float types share the non-generic helper below.
+                    validate_float(self, expected, input, ctx)
+                }
+            }
+        )*
+    };
+}
+
+/// Validate a float result against its expected value within the context's allowed ULP,
+/// handling NaNs, infinities, sign mismatches, and `SpecialCase` overrides (including xfails,
+/// whose pass/fail outcome is inverted).
+fn validate_float<F, Input>(actual: F, expected: F, input: Input, ctx: &CheckCtx) -> TestResult
+where
+    F: Float + Hex,
+    Input: Hex + fmt::Debug,
+    u32: TryFrom<F::SignedInt, Error: fmt::Debug>,
+    SpecialCase: MaybeOverride<Input>,
+{
+    let mut assert_failure_msg = None;
+
+    // Create a wrapper function so we only need to `.with_context` once.
+    let mut inner = || -> TestResult {
+        let mut allowed_ulp = ctx.ulp;
+
+        // Forbid overrides if the items came from an explicit list, as long as we are checking
+        // against either MPFR or the result itself.
+        let require_biteq = ctx.gen_kind == GeneratorKind::List && ctx.basis != CheckBasis::Musl;
+
+        match SpecialCase::check_float(input, actual, expected, ctx) {
+            _ if require_biteq => (),
+            CheckAction::AssertSuccess => (),
+            CheckAction::AssertFailure(msg) => assert_failure_msg = Some(msg),
+            CheckAction::Custom(res) => return res,
+            CheckAction::Skip => return Ok(()),
+            CheckAction::AssertWithUlp(ulp_override) => allowed_ulp = ulp_override,
+        };
+
+        // Check when both are NaNs
+        if actual.is_nan() && expected.is_nan() {
+            if require_biteq && ctx.basis == CheckBasis::None {
+                ensure!(actual.to_bits() == expected.to_bits(), "mismatched NaN bitpatterns");
+            }
+            // By default, NaNs have nothing special to check.
+            return Ok(());
+        } else if actual.is_nan() || expected.is_nan() {
+            // Check when only one is a NaN
+            bail!("real value != NaN")
+        }
+
+        // Make sure that the signs are the same before checking ULP to avoid wraparound
+        let act_sig = actual.signum();
+        let exp_sig = expected.signum();
+        ensure!(act_sig == exp_sig, "mismatched signs {act_sig:?} {exp_sig:?}");
+
+        if actual.is_infinite() ^ expected.is_infinite() {
+            bail!("mismatched infinities");
+        }
+
+        // Compare as signed bit patterns; with matching signs, the absolute difference of the
+        // bit representations is the distance in ULP.
+        let act_bits = actual.to_bits().signed();
+        let exp_bits = expected.to_bits().signed();
+
+        let ulp_diff = act_bits.checked_sub(exp_bits).unwrap().abs();
+
+        let ulp_u32 = u32::try_from(ulp_diff)
+            .map_err(|e| anyhow!("{e:?}: ulp of {ulp_diff} exceeds u32::MAX"))?;
+
+        ensure!(ulp_u32 <= allowed_ulp, "ulp {ulp_diff} > {allowed_ulp}",);
+
+        Ok(())
+    };
+
+    let mut res = inner();
+
+    if let Some(msg) = assert_failure_msg {
+        // Invert `Ok` and `Err` if the test is an xfail.
+        if res.is_ok() {
+            let e = anyhow!(
+                "expected failure but test passed. Does an XFAIL need to be updated?\n\
+                failed at: {msg}",
+            );
+            res = Err(e)
+        } else {
+            res = Ok(())
+        }
+    }
+
+    // Attach the full input/expected/actual context (decimal, hex float, and raw bits) to any
+    // failure message.
+    res.with_context(|| {
+        format!(
+            "\
+            \n    input:    {input:?}\
+            \n    as hex:   {ihex}\
+            \n    as bits:  {ibits}\
+            \n    expected: {expected:<22?} {exphex} {expbits}\
+            \n    actual:   {actual:<22?} {acthex} {actbits}\
+            ",
+            ihex = input.hexf(),
+            ibits = input.hex(),
+            exphex = expected.hexf(),
+            expbits = expected.hex(),
+            actbits = actual.hex(),
+            acthex = actual.hexf(),
+        )
+    })
+}
+
+impl_float!(f32, f64);
+
+#[cfg(f16_enabled)]
+impl_float!(f16);
+
+#[cfg(f128_enabled)]
+impl_float!(f128);
+
+/* trait implementations for compound types */
+
+/// Implement `CheckOutput` for combinations of types.
+///
+/// Each element is validated independently via its own `CheckOutput` impl; the shared
+/// input/expected/actual context is attached to whichever element fails first.
+macro_rules! impl_tuples {
+    ($(($a:ty, $b:ty);)*) => {
+        $(
+            impl<Input> CheckOutput<Input> for ($a, $b)
+            where
+                Input: Hex + fmt::Debug,
+                SpecialCase: MaybeOverride<Input>,
+              {
+                fn validate<'a>(
+                    self,
+                    expected: Self,
+                    input: Input,
+                    ctx: &CheckCtx,
+                ) -> TestResult {
+                    self.0.validate(expected.0, input, ctx)
+                        .and_then(|()| self.1.validate(expected.1, input, ctx))
+                        .with_context(|| format!(
+                            "full context:\
+                            \n    input:    {input:?} {ibits}\
+                            \n    as hex:   {ihex}\
+                            \n    as bits:  {ibits}\
+                            \n    expected: {expected:?} {expbits}\
+                            \n    actual:   {self:?} {actbits}\
+                            ",
+                            ihex = input.hexf(),
+                            ibits = input.hex(),
+                            expbits = expected.hex(),
+                            actbits = self.hex(),
+                        ))
+                }
+            }
+        )*
+    };
+}
+
+// The `(float, int)` pairs cover functions like `frexp`/`lgamma_r`; the `(float, float)`
+// pairs cover functions like `sincos` and `modf`.
+impl_tuples!(
+    (f32, i32);
+    (f64, i32);
+    (f32, f32);
+    (f64, f64);
+);