Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_abi/Cargo.toml | 24
-rw-r--r--  compiler/rustc_abi/src/layout.rs (renamed from compiler/rustc_target/src/abi/layout.rs) | 20
-rw-r--r--  compiler/rustc_abi/src/lib.rs | 1399
-rw-r--r--  compiler/rustc_hir_analysis/src/collect.rs | 3
-rw-r--r--  compiler/rustc_index/src/lib.rs | 19
-rw-r--r--  compiler/rustc_lint/src/types.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 147
-rw-r--r--  compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs | 2
-rw-r--r--  compiler/rustc_session/src/config.rs | 4
-rw-r--r--  compiler/rustc_target/Cargo.toml | 26
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs | 2
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs | 14
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs | 1522
-rw-r--r--  compiler/rustc_target/src/lib.rs | 23
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs | 119
-rw-r--r--  compiler/rustc_ty_utils/Cargo.toml | 2
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 2
-rw-r--r--  compiler/rustc_ty_utils/src/layout_sanity_check.rs | 18
19 files changed, 1683 insertions, 1667 deletions
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
new file mode 100644
index 00000000000..48b199cb8ee
--- /dev/null
+++ b/compiler/rustc_abi/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "rustc_abi"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+rand = { version = "0.8.4", default-features = false, optional = true }
+rand_xoshiro = { version = "0.6.0", optional = true }
+rustc_data_structures = { path = "../rustc_data_structures", optional = true }
+rustc_index = { path = "../rustc_index", default-features = false }
+rustc_macros = { path = "../rustc_macros", optional = true }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+
+[features]
+default = ["nightly", "randomize"]
+randomize = ["rand", "rand_xoshiro"]
+nightly = [
+    "rustc_data_structures",
+    "rustc_index/nightly",
+    "rustc_macros",
+    "rustc_serialize",
+]
diff --git a/compiler/rustc_target/src/abi/layout.rs b/compiler/rustc_abi/src/layout.rs
index cf4843e9d6c..39ea7a85be6 100644
--- a/compiler/rustc_target/src/abi/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -7,7 +7,9 @@ use std::{
     ops::{Bound, Deref},
 };
 
+#[cfg(feature = "randomize")]
 use rand::{seq::SliceRandom, SeedableRng};
+#[cfg(feature = "randomize")]
 use rand_xoshiro::Xoshiro128StarStar;
 
 use tracing::debug;
@@ -91,14 +93,16 @@ pub trait LayoutCalculator {
             // If `-Z randomize-layout` was enabled for the type definition we can shuffle
             // the field ordering to try and catch some code making assumptions about layouts
             // we don't guarantee
-            if repr.can_randomize_type_layout() {
-                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
-                // randomize field ordering with
-                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
-                // Shuffle the ordering of the fields
-                optimizing.shuffle(&mut rng);
+            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+                #[cfg(feature = "randomize")]
+                {
+                    // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+                    // randomize field ordering with
+                    let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
 
+                    // Shuffle the ordering of the fields
+                    optimizing.shuffle(&mut rng);
+                }
                 // Otherwise we just leave things alone and actually optimize the type's fields
             } else {
                 match kind {
@@ -900,7 +904,7 @@ pub trait LayoutCalculator {
         let mut abi = Abi::Aggregate { sized: true };
         let index = V::new(0);
         for field in &variants[index] {
-            assert!(!field.is_unsized());
+            assert!(field.is_sized());
             align = align.max(field.align);
 
             // If all non-ZST fields have the same ABI, forward this ABI
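
The hunk above makes the seeded shuffle compile only when the `randomize` feature is enabled, so builds of `rustc_abi` without it drop the `rand` dependency entirely. A minimal sketch of the same pattern, assuming the `rand` 0.8 and `rand_xoshiro` 0.6 crates declared in the new Cargo.toml:

#[cfg(feature = "randomize")]
fn shuffle_fields(field_order: &mut [u32], seed: u64) {
    use rand::{seq::SliceRandom, SeedableRng};
    use rand_xoshiro::Xoshiro128StarStar;

    // The same seed always yields the same permutation, so randomized
    // layouts remain reproducible for a given crate.
    let mut rng = Xoshiro128StarStar::seed_from_u64(seed);
    field_order.shuffle(&mut rng);
}
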
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
new file mode 100644
index 00000000000..4f4a4bf314f
--- /dev/null
+++ b/compiler/rustc_abi/src/lib.rs
@@ -0,0 +1,1399 @@
+#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+#[cfg(feature = "nightly")]
+use std::iter::Step;
+use std::num::{NonZeroUsize, ParseIntError};
+use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
+use std::str::FromStr;
+
+use bitflags::bitflags;
+use rustc_index::vec::{Idx, IndexVec};
+#[cfg(feature = "nightly")]
+use rustc_macros::HashStable_Generic;
+#[cfg(feature = "nightly")]
+use rustc_macros::{Decodable, Encodable};
+
+mod layout;
+
+pub use layout::LayoutCalculator;
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext {}
+
+use Integer::*;
+use Primitive::*;
+
+bitflags! {
+    #[derive(Default)]
+    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+    pub struct ReprFlags: u8 {
+        const IS_C               = 1 << 0;
+        const IS_SIMD            = 1 << 1;
+        const IS_TRANSPARENT     = 1 << 2;
+        // Internal only for now. If true, don't reorder fields.
+        const IS_LINEAR          = 1 << 3;
+        // If true, the type's layout can be randomized using
+        // the seed stored in `ReprOptions.layout_seed`
+        const RANDOMIZE_LAYOUT   = 1 << 4;
+        // Any of these flags being set prevent field reordering optimisation.
+        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
+                                 | ReprFlags::IS_SIMD.bits
+                                 | ReprFlags::IS_LINEAR.bits;
+    }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub enum IntegerType {
+    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, that
+    /// is, `Pointer(true)` is `isize`.
+    Pointer(bool),
+    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness; `Fixed(I8, false)` means `u8`.
+    Fixed(Integer, bool),
+}
+
+impl IntegerType {
+    pub fn is_signed(&self) -> bool {
+        match self {
+            IntegerType::Pointer(b) => *b,
+            IntegerType::Fixed(_, b) => *b,
+        }
+    }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct ReprOptions {
+    pub int: Option<IntegerType>,
+    pub align: Option<Align>,
+    pub pack: Option<Align>,
+    pub flags: ReprFlags,
+    /// The seed to be used for randomizing a type's layout
+    ///
+    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
+    /// be the "most accurate" hash as it'd encompass the item and crate
+    /// hash without loss, but it does pay the price of being larger.
+    /// Everything's a tradeoff, a `u64` seed should be sufficient for our
+    /// purposes (primarily `-Z randomize-layout`)
+    pub field_shuffle_seed: u64,
+}
+
+impl ReprOptions {
+    #[inline]
+    pub fn simd(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_SIMD)
+    }
+
+    #[inline]
+    pub fn c(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_C)
+    }
+
+    #[inline]
+    pub fn packed(&self) -> bool {
+        self.pack.is_some()
+    }
+
+    #[inline]
+    pub fn transparent(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_TRANSPARENT)
+    }
+
+    #[inline]
+    pub fn linear(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_LINEAR)
+    }
+
+    /// Returns the discriminant type, given these `repr` options.
+    /// This must only be called on enums!
+    pub fn discr_type(&self) -> IntegerType {
+        self.int.unwrap_or(IntegerType::Pointer(true))
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+    /// layout" optimizations, such as representing `Foo<&T>` as a
+    /// single pointer.
+    pub fn inhibit_enum_layout_opt(&self) -> bool {
+        self.c() || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+        if let Some(pack) = self.pack {
+            if pack.bytes() == 1 {
+                return true;
+            }
+        }
+
+        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+    }
+
+    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
+    /// was enabled for its declaration crate
+    pub fn can_randomize_type_layout(&self) -> bool {
+        !self.inhibit_struct_field_reordering_opt()
+            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+    pub fn inhibit_union_abi_opt(&self) -> bool {
+        self.c()
+    }
+}
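
// A hypothetical check of the predicates above (a sketch, not part of this
// commit): `ReprOptions` derives `Default`, so struct-update syntax works.
let c_repr = ReprOptions { flags: ReprFlags::IS_C, ..Default::default() };
assert!(c_repr.inhibit_struct_field_reordering_opt());
assert!(!c_repr.can_randomize_type_layout());
let randomized = ReprOptions { flags: ReprFlags::RANDOMIZE_LAYOUT, ..Default::default() };
assert!(randomized.can_randomize_type_layout());
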
+
+/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+#[derive(Debug, PartialEq, Eq)]
+pub struct TargetDataLayout {
+    pub endian: Endian,
+    pub i1_align: AbiAndPrefAlign,
+    pub i8_align: AbiAndPrefAlign,
+    pub i16_align: AbiAndPrefAlign,
+    pub i32_align: AbiAndPrefAlign,
+    pub i64_align: AbiAndPrefAlign,
+    pub i128_align: AbiAndPrefAlign,
+    pub f32_align: AbiAndPrefAlign,
+    pub f64_align: AbiAndPrefAlign,
+    pub pointer_size: Size,
+    pub pointer_align: AbiAndPrefAlign,
+    pub aggregate_align: AbiAndPrefAlign,
+
+    /// Alignments for vector types.
+    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+
+    pub instruction_address_space: AddressSpace,
+
+    /// Minimum size of #[repr(C)] enums (default: `I32`, i.e. 32 bits)
+    pub c_enum_min_size: Integer,
+}
+
+impl Default for TargetDataLayout {
+    /// Creates an instance of `TargetDataLayout`.
+    fn default() -> TargetDataLayout {
+        let align = |bits| Align::from_bits(bits).unwrap();
+        TargetDataLayout {
+            endian: Endian::Big,
+            i1_align: AbiAndPrefAlign::new(align(8)),
+            i8_align: AbiAndPrefAlign::new(align(8)),
+            i16_align: AbiAndPrefAlign::new(align(16)),
+            i32_align: AbiAndPrefAlign::new(align(32)),
+            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+            f32_align: AbiAndPrefAlign::new(align(32)),
+            f64_align: AbiAndPrefAlign::new(align(64)),
+            pointer_size: Size::from_bits(64),
+            pointer_align: AbiAndPrefAlign::new(align(64)),
+            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
+            vector_align: vec![
+                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
+                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
+            ],
+            instruction_address_space: AddressSpace::DATA,
+            c_enum_min_size: Integer::I32,
+        }
+    }
+}
+
+pub enum TargetDataLayoutErrors<'a> {
+    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
+    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
+    MissingAlignment { cause: &'a str },
+    InvalidAlignment { cause: &'a str, err: String },
+    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
+    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
+    InvalidBitsSize { err: String },
+}
+
+impl TargetDataLayout {
+    /// Returns exclusive upper bound on object size.
+    ///
+    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+    /// index every address within an object along with one byte past the end, along with allowing
+    /// `isize` to store the difference between any two pointers into an object.
+    ///
+    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+    /// address space on 64-bit ARMv8 and x86_64.
+    #[inline]
+    pub fn obj_size_bound(&self) -> u64 {
+        match self.pointer_size.bits() {
+            16 => 1 << 15,
+            32 => 1 << 31,
+            64 => 1 << 47,
+            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+        }
+    }
+
+    #[inline]
+    pub fn ptr_sized_integer(&self) -> Integer {
+        match self.pointer_size.bits() {
+            16 => I16,
+            32 => I32,
+            64 => I64,
+            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+        }
+    }
+
+    #[inline]
+    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
+        for &(size, align) in &self.vector_align {
+            if size == vec_size {
+                return align;
+            }
+        }
+        // Default to natural alignment, which is what LLVM does.
+        // That is, use the size, rounded up to a power of 2.
+        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
+    }
+}
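
// A sketch of the helpers above against the `Default` layout (whose
// pointers are 64-bit): the object-size bound and pointer-sized integer
// follow directly, and unlisted vector sizes get natural alignment.
let dl = TargetDataLayout::default();
assert_eq!(dl.obj_size_bound(), 1 << 47);
assert_eq!(dl.ptr_sized_integer(), Integer::I64);
assert_eq!(dl.vector_align(Size::from_bits(256)).abi, Align::from_bits(256).unwrap());
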
+
+pub trait HasDataLayout {
+    fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl HasDataLayout for TargetDataLayout {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        self
+    }
+}
+
+/// Endianness of the target, which must match cfg(target-endian).
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Endian {
+    Little,
+    Big,
+}
+
+impl Endian {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Self::Little => "little",
+            Self::Big => "big",
+        }
+    }
+}
+
+impl fmt::Debug for Endian {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+impl FromStr for Endian {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "little" => Ok(Self::Little),
+            "big" => Ok(Self::Big),
+            _ => Err(format!(r#"unknown endian: "{}""#, s)),
+        }
+    }
+}
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct Size {
+    raw: u64,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Size {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Size({} bytes)", self.bytes())
+    }
+}
+
+impl Size {
+    pub const ZERO: Size = Size { raw: 0 };
+
+    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
+    /// not a multiple of 8.
+    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+        let bits = bits.try_into().ok().unwrap();
+        // Avoid potential overflow from `bits + 7`.
+        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
+    }
+
+    #[inline]
+    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+        let bytes: u64 = bytes.try_into().ok().unwrap();
+        Size { raw: bytes }
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        self.raw
+    }
+
+    #[inline]
+    pub fn bytes_usize(self) -> usize {
+        self.bytes().try_into().unwrap()
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        #[cold]
+        fn overflow(bytes: u64) -> ! {
+            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+        }
+
+        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
+    }
+
+    #[inline]
+    pub fn bits_usize(self) -> usize {
+        self.bits().try_into().unwrap()
+    }
+
+    #[inline]
+    pub fn align_to(self, align: Align) -> Size {
+        let mask = align.bytes() - 1;
+        Size::from_bytes((self.bytes() + mask) & !mask)
+    }
+
+    #[inline]
+    pub fn is_aligned(self, align: Align) -> bool {
+        let mask = align.bytes() - 1;
+        self.bytes() & mask == 0
+    }
+
+    #[inline]
+    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
+        let dl = cx.data_layout();
+
+        let bytes = self.bytes().checked_add(offset.bytes())?;
+
+        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+    }
+
+    #[inline]
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
+        let dl = cx.data_layout();
+
+        let bytes = self.bytes().checked_mul(count)?;
+        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+    }
+
+    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+    /// (i.e., if it is negative, fill with 1's on the left).
+    #[inline]
+    pub fn sign_extend(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        // Sign-extend it.
+        let shift = 128 - size;
+        // Shift the unsigned value to the left, then shift back to the right as signed
+        // (essentially fills with sign bit on the left).
+        (((value << shift) as i128) >> shift) as u128
+    }
+
+    /// Truncates `value` to `self` bits.
+    #[inline]
+    pub fn truncate(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        let shift = 128 - size;
+        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+        (value << shift) >> shift
+    }
+
+    #[inline]
+    pub fn signed_int_min(&self) -> i128 {
+        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+    }
+
+    #[inline]
+    pub fn signed_int_max(&self) -> i128 {
+        i128::MAX >> (128 - self.bits())
+    }
+
+    #[inline]
+    pub fn unsigned_int_max(&self) -> u128 {
+        u128::MAX >> (128 - self.bits())
+    }
+}
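
// Worked examples for the arithmetic above (illustrative only): `from_bits`
// rounds up to whole bytes, and `sign_extend`/`truncate` treat values as
// `self.bits()` wide.
assert_eq!(Size::from_bits(13).bytes(), 2);
let byte = Size::from_bits(8);
assert_eq!(byte.truncate(0x1FF), 0xFF); // high bits dropped
assert_eq!(byte.sign_extend(0xFE), u128::MAX - 1); // 0xFE is -2 as an i8
assert_eq!(byte.unsigned_int_max(), 0xFF);
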
+
+// Panicking addition, subtraction and multiplication for convenience.
+// Avoid during layout computation, return `LayoutError` instead.
+
+impl Add for Size {
+    type Output = Size;
+    #[inline]
+    fn add(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Sub for Size {
+    type Output = Size;
+    #[inline]
+    fn sub(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Mul<Size> for u64 {
+    type Output = Size;
+    #[inline]
+    fn mul(self, size: Size) -> Size {
+        size * self
+    }
+}
+
+impl Mul<u64> for Size {
+    type Output = Size;
+    #[inline]
+    fn mul(self, count: u64) -> Size {
+        match self.bytes().checked_mul(count) {
+            Some(bytes) => Size::from_bytes(bytes),
+            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
+        }
+    }
+}
+
+impl AddAssign for Size {
+    #[inline]
+    fn add_assign(&mut self, other: Size) {
+        *self = *self + other;
+    }
+}
+
+#[cfg(feature = "nightly")]
+impl Step for Size {
+    #[inline]
+    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+        u64::steps_between(&start.bytes(), &end.bytes())
+    }
+
+    #[inline]
+    fn forward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn forward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
+    }
+
+    #[inline]
+    fn backward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn backward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
+    }
+}
+
+/// Alignment of a type in bytes (always a power of two).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct Align {
+    pow2: u8,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Align {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Align({} bytes)", self.bytes())
+    }
+}
+
+impl Align {
+    pub const ONE: Align = Align { pow2: 0 };
+    pub const MAX: Align = Align { pow2: 29 };
+
+    #[inline]
+    pub fn from_bits(bits: u64) -> Result<Align, String> {
+        Align::from_bytes(Size::from_bits(bits).bytes())
+    }
+
+    #[inline]
+    pub fn from_bytes(align: u64) -> Result<Align, String> {
+        // Treat an alignment of 0 bytes like 1-byte alignment.
+        if align == 0 {
+            return Ok(Align::ONE);
+        }
+
+        #[cold]
+        fn not_power_of_2(align: u64) -> String {
+            format!("`{}` is not a power of 2", align)
+        }
+
+        #[cold]
+        fn too_large(align: u64) -> String {
+            format!("`{}` is too large", align)
+        }
+
+        let mut bytes = align;
+        let mut pow2: u8 = 0;
+        while (bytes & 1) == 0 {
+            pow2 += 1;
+            bytes >>= 1;
+        }
+        if bytes != 1 {
+            return Err(not_power_of_2(align));
+        }
+        if pow2 > Self::MAX.pow2 {
+            return Err(too_large(align));
+        }
+
+        Ok(Align { pow2 })
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        1 << self.pow2
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        self.bytes() * 8
+    }
+
+    /// Computes the best alignment possible for the given offset
+    /// (the largest power of two that the offset is a multiple of).
+    ///
+    /// N.B., for an offset of `0`, this happens to return `2^64`.
+    #[inline]
+    pub fn max_for_offset(offset: Size) -> Align {
+        Align { pow2: offset.bytes().trailing_zeros() as u8 }
+    }
+
+    /// Lower the alignment, if necessary, such that the given offset
+    /// is aligned to it (the offset is a multiple of the alignment).
+    #[inline]
+    pub fn restrict_for_offset(self, offset: Size) -> Align {
+        self.min(Align::max_for_offset(offset))
+    }
+}
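
// A worked example: 12 = 4 * 3, so the best alignment an offset of 12
// bytes can guarantee is 4, and a 16-byte alignment gets lowered to it.
let off = Size::from_bytes(12);
assert_eq!(Align::max_for_offset(off), Align::from_bytes(4).unwrap());
let a16 = Align::from_bytes(16).unwrap();
assert_eq!(a16.restrict_for_offset(off), Align::from_bytes(4).unwrap());
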
+
+/// A pair of alignments, ABI-mandated and preferred.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct AbiAndPrefAlign {
+    pub abi: Align,
+    pub pref: Align,
+}
+
+impl AbiAndPrefAlign {
+    #[inline]
+    pub fn new(align: Align) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: align, pref: align }
+    }
+
+    #[inline]
+    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
+    }
+
+    #[inline]
+    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
+    }
+}
+
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub enum Integer {
+    I8,
+    I16,
+    I32,
+    I64,
+    I128,
+}
+
+impl Integer {
+    #[inline]
+    pub fn size(self) -> Size {
+        match self {
+            I8 => Size::from_bytes(1),
+            I16 => Size::from_bytes(2),
+            I32 => Size::from_bytes(4),
+            I64 => Size::from_bytes(8),
+            I128 => Size::from_bytes(16),
+        }
+    }
+
+    /// Gets the Integer type from an IntegerType.
+    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
+        let dl = cx.data_layout();
+
+        match ity {
+            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
+            IntegerType::Fixed(x, _) => x,
+        }
+    }
+
+    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+        let dl = cx.data_layout();
+
+        match self {
+            I8 => dl.i8_align,
+            I16 => dl.i16_align,
+            I32 => dl.i32_align,
+            I64 => dl.i64_align,
+            I128 => dl.i128_align,
+        }
+    }
+
+    /// Finds the smallest Integer type which can represent the signed value.
+    #[inline]
+    pub fn fit_signed(x: i128) -> Integer {
+        match x {
+            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
+            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
+            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
+            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
+            _ => I128,
+        }
+    }
+
+    /// Finds the smallest Integer type which can represent the unsigned value.
+    #[inline]
+    pub fn fit_unsigned(x: u128) -> Integer {
+        match x {
+            0..=0x0000_0000_0000_00ff => I8,
+            0..=0x0000_0000_0000_ffff => I16,
+            0..=0x0000_0000_ffff_ffff => I32,
+            0..=0xffff_ffff_ffff_ffff => I64,
+            _ => I128,
+        }
+    }
+
+    /// Finds the smallest integer with the given alignment.
+    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
+        let dl = cx.data_layout();
+
+        for candidate in [I8, I16, I32, I64, I128] {
+            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
+                return Some(candidate);
+            }
+        }
+        None
+    }
+
+    /// Find the largest integer with the given alignment or less.
+    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
+        let dl = cx.data_layout();
+
+        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
+        for candidate in [I64, I32, I16] {
+            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
+                return candidate;
+            }
+        }
+        I8
+    }
+
+    // FIXME(eddyb) consolidate this and other methods that find the appropriate
+    // `Integer` given some requirements.
+    #[inline]
+    pub fn from_size(size: Size) -> Result<Self, String> {
+        match size.bits() {
+            8 => Ok(Integer::I8),
+            16 => Ok(Integer::I16),
+            32 => Ok(Integer::I32),
+            64 => Ok(Integer::I64),
+            128 => Ok(Integer::I128),
+            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
+        }
+    }
+}
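
// Boundary cases for the `fit_*` helpers above: -128 still fits an I8 but
// -129 does not, and 255 fits an I8 while 256 needs an I16.
assert_eq!(Integer::fit_signed(-128), Integer::I8);
assert_eq!(Integer::fit_signed(-129), Integer::I16);
assert_eq!(Integer::fit_unsigned(255), Integer::I8);
assert_eq!(Integer::fit_unsigned(256), Integer::I16);
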
+
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Primitive {
+    /// The `bool` is the signedness of the `Integer` type.
+    ///
+    /// One would think we would not care about such details this low down,
+    /// but some ABIs are described in terms of C types and ISAs where the
+    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
+    /// a negative integer passed by zero-extension will appear positive in
+    /// the callee, and most operations on it will produce the wrong values.
+    Int(Integer, bool),
+    F32,
+    F64,
+    Pointer,
+}
+
+impl Primitive {
+    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
+        let dl = cx.data_layout();
+
+        match self {
+            Int(i, _) => i.size(),
+            F32 => Size::from_bits(32),
+            F64 => Size::from_bits(64),
+            Pointer => dl.pointer_size,
+        }
+    }
+
+    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+        let dl = cx.data_layout();
+
+        match self {
+            Int(i, _) => i.align(dl),
+            F32 => dl.f32_align,
+            F64 => dl.f64_align,
+            Pointer => dl.pointer_align,
+        }
+    }
+
+    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
+    #[inline]
+    pub fn is_float(self) -> bool {
+        matches!(self, F32 | F64)
+    }
+
+    // FIXME(eddyb) remove, it's completely unused.
+    #[inline]
+    pub fn is_int(self) -> bool {
+        matches!(self, Int(..))
+    }
+
+    #[inline]
+    pub fn is_ptr(self) -> bool {
+        matches!(self, Pointer)
+    }
+}
+
+/// Inclusive wrap-around range of valid values, that is, if
+/// start > end, it represents `start..=MAX`,
+/// followed by `0..=end`.
+///
+/// That is, for an i8 primitive, a range of `254..=2` means the following
+/// sequence:
+///
+///    254 (-2), 255 (-1), 0, 1, 2
+///
+/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct WrappingRange {
+    pub start: u128,
+    pub end: u128,
+}
+
+impl WrappingRange {
+    pub fn full(size: Size) -> Self {
+        Self { start: 0, end: size.unsigned_int_max() }
+    }
+
+    /// Returns `true` if `v` is contained in the range.
+    #[inline(always)]
+    pub fn contains(&self, v: u128) -> bool {
+        if self.start <= self.end {
+            self.start <= v && v <= self.end
+        } else {
+            self.start <= v || v <= self.end
+        }
+    }
+
+    /// Returns `self` with `start` replaced.
+    #[inline(always)]
+    pub fn with_start(mut self, start: u128) -> Self {
+        self.start = start;
+        self
+    }
+
+    /// Returns `self` with `end` replaced.
+    #[inline(always)]
+    pub fn with_end(mut self, end: u128) -> Self {
+        self.end = end;
+        self
+    }
+
+    /// Returns `true` if `size` completely fills the range.
+    #[inline]
+    pub fn is_full_for(&self, size: Size) -> bool {
+        let max_value = size.unsigned_int_max();
+        debug_assert!(self.start <= max_value && self.end <= max_value);
+        self.start == (self.end.wrapping_add(1) & max_value)
+    }
+}
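
// The `254..=2` example from the doc comment, checked for an 8-bit value:
// the range wraps through MAX and contains exactly {254, 255, 0, 1, 2}.
let r = WrappingRange { start: 254, end: 2 };
assert!(r.contains(255) && r.contains(0) && r.contains(2));
assert!(!r.contains(3) && !r.contains(253));
assert!(WrappingRange::full(Size::from_bits(8)).is_full_for(Size::from_bits(8)));
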
+
+impl fmt::Debug for WrappingRange {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.start > self.end {
+            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+        } else {
+            write!(fmt, "{}..={}", self.start, self.end)?;
+        }
+        Ok(())
+    }
+}
+
+/// Information about one scalar component of a Rust type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Scalar {
+    Initialized {
+        value: Primitive,
+
+        // FIXME(eddyb) always use the shortest range, e.g., by finding
+        // the largest space between two consecutive valid values and
+        // taking everything else as the (shortest) valid range.
+        valid_range: WrappingRange,
+    },
+    Union {
+        /// Even for unions, we need to use the correct registers for the kind of
+        /// values inside the union, so we keep the `Primitive` type around. We
+        /// also use it to compute the size of the scalar.
+        /// However, unions never have niches and even allow undef,
+        /// so there is no `valid_range`.
+        value: Primitive,
+    },
+}
+
+impl Scalar {
+    #[inline]
+    pub fn is_bool(&self) -> bool {
+        matches!(
+            self,
+            Scalar::Initialized {
+                value: Int(I8, false),
+                valid_range: WrappingRange { start: 0, end: 1 }
+            }
+        )
+    }
+
+    /// Get the primitive representation of this type, ignoring the valid range and whether the
+    /// value is allowed to be undefined (due to being a union).
+    pub fn primitive(&self) -> Primitive {
+        match *self {
+            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+        }
+    }
+
+    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
+        self.primitive().align(cx)
+    }
+
+    pub fn size(self, cx: &impl HasDataLayout) -> Size {
+        self.primitive().size(cx)
+    }
+
+    #[inline]
+    pub fn to_union(&self) -> Self {
+        Self::Union { value: self.primitive() }
+    }
+
+    #[inline]
+    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+        }
+    }
+
+    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+    #[inline]
+    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+        match self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+        }
+    }
+
+    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole layout.
+    #[inline]
+    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+            Scalar::Union { .. } => true,
+        }
+    }
+
+    /// Returns `true` if this type can be left uninit.
+    #[inline]
+    pub fn is_uninit_valid(&self) -> bool {
+        match *self {
+            Scalar::Initialized { .. } => false,
+            Scalar::Union { .. } => true,
+        }
+    }
+}
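
// A sketch contrasting the two variants: an initialized bool scalar must
// not be left uninit, while its `to_union` form is always valid and may be.
let dl = TargetDataLayout::default();
let b = Scalar::Initialized {
    value: Primitive::Int(Integer::I8, false),
    valid_range: WrappingRange { start: 0, end: 1 },
};
assert!(b.is_bool() && !b.is_uninit_valid() && !b.is_always_valid(&dl));
assert!(b.to_union().is_always_valid(&dl) && b.to_union().is_uninit_valid());
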
+
+/// Describes how the fields of a type are located in memory.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum FieldsShape {
+    /// Scalar primitives and `!`, which never have fields.
+    Primitive,
+
+    /// All fields start at no offset. The `usize` is the field count.
+    Union(NonZeroUsize),
+
+    /// Array/vector-like placement, with all fields of identical types.
+    Array { stride: Size, count: u64 },
+
+    /// Struct-like placement, with precomputed offsets.
+    ///
+    /// Fields are guaranteed to not overlap, but note that gaps
+    /// before, between and after all the fields are NOT always
+    /// padding, and as such their contents may not be discarded.
+    /// For example, enum variants leave a gap at the start,
+    /// where the discriminant field in the enum layout goes.
+    Arbitrary {
+        /// Offsets for the first byte of each field,
+        /// ordered to match the source definition order.
+        /// This vector is not necessarily in increasing order.
+        // FIXME(eddyb) use small vector optimization for the common case.
+        offsets: Vec<Size>,
+
+        /// Maps source order field indices to memory order indices,
+        /// depending on how the fields were reordered (if at all).
+        /// This is a permutation, with both the source order and the
+        /// memory order using the same (0..n) index ranges.
+        ///
+        /// Note that during computation of `memory_index`, sometimes
+        /// it is easier to operate on the inverse mapping (that is,
+        /// from memory order to source order), and that is usually
+        /// named `inverse_memory_index`.
+        ///
+        // FIXME(eddyb) build a better abstraction for permutations, if possible.
+        // FIXME(camlorn) also consider small vector optimization here.
+        memory_index: Vec<u32>,
+    },
+}
+
+impl FieldsShape {
+    #[inline]
+    pub fn count(&self) -> usize {
+        match *self {
+            FieldsShape::Primitive => 0,
+            FieldsShape::Union(count) => count.get(),
+            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
+        }
+    }
+
+    #[inline]
+    pub fn offset(&self, i: usize) -> Size {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(count) => {
+                assert!(
+                    i < count.get(),
+                    "tried to access field {} of union with {} fields",
+                    i,
+                    count
+                );
+                Size::ZERO
+            }
+            FieldsShape::Array { stride, count } => {
+                let i = u64::try_from(i).unwrap();
+                assert!(i < count);
+                stride * i
+            }
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+        }
+    }
+
+    #[inline]
+    pub fn memory_index(&self, i: usize) -> usize {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+        }
+    }
+
+    /// Gets source indices of the fields by increasing offsets.
+    #[inline]
+    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
+        let mut inverse_small = [0u8; 64];
+        let mut inverse_big = vec![];
+        let use_small = self.count() <= inverse_small.len();
+
+        // We have to write this logic twice in order to keep the array small.
+        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
+            if use_small {
+                for i in 0..self.count() {
+                    inverse_small[memory_index[i] as usize] = i as u8;
+                }
+            } else {
+                inverse_big = vec![0; self.count()];
+                for i in 0..self.count() {
+                    inverse_big[memory_index[i] as usize] = i as u32;
+                }
+            }
+        }
+
+        (0..self.count()).map(move |i| match *self {
+            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { .. } => {
+                if use_small {
+                    inverse_small[i] as usize
+                } else {
+                    inverse_big[i] as usize
+                }
+            }
+        })
+    }
+}
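
// A permutation example for `Arbitrary` (illustrative only): with
// memory_index = [2, 0, 1] (source index -> memory position), iterating
// by increasing offset visits source fields 1, 2, 0.
let shape = FieldsShape::Arbitrary {
    offsets: vec![Size::from_bytes(8), Size::ZERO, Size::from_bytes(4)],
    memory_index: vec![2, 0, 1],
};
let by_offset: Vec<usize> = shape.index_by_increasing_offset().collect();
assert_eq!(by_offset, [1, 2, 0]);
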
+
+/// An identifier that specifies the address space that some operation
+/// should operate on. Special address spaces have an effect on code generation,
+/// depending on the target and the address spaces it implements.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AddressSpace(pub u32);
+
+impl AddressSpace {
+    /// The default address space, corresponding to data space.
+    pub const DATA: Self = AddressSpace(0);
+}
+
+/// Describes how values of the type are passed by target ABIs,
+/// in terms of categories of C types there are ABI rules for.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Abi {
+    Uninhabited,
+    Scalar(Scalar),
+    ScalarPair(Scalar, Scalar),
+    Vector {
+        element: Scalar,
+        count: u64,
+    },
+    Aggregate {
+        /// If true, the size is exact, otherwise it's only a lower bound.
+        sized: bool,
+    },
+}
+
+impl Abi {
+    /// Returns `true` if the layout corresponds to an unsized type.
+    #[inline]
+    pub fn is_unsized(&self) -> bool {
+        match *self {
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Aggregate { sized } => !sized,
+        }
+    }
+
+    #[inline]
+    pub fn is_sized(&self) -> bool {
+        !self.is_unsized()
+    }
+
+    /// Returns `true` if this is a single signed integer scalar
+    #[inline]
+    pub fn is_signed(&self) -> bool {
+        match self {
+            Abi::Scalar(scal) => match scal.primitive() {
+                Primitive::Int(_, signed) => signed,
+                _ => false,
+            },
+            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
+        }
+    }
+
+    /// Returns `true` if this is an uninhabited type
+    #[inline]
+    pub fn is_uninhabited(&self) -> bool {
+        matches!(*self, Abi::Uninhabited)
+    }
+
+    /// Returns `true` if this is a scalar type
+    #[inline]
+    pub fn is_scalar(&self) -> bool {
+        matches!(*self, Abi::Scalar(_))
+    }
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Variants<V: Idx> {
+    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
+    Single { index: V },
+
+    /// Enum-likes with more than one inhabited variant: each variant comes with
+    /// a *discriminant* (usually the same as the variant index but the user can
+    /// assign explicit discriminant values).  That discriminant is encoded
+    /// as a *tag* on the machine.  The layout of each variant is
+    /// a struct, and they all have space reserved for the tag.
+    /// For enums, the tag is the sole field of the layout.
+    Multiple {
+        tag: Scalar,
+        tag_encoding: TagEncoding<V>,
+        tag_field: usize,
+        variants: IndexVec<V, LayoutS<V>>,
+    },
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum TagEncoding<V: Idx> {
+    /// The tag directly stores the discriminant, but possibly with a smaller layout
+    /// (so converting the tag to the discriminant can require sign extension).
+    Direct,
+
+    /// Niche (values invalid for a type) encoding the discriminant:
+    /// Discriminant and variant index coincide.
+    /// The variant `untagged_variant` contains a niche at an arbitrary
+    /// offset (field `tag_field` of the enum), which for a variant with
+    /// discriminant `d` is set to
+    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
+    ///
+    /// For example, `Option<(usize, &T)>` is represented such that
+    /// `None` has a null pointer for the second tuple field, and
+    /// `Some` is the identity function (with a non-null reference).
+    Niche { untagged_variant: V, niche_variants: RangeInclusive<V>, niche_start: u128 },
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct Niche {
+    pub offset: Size,
+    pub value: Primitive,
+    pub valid_range: WrappingRange,
+}
+
+impl Niche {
+    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
+        let Scalar::Initialized { value, valid_range } = scalar else { return None };
+        let niche = Niche { offset, value, valid_range };
+        if niche.available(cx) > 0 { Some(niche) } else { None }
+    }
+
+    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        // Find out how many values are outside the valid range.
+        let niche = v.end.wrapping_add(1)..v.start;
+        niche.end.wrapping_sub(niche.start) & max_value
+    }
+
+    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
+        assert!(count > 0);
+
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        let niche = v.end.wrapping_add(1)..v.start;
+        let available = niche.end.wrapping_sub(niche.start) & max_value;
+        if count > available {
+            return None;
+        }
+
+        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
+        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
+        // This is accomplished by preferring enums with 2 variants (`count==1`) and always taking the shortest path to niche zero.
+        // Having `None` in niche zero can enable some special optimizations.
+        //
+        // Bound selection criteria:
+        // 1. Select closest to zero given wrapping semantics.
+        // 2. Avoid moving past zero if possible.
+        //
+        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
+        // If niche zero is already reserved, the selection of bounds are of little interest.
+        let move_start = |v: WrappingRange| {
+            let start = v.start.wrapping_sub(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
+        };
+        let move_end = |v: WrappingRange| {
+            let start = v.end.wrapping_add(1) & max_value;
+            let end = v.end.wrapping_add(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
+        };
+        let distance_end_zero = max_value - v.end;
+        if v.start > v.end {
+            // zero is unavailable because wrapping occurs
+            move_end(v)
+        } else if v.start <= distance_end_zero {
+            if count <= v.start {
+                move_start(v)
+            } else {
+                // moved past zero, use other bound
+                move_end(v)
+            }
+        } else {
+            let end = v.end.wrapping_add(count) & max_value;
+            let overshot_zero = (1..=v.end).contains(&end);
+            if overshot_zero {
+                // moved past zero, use other bound
+                move_start(v)
+            } else {
+                move_end(v)
+            }
+        }
+    }
+}
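
// A niche example: a bool-like scalar (valid 0..=1 in a u8) leaves 254
// invalid values; reserving one of them extends the range to 0..=2, so
// the first newly-valid value is 2.
let dl = TargetDataLayout::default();
let scalar = Scalar::Initialized {
    value: Primitive::Int(Integer::I8, false),
    valid_range: WrappingRange { start: 0, end: 1 },
};
let niche = Niche::from_scalar(&dl, Size::ZERO, scalar).unwrap();
assert_eq!(niche.available(&dl), 254);
let (start, _) = niche.reserve(&dl, 1).unwrap();
assert_eq!(start, 2);
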
+
+#[derive(PartialEq, Eq, Hash, Clone)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct LayoutS<V: Idx> {
+    /// Says where the fields are located within the layout.
+    pub fields: FieldsShape,
+
+    /// Encodes information about multi-variant layouts.
+    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
+    /// shared between all variants. One of them will be the discriminant,
+    /// but e.g. generators can have more.
+    ///
+    /// To access all fields of this layout, both `fields` and the fields of the active variant
+    /// must be taken into account.
+    pub variants: Variants<V>,
+
+    /// The `abi` defines how this data is passed between functions, and it defines
+    /// value restrictions via `valid_range`.
+    ///
+    /// Note that this is entirely orthogonal to the recursive structure defined by
+    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
+    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// have to be taken into account to find all fields of this layout.
+    pub abi: Abi,
+
+    /// The leaf scalar with the largest number of invalid values
+    /// (i.e. outside of its `valid_range`), if it exists.
+    pub largest_niche: Option<Niche>,
+
+    pub align: AbiAndPrefAlign,
+    pub size: Size,
+}
+
+impl<V: Idx> LayoutS<V> {
+    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+        let size = scalar.size(cx);
+        let align = scalar.align(cx);
+        LayoutS {
+            variants: Variants::Single { index: V::new(0) },
+            fields: FieldsShape::Primitive,
+            abi: Abi::Scalar(scalar),
+            largest_niche,
+            size,
+            align,
+        }
+    }
+}
+
+impl<V: Idx> fmt::Debug for LayoutS<V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This is how `Layout` used to print before it became
+        // `Interned<LayoutS>`. We print it like this to avoid having to update
+        // expected output in a lot of tests.
+        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
+        f.debug_struct("Layout")
+            .field("size", size)
+            .field("align", align)
+            .field("abi", abi)
+            .field("fields", fields)
+            .field("largest_niche", largest_niche)
+            .field("variants", variants)
+            .finish()
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PointerKind {
+    /// Most general case; we know of no restrictions to tell LLVM.
+    SharedMutable,
+
+    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
+    Frozen,
+
+    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
+    UniqueBorrowed,
+
+    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
+    UniqueBorrowedPinned,
+
+    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
+    /// nor `dereferenceable`.
+    UniqueOwned,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PointeeInfo {
+    pub size: Size,
+    pub align: Align,
+    pub safe: Option<PointerKind>,
+    pub address_space: AddressSpace,
+}
+
+/// Used in `might_permit_raw_init` to indicate the kind of initialisation
+/// that is checked to be valid
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+    Zero,
+    UninitMitigated0x01Fill,
+}
+
+impl<V: Idx> LayoutS<V> {
+    /// Returns `true` if the layout corresponds to an unsized type.
+    pub fn is_unsized(&self) -> bool {
+        self.abi.is_unsized()
+    }
+
+    pub fn is_sized(&self) -> bool {
+        self.abi.is_sized()
+    }
+
+    /// Returns `true` if the type is a ZST and not unsized.
+    pub fn is_zst(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Uninhabited => self.size.bytes() == 0,
+            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum StructKind {
+    /// A tuple, closure, or univariant which cannot be coerced to unsized.
+    AlwaysSized,
+    /// A univariant, the last field of which may be coerced to unsized.
+    MaybeUnsized,
+    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+    Prefixed(Size, Align),
+}
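
Taken together, the new crate can compute simple layouts without any rustc internals. A small sketch of the pieces above working end to end (assuming the `Idx` impl for `u32` from `rustc_index`):

let dl = TargetDataLayout::default();
let bool_scalar = Scalar::Initialized {
    value: Primitive::Int(Integer::I8, false),
    valid_range: WrappingRange { start: 0, end: 1 },
};
// One byte, `Abi::Scalar`, and the unused 2..=255 values form its niche.
let layout: LayoutS<u32> = LayoutS::scalar(&dl, bool_scalar);
assert_eq!(layout.size, Size::from_bytes(1));
assert!(layout.abi.is_scalar() && !layout.is_zst());
assert!(layout.largest_niche.is_some());
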
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index 6bdd5511459..4c1d95a452d 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -32,7 +32,6 @@ use rustc_middle::hir::nested_filter;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::mono::Linkage;
 use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::repr_options_of_def;
 use rustc_middle::ty::util::{Discr, IntTypeExt};
 use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
 use rustc_session::lint;
@@ -860,7 +859,7 @@ fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
         bug!();
     };
 
-    let repr = repr_options_of_def(tcx, def_id.to_def_id());
+    let repr = tcx.repr_options_of_def(def_id.to_def_id());
     let (kind, variants) = match item.kind {
         ItemKind::Enum(ref def, _) => {
             let mut distance_from_explicit = 0;
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 03d8ee13918..db2c7915256 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -1,12 +1,17 @@
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(feature = "nightly", feature(allow_internal_unstable))]
-#![cfg_attr(feature = "nightly", feature(extend_one))]
-#![cfg_attr(feature = "nightly", feature(min_specialization))]
-#![cfg_attr(feature = "nightly", feature(new_uninit))]
-#![cfg_attr(feature = "nightly", feature(step_trait))]
-#![cfg_attr(feature = "nightly", feature(stmt_expr_attributes))]
-#![cfg_attr(feature = "nightly", feature(test))]
+#![cfg_attr(
+    feature = "nightly",
+    feature(
+        allow_internal_unstable,
+        extend_one,
+        min_specialization,
+        new_uninit,
+        step_trait,
+        stmt_expr_attributes,
+        test
+    )
+)]
 
 #[cfg(feature = "nightly")]
 pub mod bit_set;
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index fadd47eed72..297b509d402 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -1378,7 +1378,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
             let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
                 .map(|(variant, variant_layout)| {
                     // Subtract the size of the enum tag.
-                    let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
+                    let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
 
                     debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
                     bytes
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index f298e44e089..b5327ad0cec 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -1233,7 +1233,7 @@ impl<'tcx> TyCtxt<'tcx> {
             global_ctxt: untracked_resolutions,
             ast_lowering: untracked_resolver_for_lowering,
         } = resolver_outputs;
-        let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
+        let data_layout = s.target.parse_data_layout().unwrap_or_else(|err| {
             s.emit_fatal(err);
         });
         let interners = CtxtInterners::new(arena);
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index e3421ab9ce0..9d778ff2fb6 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -1995,78 +1995,6 @@ impl Hash for FieldDef {
     }
 }
 
-pub fn repr_options_of_def(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
-    let mut flags = ReprFlags::empty();
-    let mut size = None;
-    let mut max_align: Option<Align> = None;
-    let mut min_pack: Option<Align> = None;
-
-    // Generate a deterministically-derived seed from the item's path hash
-    // to allow for cross-crate compilation to actually work
-    let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
-
-    // If the user defined a custom seed for layout randomization, xor the item's
-    // path hash with the user-defined seed; this preserves determinism while
-    // still allowing users to further randomize layout generation, e.g. for fuzzing
-    if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
-        field_shuffle_seed ^= user_seed;
-    }
-
-    for attr in tcx.get_attrs(did, sym::repr) {
-        for r in attr::parse_repr_attr(&tcx.sess, attr) {
-            flags.insert(match r {
-                attr::ReprC => ReprFlags::IS_C,
-                attr::ReprPacked(pack) => {
-                    let pack = Align::from_bytes(pack as u64).unwrap();
-                    min_pack =
-                        Some(if let Some(min_pack) = min_pack { min_pack.min(pack) } else { pack });
-                    ReprFlags::empty()
-                }
-                attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
-                attr::ReprSimd => ReprFlags::IS_SIMD,
-                attr::ReprInt(i) => {
-                    size = Some(match i {
-                        attr::IntType::SignedInt(x) => match x {
-                            ast::IntTy::Isize => IntegerType::Pointer(true),
-                            ast::IntTy::I8 => IntegerType::Fixed(Integer::I8, true),
-                            ast::IntTy::I16 => IntegerType::Fixed(Integer::I16, true),
-                            ast::IntTy::I32 => IntegerType::Fixed(Integer::I32, true),
-                            ast::IntTy::I64 => IntegerType::Fixed(Integer::I64, true),
-                            ast::IntTy::I128 => IntegerType::Fixed(Integer::I128, true),
-                        },
-                        attr::IntType::UnsignedInt(x) => match x {
-                            ast::UintTy::Usize => IntegerType::Pointer(false),
-                            ast::UintTy::U8 => IntegerType::Fixed(Integer::I8, false),
-                            ast::UintTy::U16 => IntegerType::Fixed(Integer::I16, false),
-                            ast::UintTy::U32 => IntegerType::Fixed(Integer::I32, false),
-                            ast::UintTy::U64 => IntegerType::Fixed(Integer::I64, false),
-                            ast::UintTy::U128 => IntegerType::Fixed(Integer::I128, false),
-                        },
-                    });
-                    ReprFlags::empty()
-                }
-                attr::ReprAlign(align) => {
-                    max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
-                    ReprFlags::empty()
-                }
-            });
-        }
-    }
-
-    // If `-Z randomize-layout` was enabled for the type definition then we can
-    // consider performing layout randomization
-    if tcx.sess.opts.unstable_opts.randomize_layout {
-        flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
-    }
-
-    // This is here instead of layout because the choice must make it into metadata.
-    if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
-        flags.insert(ReprFlags::IS_LINEAR);
-    }
-
-    ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
-}
-
 impl<'tcx> FieldDef {
     /// Returns the type of this field. The resulting type is not normalized. The `subst` is
     /// typically obtained via the second field of [`TyKind::Adt`].
@@ -2134,6 +2062,81 @@ impl<'tcx> TyCtxt<'tcx> {
             .filter(move |item| item.kind == AssocKind::Fn && item.defaultness(self).has_value())
     }
 
+    pub fn repr_options_of_def(self, did: DefId) -> ReprOptions {
+        let mut flags = ReprFlags::empty();
+        let mut size = None;
+        let mut max_align: Option<Align> = None;
+        let mut min_pack: Option<Align> = None;
+
+        // Generate a seed deterministically derived from the item's path hash,
+        // so that cross-crate compilation produces consistent layouts.
+        let mut field_shuffle_seed = self.def_path_hash(did).0.to_smaller_hash();
+
+        // If the user defined a custom seed for layout randomization, xor the item's
+        // path hash with the user-defined seed. This keeps the result deterministic
+        // while still letting users further randomize layout generation, e.g. for fuzzing.
+        if let Some(user_seed) = self.sess.opts.unstable_opts.layout_seed {
+            field_shuffle_seed ^= user_seed;
+        }
+
+        for attr in self.get_attrs(did, sym::repr) {
+            for r in attr::parse_repr_attr(&self.sess, attr) {
+                flags.insert(match r {
+                    attr::ReprC => ReprFlags::IS_C,
+                    attr::ReprPacked(pack) => {
+                        let pack = Align::from_bytes(pack as u64).unwrap();
+                        min_pack = Some(if let Some(min_pack) = min_pack {
+                            min_pack.min(pack)
+                        } else {
+                            pack
+                        });
+                        ReprFlags::empty()
+                    }
+                    attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+                    attr::ReprSimd => ReprFlags::IS_SIMD,
+                    attr::ReprInt(i) => {
+                        size = Some(match i {
+                            attr::IntType::SignedInt(x) => match x {
+                                ast::IntTy::Isize => IntegerType::Pointer(true),
+                                ast::IntTy::I8 => IntegerType::Fixed(Integer::I8, true),
+                                ast::IntTy::I16 => IntegerType::Fixed(Integer::I16, true),
+                                ast::IntTy::I32 => IntegerType::Fixed(Integer::I32, true),
+                                ast::IntTy::I64 => IntegerType::Fixed(Integer::I64, true),
+                                ast::IntTy::I128 => IntegerType::Fixed(Integer::I128, true),
+                            },
+                            attr::IntType::UnsignedInt(x) => match x {
+                                ast::UintTy::Usize => IntegerType::Pointer(false),
+                                ast::UintTy::U8 => IntegerType::Fixed(Integer::I8, false),
+                                ast::UintTy::U16 => IntegerType::Fixed(Integer::I16, false),
+                                ast::UintTy::U32 => IntegerType::Fixed(Integer::I32, false),
+                                ast::UintTy::U64 => IntegerType::Fixed(Integer::I64, false),
+                                ast::UintTy::U128 => IntegerType::Fixed(Integer::I128, false),
+                            },
+                        });
+                        ReprFlags::empty()
+                    }
+                    attr::ReprAlign(align) => {
+                        max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+                        ReprFlags::empty()
+                    }
+                });
+            }
+        }
+
+        // If `-Z randomize-layout` was enabled for the type definition then we can
+        // consider performing layout randomization
+        if self.sess.opts.unstable_opts.randomize_layout {
+            flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
+        }
+
+        // This is here instead of layout because the choice must make it into metadata.
+        if !self.consider_optimizing(|| format!("Reorder fields of {:?}", self.def_path_str(did))) {
+            flags.insert(ReprFlags::IS_LINEAR);
+        }
+
+        ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
+    }
+
     /// Look up the name of a definition across crates. This does not look at HIR.
     pub fn opt_item_name(self, def_id: DefId) -> Option<Symbol> {
         if let Some(cnum) = def_id.as_crate_root() {
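
A minimal, self-contained sketch (not rustc code; the function name is hypothetical) of the seed combination the moved method performs: the per-item path-hash seed is XORed with the optional `layout_seed` option, so layouts stay deterministic for a fixed seed while fuzzers can still vary it.

    fn field_shuffle_seed(path_hash_seed: u64, user_seed: Option<u64>) -> u64 {
        match user_seed {
            Some(user) => path_hash_seed ^ user,
            None => path_hash_seed,
        }
    }
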
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 96ea15f1b80..be0aa0fc4c1 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -65,7 +65,7 @@ fn variant_discriminants<'tcx>(
         Variants::Multiple { variants, .. } => variants
             .iter_enumerated()
             .filter_map(|(idx, layout)| {
-                (layout.abi() != Abi::Uninhabited)
+                (layout.abi != Abi::Uninhabited)
                     .then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
             })
             .collect(),
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index 1ce3a613dc7..3b1b33aa095 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -11,7 +11,7 @@ use crate::{lint, HashStableContext};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 
 use rustc_data_structures::stable_hasher::ToStableHashKey;
-use rustc_target::abi::{Align, TargetDataLayout};
+use rustc_target::abi::Align;
 use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo};
 use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS};
 
@@ -900,7 +900,7 @@ fn default_configuration(sess: &Session) -> CrateConfig {
     let min_atomic_width = sess.target.min_atomic_width();
     let max_atomic_width = sess.target.max_atomic_width();
     let atomic_cas = sess.target.atomic_cas;
-    let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
+    let layout = sess.target.parse_data_layout().unwrap_or_else(|err| {
         sess.emit_fatal(err);
     });
 
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index f2e21078b44..568c916a163 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -6,23 +6,11 @@ edition = "2021"
 [dependencies]
 bitflags = "1.2.1"
 tracing = "0.1"
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
 serde_json = "1.0.59"
-rustc_data_structures = { path = "../rustc_data_structures", optional = true  }
-rustc_feature = { path = "../rustc_feature", optional = true }
-rustc_index = { path = "../rustc_index", default-features = false }
-rustc_macros = { path = "../rustc_macros", optional = true }
-rustc_serialize = { path = "../rustc_serialize", optional = true  }
-rustc_span = { path = "../rustc_span", optional = true }
-
-[features]
-default = ["nightly"]
-nightly = [
-    "rustc_data_structures",
-    "rustc_feature",
-    "rustc_index/nightly",
-    "rustc_macros",
-    "rustc_serialize",
-    "rustc_span",
-]
+rustc_abi = { path = "../rustc_abi" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 0c559ec04a4..a5ffaebea0b 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -262,7 +262,7 @@ impl CastTarget {
         let mut size = self.rest.total;
         for i in 0..self.prefix.iter().count() {
             match self.prefix[i] {
-                Some(v) => size += Size { raw: v.size.bytes() },
+                Some(v) => size += v.size,
                 None => {}
             }
         }
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 1b74959ad17..ec8f20fe692 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -87,8 +87,8 @@ where
         _ => {}
     }
 
-    if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
-        offset.raw += 4 - (offset.raw % 4);
+    if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
+        offset += Size::from_bytes(4 - (offset.bytes() % 4));
     }
     data = arg_scalar(cx, &scalar2, offset, data);
     return data;
@@ -169,14 +169,14 @@ where
                     has_float: false,
                     arg_attribute: ArgAttribute::default(),
                 },
-                Size { raw: 0 },
+                Size::ZERO,
             );
 
             if data.has_float {
                 // Structure { float, int, int } doesn't like to be handled like
                 // { float, long int }. Other way around it doesn't mind.
                 if data.last_offset < arg.layout.size
-                    && (data.last_offset.raw % 8) != 0
+                    && (data.last_offset.bytes() % 8) != 0
                     && data.prefix_index < data.prefix.len()
                 {
                     data.prefix[data.prefix_index] = Some(Reg::i32());
@@ -185,7 +185,7 @@ where
                 }
 
                 let mut rest_size = arg.layout.size - data.last_offset;
-                if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+                if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
                     data.prefix[data.prefix_index] = Some(Reg::i32());
                     rest_size = rest_size - Reg::i32().size;
                 }
@@ -214,13 +214,13 @@ where
     C: HasDataLayout,
 {
     if !fn_abi.ret.is_ignore() {
-        classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+        classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(cx, arg, Size { raw: 16 });
+        classify_arg(cx, arg, Size::from_bytes(16));
     }
 }
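
For reference, the rounding idiom these hunks switch to, as a hedged sketch assuming `rustc_target::abi::Size` is in scope: the offset is bumped to the next 4-byte boundary through the public byte-based API instead of mutating the now-private `raw` field.

    // Round `offset` up to the next multiple of 4 bytes, as in `arg_scalar` above.
    fn round_up_to_4(offset: Size) -> Size {
        let rem = offset.bytes() % 4;
        if rem != 0 { offset + Size::from_bytes(4 - rem) } else { offset }
    }
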
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index b6972d914a0..53c9878ab87 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -2,413 +2,16 @@ pub use Integer::*;
 pub use Primitive::*;
 
 use crate::json::{Json, ToJson};
-#[cfg(feature = "nightly")]
-use crate::spec::Target;
 
-use std::convert::{TryFrom, TryInto};
 use std::fmt;
-#[cfg(feature = "nightly")]
-use std::iter::Step;
-use std::num::{NonZeroUsize, ParseIntError};
-use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
-use std::str::FromStr;
+use std::ops::Deref;
 
-use bitflags::bitflags;
-#[cfg(feature = "nightly")]
 use rustc_data_structures::intern::Interned;
-use rustc_index::vec::{Idx, IndexVec};
-#[cfg(feature = "nightly")]
 use rustc_macros::HashStable_Generic;
 
-#[cfg(feature = "nightly")]
 pub mod call;
 
-mod layout;
-
-pub use layout::LayoutCalculator;
-
-bitflags! {
-    #[derive(Default)]
-    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-    pub struct ReprFlags: u8 {
-        const IS_C               = 1 << 0;
-        const IS_SIMD            = 1 << 1;
-        const IS_TRANSPARENT     = 1 << 2;
-        // Internal only for now. If true, don't reorder fields.
-        const IS_LINEAR          = 1 << 3;
-        // If true, the type's layout can be randomized using
-        // the seed stored in `ReprOptions.field_shuffle_seed`
-        const RANDOMIZE_LAYOUT   = 1 << 4;
-        // Any of these flags being set prevents field reordering optimisation.
-        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
-                                 | ReprFlags::IS_SIMD.bits
-                                 | ReprFlags::IS_LINEAR.bits;
-    }
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-pub enum IntegerType {
-    Pointer(bool),
-    Fixed(Integer, bool),
-}
-
-impl IntegerType {
-    pub fn is_signed(&self) -> bool {
-        match self {
-            IntegerType::Pointer(b) => *b,
-            IntegerType::Fixed(_, b) => *b,
-        }
-    }
-}
-
-/// Represents the repr options provided by the user.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
-#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-pub struct ReprOptions {
-    pub int: Option<IntegerType>,
-    pub align: Option<Align>,
-    pub pack: Option<Align>,
-    pub flags: ReprFlags,
-    /// The seed to be used for randomizing a type's layout
-    ///
-    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
-    /// be the "most accurate" hash as it'd encompass the item and crate
-    /// hash without loss, but it does pay the price of being larger.
-    /// Everything's a tradeoff; a `u64` seed should be sufficient for our
-    /// purposes (primarily `-Z randomize-layout`).
-    pub field_shuffle_seed: u64,
-}
-
-impl ReprOptions {
-    #[inline]
-    pub fn simd(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_SIMD)
-    }
-
-    #[inline]
-    pub fn c(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_C)
-    }
-
-    #[inline]
-    pub fn packed(&self) -> bool {
-        self.pack.is_some()
-    }
-
-    #[inline]
-    pub fn transparent(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_TRANSPARENT)
-    }
-
-    #[inline]
-    pub fn linear(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_LINEAR)
-    }
-
-    /// Returns the discriminant type, given these `repr` options.
-    /// This must only be called on enums!
-    pub fn discr_type(&self) -> IntegerType {
-        self.int.unwrap_or(IntegerType::Pointer(true))
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
-    /// layout" optimizations, such as representing `Foo<&T>` as a
-    /// single pointer.
-    pub fn inhibit_enum_layout_opt(&self) -> bool {
-        self.c() || self.int.is_some()
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
-    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
-    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
-        if let Some(pack) = self.pack {
-            if pack.bytes() == 1 {
-                return true;
-            }
-        }
-
-        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
-    }
-
-    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
-    /// was enabled for its declaration crate
-    pub fn can_randomize_type_layout(&self) -> bool {
-        !self.inhibit_struct_field_reordering_opt()
-            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
-    pub fn inhibit_union_abi_opt(&self) -> bool {
-        self.c()
-    }
-}
-
-/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
-/// for a target, which contains everything needed to compute layouts.
-#[derive(Debug, PartialEq, Eq)]
-pub struct TargetDataLayout {
-    pub endian: Endian,
-    pub i1_align: AbiAndPrefAlign,
-    pub i8_align: AbiAndPrefAlign,
-    pub i16_align: AbiAndPrefAlign,
-    pub i32_align: AbiAndPrefAlign,
-    pub i64_align: AbiAndPrefAlign,
-    pub i128_align: AbiAndPrefAlign,
-    pub f32_align: AbiAndPrefAlign,
-    pub f64_align: AbiAndPrefAlign,
-    pub pointer_size: Size,
-    pub pointer_align: AbiAndPrefAlign,
-    pub aggregate_align: AbiAndPrefAlign,
-
-    /// Alignments for vector types.
-    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
-
-    pub instruction_address_space: AddressSpace,
-
-    /// Minimum size of #[repr(C)] enums (defaults to `I32`)
-    pub c_enum_min_size: Integer,
-}
-
-impl Default for TargetDataLayout {
-    /// Creates an instance of `TargetDataLayout`.
-    fn default() -> TargetDataLayout {
-        let align = |bits| Align::from_bits(bits).unwrap();
-        TargetDataLayout {
-            endian: Endian::Big,
-            i1_align: AbiAndPrefAlign::new(align(8)),
-            i8_align: AbiAndPrefAlign::new(align(8)),
-            i16_align: AbiAndPrefAlign::new(align(16)),
-            i32_align: AbiAndPrefAlign::new(align(32)),
-            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
-            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
-            f32_align: AbiAndPrefAlign::new(align(32)),
-            f64_align: AbiAndPrefAlign::new(align(64)),
-            pointer_size: Size::from_bits(64),
-            pointer_align: AbiAndPrefAlign::new(align(64)),
-            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
-            vector_align: vec![
-                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
-                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
-            ],
-            instruction_address_space: AddressSpace::DATA,
-            c_enum_min_size: Integer::I32,
-        }
-    }
-}
-
-pub enum TargetDataLayoutErrors<'a> {
-    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
-    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
-    MissingAlignment { cause: &'a str },
-    InvalidAlignment { cause: &'a str, err: String },
-    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
-    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
-    InvalidBitsSize { err: String },
-}
-
-impl TargetDataLayout {
-    #[cfg(feature = "nightly")]
-    pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
-        // Parse an address space index from a string.
-        let parse_address_space = |s: &'a str, cause: &'a str| {
-            s.parse::<u32>().map(AddressSpace).map_err(|err| {
-                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
-            })
-        };
-
-        // Parse a bit count from a string.
-        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
-            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
-                kind,
-                bit: s,
-                cause,
-                err,
-            })
-        };
-
-        // Parse a size string.
-        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
-
-        // Parse an alignment string.
-        let align = |s: &[&'a str], cause: &'a str| {
-            if s.is_empty() {
-                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
-            }
-            let align_from_bits = |bits| {
-                Align::from_bits(bits)
-                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
-            };
-            let abi = parse_bits(s[0], "alignment", cause)?;
-            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
-            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
-        };
-
-        let mut dl = TargetDataLayout::default();
-        let mut i128_align_src = 64;
-        for spec in target.data_layout.split('-') {
-            let spec_parts = spec.split(':').collect::<Vec<_>>();
-
-            match &*spec_parts {
-                ["e"] => dl.endian = Endian::Little,
-                ["E"] => dl.endian = Endian::Big,
-                [p] if p.starts_with('P') => {
-                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
-                }
-                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
-                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
-                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
-                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
-                    dl.pointer_size = size(s, p)?;
-                    dl.pointer_align = align(a, p)?;
-                }
-                [s, ref a @ ..] if s.starts_with('i') => {
-                    let Ok(bits) = s[1..].parse::<u64>() else {
-                        size(&s[1..], "i")?; // For the user error.
-                        continue;
-                    };
-                    let a = align(a, s)?;
-                    match bits {
-                        1 => dl.i1_align = a,
-                        8 => dl.i8_align = a,
-                        16 => dl.i16_align = a,
-                        32 => dl.i32_align = a,
-                        64 => dl.i64_align = a,
-                        _ => {}
-                    }
-                    if bits >= i128_align_src && bits <= 128 {
-                        // Default alignment for i128 is decided by taking the
-                        // alignment of the largest-sized i{64..=128}.
-                        i128_align_src = bits;
-                        dl.i128_align = a;
-                    }
-                }
-                [s, ref a @ ..] if s.starts_with('v') => {
-                    let v_size = size(&s[1..], "v")?;
-                    let a = align(a, s)?;
-                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
-                        v.1 = a;
-                        continue;
-                    }
-                    // No existing entry, add a new one.
-                    dl.vector_align.push((v_size, a));
-                }
-                _ => {} // Ignore everything else.
-            }
-        }
-
-        // Perform consistency checks against the Target information.
-        if dl.endian != target.endian {
-            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
-                dl: dl.endian.as_str(),
-                target: target.endian.as_str(),
-            });
-        }
-
-        let target_pointer_width: u64 = target.pointer_width.into();
-        if dl.pointer_size.bits() != target_pointer_width {
-            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
-                pointer_size: dl.pointer_size.bits(),
-                target: target.pointer_width,
-            });
-        }
-
-        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
-            Ok(bits) => bits,
-            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
-        };
-
-        Ok(dl)
-    }
-
-    /// Returns exclusive upper bound on object size.
-    ///
-    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
-    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
-    /// index every address within an object along with one byte past the end, along with allowing
-    /// `isize` to store the difference between any two pointers into an object.
-    ///
-    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
-    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
-    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
-    /// address space on 64-bit ARMv8 and x86_64.
-    #[inline]
-    pub fn obj_size_bound(&self) -> u64 {
-        match self.pointer_size.bits() {
-            16 => 1 << 15,
-            32 => 1 << 31,
-            64 => 1 << 47,
-            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
-        }
-    }
-
-    #[inline]
-    pub fn ptr_sized_integer(&self) -> Integer {
-        match self.pointer_size.bits() {
-            16 => I16,
-            32 => I32,
-            64 => I64,
-            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
-        }
-    }
-
-    #[inline]
-    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
-        for &(size, align) in &self.vector_align {
-            if size == vec_size {
-                return align;
-            }
-        }
-        // Default to natural alignment, which is what LLVM does.
-        // That is, use the size, rounded up to a power of 2.
-        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
-    }
-}
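
// Illustration, not part of this patch: `parse` splits the LLVM-style data
// layout string on '-' and each spec on ':'. A self-contained sketch of that
// splitting step (hypothetical helper, hypothetical input):
fn split_specs(data_layout: &str) -> Vec<Vec<&str>> {
    data_layout.split('-').map(|spec| spec.split(':').collect()).collect()
}
// split_specs("e-p:64:64-i64:64") => [["e"], ["p", "64", "64"], ["i64", "64"]],
// i.e. little endian, 64-bit pointer size/alignment, 64-bit i64 alignment.
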
-
-pub trait HasDataLayout {
-    fn data_layout(&self) -> &TargetDataLayout;
-}
-
-impl HasDataLayout for TargetDataLayout {
-    #[inline]
-    fn data_layout(&self) -> &TargetDataLayout {
-        self
-    }
-}
-
-/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum Endian {
-    Little,
-    Big,
-}
-
-impl Endian {
-    pub fn as_str(&self) -> &'static str {
-        match self {
-            Self::Little => "little",
-            Self::Big => "big",
-        }
-    }
-}
-
-impl fmt::Debug for Endian {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.write_str(self.as_str())
-    }
-}
-
-impl FromStr for Endian {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "little" => Ok(Self::Little),
-            "big" => Ok(Self::Big),
-            _ => Err(format!(r#"unknown endian: "{}""#, s)),
-        }
-    }
-}
+pub use rustc_abi::*;
 
 impl ToJson for Endian {
     fn to_json(&self) -> Json {
@@ -416,1082 +19,16 @@ impl ToJson for Endian {
     }
 }
 
-/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-pub struct Size {
-    raw: u64,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Size {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Size({} bytes)", self.bytes())
-    }
-}
-
-impl Size {
-    pub const ZERO: Size = Size { raw: 0 };
-
-    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
-    /// not a multiple of 8.
-    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
-        let bits = bits.try_into().ok().unwrap();
-        // Avoid potential overflow from `bits + 7`.
-        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
-    }
-
-    #[inline]
-    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
-        let bytes: u64 = bytes.try_into().ok().unwrap();
-        Size { raw: bytes }
-    }
-
-    #[inline]
-    pub fn bytes(self) -> u64 {
-        self.raw
-    }
-
-    #[inline]
-    pub fn bytes_usize(self) -> usize {
-        self.bytes().try_into().unwrap()
-    }
-
-    #[inline]
-    pub fn bits(self) -> u64 {
-        #[cold]
-        fn overflow(bytes: u64) -> ! {
-            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
-        }
-
-        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
-    }
-
-    #[inline]
-    pub fn bits_usize(self) -> usize {
-        self.bits().try_into().unwrap()
-    }
-
-    #[inline]
-    pub fn align_to(self, align: Align) -> Size {
-        let mask = align.bytes() - 1;
-        Size::from_bytes((self.bytes() + mask) & !mask)
-    }
-
-    #[inline]
-    pub fn is_aligned(self, align: Align) -> bool {
-        let mask = align.bytes() - 1;
-        self.bytes() & mask == 0
-    }
-
-    #[inline]
-    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
-        let dl = cx.data_layout();
-
-        let bytes = self.bytes().checked_add(offset.bytes())?;
-
-        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
-    }
-
-    #[inline]
-    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
-        let dl = cx.data_layout();
-
-        let bytes = self.bytes().checked_mul(count)?;
-        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
-    }
-
-    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
-    /// (i.e., if it is negative, fill with 1's on the left).
-    #[inline]
-    pub fn sign_extend(self, value: u128) -> u128 {
-        let size = self.bits();
-        if size == 0 {
-            // Truncated until nothing is left.
-            return 0;
-        }
-        // Sign-extend it.
-        let shift = 128 - size;
-        // Shift the unsigned value to the left, then shift back to the right as signed
-        // (essentially fills with sign bit on the left).
-        (((value << shift) as i128) >> shift) as u128
-    }
-
-    /// Truncates `value` to `self` bits.
-    #[inline]
-    pub fn truncate(self, value: u128) -> u128 {
-        let size = self.bits();
-        if size == 0 {
-            // Truncated until nothing is left.
-            return 0;
-        }
-        let shift = 128 - size;
-        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
-        (value << shift) >> shift
-    }
-
-    #[inline]
-    pub fn signed_int_min(&self) -> i128 {
-        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
-    }
-
-    #[inline]
-    pub fn signed_int_max(&self) -> i128 {
-        i128::MAX >> (128 - self.bits())
-    }
-
-    #[inline]
-    pub fn unsigned_int_max(&self) -> u128 {
-        u128::MAX >> (128 - self.bits())
-    }
-}
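
// Worked example, not part of this patch: the `truncate`/`sign_extend` pair
// above for an 8-bit `Size` (assumes `Size` is in scope, as in this module).
fn size_demo() {
    let s = Size::from_bits(8);
    assert_eq!(s.truncate(0x1_FF), 0xFF);        // high bits dropped
    assert_eq!(s.sign_extend(0xFF) as i128, -1); // MSB set => sign-extended
    assert_eq!(s.sign_extend(0x7F), 0x7F);       // MSB clear => unchanged
}
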
-
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid during layout computation; return `LayoutError` instead.
-
-impl Add for Size {
-    type Output = Size;
-    #[inline]
-    fn add(self, other: Size) -> Size {
-        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
-            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
-        }))
-    }
-}
-
-impl Sub for Size {
-    type Output = Size;
-    #[inline]
-    fn sub(self, other: Size) -> Size {
-        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
-            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
-        }))
-    }
-}
-
-impl Mul<Size> for u64 {
-    type Output = Size;
-    #[inline]
-    fn mul(self, size: Size) -> Size {
-        size * self
-    }
-}
-
-impl Mul<u64> for Size {
-    type Output = Size;
-    #[inline]
-    fn mul(self, count: u64) -> Size {
-        match self.bytes().checked_mul(count) {
-            Some(bytes) => Size::from_bytes(bytes),
-            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
-        }
-    }
-}
-
-impl AddAssign for Size {
-    #[inline]
-    fn add_assign(&mut self, other: Size) {
-        *self = *self + other;
-    }
-}
-
-#[cfg(feature = "nightly")]
-impl Step for Size {
-    #[inline]
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
-        u64::steps_between(&start.bytes(), &end.bytes())
-    }
-
-    #[inline]
-    fn forward_checked(start: Self, count: usize) -> Option<Self> {
-        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
-    }
-
-    #[inline]
-    fn forward(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::forward(start.bytes(), count))
-    }
-
-    #[inline]
-    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
-    }
-
-    #[inline]
-    fn backward_checked(start: Self, count: usize) -> Option<Self> {
-        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
-    }
-
-    #[inline]
-    fn backward(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::backward(start.bytes(), count))
-    }
-
-    #[inline]
-    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
-    }
-}
-
-/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-pub struct Align {
-    pow2: u8,
-}
-
-// This is debug-printed a lot in larger structs, don't waste too much space there
-impl fmt::Debug for Align {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Align({} bytes)", self.bytes())
-    }
-}
-
-impl Align {
-    pub const ONE: Align = Align { pow2: 0 };
-    pub const MAX: Align = Align { pow2: 29 };
-
-    #[inline]
-    pub fn from_bits(bits: u64) -> Result<Align, String> {
-        Align::from_bytes(Size::from_bits(bits).bytes())
-    }
-
-    #[inline]
-    pub fn from_bytes(align: u64) -> Result<Align, String> {
-        // Treat an alignment of 0 bytes like 1-byte alignment.
-        if align == 0 {
-            return Ok(Align::ONE);
-        }
-
-        #[cold]
-        fn not_power_of_2(align: u64) -> String {
-            format!("`{}` is not a power of 2", align)
-        }
-
-        #[cold]
-        fn too_large(align: u64) -> String {
-            format!("`{}` is too large", align)
-        }
-
-        let mut bytes = align;
-        let mut pow2: u8 = 0;
-        while (bytes & 1) == 0 {
-            pow2 += 1;
-            bytes >>= 1;
-        }
-        if bytes != 1 {
-            return Err(not_power_of_2(align));
-        }
-        if pow2 > Self::MAX.pow2 {
-            return Err(too_large(align));
-        }
-
-        Ok(Align { pow2 })
-    }
-
-    #[inline]
-    pub fn bytes(self) -> u64 {
-        1 << self.pow2
-    }
-
-    #[inline]
-    pub fn bits(self) -> u64 {
-        self.bytes() * 8
-    }
-
-    /// Computes the best alignment possible for the given offset
-    /// (the largest power of two that the offset is a multiple of).
-    ///
-    /// N.B., for an offset of `0`, this happens to return `2^64`.
-    #[inline]
-    pub fn max_for_offset(offset: Size) -> Align {
-        Align { pow2: offset.bytes().trailing_zeros() as u8 }
-    }
-
-    /// Lower the alignment, if necessary, such that the given offset
-    /// is aligned to it (the offset is a multiple of the alignment).
-    #[inline]
-    pub fn restrict_for_offset(self, offset: Size) -> Align {
-        self.min(Align::max_for_offset(offset))
-    }
-}
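
// Worked example, not part of this patch, of the `Align` constructors above.
fn align_demo() {
    assert_eq!(Align::from_bytes(8).unwrap().bits(), 64); // 8 bytes = 2^3
    assert!(Align::from_bytes(12).is_err());              // not a power of two
    // An offset of 24 bytes (0b11000) supports at most 8-byte alignment.
    assert_eq!(Align::max_for_offset(Size::from_bytes(24)).bytes(), 8);
}
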
-
-/// A pair of alignments, ABI-mandated and preferred.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct AbiAndPrefAlign {
-    pub abi: Align,
-    pub pref: Align,
-}
-
-impl AbiAndPrefAlign {
-    #[inline]
-    pub fn new(align: Align) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: align, pref: align }
-    }
-
-    #[inline]
-    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
-    }
-
-    #[inline]
-    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
-    }
-}
-
-/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
-pub enum Integer {
-    I8,
-    I16,
-    I32,
-    I64,
-    I128,
-}
-
-impl Integer {
-    #[inline]
-    pub fn size(self) -> Size {
-        match self {
-            I8 => Size::from_bytes(1),
-            I16 => Size::from_bytes(2),
-            I32 => Size::from_bytes(4),
-            I64 => Size::from_bytes(8),
-            I128 => Size::from_bytes(16),
-        }
-    }
-
-    /// Gets the `Integer` type from an `IntegerType`.
-    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
-        let dl = cx.data_layout();
-
-        match ity {
-            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
-            IntegerType::Fixed(x, _) => x,
-        }
-    }
-
-    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
-        let dl = cx.data_layout();
-
-        match self {
-            I8 => dl.i8_align,
-            I16 => dl.i16_align,
-            I32 => dl.i32_align,
-            I64 => dl.i64_align,
-            I128 => dl.i128_align,
-        }
-    }
-
-    /// Finds the smallest Integer type which can represent the signed value.
-    #[inline]
-    pub fn fit_signed(x: i128) -> Integer {
-        match x {
-            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
-            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
-            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
-            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
-            _ => I128,
-        }
-    }
-
-    /// Finds the smallest Integer type which can represent the unsigned value.
-    #[inline]
-    pub fn fit_unsigned(x: u128) -> Integer {
-        match x {
-            0..=0x0000_0000_0000_00ff => I8,
-            0..=0x0000_0000_0000_ffff => I16,
-            0..=0x0000_0000_ffff_ffff => I32,
-            0..=0xffff_ffff_ffff_ffff => I64,
-            _ => I128,
-        }
-    }
-
-    /// Finds the smallest integer with the given alignment.
-    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
-        let dl = cx.data_layout();
-
-        for candidate in [I8, I16, I32, I64, I128] {
-            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
-                return Some(candidate);
-            }
-        }
-        None
-    }
-
-    /// Finds the largest integer with the given alignment or less.
-    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
-        let dl = cx.data_layout();
-
-        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
-        for candidate in [I64, I32, I16] {
-            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
-                return candidate;
-            }
-        }
-        I8
-    }
-
-    // FIXME(eddyb) consolidate this and other methods that find the appropriate
-    // `Integer` given some requirements.
-    #[inline]
-    fn from_size(size: Size) -> Result<Self, String> {
-        match size.bits() {
-            8 => Ok(Integer::I8),
-            16 => Ok(Integer::I16),
-            32 => Ok(Integer::I32),
-            64 => Ok(Integer::I64),
-            128 => Ok(Integer::I128),
-            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
-        }
-    }
-}
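
// Worked example, not part of this patch, of the fitting helpers above
// (uses the `pub use Integer::*;` re-export at the top of this module).
fn integer_demo() {
    assert_eq!(Integer::fit_signed(-129), I16);  // just below i8::MIN
    assert_eq!(Integer::fit_unsigned(255), I8);
    assert_eq!(Integer::fit_unsigned(256), I16);
}
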
-
-/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Primitive {
-    /// The `bool` is the signedness of the `Integer` type.
-    ///
-    /// One would think we would not care about such details this low down,
-    /// but some ABIs are described in terms of C types and ISAs where the
-    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
-    /// a negative integer passed by zero-extension will appear positive in
-    /// the callee, and most operations on it will produce the wrong values.
-    Int(Integer, bool),
-    F32,
-    F64,
-    Pointer,
-}
-
-impl Primitive {
-    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
-        let dl = cx.data_layout();
-
-        match self {
-            Int(i, _) => i.size(),
-            F32 => Size::from_bits(32),
-            F64 => Size::from_bits(64),
-            Pointer => dl.pointer_size,
-        }
-    }
-
-    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
-        let dl = cx.data_layout();
-
-        match self {
-            Int(i, _) => i.align(dl),
-            F32 => dl.f32_align,
-            F64 => dl.f64_align,
-            Pointer => dl.pointer_align,
-        }
-    }
-
-    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
-    #[inline]
-    pub fn is_float(self) -> bool {
-        matches!(self, F32 | F64)
-    }
-
-    // FIXME(eddyb) remove, it's completely unused.
-    #[inline]
-    pub fn is_int(self) -> bool {
-        matches!(self, Int(..))
-    }
-
-    #[inline]
-    pub fn is_ptr(self) -> bool {
-        matches!(self, Pointer)
-    }
-}
-
-/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
-///
-/// That is, for an i8 primitive, a range of `254..=2` means the following
-/// sequence:
-///
-///    254 (-2), 255 (-1), 0, 1, 2
-///
-/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct WrappingRange {
-    pub start: u128,
-    pub end: u128,
-}
-
-impl WrappingRange {
-    pub fn full(size: Size) -> Self {
-        Self { start: 0, end: size.unsigned_int_max() }
-    }
-
-    /// Returns `true` if `v` is contained in the range.
-    #[inline(always)]
-    pub fn contains(&self, v: u128) -> bool {
-        if self.start <= self.end {
-            self.start <= v && v <= self.end
-        } else {
-            self.start <= v || v <= self.end
-        }
-    }
-
-    /// Returns `self` with replaced `start`
-    #[inline(always)]
-    pub fn with_start(mut self, start: u128) -> Self {
-        self.start = start;
-        self
-    }
-
-    /// Returns `self` with replaced `end`
-    #[inline(always)]
-    pub fn with_end(mut self, end: u128) -> Self {
-        self.end = end;
-        self
-    }
-
-    /// Returns `true` if `size` completely fills the range.
-    #[inline]
-    pub fn is_full_for(&self, size: Size) -> bool {
-        let max_value = size.unsigned_int_max();
-        debug_assert!(self.start <= max_value && self.end <= max_value);
-        self.start == (self.end.wrapping_add(1) & max_value)
-    }
-}
-
-impl fmt::Debug for WrappingRange {
-    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if self.start > self.end {
-            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
-        } else {
-            write!(fmt, "{}..={}", self.start, self.end)?;
-        }
-        Ok(())
-    }
-}
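
// Worked example, not part of this patch, of the wrap-around semantics
// documented above: for an i8-sized scalar, `254..=2` covers {254, 255, 0, 1, 2}.
fn wrapping_range_demo() {
    let r = WrappingRange { start: 254, end: 2 };
    assert!(r.contains(255) && r.contains(0) && r.contains(2));
    assert!(!r.contains(3) && !r.contains(253));
}
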
-
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Scalar {
-    Initialized {
-        value: Primitive,
-
-        // FIXME(eddyb) always use the shortest range, e.g., by finding
-        // the largest space between two consecutive valid values and
-        // taking everything else as the (shortest) valid range.
-        valid_range: WrappingRange,
-    },
-    Union {
-        /// Even for unions, we need to use the correct registers for the kind of
-        /// values inside the union, so we keep the `Primitive` type around. We
-        /// also use it to compute the size of the scalar.
-        /// However, unions never have niches and even allow undef,
-        /// so there is no `valid_range`.
-        value: Primitive,
-    },
-}
-
-impl Scalar {
-    #[inline]
-    pub fn is_bool(&self) -> bool {
-        matches!(
-            self,
-            Scalar::Initialized {
-                value: Int(I8, false),
-                valid_range: WrappingRange { start: 0, end: 1 }
-            }
-        )
-    }
-
-    /// Get the primitive representation of this type, ignoring the valid range and whether the
-    /// value is allowed to be undefined (due to being a union).
-    pub fn primitive(&self) -> Primitive {
-        match *self {
-            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
-        }
-    }
-
-    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
-        self.primitive().align(cx)
-    }
-
-    pub fn size(self, cx: &impl HasDataLayout) -> Size {
-        self.primitive().size(cx)
-    }
-
-    #[inline]
-    pub fn to_union(&self) -> Self {
-        Self::Union { value: self.primitive() }
-    }
-
-    #[inline]
-    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
-        match *self {
-            Scalar::Initialized { valid_range, .. } => valid_range,
-            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
-        }
-    }
-
-    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
-    #[inline]
-    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
-        match self {
-            Scalar::Initialized { valid_range, .. } => valid_range,
-            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
-        }
-    }
-
-    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
-    #[inline]
-    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
-        match *self {
-            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
-            Scalar::Union { .. } => true,
-        }
-    }
-
-    /// Returns `true` if this type can be left uninit.
-    #[inline]
-    pub fn is_uninit_valid(&self) -> bool {
-        match *self {
-            Scalar::Initialized { .. } => false,
-            Scalar::Union { .. } => true,
-        }
-    }
-}
-
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum FieldsShape {
-    /// Scalar primitives and `!`, which never have fields.
-    Primitive,
-
-    /// All fields start at no offset. The `usize` is the field count.
-    Union(NonZeroUsize),
-
-    /// Array/vector-like placement, with all fields of identical types.
-    Array { stride: Size, count: u64 },
-
-    /// Struct-like placement, with precomputed offsets.
-    ///
-    /// Fields are guaranteed to not overlap, but note that gaps
-    /// before, between and after all the fields are NOT always
-    /// padding, and as such their contents may not be discarded.
-    /// For example, enum variants leave a gap at the start,
-    /// where the discriminant field in the enum layout goes.
-    Arbitrary {
-        /// Offsets for the first byte of each field,
-        /// ordered to match the source definition order.
-        /// This vector is not necessarily in increasing order.
-        // FIXME(eddyb) use small vector optimization for the common case.
-        offsets: Vec<Size>,
-
-        /// Maps source order field indices to memory order indices,
-        /// depending on how the fields were reordered (if at all).
-        /// This is a permutation, with both the source order and the
-        /// memory order using the same (0..n) index ranges.
-        ///
-        /// Note that during computation of `memory_index`, sometimes
-        /// it is easier to operate on the inverse mapping (that is,
-        /// from memory order to source order), and that is usually
-        /// named `inverse_memory_index`.
-        ///
-        // FIXME(eddyb) build a better abstraction for permutations, if possible.
-        // FIXME(camlorn) also consider small vector optimization here.
-        memory_index: Vec<u32>,
-    },
-}
-
-impl FieldsShape {
-    #[inline]
-    pub fn count(&self) -> usize {
-        match *self {
-            FieldsShape::Primitive => 0,
-            FieldsShape::Union(count) => count.get(),
-            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
-        }
-    }
-
-    #[inline]
-    pub fn offset(&self, i: usize) -> Size {
-        match *self {
-            FieldsShape::Primitive => {
-                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
-            }
-            FieldsShape::Union(count) => {
-                assert!(
-                    i < count.get(),
-                    "tried to access field {} of union with {} fields",
-                    i,
-                    count
-                );
-                Size::ZERO
-            }
-            FieldsShape::Array { stride, count } => {
-                let i = u64::try_from(i).unwrap();
-                assert!(i < count);
-                stride * i
-            }
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
-        }
-    }
-
-    #[inline]
-    pub fn memory_index(&self, i: usize) -> usize {
-        match *self {
-            FieldsShape::Primitive => {
-                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
-            }
-            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
-        }
-    }
-
-    /// Gets source indices of the fields by increasing offsets.
-    #[inline]
-    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
-        let mut inverse_small = [0u8; 64];
-        let mut inverse_big = vec![];
-        let use_small = self.count() <= inverse_small.len();
-
-        // We have to write this logic twice in order to keep the array small.
-        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
-            if use_small {
-                for i in 0..self.count() {
-                    inverse_small[memory_index[i] as usize] = i as u8;
-                }
-            } else {
-                inverse_big = vec![0; self.count()];
-                for i in 0..self.count() {
-                    inverse_big[memory_index[i] as usize] = i as u32;
-                }
-            }
-        }
-
-        (0..self.count()).map(move |i| match *self {
-            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { .. } => {
-                if use_small {
-                    inverse_small[i] as usize
-                } else {
-                    inverse_big[i] as usize
-                }
-            }
-        })
-    }
-}
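
// Sketch, not rustc code: the permutation inversion that
// `index_by_increasing_offset` performs above. `memory_index[src] = mem` is
// flipped into `inverse[mem] = src`, giving source indices in memory order.
fn invert_memory_index(memory_index: &[u32]) -> Vec<usize> {
    let mut inverse = vec![0; memory_index.len()];
    for (src, &mem) in memory_index.iter().enumerate() {
        inverse[mem as usize] = src;
    }
    inverse
}
// invert_memory_index(&[2, 0, 1]) == [1, 2, 0]: source field 1 comes first in memory.
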
-
-/// An identifier that specifies the address space that some operation
-/// should operate on. Special address spaces have an effect on code generation,
-/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct AddressSpace(pub u32);
-
-impl AddressSpace {
-    /// The default address space, corresponding to data space.
-    pub const DATA: Self = AddressSpace(0);
-}
-
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Abi {
-    Uninhabited,
-    Scalar(Scalar),
-    ScalarPair(Scalar, Scalar),
-    Vector {
-        element: Scalar,
-        count: u64,
-    },
-    Aggregate {
-        /// If true, the size is exact, otherwise it's only a lower bound.
-        sized: bool,
-    },
-}
-
-impl Abi {
-    /// Returns `true` if the layout corresponds to an unsized type.
-    #[inline]
-    pub fn is_unsized(&self) -> bool {
-        match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Aggregate { sized } => !sized,
-        }
-    }
-
-    #[inline]
-    pub fn is_sized(&self) -> bool {
-        !self.is_unsized()
-    }
-
-    /// Returns `true` if this is a single signed integer scalar
-    #[inline]
-    pub fn is_signed(&self) -> bool {
-        match self {
-            Abi::Scalar(scal) => match scal.primitive() {
-                Primitive::Int(_, signed) => signed,
-                _ => false,
-            },
-            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
-        }
-    }
-
-    /// Returns `true` if this is an uninhabited type
-    #[inline]
-    pub fn is_uninhabited(&self) -> bool {
-        matches!(*self, Abi::Uninhabited)
-    }
-
-    /// Returns `true` if this is a scalar type.
-    #[inline]
-    pub fn is_scalar(&self) -> bool {
-        matches!(*self, Abi::Scalar(_))
-    }
-}
-
-#[cfg(feature = "nightly")]
 rustc_index::newtype_index! {
     pub struct VariantIdx {
         derive [HashStable_Generic]
     }
 }
 
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Variants<V: Idx> {
-    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
-    Single { index: V },
-
-    /// Enum-likes with more than one inhabited variant: each variant comes with
-    /// a *discriminant* (usually the same as the variant index but the user can
-    /// assign explicit discriminant values).  That discriminant is encoded
-    /// as a *tag* on the machine.  The layout of each variant is
-    /// a struct, and they all have space reserved for the tag.
-    /// For enums, the tag is the sole field of the layout.
-    Multiple {
-        tag: Scalar,
-        tag_encoding: TagEncoding<V>,
-        tag_field: usize,
-        variants: IndexVec<V, LayoutS<V>>,
-    },
-}
-
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum TagEncoding<V: Idx> {
-    /// The tag directly stores the discriminant, but possibly with a smaller layout
-    /// (so converting the tag to the discriminant can require sign extension).
-    Direct,
-
-    /// Niche (values invalid for a type) encoding the discriminant:
-    /// Discriminant and variant index coincide.
-    /// The variant `untagged_variant` contains a niche at an arbitrary
-    /// offset (field `tag_field` of the enum), which for a variant with
-    /// discriminant `d` is set to
-    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
-    ///
-    /// For example, `Option<(usize, &T)>` is represented such that
-    /// `None` has a null pointer for the second tuple field, and
-    /// `Some` is the identity function (with a non-null reference).
-    Niche { untagged_variant: V, niche_variants: RangeInclusive<V>, niche_start: u128 },
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct Niche {
-    pub offset: Size,
-    pub value: Primitive,
-    pub valid_range: WrappingRange,
-}
-
-impl Niche {
-    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
-        let Scalar::Initialized { value, valid_range } = scalar else { return None };
-        let niche = Niche { offset, value, valid_range };
-        if niche.available(cx) > 0 { Some(niche) } else { None }
-    }
-
-    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
-        let Self { value, valid_range: v, .. } = *self;
-        let size = value.size(cx);
-        assert!(size.bits() <= 128);
-        let max_value = size.unsigned_int_max();
-
-        // Find out how many values are outside the valid range.
-        let niche = v.end.wrapping_add(1)..v.start;
-        niche.end.wrapping_sub(niche.start) & max_value
-    }
-
-    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
-        assert!(count > 0);
-
-        let Self { value, valid_range: v, .. } = *self;
-        let size = value.size(cx);
-        assert!(size.bits() <= 128);
-        let max_value = size.unsigned_int_max();
-
-        let niche = v.end.wrapping_add(1)..v.start;
-        let available = niche.end.wrapping_sub(niche.start) & max_value;
-        if count > available {
-            return None;
-        }
-
-        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
-        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
-        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
-        // Having `None` in niche zero can enable some special optimizations.
-        //
-        // Bound selection criteria:
-        // 1. Select closest to zero given wrapping semantics.
-        // 2. Avoid moving past zero if possible.
-        //
-        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
-        // If niche zero is already reserved, the selection of bounds is of little interest.
-        let move_start = |v: WrappingRange| {
-            let start = v.start.wrapping_sub(count) & max_value;
-            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
-        };
-        let move_end = |v: WrappingRange| {
-            let start = v.end.wrapping_add(1) & max_value;
-            let end = v.end.wrapping_add(count) & max_value;
-            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
-        };
-        let distance_end_zero = max_value - v.end;
-        if v.start > v.end {
-            // zero is unavailable because wrapping occurs
-            move_end(v)
-        } else if v.start <= distance_end_zero {
-            if count <= v.start {
-                move_start(v)
-            } else {
-                // moved past zero, use other bound
-                move_end(v)
-            }
-        } else {
-            let end = v.end.wrapping_add(count) & max_value;
-            let overshot_zero = (1..=v.end).contains(&end);
-            if overshot_zero {
-                // moved past zero, use other bound
-                move_start(v)
-            } else {
-                move_end(v)
-            }
-        }
-    }
-}
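
A standalone check of the `available` arithmetic above, on plain integers (`max_value` stands in for `size.unsigned_int_max()`; the `bool` and `NonZeroU8` ranges are illustrative inputs):

// Count the values outside a wrapping valid range, as `available` does above.
fn available(start: u128, end: u128, max_value: u128) -> u128 {
    // The niche is the wrapping gap `end + 1 .. start`.
    start.wrapping_sub(end.wrapping_add(1)) & max_value
}

fn main() {
    let max = u8::MAX as u128;
    // `bool` in a byte: valid range 0..=1 leaves 254 niche values.
    assert_eq!(available(0, 1, max), 254);
    // `NonZeroU8`: valid range 1..=255 leaves exactly one niche value, 0.
    // That is why `Option<NonZeroU8>` fits in one byte with `None` stored as 0;
    // `reserve` above claims niche zero by moving `v.start` down.
    assert_eq!(available(1, 255, max), 1);
}
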
-
-#[derive(PartialEq, Eq, Hash, Clone)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub struct LayoutS<V: Idx> {
-    /// Says where the fields are located within the layout.
-    pub fields: FieldsShape,
-
-    /// Encodes information about multi-variant layouts.
-    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
-    /// shared between all variants. One of them will be the discriminant,
-    /// but e.g. generators can have more.
-    ///
-    /// To access all fields of this layout, both `fields` and the fields of the active variant
-    /// must be taken into account.
-    pub variants: Variants<V>,
-
-    /// The `abi` defines how this data is passed between functions, and it defines
-    /// value restrictions via `valid_range`.
-    ///
-    /// Note that this is entirely orthogonal to the recursive structure defined by
-    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
-    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
-    /// have to be taken into account to find all fields of this layout.
-    pub abi: Abi,
-
-    /// The leaf scalar with the largest number of invalid values
-    /// (i.e. outside of its `valid_range`), if it exists.
-    pub largest_niche: Option<Niche>,
-
-    pub align: AbiAndPrefAlign,
-    pub size: Size,
-}
-
-impl<V: Idx> LayoutS<V> {
-    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.size(cx);
-        let align = scalar.align(cx);
-        LayoutS {
-            variants: Variants::Single { index: V::new(0) },
-            fields: FieldsShape::Primitive,
-            abi: Abi::Scalar(scalar),
-            largest_niche,
-            size,
-            align,
-        }
-    }
-
-    #[inline]
-    pub fn fields(&self) -> &FieldsShape {
-        &self.fields
-    }
-
-    #[inline]
-    pub fn variants(&self) -> &Variants<V> {
-        &self.variants
-    }
-
-    #[inline]
-    pub fn abi(&self) -> Abi {
-        self.abi
-    }
-
-    #[inline]
-    pub fn largest_niche(&self) -> Option<Niche> {
-        self.largest_niche
-    }
-
-    #[inline]
-    pub fn align(&self) -> AbiAndPrefAlign {
-        self.align
-    }
-
-    #[inline]
-    pub fn size(&self) -> Size {
-        self.size
-    }
-}
-
-impl<V: Idx> fmt::Debug for LayoutS<V> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // This is how `Layout` used to print before it became
-        // `Interned<LayoutS>`. We print it like this to avoid having to update
-        // expected output in a lot of tests.
-        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
-        f.debug_struct("Layout")
-            .field("size", size)
-            .field("align", align)
-            .field("abi", abi)
-            .field("fields", fields)
-            .field("largest_niche", largest_niche)
-            .field("variants", variants)
-            .finish()
-    }
-}
-
-#[cfg(feature = "nightly")]
 #[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
 #[rustc_pass_by_value]
 pub struct Layout<'a>(pub Interned<'a, LayoutS<VariantIdx>>);
 
-#[cfg(feature = "nightly")]
 impl<'a> fmt::Debug for Layout<'a> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // See comment on `<LayoutS as Debug>::fmt` above.
@@ -1499,7 +36,6 @@ impl<'a> fmt::Debug for Layout<'a> {
     }
 }
 
-#[cfg(feature = "nightly")]
 impl<'a> Layout<'a> {
     pub fn fields(self) -> &'a FieldsShape {
         &self.0.0.fields
@@ -1533,15 +69,12 @@ impl<'a> Layout<'a> {
 /// to that obtained from `layout_of(ty)`, as we need to produce
 /// layouts for which Rust types do not exist, such as enum variants
 /// or synthetic fields of enums (i.e., discriminants) and fat pointers.
-#[cfg(feature = "nightly")]
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
 pub struct TyAndLayout<'a, Ty> {
     pub ty: Ty,
     pub layout: Layout<'a>,
 }
 
-#[cfg(feature = "nightly")]
 impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
     type Target = &'a LayoutS<VariantIdx>;
     fn deref(&self) -> &&'a LayoutS<VariantIdx> {
@@ -1549,44 +82,8 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
     }
 }
 
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PointerKind {
-    /// Most general case, we know no restrictions to tell LLVM.
-    SharedMutable,
-
-    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
-    Frozen,
-
-    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
-    UniqueBorrowed,
-
-    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
-    UniqueBorrowedPinned,
-
-    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
-    /// nor `dereferenceable`.
-    UniqueOwned,
-}
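
Read as a table, the doc comments above imply the following attribute sets; a plain standalone mirror for illustration (the actual attribute emission lives in codegen, and the SharedMutable row is the conservative reading of "no restrictions"):

// (dereferenceable, noalias, readonly) implied by each kind, per the docs above.
#[derive(Clone, Copy)]
enum Kind { SharedMutable, Frozen, UniqueBorrowed, UniqueBorrowedPinned, UniqueOwned }

fn llvm_attrs(kind: Kind) -> (bool, bool, bool) {
    match kind {
        Kind::SharedMutable => (false, false, false),       // promise nothing
        Kind::Frozen => (true, true, true),                 // `&T` without `UnsafeCell`
        Kind::UniqueBorrowed => (true, true, false),        // `&mut T`
        Kind::UniqueBorrowedPinned => (true, false, false), // `&mut T`, `T: !Unpin`
        Kind::UniqueOwned => (false, true, false),          // `Box<T>`
    }
}

fn main() {
    assert_eq!(llvm_attrs(Kind::Frozen), (true, true, true));
}
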
-
-#[derive(Copy, Clone, Debug)]
-pub struct PointeeInfo {
-    pub size: Size,
-    pub align: Align,
-    pub safe: Option<PointerKind>,
-    pub address_space: AddressSpace,
-}
-
-/// Used in `might_permit_raw_init` to indicate the kind of initialisation
-/// that is checked to be valid
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum InitKind {
-    Zero,
-    UninitMitigated0x01Fill,
-}
-
 /// Trait that needs to be implemented by the higher-level type representation
 /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
-#[cfg(feature = "nightly")]
 pub trait TyAbiInterface<'a, C>: Sized {
     fn ty_and_layout_for_variant(
         this: TyAndLayout<'a, Self>,
@@ -1605,7 +102,6 @@ pub trait TyAbiInterface<'a, C>: Sized {
     fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
 }
 
-#[cfg(feature = "nightly")]
 impl<'a, Ty> TyAndLayout<'a, Ty> {
     pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
     where
@@ -1675,7 +171,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
     }
 }
 
-impl<V: Idx> LayoutS<V> {
+impl<'a, Ty> TyAndLayout<'a, Ty> {
     /// Returns `true` if the layout corresponds to an unsized type.
     pub fn is_unsized(&self) -> bool {
         self.abi.is_unsized()
@@ -1695,13 +191,3 @@ impl<V: Idx> LayoutS<V> {
         }
     }
 }
-
-#[derive(Copy, Clone, Debug)]
-pub enum StructKind {
-    /// A tuple, closure, or univariant which cannot be coerced to unsized.
-    AlwaysSized,
-    /// A univariant, the last field of which may be coerced to unsized.
-    MaybeUnsized,
-    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
-    Prefixed(Size, Align),
-}
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index 1065980a26a..b69a0a645a4 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -8,13 +8,13 @@
 //! LLVM.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![cfg_attr(feature = "nightly", feature(assert_matches))]
-#![cfg_attr(feature = "nightly", feature(associated_type_bounds))]
-#![cfg_attr(feature = "nightly", feature(exhaustive_patterns))]
-#![cfg_attr(feature = "nightly", feature(min_specialization))]
-#![cfg_attr(feature = "nightly", feature(never_type))]
-#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
-#![cfg_attr(feature = "nightly", feature(step_trait))]
+#![feature(assert_matches)]
+#![feature(associated_type_bounds)]
+#![feature(exhaustive_patterns)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(rustc_attrs)]
+#![feature(step_trait)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
 
@@ -22,27 +22,20 @@ use std::iter::FromIterator;
 use std::path::{Path, PathBuf};
 
 #[macro_use]
-#[cfg(feature = "nightly")]
 extern crate rustc_macros;
 
 #[macro_use]
-#[cfg(feature = "nightly")]
 extern crate tracing;
 
 pub mod abi;
-#[cfg(feature = "nightly")]
 pub mod asm;
 pub mod json;
-#[cfg(feature = "nightly")]
 pub mod spec;
 
 #[cfg(test)]
 mod tests;
 
-/// Requirements for a `StableHashingContext` to be used in this crate.
-/// This is a hack to allow using the `HashStable_Generic` derive macro
-/// instead of implementing everything in `rustc_middle`.
-pub trait HashStableContext {}
+pub use rustc_abi::HashStableContext;
 
 /// The name of rustc's own place to organize libraries.
 ///
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index c633ef1e761..bd5b10d6aa7 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -35,7 +35,10 @@
 //! to the list specified by the target, rather than replace.
 
 use crate::abi::call::Conv;
-use crate::abi::Endian;
+use crate::abi::{
+    AbiAndPrefAlign, AddressSpace, Align, Endian, Integer, Size, TargetDataLayout,
+    TargetDataLayoutErrors,
+};
 use crate::json::{Json, ToJson};
 use crate::spec::abi::{lookup as lookup_abi, Abi};
 use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
@@ -1317,6 +1320,120 @@ pub struct Target {
     pub options: TargetOptions,
 }
 
+impl Target {
+    pub fn parse_data_layout<'a>(&'a self) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
+        // Parse an address space index from a string.
+        let parse_address_space = |s: &'a str, cause: &'a str| {
+            s.parse::<u32>().map(AddressSpace).map_err(|err| {
+                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
+            })
+        };
+
+        // Parse a bit count from a string.
+        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
+            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
+                kind,
+                bit: s,
+                cause,
+                err,
+            })
+        };
+
+        // Parse a size string.
+        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
+
+        // Parse an alignment string.
+        let align = |s: &[&'a str], cause: &'a str| {
+            if s.is_empty() {
+                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
+            }
+            let align_from_bits = |bits| {
+                Align::from_bits(bits)
+                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
+            };
+            let abi = parse_bits(s[0], "alignment", cause)?;
+            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
+            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
+        };
+
+        let mut dl = TargetDataLayout::default();
+        let mut i128_align_src = 64;
+        for spec in self.data_layout.split('-') {
+            let spec_parts = spec.split(':').collect::<Vec<_>>();
+
+            match &*spec_parts {
+                ["e"] => dl.endian = Endian::Little,
+                ["E"] => dl.endian = Endian::Big,
+                [p] if p.starts_with('P') => {
+                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
+                }
+                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
+                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
+                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
+                    dl.pointer_size = size(s, p)?;
+                    dl.pointer_align = align(a, p)?;
+                }
+                [s, ref a @ ..] if s.starts_with('i') => {
+                    let Ok(bits) = s[1..].parse::<u64>() else {
+                        size(&s[1..], "i")?; // For the user error.
+                        continue;
+                    };
+                    let a = align(a, s)?;
+                    match bits {
+                        1 => dl.i1_align = a,
+                        8 => dl.i8_align = a,
+                        16 => dl.i16_align = a,
+                        32 => dl.i32_align = a,
+                        64 => dl.i64_align = a,
+                        _ => {}
+                    }
+                    if bits >= i128_align_src && bits <= 128 {
+                        // Default alignment for i128 is decided by taking the alignment of
+                        // largest-sized i{64..=128}.
+                        i128_align_src = bits;
+                        dl.i128_align = a;
+                    }
+                }
+                [s, ref a @ ..] if s.starts_with('v') => {
+                    let v_size = size(&s[1..], "v")?;
+                    let a = align(a, s)?;
+                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
+                        v.1 = a;
+                        continue;
+                    }
+                    // No existing entry, add a new one.
+                    dl.vector_align.push((v_size, a));
+                }
+                _ => {} // Ignore everything else.
+            }
+        }
+
+        // Perform consistency checks against the Target information.
+        if dl.endian != self.endian {
+            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
+                dl: dl.endian.as_str(),
+                target: self.endian.as_str(),
+            });
+        }
+
+        let target_pointer_width: u64 = self.pointer_width.into();
+        if dl.pointer_size.bits() != target_pointer_width {
+            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
+                pointer_size: dl.pointer_size.bits(),
+                target: self.pointer_width,
+            });
+        }
+
+        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(self.c_enum_min_bits)) {
+            Ok(bits) => bits,
+            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
+        };
+
+        Ok(dl)
+    }
+}
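
For orientation, here is how the loop above decomposes a real data-layout string; the sample is an abbreviated x86_64-unknown-linux-gnu layout, and specs the `match` does not recognize are silently skipped, just as in `parse_data_layout`:

// Standalone sketch: split a data-layout string the way the parser above does,
// first on '-' and then on ':'.
fn main() {
    let data_layout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
    for spec in data_layout.split('-') {
        let parts: Vec<&str> = spec.split(':').collect();
        match &*parts {
            ["e"] => println!("little-endian"),
            [s, a] if s.starts_with('i') => println!("{s} aligned to {a} bits"),
            // "m:e" (mangling), "f80:128", "n8:...", "S128" are ignored here,
            // like the `_ => {}` arm above.
            _ => println!("skipped: {parts:?}"),
        }
    }
}
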
+
 pub trait HasTargetSpec {
     fn target_spec(&self) -> &Target;
 }
diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml
index 5e4ba473061..52fbd3ae047 100644
--- a/compiler/rustc_ty_utils/Cargo.toml
+++ b/compiler/rustc_ty_utils/Cargo.toml
@@ -4,8 +4,6 @@ version = "0.0.0"
 edition = "2021"
 
 [dependencies]
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
 tracing = "0.1"
 rustc_middle = { path = "../rustc_middle" }
 rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 0af8276b246..7a1cc1e9e6d 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -755,7 +755,7 @@ fn generator_layout<'tcx>(
 
     size = size.align_to(align.abi);
 
-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
         Abi::Uninhabited
     } else {
         Abi::Aggregate { sized: true }
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index ee5e7bc2359..9eb8f684bdb 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -249,27 +249,27 @@ pub(super) fn sanity_check_layout<'tcx>(
         if let Variants::Multiple { variants, .. } = &layout.variants {
             for variant in variants.iter() {
                 // No nested "multiple".
-                assert!(matches!(variant.variants(), Variants::Single { .. }));
+                assert!(matches!(variant.variants, Variants::Single { .. }));
                 // Variants should be no larger than the full layout,
                 // and likewise for alignment.
-                if variant.size() > layout.size {
+                if variant.size > layout.size {
                     bug!(
                         "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
                         layout.size.bytes(),
-                        variant.size().bytes(),
+                        variant.size.bytes(),
                     )
                 }
-                if variant.align().abi > layout.align.abi {
+                if variant.align.abi > layout.align.abi {
                     bug!(
                         "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
                         layout.align.abi.bytes(),
-                        variant.align().abi.bytes(),
+                        variant.align.abi.bytes(),
                     )
                 }
                 // Skip empty variants.
-                if variant.size() == Size::ZERO
-                    || variant.fields().count() == 0
-                    || variant.abi().is_uninhabited()
+                if variant.size == Size::ZERO
+                    || variant.fields.count() == 0
+                    || variant.abi.is_uninhabited()
                 {
                     // These are never actually accessed anyway, so we can skip the coherence check
                     // for them. They also fail that check, since they have
@@ -282,7 +282,7 @@ pub(super) fn sanity_check_layout<'tcx>(
                 let scalar_coherent = |s1: Scalar, s2: Scalar| {
                     s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
                 };
-                let abi_coherent = match (layout.abi, variant.abi()) {
+                let abi_coherent = match (layout.abi, variant.abi) {
                     (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
                     (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
                         scalar_coherent(a1, a2) && scalar_coherent(b1, b2)