author     bors <bors@rust-lang.org>  2022-11-24 20:29:13 +0000
committer  bors <bors@rust-lang.org>  2022-11-24 20:29:13 +0000
commit     b3bc6bf31265ac10946a0832092dbcedf9b26805 (patch)
tree       5d839466b9602bf1ae6eb4e42883b68a2aa05545
parent     5dfb4b0afaf6acace0845d00e85a934fb4289d83 (diff)
parent     390a637e296ccfaac4c6abd1291b0523e8a8e00b (diff)
download   rust-b3bc6bf31265ac10946a0832092dbcedf9b26805.tar.gz
           rust-b3bc6bf31265ac10946a0832092dbcedf9b26805.zip
Auto merge of #103693 - HKalbasi:master, r=oli-obk
Make rustc_target usable outside of rustc

I'm working on showing type sizes in rust-analyzer (https://github.com/rust-lang/rust-analyzer/pull/13490). Currently I have copied rustc code into rust-analyzer, which works but is bad. With this change, I will be able to use `rustc_target` and `rustc_index` directly in r-a, reducing the amount of copied code.

This PR puts nightly features behind feature flags so that the crates can be built with the stable compiler, makes the layout-related types generic over the index type, and removes the interning of nested layouts.
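
As a rough sketch of what this enables (illustrative only, not part of this commit: the crate path and the `LayoutCx` type are made up for the example), a crate built on the stable compiler could depend on `rustc_abi` with its nightly features disabled and supply its own `LayoutCalculator` context:

    // Cargo.toml (illustrative):
    // rustc_abi = { path = ".../compiler/rustc_abi", default-features = false }

    use rustc_abi::{LayoutCalculator, TargetDataLayout};

    // Minimal context: just a reference to the parsed target data layout.
    struct LayoutCx<'a> {
        dl: &'a TargetDataLayout,
    }

    impl<'a> LayoutCalculator for LayoutCx<'a> {
        // `&TargetDataLayout` satisfies the `Borrow<TargetDataLayout>` bound.
        type TargetDataLayoutRef = &'a TargetDataLayout;

        fn delay_bug(&self, txt: &str) {
            // Stand-in for rustc's delayed ICE reporting.
            panic!("{txt}");
        }

        fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
            self.dl
        }
    }

With `default-features = false`, the `nightly` feature (and with it the `rustc_data_structures`, `rustc_macros`, and `rustc_serialize` dependencies) stays off, so the crate builds on stable.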
-rw-r--r--  Cargo.lock                                                             18
-rw-r--r--  compiler/rustc_abi/Cargo.toml                                          24
-rw-r--r--  compiler/rustc_abi/src/layout.rs                                      947
-rw-r--r--  compiler/rustc_abi/src/lib.rs                                        1399
-rw-r--r--  compiler/rustc_hir_analysis/src/collect.rs                              3
-rw-r--r--  compiler/rustc_index/Cargo.toml                                         8
-rw-r--r--  compiler/rustc_index/src/lib.rs                                        22
-rw-r--r--  compiler/rustc_index/src/vec.rs                                         5
-rw-r--r--  compiler/rustc_lint/src/types.rs                                        8
-rw-r--r--  compiler/rustc_middle/src/arena.rs                                      2
-rw-r--r--  compiler/rustc_middle/src/ty/adt.rs                                     6
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs                                 6
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs                                 33
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs                                   235
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs                                   23
-rw-r--r--  compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs          2
-rw-r--r--  compiler/rustc_session/src/config.rs                                    4
-rw-r--r--  compiler/rustc_target/Cargo.toml                                        1
-rw-r--r--  compiler/rustc_target/src/abi/call/mod.rs                               2
-rw-r--r--  compiler/rustc_target/src/abi/call/sparc64.rs                          14
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs                                 1330
-rw-r--r--  compiler/rustc_target/src/lib.rs                                        5
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs                                 119
-rw-r--r--  compiler/rustc_traits/Cargo.toml                                        1
-rw-r--r--  compiler/rustc_traits/src/chalk/db.rs                                  30
-rw-r--r--  compiler/rustc_ty_utils/Cargo.toml                                      2
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs                                 978
-rw-r--r--  compiler/rustc_ty_utils/src/layout_sanity_check.rs                     18
-rw-r--r--  src/librustdoc/html/render/print_item.rs                               12
-rw-r--r--  src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs     5
-rw-r--r--  src/tools/clippy/clippy_lints/src/lib.rs                                1
31 files changed, 2725 insertions, 2538 deletions
diff --git a/Cargo.lock b/Cargo.lock
index c987bf44ec0..d8612b3a256 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3203,6 +3203,20 @@ dependencies = [
 ]
 
 [[package]]
+name = "rustc_abi"
+version = "0.0.0"
+dependencies = [
+ "bitflags",
+ "rand 0.8.5",
+ "rand_xoshiro",
+ "rustc_data_structures",
+ "rustc_index",
+ "rustc_macros",
+ "rustc_serialize",
+ "tracing",
+]
+
+[[package]]
 name = "rustc_apfloat"
 version = "0.0.0"
 dependencies = [
@@ -4281,6 +4295,7 @@ name = "rustc_target"
 version = "0.0.0"
 dependencies = [
  "bitflags",
+ "rustc_abi",
  "rustc_data_structures",
  "rustc_feature",
  "rustc_index",
@@ -4336,6 +4351,7 @@ dependencies = [
  "rustc_infer",
  "rustc_middle",
  "rustc_span",
+ "rustc_target",
  "rustc_trait_selection",
  "smallvec",
  "tracing",
@@ -4360,8 +4376,6 @@ dependencies = [
 name = "rustc_ty_utils"
 version = "0.0.0"
 dependencies = [
- "rand 0.8.5",
- "rand_xoshiro",
  "rustc_data_structures",
  "rustc_errors",
  "rustc_hir",
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml
new file mode 100644
index 00000000000..48b199cb8ee
--- /dev/null
+++ b/compiler/rustc_abi/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "rustc_abi"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+rand = { version = "0.8.4", default-features = false, optional = true }
+rand_xoshiro = { version = "0.6.0", optional = true }
+rustc_data_structures = { path = "../rustc_data_structures", optional = true }
+rustc_index = { path = "../rustc_index", default-features = false }
+rustc_macros = { path = "../rustc_macros", optional = true }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+
+[features]
+default = ["nightly", "randomize"]
+randomize = ["rand", "rand_xoshiro"]
+nightly = [
+    "rustc_data_structures",
+    "rustc_index/nightly",
+    "rustc_macros",
+    "rustc_serialize",
+]
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
new file mode 100644
index 00000000000..39ea7a85be6
--- /dev/null
+++ b/compiler/rustc_abi/src/layout.rs
@@ -0,0 +1,947 @@
+use super::*;
+use std::{
+    borrow::Borrow,
+    cmp,
+    fmt::Debug,
+    iter,
+    ops::{Bound, Deref},
+};
+
+#[cfg(feature = "randomize")]
+use rand::{seq::SliceRandom, SeedableRng};
+#[cfg(feature = "randomize")]
+use rand_xoshiro::Xoshiro128StarStar;
+
+use tracing::debug;
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
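+// For example (illustrative): invert_mapping(&[2, 0, 1]) == vec![1, 2, 0],
+// since map[0] = 2 implies inverse[2] = 0.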
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+    let mut inverse = vec![0; map.len()];
+    for i in 0..map.len() {
+        inverse[map[i] as usize] = i as u32;
+    }
+    inverse
+}
+
+pub trait LayoutCalculator {
+    type TargetDataLayoutRef: Borrow<TargetDataLayout>;
+
+    fn delay_bug(&self, txt: &str);
+    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
+
+    fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
+        let dl = self.current_data_layout();
+        let dl = dl.borrow();
+        let b_align = b.align(dl);
+        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+        let b_offset = a.size(dl).align_to(b_align.abi);
+        let size = (b_offset + b.size(dl)).align_to(align.abi);
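+        // Worked example (illustrative): for an (i8, i32) pair, b_align is 4,
+        // so b_offset = 1 aligned to 4 = 4 and size = (4 + 4) aligned to 4 = 8.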
+
+        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+        // returns the last maximum.
+        let largest_niche = Niche::from_scalar(dl, b_offset, b)
+            .into_iter()
+            .chain(Niche::from_scalar(dl, Size::ZERO, a))
+            .max_by_key(|niche| niche.available(dl));
+
+        LayoutS {
+            variants: Variants::Single { index: V::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: vec![Size::ZERO, b_offset],
+                memory_index: vec![0, 1],
+            },
+            abi: Abi::ScalarPair(a, b),
+            largest_niche,
+            align,
+            size,
+        }
+    }
+
+    fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+        &self,
+        dl: &TargetDataLayout,
+        fields: &[F],
+        repr: &ReprOptions,
+        kind: StructKind,
+    ) -> Option<LayoutS<V>> {
+        let pack = repr.pack;
+        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+        let optimize = !repr.inhibit_struct_field_reordering_opt();
+        if optimize {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index[..end];
+            let effective_field_align = |f: &F| {
+                if let Some(pack) = pack {
+                    // returns the packed alignment in bytes
+                    f.align.abi.min(pack).bytes()
+                } else {
+                    // returns log2(effective-align).
+                    // This is ok since `pack` applies to all fields equally.
+                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+                    //
+                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+                    f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
+                }
+            };
+
+            // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
+            // the field ordering to try and catch some code making assumptions about
+            // layouts we don't guarantee.
+            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+                #[cfg(feature = "randomize")]
+                {
+                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can
+                    // use to randomize the field ordering with
+                    let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+                    // Shuffle the ordering of the fields
+                    optimizing.shuffle(&mut rng);
+                }
+                // Otherwise we just leave things alone and actually optimize the type's fields
+            } else {
+                match kind {
+                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                        optimizing.sort_by_key(|&x| {
+                            // Place ZSTs first to avoid "interesting offsets",
+                            // especially with only one or two non-ZST fields.
+                            // Then place largest alignments first, largest niches within an alignment group last
+                            let f = &fields[x as usize];
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
+                        });
+                    }
+
+                    StructKind::Prefixed(..) => {
+                        // Sort in ascending alignment so that the layout stays optimal
+                        // regardless of the prefix.
+                        // And put the largest niche in an alignment group at the end
+                        // so it can be used as a discriminant in jagged enums
+                        optimizing.sort_by_key(|&x| {
+                            let f = &fields[x as usize];
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            (effective_field_align(f), niche_size)
+                        });
+                    }
+                }
+
+                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+                //                 regardless of the status of `-Z randomize-layout`
+            }
+        }
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_mapping`).
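+        // Worked example (illustrative): if the memory order is [2, 0, 1], then
+        // field 2 sits at the lowest offset, so offsets[2] receives that offset;
+        // inverting [2, 0, 1] yields memory_index = [1, 2, 0].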
+        let mut sized = true;
+        let mut offsets = vec![Size::ZERO; fields.len()];
+        let mut offset = Size::ZERO;
+        let mut largest_niche = None;
+        let mut largest_niche_available = 0;
+        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+            let prefix_align =
+                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+            align = align.max(AbiAndPrefAlign::new(prefix_align));
+            offset = prefix_size.align_to(prefix_align);
+        }
+        for &i in &inverse_memory_index {
+            let field = &fields[i as usize];
+            if !sized {
+                self.delay_bug(&format!(
+                    "univariant: field #{} comes after unsized field",
+                    i,
+                ));
+            }
+
+            if field.is_unsized() {
+                sized = false;
+            }
+
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            let field_align = if let Some(pack) = pack {
+                field.align.min(AbiAndPrefAlign::new(pack))
+            } else {
+                field.align
+            };
+            offset = offset.align_to(field_align.abi);
+            align = align.max(field_align);
+
+            debug!("univariant offset: {:?} field: {:#?}", offset, field);
+            offsets[i as usize] = offset;
+
+            if let Some(mut niche) = field.largest_niche {
+                let available = niche.available(dl);
+                if available > largest_niche_available {
+                    largest_niche_available = available;
+                    niche.offset += offset;
+                    largest_niche = Some(niche);
+                }
+            }
+
+            offset = offset.checked_add(field.size, dl)?;
+        }
+        if let Some(repr_align) = repr.align {
+            align = align.max(AbiAndPrefAlign::new(repr_align));
+        }
+        debug!("univariant min_size: {:?}", offset);
+        let min_size = offset;
+        // As stated above, inverse_memory_index holds field indices by increasing offset.
+        // This makes it an already-sorted view of the offsets vec.
+        // To invert it, consider:
+        // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5]
+        // should be 0.
+        // Note: if we didn't optimize, it's already right.
+        let memory_index =
+            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+        let size = min_size.align_to(align.abi);
+        let mut abi = Abi::Aggregate { sized };
+        // Unpack newtype ABIs and find scalar pairs.
+        if sized && size.bytes() > 0 {
+            // All other fields must be ZSTs.
+            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                abi = field.abi;
+                            }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi;
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+
+                // Two non-ZST fields, and they're both scalars.
+                (Some((i, a)), Some((j, b)), None) => {
+                    match (a.abi, b.abi) {
+                        (Abi::Scalar(a), Abi::Scalar(b)) => {
+                            // Order by the memory placement, not source order.
+                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                                ((i, a), (j, b))
+                            } else {
+                                ((j, b), (i, a))
+                            };
+                            let pair = self.scalar_pair::<V>(a, b);
+                            let pair_offsets = match pair.fields {
+                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                    assert_eq!(memory_index, &[0, 1]);
+                                    offsets
+                                }
+                                _ => panic!(),
+                            };
+                            if offsets[i] == pair_offsets[0]
+                                && offsets[j] == pair_offsets[1]
+                                && align == pair.align
+                                && size == pair.size
+                            {
+                                // We can use `ScalarPair` only when it matches our
+                                // already computed layout (including `#[repr(C)]`).
+                                abi = pair.abi;
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+
+                _ => {}
+            }
+        }
+        if fields.iter().any(|f| f.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
+        }
+        Some(LayoutS {
+            variants: Variants::Single { index: V::new(0) },
+            fields: FieldsShape::Arbitrary { offsets, memory_index },
+            abi,
+            largest_niche,
+            align,
+            size,
+        })
+    }
+
+    fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
+        let dl = self.current_data_layout();
+        let dl = dl.borrow();
+        LayoutS {
+            variants: Variants::Single { index: V::new(0) },
+            fields: FieldsShape::Primitive,
+            abi: Abi::Uninhabited,
+            largest_niche: None,
+            align: dl.i8_align,
+            size: Size::ZERO,
+        }
+    }
+
+    fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexVec<V, Vec<F>>,
+        is_enum: bool,
+        is_unsafe_cell: bool,
+        scalar_valid_range: (Bound<u128>, Bound<u128>),
+        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
+        discriminants: impl Iterator<Item = (V, i128)>,
+        niche_optimize_enum: bool,
+        always_sized: bool,
+    ) -> Option<LayoutS<V>> {
+        let dl = self.current_data_layout();
+        let dl = dl.borrow();
+
+        let scalar_unit = |value: Primitive| {
+            let size = value.size(dl);
+            assert!(size.bits() <= 128);
+            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+        };
+
+        // A variant is absent if it's uninhabited and only has ZST fields.
+        // Present uninhabited variants only require space for their fields,
+        // but *not* an encoding of the discriminant (e.g., a tag value).
+        // See issue #49298 for more details on the need to leave space
+        // for non-ZST uninhabited data (mostly partial initialization).
+        let absent = |fields: &[F]| {
+            let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+            let is_zst = fields.iter().all(|f| f.is_zst());
+            uninhabited && is_zst
+        };
+        let (present_first, present_second) = {
+            let mut present_variants = variants
+                .iter_enumerated()
+                .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+            (present_variants.next(), present_variants.next())
+        };
+        let present_first = match present_first {
+            Some(present_first) => present_first,
+            // Uninhabited because it has no variants, or only absent ones.
+            None if is_enum => {
+                return Some(self.layout_of_never_type());
+            }
+            // If it's a struct, still compute a layout so that we can still compute the
+            // field offsets.
+            None => V::new(0),
+        };
+
+        let is_struct = !is_enum ||
+                    // Only one variant is present.
+                    (present_second.is_none() &&
+                        // Representation optimizations are allowed.
+                        !repr.inhibit_enum_layout_opt());
+        if is_struct {
+            // Struct, or univariant enum equivalent to a struct.
+            // (Typechecking will reject discriminant-sizing attrs.)
+
+            let v = present_first;
+            let kind = if is_enum || variants[v].is_empty() {
+                StructKind::AlwaysSized
+            } else {
+                if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
+            };
+
+            let mut st = self.univariant(dl, &variants[v], &repr, kind)?;
+            st.variants = Variants::Single { index: v };
+
+            if is_unsafe_cell {
+                let hide_niches = |scalar: &mut _| match scalar {
+                    Scalar::Initialized { value, valid_range } => {
+                        *valid_range = WrappingRange::full(value.size(dl))
+                    }
+                    // Already doesn't have any niches
+                    Scalar::Union { .. } => {}
+                };
+                match &mut st.abi {
+                    Abi::Uninhabited => {}
+                    Abi::Scalar(scalar) => hide_niches(scalar),
+                    Abi::ScalarPair(a, b) => {
+                        hide_niches(a);
+                        hide_niches(b);
+                    }
+                    Abi::Vector { element, count: _ } => hide_niches(element),
+                    Abi::Aggregate { sized: _ } => {}
+                }
+                st.largest_niche = None;
+                return Some(st);
+            }
+
+            let (start, end) = scalar_valid_range;
+            match st.abi {
+                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                    // the asserts ensure that we are not using the
+                    // `#[rustc_layout_scalar_valid_range(n)]`
+                    // attribute to widen the range of anything as that would probably
+                    // result in UB somewhere
+                    // FIXME(eddyb) the asserts are probably not needed,
+                    // as larger validity ranges would result in missed
+                    // optimizations, *not* wrongly assuming the inner
+                    // value is valid. e.g. unions enlarge validity ranges,
+                    // because the values may be uninitialized.
+                    if let Bound::Included(start) = start {
+                        // FIXME(eddyb) this might be incorrect - it doesn't
+                        // account for wrap-around (end < start) ranges.
+                        let valid_range = scalar.valid_range_mut();
+                        assert!(valid_range.start <= start);
+                        valid_range.start = start;
+                    }
+                    if let Bound::Included(end) = end {
+                        // FIXME(eddyb) this might be incorrect - it doesn't
+                        // account for wrap-around (end < start) ranges.
+                        let valid_range = scalar.valid_range_mut();
+                        assert!(valid_range.end >= end);
+                        valid_range.end = end;
+                    }
+
+                    // Update `largest_niche` if we have introduced a larger niche.
+                    let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+                    if let Some(niche) = niche {
+                        match st.largest_niche {
+                            Some(largest_niche) => {
+                                // Replace the existing niche even if they're equal,
+                                // because this one is at a lower offset.
+                                if largest_niche.available(dl) <= niche.available(dl) {
+                                    st.largest_niche = Some(niche);
+                                }
+                            }
+                            None => st.largest_niche = Some(niche),
+                        }
+                    }
+                }
+                _ => assert!(
+                    start == Bound::Unbounded && end == Bound::Unbounded,
+                    "nonscalar layout for layout_scalar_valid_range type: {:#?}",
+                    st,
+                ),
+            }
+
+            return Some(st);
+        }
+
+        // At this point, we have handled all unions and
+        // structs. (We have also handled univariant enums
+        // that allow representation optimization.)
+        assert!(is_enum);
+
+        // Until we've decided whether to use the tagged or
+        // niche filling LayoutS, we don't want to intern the
+        // variant layouts, so we can't store them in the
+        // overall LayoutS. Store the overall LayoutS
+        // and the variant LayoutSs here until then.
+        struct TmpLayout<V: Idx> {
+            layout: LayoutS<V>,
+            variants: IndexVec<V, LayoutS<V>>,
+        }
+
+        let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
+            if niche_optimize_enum {
+                return None;
+            }
+
+            if variants.len() < 2 {
+                return None;
+            }
+
+            let mut align = dl.aggregate_align;
+            let mut variant_layouts = variants
+                .iter_enumerated()
+                .map(|(j, v)| {
+                    let mut st = self.univariant(dl, v, &repr, StructKind::AlwaysSized)?;
+                    st.variants = Variants::Single { index: j };
+
+                    align = align.max(st.align);
+
+                    Some(st)
+                })
+                .collect::<Option<IndexVec<V, _>>>()?;
+
+            let largest_variant_index = variant_layouts
+                .iter_enumerated()
+                .max_by_key(|(_i, layout)| layout.size.bytes())
+                .map(|(i, _layout)| i)?;
+
+            let all_indices = (0..=variants.len() - 1).map(V::new);
+            let needs_disc = |index: V| index != largest_variant_index && !absent(&variants[index]);
+            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap().index()
+                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap().index();
+
+            let count = niche_variants.size_hint().1.unwrap() as u128;
+
+            // Find the field with the largest niche
+            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
+                .iter()
+                .enumerate()
+                .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+                .max_by_key(|(_, niche)| niche.available(dl))
+                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
+            let niche_offset =
+                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
+            let niche_size = niche.value.size(dl);
+            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+
+            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
+                if i == largest_variant_index {
+                    return true;
+                }
+
+                layout.largest_niche = None;
+
+                if layout.size <= niche_offset {
+                    // This variant will fit before the niche.
+                    return true;
+                }
+
+                // Determine if it'll fit after the niche.
+                let this_align = layout.align.abi;
+                let this_offset = (niche_offset + niche_size).align_to(this_align);
+
+                if this_offset + layout.size > size {
+                    return false;
+                }
+
+                // It'll fit, but we need to make some adjustments.
+                match layout.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for (j, offset) in offsets.iter_mut().enumerate() {
+                            if !variants[i][j].is_zst() {
+                                *offset += this_offset;
+                            }
+                        }
+                    }
+                    _ => {
+                        panic!("Layout of fields should be Arbitrary for variants")
+                    }
+                }
+
+                // It can't be a Scalar or ScalarPair because the offset isn't 0.
+                if !layout.abi.is_uninhabited() {
+                    layout.abi = Abi::Aggregate { sized: true };
+                }
+                layout.size += this_offset;
+
+                true
+            });
+
+            if !all_variants_fit {
+                return None;
+            }
+
+            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+
+            let others_zst = variant_layouts
+                .iter_enumerated()
+                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
+            let same_size = size == variant_layouts[largest_variant_index].size;
+            let same_align = align == variant_layouts[largest_variant_index].align;
+
+            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
+                Abi::Uninhabited
+            } else if same_size && same_align && others_zst {
+                match variant_layouts[largest_variant_index].abi {
+                    // When the total alignment and size match, we can use the
+                    // same ABI as the scalar variant with the reserved niche.
+                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+                    Abi::ScalarPair(first, second) => {
+                        // Only the niche is guaranteed to be initialised,
+                        // so use union layouts for the other primitive.
+                        if niche_offset == Size::ZERO {
+                            Abi::ScalarPair(niche_scalar, second.to_union())
+                        } else {
+                            Abi::ScalarPair(first.to_union(), niche_scalar)
+                        }
+                    }
+                    _ => Abi::Aggregate { sized: true },
+                }
+            } else {
+                Abi::Aggregate { sized: true }
+            };
+
+            let layout = LayoutS {
+                variants: Variants::Multiple {
+                    tag: niche_scalar,
+                    tag_encoding: TagEncoding::Niche {
+                        untagged_variant: largest_variant_index,
+                        niche_variants: (V::new(*niche_variants.start())
+                            ..=V::new(*niche_variants.end())),
+                        niche_start,
+                    },
+                    tag_field: 0,
+                    variants: IndexVec::new(),
+                },
+                fields: FieldsShape::Arbitrary {
+                    offsets: vec![niche_offset],
+                    memory_index: vec![0],
+                },
+                abi,
+                largest_niche,
+                size,
+                align,
+            };
+
+            Some(TmpLayout { layout, variants: variant_layouts })
+        };
+
+        let niche_filling_layout = calculate_niche_filling_layout();
+
+        let (mut min, mut max) = (i128::MAX, i128::MIN);
+        let discr_type = repr.discr_type();
+        let bits = Integer::from_attr(dl, discr_type).size().bits();
+        for (i, mut val) in discriminants {
+            if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+                continue;
+            }
+            if discr_type.is_signed() {
+                // sign extend the raw representation to be an i128
+                val = (val << (128 - bits)) >> (128 - bits);
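+                // e.g. (illustrative) for an 8-bit discriminant, bits = 8, so a
+                // raw value of 0xff becomes (0xff << 120) >> 120 == -1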
+            }
+            if val < min {
+                min = val;
+            }
+            if val > max {
+                max = val;
+            }
+        }
+        // We might have no inhabited variants, so pretend there's at least one.
+        if (min, max) == (i128::MAX, i128::MIN) {
+            min = 0;
+            max = 0;
+        }
+        assert!(min <= max, "discriminant range is {}...{}", min, max);
+        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
+
+        let mut align = dl.aggregate_align;
+        let mut size = Size::ZERO;
+
+        // We're interested in the smallest alignment, so start large.
+        let mut start_align = Align::from_bytes(256).unwrap();
+        assert_eq!(Integer::for_align(dl, start_align), None);
+
+        // repr(C) on an enum tells us to make a (tag, union) layout,
+        // so we need to grow the prefix alignment to be at least
+        // the alignment of the union. (This value is used both for
+        // determining the alignment of the overall enum, and for
+        // determining the alignment of the payload after the tag.)
+        let mut prefix_align = min_ity.align(dl).abi;
+        if repr.c() {
+            for fields in variants {
+                for field in fields {
+                    prefix_align = prefix_align.max(field.align.abi);
+                }
+            }
+        }
+
+        // Create the set of structs that represent each variant.
+        let mut layout_variants = variants
+            .iter_enumerated()
+            .map(|(i, field_layouts)| {
+                let mut st = self.univariant(
+                    dl,
+                    &field_layouts,
+                    &repr,
+                    StructKind::Prefixed(min_ity.size(), prefix_align),
+                )?;
+                st.variants = Variants::Single { index: i };
+                // Find the first field we can't move later
+                // to make room for a larger discriminant.
+                for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
+                    if !field.is_zst() || field.align.abi.bytes() != 1 {
+                        start_align = start_align.min(field.align.abi);
+                        break;
+                    }
+                }
+                size = cmp::max(size, st.size);
+                align = align.max(st.align);
+                Some(st)
+            })
+            .collect::<Option<IndexVec<V, _>>>()?;
+
+        // Align the maximum variant size to the largest alignment.
+        size = size.align_to(align.abi);
+
+        if size.bytes() >= dl.obj_size_bound() {
+            return None;
+        }
+
+        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
+        if typeck_ity < min_ity {
+            // It is a bug if Layout decided on a greater discriminant size than typeck
+            // did at this point (based on the values the discriminant can take on),
+            // mostly because this discriminant will be loaded and then stored into a
+            // variable of the type calculated by typeck. Consider such a case (a bug):
+            // typeck decided on a byte-sized discriminant, but layout thinks we need
+            // 16 bits to store all the discriminant values. Then, in codegen, storing
+            // this 16-bit discriminant into an 8-bit temporary would discard some of
+            // the space necessary to represent it (or layout would be wrong in
+            // thinking it needs 16 bits).
+            panic!(
+                "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
+                min_ity, typeck_ity
+            );
+            // However, it is fine to make the discriminant type larger (as an
+            // optimisation) after this point – we’ll just truncate the value we load in codegen.
+        }
+
+        // Check to see if we should use a different type for the
+        // discriminant. We can safely use a type with the same size
+        // as the alignment of the first field of each variant.
+        // We increase the size of the discriminant to avoid LLVM copying
+        // padding when it doesn't need to. This normally causes unaligned
+        // load/stores and excessive memcpy/memset operations. By using a
+        // bigger integer size, LLVM can be sure about its contents and
+        // won't be so conservative.
+
+        // Use the initial field alignment
+        let mut ity = if repr.c() || repr.int.is_some() {
+            min_ity
+        } else {
+            Integer::for_align(dl, start_align).unwrap_or(min_ity)
+        };
+
+        // If the alignment is not larger than the chosen discriminant size,
+        // don't use the alignment as the final size.
+        if ity <= min_ity {
+            ity = min_ity;
+        } else {
+            // Patch up the variants' first few fields.
+            let old_ity_size = min_ity.size();
+            let new_ity_size = ity.size();
+            for variant in &mut layout_variants {
+                match variant.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for i in offsets {
+                            if *i <= old_ity_size {
+                                assert_eq!(*i, old_ity_size);
+                                *i = new_ity_size;
+                            }
+                        }
+                        // We might be making the struct larger.
+                        if variant.size <= old_ity_size {
+                            variant.size = new_ity_size;
+                        }
+                    }
+                    _ => panic!(),
+                }
+            }
+        }
+
+        let tag_mask = ity.size().unsigned_int_max();
+        let tag = Scalar::Initialized {
+            value: Int(ity, signed),
+            valid_range: WrappingRange {
+                start: (min as u128 & tag_mask),
+                end: (max as u128 & tag_mask),
+            },
+        };
+        let mut abi = Abi::Aggregate { sized: true };
+
+        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
+        } else if tag.size(dl) == size {
+            // Make sure we only use scalar layout when the enum is entirely its
+            // own tag (i.e. it has no padding nor any non-ZST variant fields).
+            abi = Abi::Scalar(tag);
+        } else {
+            // Try to use a ScalarPair for all tagged enums.
+            let mut common_prim = None;
+            let mut common_prim_initialized_in_all_variants = true;
+            for (field_layouts, layout_variant) in iter::zip(&*variants, &layout_variants) {
+                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+                    panic!();
+                };
+                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+                let (field, offset) = match (fields.next(), fields.next()) {
+                    (None, None) => {
+                        common_prim_initialized_in_all_variants = false;
+                        continue;
+                    }
+                    (Some(pair), None) => pair,
+                    _ => {
+                        common_prim = None;
+                        break;
+                    }
+                };
+                let prim = match field.abi {
+                    Abi::Scalar(scalar) => {
+                        common_prim_initialized_in_all_variants &=
+                            matches!(scalar, Scalar::Initialized { .. });
+                        scalar.primitive()
+                    }
+                    _ => {
+                        common_prim = None;
+                        break;
+                    }
+                };
+                if let Some(pair) = common_prim {
+                    // This is pretty conservative. We could go fancier
+                    // by conflating things like i32 and u32, or even
+                    // realising that (u8, u8) could just cohabit with
+                    // u16 or even u32.
+                    if pair != (prim, offset) {
+                        common_prim = None;
+                        break;
+                    }
+                } else {
+                    common_prim = Some((prim, offset));
+                }
+            }
+            if let Some((prim, offset)) = common_prim {
+                let prim_scalar = if common_prim_initialized_in_all_variants {
+                    scalar_unit(prim)
+                } else {
+                    // Common prim might be uninit.
+                    Scalar::Union { value: prim }
+                };
+                let pair = self.scalar_pair::<V>(tag, prim_scalar);
+                let pair_offsets = match pair.fields {
+                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                        assert_eq!(memory_index, &[0, 1]);
+                        offsets
+                    }
+                    _ => panic!(),
+                };
+                if pair_offsets[0] == Size::ZERO
+                    && pair_offsets[1] == *offset
+                    && align == pair.align
+                    && size == pair.size
+                {
+                    // We can use `ScalarPair` only when it matches our
+                    // already computed layout (including `#[repr(C)]`).
+                    abi = pair.abi;
+                }
+            }
+        }
+
+        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+        // variants to ensure they are consistent. This is because a downcast is
+        // semantically a NOP, and thus should not affect layout.
+        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+            for variant in &mut layout_variants {
+                // We only do this for variants with fields; the others are not accessed anyway.
+                // Also do not overwrite any already existing "clever" ABIs.
+                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
+                    variant.abi = abi;
+                    // Also need to bump up the size and alignment, so that the entire value fits in here.
+                    variant.size = cmp::max(variant.size, size);
+                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
+                }
+            }
+        }
+
+        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
+
+        let tagged_layout = LayoutS {
+            variants: Variants::Multiple {
+                tag,
+                tag_encoding: TagEncoding::Direct,
+                tag_field: 0,
+                variants: IndexVec::new(),
+            },
+            fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
+            largest_niche,
+            abi,
+            align,
+            size,
+        };
+
+        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
+
+        let mut best_layout = match (tagged_layout, niche_filling_layout) {
+            (tl, Some(nl)) => {
+                // Pick the smaller layout; otherwise,
+                // pick the layout with the larger niche; otherwise,
+                // pick tagged as it has simpler codegen.
+                use cmp::Ordering::*;
+                let niche_size = |tmp_l: &TmpLayout<V>| {
+                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+                };
+                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
+                    (Greater, _) => nl,
+                    (Equal, Less) => nl,
+                    _ => tl,
+                }
+            }
+            (tl, None) => tl,
+        };
+
+        // Now we can intern the variant layouts and store them in the enum layout.
+        best_layout.layout.variants = match best_layout.layout.variants {
+            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
+                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
+            }
+            _ => panic!(),
+        };
+        Some(best_layout.layout)
+    }
+
+    fn layout_of_union<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexVec<V, Vec<F>>,
+    ) -> Option<LayoutS<V>> {
+        let dl = self.current_data_layout();
+        let dl = dl.borrow();
+        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+        if let Some(repr_align) = repr.align {
+            align = align.max(AbiAndPrefAlign::new(repr_align));
+        }
+
+        let optimize = !repr.inhibit_union_abi_opt();
+        let mut size = Size::ZERO;
+        let mut abi = Abi::Aggregate { sized: true };
+        let index = V::new(0);
+        for field in &variants[index] {
+            assert!(field.is_sized());
+            align = align.max(field.align);
+
+            // If all non-ZST fields have the same ABI, forward this ABI
+            if optimize && !field.is_zst() {
+                // Discard valid range information and allow undef
+                let field_abi = match field.abi {
+                    Abi::Scalar(x) => Abi::Scalar(x.to_union()),
+                    Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
+                    Abi::Vector { element: x, count } => {
+                        Abi::Vector { element: x.to_union(), count }
+                    }
+                    Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+                };
+
+                if size == Size::ZERO {
+                    // first non-ZST field: initialize 'abi'
+                    abi = field_abi;
+                } else if abi != field_abi {
+                    // different fields have different ABI: reset to Aggregate
+                    abi = Abi::Aggregate { sized: true };
+                }
+            }
+
+            size = cmp::max(size, field.size);
+        }
+
+        if let Some(pack) = repr.pack {
+            align = align.min(AbiAndPrefAlign::new(pack));
+        }
+
+        Some(LayoutS {
+            variants: Variants::Single { index },
+            fields: FieldsShape::Union(NonZeroUsize::new(variants[index].len())?),
+            abi,
+            largest_niche: None,
+            align,
+            size: size.align_to(align.abi),
+        })
+    }
+}
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
new file mode 100644
index 00000000000..4f4a4bf314f
--- /dev/null
+++ b/compiler/rustc_abi/src/lib.rs
@@ -0,0 +1,1399 @@
+#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+#[cfg(feature = "nightly")]
+use std::iter::Step;
+use std::num::{NonZeroUsize, ParseIntError};
+use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
+use std::str::FromStr;
+
+use bitflags::bitflags;
+use rustc_index::vec::{Idx, IndexVec};
+#[cfg(feature = "nightly")]
+use rustc_macros::HashStable_Generic;
+#[cfg(feature = "nightly")]
+use rustc_macros::{Decodable, Encodable};
+
+mod layout;
+
+pub use layout::LayoutCalculator;
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext {}
+
+use Integer::*;
+use Primitive::*;
+
+bitflags! {
+    #[derive(Default)]
+    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+    pub struct ReprFlags: u8 {
+        const IS_C               = 1 << 0;
+        const IS_SIMD            = 1 << 1;
+        const IS_TRANSPARENT     = 1 << 2;
+        // Internal only for now. If true, don't reorder fields.
+        const IS_LINEAR          = 1 << 3;
+        // If true, the type's layout can be randomized using
+        // the seed stored in `ReprOptions.field_shuffle_seed`
+        const RANDOMIZE_LAYOUT   = 1 << 4;
+        // Any of these flags being set prevent field reordering optimisation.
+        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
+                                 | ReprFlags::IS_SIMD.bits
+                                 | ReprFlags::IS_LINEAR.bits;
+    }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub enum IntegerType {
+    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows
+    /// signedness; that is, `Pointer(true)` is `isize`.
+    Pointer(bool),
+    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The `bool` field shows
+    /// signedness; `Fixed(I8, false)` means `u8`.
+    Fixed(Integer, bool),
+}
+
+impl IntegerType {
+    pub fn is_signed(&self) -> bool {
+        match self {
+            IntegerType::Pointer(b) => *b,
+            IntegerType::Fixed(_, b) => *b,
+        }
+    }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct ReprOptions {
+    pub int: Option<IntegerType>,
+    pub align: Option<Align>,
+    pub pack: Option<Align>,
+    pub flags: ReprFlags,
+    /// The seed to be used for randomizing a type's layout
+    ///
+    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
+    /// be the "most accurate" hash as it'd encompass the item and crate
+    /// hash without loss, but it does pay the price of being larger.
+    /// Everything's a tradeoff, a `u64` seed should be sufficient for our
+    /// purposes (primarily `-Z randomize-layout`)
+    pub field_shuffle_seed: u64,
+}
+
+impl ReprOptions {
+    #[inline]
+    pub fn simd(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_SIMD)
+    }
+
+    #[inline]
+    pub fn c(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_C)
+    }
+
+    #[inline]
+    pub fn packed(&self) -> bool {
+        self.pack.is_some()
+    }
+
+    #[inline]
+    pub fn transparent(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_TRANSPARENT)
+    }
+
+    #[inline]
+    pub fn linear(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_LINEAR)
+    }
+
+    /// Returns the discriminant type, given these `repr` options.
+    /// This must only be called on enums!
+    pub fn discr_type(&self) -> IntegerType {
+        self.int.unwrap_or(IntegerType::Pointer(true))
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+    /// layout" optimizations, such as representing `Foo<&T>` as a
+    /// single pointer.
+    pub fn inhibit_enum_layout_opt(&self) -> bool {
+        self.c() || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+        if let Some(pack) = self.pack {
+            if pack.bytes() == 1 {
+                return true;
+            }
+        }
+
+        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+    }
+
+    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
+    /// was enabled for its declaration crate
+    pub fn can_randomize_type_layout(&self) -> bool {
+        !self.inhibit_struct_field_reordering_opt()
+            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+    pub fn inhibit_union_abi_opt(&self) -> bool {
+        self.c()
+    }
+}
+
+/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+#[derive(Debug, PartialEq, Eq)]
+pub struct TargetDataLayout {
+    pub endian: Endian,
+    pub i1_align: AbiAndPrefAlign,
+    pub i8_align: AbiAndPrefAlign,
+    pub i16_align: AbiAndPrefAlign,
+    pub i32_align: AbiAndPrefAlign,
+    pub i64_align: AbiAndPrefAlign,
+    pub i128_align: AbiAndPrefAlign,
+    pub f32_align: AbiAndPrefAlign,
+    pub f64_align: AbiAndPrefAlign,
+    pub pointer_size: Size,
+    pub pointer_align: AbiAndPrefAlign,
+    pub aggregate_align: AbiAndPrefAlign,
+
+    /// Alignments for vector types.
+    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+
+    pub instruction_address_space: AddressSpace,
+
+    /// Minimum size of `#[repr(C)]` enums (default `I32`, i.e. 32 bits)
+    pub c_enum_min_size: Integer,
+}
+
+impl Default for TargetDataLayout {
+    /// Creates an instance of `TargetDataLayout`.
+    fn default() -> TargetDataLayout {
+        let align = |bits| Align::from_bits(bits).unwrap();
+        TargetDataLayout {
+            endian: Endian::Big,
+            i1_align: AbiAndPrefAlign::new(align(8)),
+            i8_align: AbiAndPrefAlign::new(align(8)),
+            i16_align: AbiAndPrefAlign::new(align(16)),
+            i32_align: AbiAndPrefAlign::new(align(32)),
+            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+            f32_align: AbiAndPrefAlign::new(align(32)),
+            f64_align: AbiAndPrefAlign::new(align(64)),
+            pointer_size: Size::from_bits(64),
+            pointer_align: AbiAndPrefAlign::new(align(64)),
+            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
+            vector_align: vec![
+                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
+                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
+            ],
+            instruction_address_space: AddressSpace::DATA,
+            c_enum_min_size: Integer::I32,
+        }
+    }
+}
+
+pub enum TargetDataLayoutErrors<'a> {
+    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
+    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
+    MissingAlignment { cause: &'a str },
+    InvalidAlignment { cause: &'a str, err: String },
+    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
+    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
+    InvalidBitsSize { err: String },
+}
+
+impl TargetDataLayout {
+    /// Returns exclusive upper bound on object size.
+    ///
+    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+    /// index every address within an object along with one byte past the end, along with allowing
+    /// `isize` to store the difference between any two pointers into an object.
+    ///
+    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+    /// address space on 64-bit ARMv8 and x86_64.
+    #[inline]
+    pub fn obj_size_bound(&self) -> u64 {
+        match self.pointer_size.bits() {
+            16 => 1 << 15,
+            32 => 1 << 31,
+            64 => 1 << 47,
+            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+        }
+    }
+
+    #[inline]
+    pub fn ptr_sized_integer(&self) -> Integer {
+        match self.pointer_size.bits() {
+            16 => I16,
+            32 => I32,
+            64 => I64,
+            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+        }
+    }
+
+    #[inline]
+    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
+        for &(size, align) in &self.vector_align {
+            if size == vec_size {
+                return align;
+            }
+        }
+        // Default to natural alignment, which is what LLVM does.
+        // That is, use the size, rounded up to a power of 2.
+        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
+    }
+}
+
+pub trait HasDataLayout {
+    fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl HasDataLayout for TargetDataLayout {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        self
+    }
+}
+
+/// Endianness of the target, which must match cfg(target_endian).
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Endian {
+    Little,
+    Big,
+}
+
+impl Endian {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Self::Little => "little",
+            Self::Big => "big",
+        }
+    }
+}
+
+impl fmt::Debug for Endian {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+impl FromStr for Endian {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "little" => Ok(Self::Little),
+            "big" => Ok(Self::Big),
+            _ => Err(format!(r#"unknown endian: "{}""#, s)),
+        }
+    }
+}
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct Size {
+    raw: u64,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Size {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Size({} bytes)", self.bytes())
+    }
+}
+
+impl Size {
+    pub const ZERO: Size = Size { raw: 0 };
+
+    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
+    /// not a multiple of 8.
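+    ///
+    /// For example, `Size::from_bits(12)` is a 2-byte size.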
+    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+        let bits = bits.try_into().ok().unwrap();
+        // Avoid potential overflow from `bits + 7`.
+        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
+    }
+
+    #[inline]
+    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+        let bytes: u64 = bytes.try_into().ok().unwrap();
+        Size { raw: bytes }
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        self.raw
+    }
+
+    #[inline]
+    pub fn bytes_usize(self) -> usize {
+        self.bytes().try_into().unwrap()
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        #[cold]
+        fn overflow(bytes: u64) -> ! {
+            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+        }
+
+        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
+    }
+
+    #[inline]
+    pub fn bits_usize(self) -> usize {
+        self.bits().try_into().unwrap()
+    }
+
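+    /// Rounds up to a multiple of `align`; e.g. `Size::from_bytes(5)` aligned
+    /// to 4 bytes yields `Size::from_bytes(8)`.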
+    #[inline]
+    pub fn align_to(self, align: Align) -> Size {
+        let mask = align.bytes() - 1;
+        Size::from_bytes((self.bytes() + mask) & !mask)
+    }
+
+    #[inline]
+    pub fn is_aligned(self, align: Align) -> bool {
+        let mask = align.bytes() - 1;
+        self.bytes() & mask == 0
+    }
+
+    #[inline]
+    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
+        let dl = cx.data_layout();
+
+        let bytes = self.bytes().checked_add(offset.bytes())?;
+
+        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+    }
+
+    #[inline]
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
+        let dl = cx.data_layout();
+
+        let bytes = self.bytes().checked_mul(count)?;
+        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+    }
+
+    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+    /// (i.e., if it is negative, fill with 1's on the left).
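+    ///
+    /// For example, with `self` being 8 bits, `0xFF` sign-extends to
+    /// `u128::MAX` (the 128-bit two's complement encoding of `-1`).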
+    #[inline]
+    pub fn sign_extend(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        // Sign-extend it.
+        let shift = 128 - size;
+        // Shift the unsigned value to the left, then shift back to the right as signed
+        // (essentially fills with sign bit on the left).
+        (((value << shift) as i128) >> shift) as u128
+    }
+
+    /// Truncates `value` to `self` bits.
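+    ///
+    /// For example, with `self` being 8 bits, `0x1234` truncates to `0x34`.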
+    #[inline]
+    pub fn truncate(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        let shift = 128 - size;
+        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+        (value << shift) >> shift
+    }
+
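+    /// The minimum value representable in `self` bits as a signed two's
+    /// complement integer, e.g. `-128` for `Size::from_bits(8)`.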
+    #[inline]
+    pub fn signed_int_min(&self) -> i128 {
+        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+    }
+
+    #[inline]
+    pub fn signed_int_max(&self) -> i128 {
+        i128::MAX >> (128 - self.bits())
+    }
+
+    #[inline]
+    pub fn unsigned_int_max(&self) -> u128 {
+        u128::MAX >> (128 - self.bits())
+    }
+}
+
+// Panicking addition, subtraction and multiplication for convenience.
+// Avoid during layout computation, return `LayoutError` instead.
+
+impl Add for Size {
+    type Output = Size;
+    #[inline]
+    fn add(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Sub for Size {
+    type Output = Size;
+    #[inline]
+    fn sub(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Mul<Size> for u64 {
+    type Output = Size;
+    #[inline]
+    fn mul(self, size: Size) -> Size {
+        size * self
+    }
+}
+
+impl Mul<u64> for Size {
+    type Output = Size;
+    #[inline]
+    fn mul(self, count: u64) -> Size {
+        match self.bytes().checked_mul(count) {
+            Some(bytes) => Size::from_bytes(bytes),
+            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
+        }
+    }
+}
+
+impl AddAssign for Size {
+    #[inline]
+    fn add_assign(&mut self, other: Size) {
+        *self = *self + other;
+    }
+}
+
+#[cfg(feature = "nightly")]
+impl Step for Size {
+    #[inline]
+    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+        u64::steps_between(&start.bytes(), &end.bytes())
+    }
+
+    #[inline]
+    fn forward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn forward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
+    }
+
+    #[inline]
+    fn backward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn backward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
+    }
+}
+
+/// Alignment of a type in bytes (always a power of two).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub struct Align {
+    pow2: u8,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Align {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Align({} bytes)", self.bytes())
+    }
+}
+
+impl Align {
+    pub const ONE: Align = Align { pow2: 0 };
+    pub const MAX: Align = Align { pow2: 29 };
+
+    #[inline]
+    pub fn from_bits(bits: u64) -> Result<Align, String> {
+        Align::from_bytes(Size::from_bits(bits).bytes())
+    }
+
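+    /// Parses an alignment given in bytes; e.g. `Align::from_bytes(16)` yields
+    /// a 16-byte (`pow2 == 4`) alignment, while `10` is rejected as not a
+    /// power of 2.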
+    #[inline]
+    pub fn from_bytes(align: u64) -> Result<Align, String> {
+        // Treat an alignment of 0 bytes like 1-byte alignment.
+        if align == 0 {
+            return Ok(Align::ONE);
+        }
+
+        #[cold]
+        fn not_power_of_2(align: u64) -> String {
+            format!("`{}` is not a power of 2", align)
+        }
+
+        #[cold]
+        fn too_large(align: u64) -> String {
+            format!("`{}` is too large", align)
+        }
+
+        let mut bytes = align;
+        let mut pow2: u8 = 0;
+        while (bytes & 1) == 0 {
+            pow2 += 1;
+            bytes >>= 1;
+        }
+        if bytes != 1 {
+            return Err(not_power_of_2(align));
+        }
+        if pow2 > Self::MAX.pow2 {
+            return Err(too_large(align));
+        }
+
+        Ok(Align { pow2 })
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        1 << self.pow2
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        self.bytes() * 8
+    }
+
+    /// Computes the best alignment possible for the given offset
+    /// (the largest power of two that the offset is a multiple of).
+    ///
+    /// N.B., for an offset of `0`, this happens to return `2^64`.
+    #[inline]
+    pub fn max_for_offset(offset: Size) -> Align {
+        Align { pow2: offset.bytes().trailing_zeros() as u8 }
+    }
+
+    /// Lower the alignment, if necessary, such that the given offset
+    /// is aligned to it (the offset is a multiple of the alignment).
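+    ///
+    /// For example, an 8-byte alignment restricted for an offset of 12 bytes
+    /// drops to 4 bytes, the largest power of two dividing 12.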
+    #[inline]
+    pub fn restrict_for_offset(self, offset: Size) -> Align {
+        self.min(Align::max_for_offset(offset))
+    }
+}
+
+/// A pair of alignments, ABI-mandated and preferred.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct AbiAndPrefAlign {
+    pub abi: Align,
+    pub pref: Align,
+}
+
+impl AbiAndPrefAlign {
+    #[inline]
+    pub fn new(align: Align) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: align, pref: align }
+    }
+
+    #[inline]
+    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
+    }
+
+    #[inline]
+    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
+    }
+}
+
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
+pub enum Integer {
+    I8,
+    I16,
+    I32,
+    I64,
+    I128,
+}
+
+impl Integer {
+    #[inline]
+    pub fn size(self) -> Size {
+        match self {
+            I8 => Size::from_bytes(1),
+            I16 => Size::from_bytes(2),
+            I32 => Size::from_bytes(4),
+            I64 => Size::from_bytes(8),
+            I128 => Size::from_bytes(16),
+        }
+    }
+
+    /// Gets the `Integer` type from an `IntegerType`.
+    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
+        let dl = cx.data_layout();
+
+        match ity {
+            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
+            IntegerType::Fixed(x, _) => x,
+        }
+    }
+
+    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+        let dl = cx.data_layout();
+
+        match self {
+            I8 => dl.i8_align,
+            I16 => dl.i16_align,
+            I32 => dl.i32_align,
+            I64 => dl.i64_align,
+            I128 => dl.i128_align,
+        }
+    }
+
+    /// Finds the smallest `Integer` type which can represent the signed value.
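+    ///
+    /// For example, `Integer::fit_signed(-129)` is `I16`, since `-129` does
+    /// not fit in an `i8`.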
+    #[inline]
+    pub fn fit_signed(x: i128) -> Integer {
+        match x {
+            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
+            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
+            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
+            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
+            _ => I128,
+        }
+    }
+
+    /// Finds the smallest `Integer` type which can represent the unsigned value.
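+    ///
+    /// For example, `Integer::fit_unsigned(256)` is `I16`.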
+    #[inline]
+    pub fn fit_unsigned(x: u128) -> Integer {
+        match x {
+            0..=0x0000_0000_0000_00ff => I8,
+            0..=0x0000_0000_0000_ffff => I16,
+            0..=0x0000_0000_ffff_ffff => I32,
+            0..=0xffff_ffff_ffff_ffff => I64,
+            _ => I128,
+        }
+    }
+
+    /// Finds the smallest integer with the given alignment.
+    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
+        let dl = cx.data_layout();
+
+        for candidate in [I8, I16, I32, I64, I128] {
+            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
+                return Some(candidate);
+            }
+        }
+        None
+    }
+
+    /// Finds the largest integer with the given alignment or less.
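+    ///
+    /// For example, on a typical 64-bit target a wanted alignment of 4 bytes
+    /// yields `I32`, since `I64` is larger than the wanted alignment allows.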
+    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
+        let dl = cx.data_layout();
+
+        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
+        for candidate in [I64, I32, I16] {
+            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
+                return candidate;
+            }
+        }
+        I8
+    }
+
+    // FIXME(eddyb) consolidate this and other methods that find the appropriate
+    // `Integer` given some requirements.
+    #[inline]
+    pub fn from_size(size: Size) -> Result<Self, String> {
+        match size.bits() {
+            8 => Ok(Integer::I8),
+            16 => Ok(Integer::I16),
+            32 => Ok(Integer::I32),
+            64 => Ok(Integer::I64),
+            128 => Ok(Integer::I128),
+            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
+        }
+    }
+}
+
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Primitive {
+    /// The `bool` is the signedness of the `Integer` type.
+    ///
+    /// One would think we would not care about such details this low down,
+    /// but some ABIs are described in terms of C types and ISAs where the
+    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
+    /// a negative integer passed by zero-extension will appear positive in
+    /// the callee, and most operations on it will produce the wrong values.
+    Int(Integer, bool),
+    F32,
+    F64,
+    Pointer,
+}
+
+impl Primitive {
+    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
+        let dl = cx.data_layout();
+
+        match self {
+            Int(i, _) => i.size(),
+            F32 => Size::from_bits(32),
+            F64 => Size::from_bits(64),
+            Pointer => dl.pointer_size,
+        }
+    }
+
+    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+        let dl = cx.data_layout();
+
+        match self {
+            Int(i, _) => i.align(dl),
+            F32 => dl.f32_align,
+            F64 => dl.f64_align,
+            Pointer => dl.pointer_align,
+        }
+    }
+
+    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
+    #[inline]
+    pub fn is_float(self) -> bool {
+        matches!(self, F32 | F64)
+    }
+
+    // FIXME(eddyb) remove, it's completely unused.
+    #[inline]
+    pub fn is_int(self) -> bool {
+        matches!(self, Int(..))
+    }
+
+    #[inline]
+    pub fn is_ptr(self) -> bool {
+        matches!(self, Pointer)
+    }
+}
+
+/// Inclusive wrap-around range of valid values, that is, if
+/// start > end, it represents `start..=MAX`,
+/// followed by `0..=end`.
+///
+/// That is, for an i8 primitive, a range of `254..=2` means the following
+/// sequence:
+///
+///    254 (-2), 255 (-1), 0, 1, 2
+///
+/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct WrappingRange {
+    pub start: u128,
+    pub end: u128,
+}
+
+impl WrappingRange {
+    pub fn full(size: Size) -> Self {
+        Self { start: 0, end: size.unsigned_int_max() }
+    }
+
+    /// Returns `true` if `v` is contained in the range.
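+    ///
+    /// For the wrap-around range `254..=2` on an 8-bit value, `255` and `1`
+    /// are contained, but `3` is not.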
+    #[inline(always)]
+    pub fn contains(&self, v: u128) -> bool {
+        if self.start <= self.end {
+            self.start <= v && v <= self.end
+        } else {
+            self.start <= v || v <= self.end
+        }
+    }
+
+    /// Returns `self` with `start` replaced by the given value.
+    #[inline(always)]
+    pub fn with_start(mut self, start: u128) -> Self {
+        self.start = start;
+        self
+    }
+
+    /// Returns `self` with `end` replaced by the given value.
+    #[inline(always)]
+    pub fn with_end(mut self, end: u128) -> Self {
+        self.end = end;
+        self
+    }
+
+    /// Returns `true` if `size` completely fills the range.
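+    ///
+    /// For an 8-bit size, both `0..=255` and the wrap-around range `1..=0`
+    /// count as full.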
+    #[inline]
+    pub fn is_full_for(&self, size: Size) -> bool {
+        let max_value = size.unsigned_int_max();
+        debug_assert!(self.start <= max_value && self.end <= max_value);
+        self.start == (self.end.wrapping_add(1) & max_value)
+    }
+}
+
+impl fmt::Debug for WrappingRange {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.start > self.end {
+            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+        } else {
+            write!(fmt, "{}..={}", self.start, self.end)?;
+        }
+        Ok(())
+    }
+}
+
+/// Information about one scalar component of a Rust type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Scalar {
+    Initialized {
+        value: Primitive,
+
+        // FIXME(eddyb) always use the shortest range, e.g., by finding
+        // the largest space between two consecutive valid values and
+        // taking everything else as the (shortest) valid range.
+        valid_range: WrappingRange,
+    },
+    Union {
+        /// Even for unions, we need to use the correct registers for the kind of
+        /// values inside the union, so we keep the `Primitive` type around. We
+        /// also use it to compute the size of the scalar.
+        /// However, unions never have niches and even allow undef,
+        /// so there is no `valid_range`.
+        value: Primitive,
+    },
+}
+
+impl Scalar {
+    #[inline]
+    pub fn is_bool(&self) -> bool {
+        matches!(
+            self,
+            Scalar::Initialized {
+                value: Int(I8, false),
+                valid_range: WrappingRange { start: 0, end: 1 }
+            }
+        )
+    }
+
+    /// Get the primitive representation of this type, ignoring the valid range and whether the
+    /// value is allowed to be undefined (due to being a union).
+    pub fn primitive(&self) -> Primitive {
+        match *self {
+            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+        }
+    }
+
+    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
+        self.primitive().align(cx)
+    }
+
+    pub fn size(self, cx: &impl HasDataLayout) -> Size {
+        self.primitive().size(cx)
+    }
+
+    #[inline]
+    pub fn to_union(&self) -> Self {
+        Self::Union { value: self.primitive() }
+    }
+
+    #[inline]
+    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+        }
+    }
+
+    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+    #[inline]
+    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+        match self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+        }
+    }
+
+    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
+    #[inline]
+    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+            Scalar::Union { .. } => true,
+        }
+    }
+
+    /// Returns `true` if this type can be left uninit.
+    #[inline]
+    pub fn is_uninit_valid(&self) -> bool {
+        match *self {
+            Scalar::Initialized { .. } => false,
+            Scalar::Union { .. } => true,
+        }
+    }
+}
+
+/// Describes how the fields of a type are located in memory.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum FieldsShape {
+    /// Scalar primitives and `!`, which never have fields.
+    Primitive,
+
+    /// All fields start at no offset. The `usize` is the field count.
+    Union(NonZeroUsize),
+
+    /// Array/vector-like placement, with all fields of identical types.
+    Array { stride: Size, count: u64 },
+
+    /// Struct-like placement, with precomputed offsets.
+    ///
+    /// Fields are guaranteed to not overlap, but note that gaps
+    /// before, between and after all the fields are NOT always
+    /// padding, and as such their contents may not be discarded.
+    /// For example, enum variants leave a gap at the start,
+    /// where the discriminant field in the enum layout goes.
+    Arbitrary {
+        /// Offsets for the first byte of each field,
+        /// ordered to match the source definition order.
+        /// This vector does not go in increasing order.
+        // FIXME(eddyb) use small vector optimization for the common case.
+        offsets: Vec<Size>,
+
+        /// Maps source order field indices to memory order indices,
+        /// depending on how the fields were reordered (if at all).
+        /// This is a permutation, with both the source order and the
+        /// memory order using the same (0..n) index ranges.
+        ///
+        /// Note that during computation of `memory_index`, sometimes
+        /// it is easier to operate on the inverse mapping (that is,
+        /// from memory order to source order), and that is usually
+        /// named `inverse_memory_index`.
+        ///
+        // FIXME(eddyb) build a better abstraction for permutations, if possible.
+        // FIXME(camlorn) also consider small vector optimization here.
+        memory_index: Vec<u32>,
+    },
+}
+
+impl FieldsShape {
+    #[inline]
+    pub fn count(&self) -> usize {
+        match *self {
+            FieldsShape::Primitive => 0,
+            FieldsShape::Union(count) => count.get(),
+            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
+        }
+    }
+
+    #[inline]
+    pub fn offset(&self, i: usize) -> Size {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(count) => {
+                assert!(
+                    i < count.get(),
+                    "tried to access field {} of union with {} fields",
+                    i,
+                    count
+                );
+                Size::ZERO
+            }
+            FieldsShape::Array { stride, count } => {
+                let i = u64::try_from(i).unwrap();
+                assert!(i < count);
+                stride * i
+            }
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+        }
+    }
+
+    #[inline]
+    pub fn memory_index(&self, i: usize) -> usize {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+        }
+    }
+
+    /// Gets the source indices of the fields by increasing offsets.
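+    ///
+    /// For example, with `memory_index = [1, 0]` (the first source field was
+    /// placed second in memory), this yields `1` and then `0`.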
+    #[inline]
+    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
+        let mut inverse_small = [0u8; 64];
+        let mut inverse_big = vec![];
+        let use_small = self.count() <= inverse_small.len();
+
+        // We have to write this logic twice in order to keep the array small.
+        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
+            if use_small {
+                for i in 0..self.count() {
+                    inverse_small[memory_index[i] as usize] = i as u8;
+                }
+            } else {
+                inverse_big = vec![0; self.count()];
+                for i in 0..self.count() {
+                    inverse_big[memory_index[i] as usize] = i as u32;
+                }
+            }
+        }
+
+        (0..self.count()).map(move |i| match *self {
+            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { .. } => {
+                if use_small {
+                    inverse_small[i] as usize
+                } else {
+                    inverse_big[i] as usize
+                }
+            }
+        })
+    }
+}
+
+/// An identifier that specifies the address space that some operation
+/// should operate on. Special address spaces have an effect on code generation,
+/// depending on the target and the address spaces it implements.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AddressSpace(pub u32);
+
+impl AddressSpace {
+    /// The default address space, corresponding to data space.
+    pub const DATA: Self = AddressSpace(0);
+}
+
+/// Describes how values of the type are passed by target ABIs,
+/// in terms of categories of C types there are ABI rules for.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Abi {
+    Uninhabited,
+    Scalar(Scalar),
+    ScalarPair(Scalar, Scalar),
+    Vector {
+        element: Scalar,
+        count: u64,
+    },
+    Aggregate {
+        /// If true, the size is exact, otherwise it's only a lower bound.
+        sized: bool,
+    },
+}
+
+impl Abi {
+    /// Returns `true` if the layout corresponds to an unsized type.
+    #[inline]
+    pub fn is_unsized(&self) -> bool {
+        match *self {
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Aggregate { sized } => !sized,
+        }
+    }
+
+    #[inline]
+    pub fn is_sized(&self) -> bool {
+        !self.is_unsized()
+    }
+
+    /// Returns `true` if this is a single signed integer scalar.
+    #[inline]
+    pub fn is_signed(&self) -> bool {
+        match self {
+            Abi::Scalar(scal) => match scal.primitive() {
+                Primitive::Int(_, signed) => signed,
+                _ => false,
+            },
+            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
+        }
+    }
+
+    /// Returns `true` if this is an uninhabited type.
+    #[inline]
+    pub fn is_uninhabited(&self) -> bool {
+        matches!(*self, Abi::Uninhabited)
+    }
+
+    /// Returns `true` if this is a scalar type.
+    #[inline]
+    pub fn is_scalar(&self) -> bool {
+        matches!(*self, Abi::Scalar(_))
+    }
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum Variants<V: Idx> {
+    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
+    Single { index: V },
+
+    /// Enum-likes with more than one inhabited variant: each variant comes with
+    /// a *discriminant* (usually the same as the variant index but the user can
+    /// assign explicit discriminant values).  That discriminant is encoded
+    /// as a *tag* on the machine.  The layout of each variant is
+    /// a struct, and they all have space reserved for the tag.
+    /// For enums, the tag is the sole field of the layout.
+    Multiple {
+        tag: Scalar,
+        tag_encoding: TagEncoding<V>,
+        tag_field: usize,
+        variants: IndexVec<V, LayoutS<V>>,
+    },
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub enum TagEncoding<V: Idx> {
+    /// The tag directly stores the discriminant, but possibly with a smaller layout
+    /// (so converting the tag to the discriminant can require sign extension).
+    Direct,
+
+    /// Niche (values invalid for a type) encoding the discriminant:
+    /// Discriminant and variant index coincide.
+    /// The variant `untagged_variant` contains a niche at an arbitrary
+    /// offset (field `tag_field` of the enum), which for a variant with
+    /// discriminant `d` is set to
+    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
+    ///
+    /// For example, `Option<(usize, &T)>` is represented such that
+    /// `None` has a null pointer for the second tuple field, and
+    /// `Some` is the identity function (with a non-null reference).
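+    ///
+    /// As a smaller worked example, `Option<bool>` stores `None` as the tag
+    /// value `2`, the first value outside `bool`'s valid range `0..=1`.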
+    Niche { untagged_variant: V, niche_variants: RangeInclusive<V>, niche_start: u128 },
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct Niche {
+    pub offset: Size,
+    pub value: Primitive,
+    pub valid_range: WrappingRange,
+}
+
+impl Niche {
+    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
+        let Scalar::Initialized { value, valid_range } = scalar else { return None };
+        let niche = Niche { offset, value, valid_range };
+        if niche.available(cx) > 0 { Some(niche) } else { None }
+    }
+
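+    /// Returns the number of values outside `valid_range`; e.g. a `bool`-like
+    /// `I8` scalar with valid range `0..=1` has `254` available niche values.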
+    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        // Find out how many values are outside the valid range.
+        let niche = v.end.wrapping_add(1)..v.start;
+        niche.end.wrapping_sub(niche.start) & max_value
+    }
+
+    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
+        assert!(count > 0);
+
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        let niche = v.end.wrapping_add(1)..v.start;
+        let available = niche.end.wrapping_sub(niche.start) & max_value;
+        if count > available {
+            return None;
+        }
+
+        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
+        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
+        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
+        // Having `None` in niche zero can enable some special optimizations.
+        //
+        // Bound selection criteria:
+        // 1. Select closest to zero given wrapping semantics.
+        // 2. Avoid moving past zero if possible.
+        //
+        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
+        // If niche zero is already reserved, the selection of bounds is of little interest.
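+        //
+        // For example, reserving one value in a `bool`-like niche (`0..=1` in
+        // an 8-bit scalar) moves the end bound, yielding `0..=2` and handing
+        // out `2` as the reserved value.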
+        let move_start = |v: WrappingRange| {
+            let start = v.start.wrapping_sub(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
+        };
+        let move_end = |v: WrappingRange| {
+            let start = v.end.wrapping_add(1) & max_value;
+            let end = v.end.wrapping_add(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
+        };
+        let distance_end_zero = max_value - v.end;
+        if v.start > v.end {
+            // zero is unavailable because wrapping occurs
+            move_end(v)
+        } else if v.start <= distance_end_zero {
+            if count <= v.start {
+                move_start(v)
+            } else {
+                // moved past zero, use other bound
+                move_end(v)
+            }
+        } else {
+            let end = v.end.wrapping_add(count) & max_value;
+            let overshot_zero = (1..=v.end).contains(&end);
+            if overshot_zero {
+                // moved past zero, use other bound
+                move_start(v)
+            } else {
+                move_end(v)
+            }
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Hash, Clone)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+pub struct LayoutS<V: Idx> {
+    /// Says where the fields are located within the layout.
+    pub fields: FieldsShape,
+
+    /// Encodes information about multi-variant layouts.
+    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
+    /// shared between all variants. One of them will be the discriminant,
+    /// but e.g. generators can have more.
+    ///
+    /// To access all fields of this layout, both `fields` and the fields of the active variant
+    /// must be taken into account.
+    pub variants: Variants<V>,
+
+    /// The `abi` defines how this data is passed between functions, and it defines
+    /// value restrictions via `valid_range`.
+    ///
+    /// Note that this is entirely orthogonal to the recursive structure defined by
+    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
+    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// have to be taken into account to find all fields of this layout.
+    pub abi: Abi,
+
+    /// The leaf scalar with the largest number of invalid values
+    /// (i.e. outside of its `valid_range`), if it exists.
+    pub largest_niche: Option<Niche>,
+
+    pub align: AbiAndPrefAlign,
+    pub size: Size,
+}
+
+impl<V: Idx> LayoutS<V> {
+    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+        let size = scalar.size(cx);
+        let align = scalar.align(cx);
+        LayoutS {
+            variants: Variants::Single { index: V::new(0) },
+            fields: FieldsShape::Primitive,
+            abi: Abi::Scalar(scalar),
+            largest_niche,
+            size,
+            align,
+        }
+    }
+}
+
+impl<V: Idx> fmt::Debug for LayoutS<V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This is how `Layout` used to print before it became
+        // `Interned<LayoutS>`. We print it like this to avoid having to update
+        // expected output in a lot of tests.
+        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
+        f.debug_struct("Layout")
+            .field("size", size)
+            .field("align", align)
+            .field("abi", abi)
+            .field("fields", fields)
+            .field("largest_niche", largest_niche)
+            .field("variants", variants)
+            .finish()
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PointerKind {
+    /// Most general case, we know no restrictions to tell LLVM.
+    SharedMutable,
+
+    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
+    Frozen,
+
+    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
+    UniqueBorrowed,
+
+    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
+    UniqueBorrowedPinned,
+
+    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
+    /// nor `dereferenceable`.
+    UniqueOwned,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PointeeInfo {
+    pub size: Size,
+    pub align: Align,
+    pub safe: Option<PointerKind>,
+    pub address_space: AddressSpace,
+}
+
+/// Used in `might_permit_raw_init` to indicate the kind of initialisation
+/// that is checked to be valid.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+    Zero,
+    UninitMitigated0x01Fill,
+}
+
+impl<V: Idx> LayoutS<V> {
+    /// Returns `true` if the layout corresponds to an unsized type.
+    pub fn is_unsized(&self) -> bool {
+        self.abi.is_unsized()
+    }
+
+    pub fn is_sized(&self) -> bool {
+        self.abi.is_sized()
+    }
+
+    /// Returns `true` if the type is a ZST and not unsized.
+    pub fn is_zst(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Uninhabited => self.size.bytes() == 0,
+            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum StructKind {
+    /// A tuple, closure, or univariant which cannot be coerced to unsized.
+    AlwaysSized,
+    /// A univariant, the last field of which may be coerced to unsized.
+    MaybeUnsized,
+    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+    Prefixed(Size, Align),
+}
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index 5d63d90f304..4c1d95a452d 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -33,7 +33,6 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}
 use rustc_middle::mir::mono::Linkage;
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::util::{Discr, IntTypeExt};
-use rustc_middle::ty::ReprOptions;
 use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
 use rustc_session::lint;
 use rustc_session::parse::feature_err;
@@ -860,7 +859,7 @@ fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
         bug!();
     };
 
-    let repr = ReprOptions::new(tcx, def_id.to_def_id());
+    let repr = tcx.repr_options_of_def(def_id.to_def_id());
     let (kind, variants) = match item.kind {
         ItemKind::Enum(ref def, _) => {
             let mut distance_from_explicit = 0;
diff --git a/compiler/rustc_index/Cargo.toml b/compiler/rustc_index/Cargo.toml
index d8ea5aa80b8..e1cda5a9edd 100644
--- a/compiler/rustc_index/Cargo.toml
+++ b/compiler/rustc_index/Cargo.toml
@@ -7,6 +7,10 @@ edition = "2021"
 
 [dependencies]
 arrayvec = { version = "0.7", default-features = false }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_macros = { path = "../rustc_macros", optional = true }
 smallvec = "1.8.1"
+
+[features]
+default = ["nightly"]
+nightly = ["rustc_serialize", "rustc_macros"]
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 23a4c1f0696..db2c7915256 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -1,17 +1,25 @@
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
-#![feature(allow_internal_unstable)]
-#![feature(extend_one)]
-#![feature(min_specialization)]
-#![feature(new_uninit)]
-#![feature(step_trait)]
-#![feature(stmt_expr_attributes)]
-#![feature(test)]
+#![cfg_attr(
+    feature = "nightly",
+    feature(
+        allow_internal_unstable,
+        extend_one,
+        min_specialization,
+        new_uninit,
+        step_trait,
+        stmt_expr_attributes,
+        test
+    )
+)]
 
+#[cfg(feature = "nightly")]
 pub mod bit_set;
+#[cfg(feature = "nightly")]
 pub mod interval;
 pub mod vec;
 
+#[cfg(feature = "rustc_macros")]
 pub use rustc_macros::newtype_index;
 
 /// Type size assertion. The first argument is a type and the second argument is its expected size.
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
index 1519258c794..39aa27a23c1 100644
--- a/compiler/rustc_index/src/vec.rs
+++ b/compiler/rustc_index/src/vec.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "rustc_serialize")]
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 
 use std::fmt;
@@ -61,12 +62,14 @@ pub struct IndexVec<I: Idx, T> {
 // not the phantom data.
 unsafe impl<I: Idx, T> Send for IndexVec<I, T> where T: Send {}
 
+#[cfg(feature = "rustc_serialize")]
 impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for IndexVec<I, T> {
     fn encode(&self, s: &mut S) {
         Encodable::encode(&self.raw, s);
     }
 }
 
+#[cfg(feature = "rustc_serialize")]
 impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> {
     fn decode(d: &mut D) -> Self {
         IndexVec { raw: Decodable::decode(d), _marker: PhantomData }
@@ -359,11 +362,13 @@ impl<I: Idx, T> Extend<T> for IndexVec<I, T> {
     }
 
     #[inline]
+    #[cfg(feature = "nightly")]
     fn extend_one(&mut self, item: T) {
         self.raw.push(item);
     }
 
     #[inline]
+    #[cfg(feature = "nightly")]
     fn extend_reserve(&mut self, additional: usize) {
         self.raw.reserve(additional);
     }
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index afc568f3a50..297b509d402 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -12,7 +12,7 @@ use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable,
 use rustc_span::source_map;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, Symbol};
-use rustc_target::abi::{Abi, WrappingRange};
+use rustc_target::abi::{Abi, Size, WrappingRange};
 use rustc_target::abi::{Integer, TagEncoding, Variants};
 use rustc_target::spec::abi::Abi as SpecAbi;
 
@@ -225,11 +225,11 @@ fn report_bin_hex_error(
     cx: &LateContext<'_>,
     expr: &hir::Expr<'_>,
     ty: attr::IntType,
+    size: Size,
     repr_str: String,
     val: u128,
     negative: bool,
 ) {
-    let size = Integer::from_attr(&cx.tcx, ty).size();
     cx.struct_span_lint(
         OVERFLOWING_LITERALS,
         expr.span,
@@ -352,6 +352,7 @@ fn lint_int_literal<'tcx>(
                 cx,
                 e,
                 attr::IntType::SignedInt(ty::ast_int_ty(t)),
+                Integer::from_int_ty(cx, t).size(),
                 repr_str,
                 v,
                 negative,
@@ -437,6 +438,7 @@ fn lint_uint_literal<'tcx>(
                 cx,
                 e,
                 attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
+                Integer::from_uint_ty(cx, t).size(),
                 repr_str,
                 lit_val,
                 false,
@@ -1376,7 +1378,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
             let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
                 .map(|(variant, variant_layout)| {
                     // Subtract the size of the enum tag.
-                    let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
+                    let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
 
                     debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
                     bytes
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index f8aae86fe3d..7bd4b6c0c27 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -6,7 +6,7 @@
 macro_rules! arena_types {
     ($macro:path) => (
         $macro!([
-            [] layout: rustc_target::abi::LayoutS<'tcx>,
+            [] layout: rustc_target::abi::LayoutS<rustc_target::abi::VariantIdx>,
             [] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
             // AdtDef are interned and compared by address
             [decode] adt_def: rustc_middle::ty::AdtDefData,
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index 6b6aa40a160..d3d667f6840 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -14,7 +14,7 @@ use rustc_index::vec::{Idx, IndexVec};
 use rustc_query_system::ich::StableHashingContext;
 use rustc_session::DataTypeKind;
 use rustc_span::symbol::sym;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{ReprOptions, VariantIdx};
 
 use std::cell::RefCell;
 use std::cmp::Ordering;
@@ -22,9 +22,7 @@ use std::hash::{Hash, Hasher};
 use std::ops::Range;
 use std::str;
 
-use super::{
-    Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
-};
+use super::{Destructor, FieldDef, GenericPredicates, Ty, TyCtxt, VariantDef, VariantDiscr};
 
 bitflags! {
     #[derive(HashStable, TyEncodable, TyDecodable)]
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 26d30308ed3..b5327ad0cec 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -148,7 +148,7 @@ pub struct CtxtInterners<'tcx> {
     const_: InternedSet<'tcx, ConstS<'tcx>>,
     const_allocation: InternedSet<'tcx, Allocation>,
     bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
-    layout: InternedSet<'tcx, LayoutS<'tcx>>,
+    layout: InternedSet<'tcx, LayoutS<VariantIdx>>,
     adt_def: InternedSet<'tcx, AdtDefData>,
 }
 
@@ -1233,7 +1233,7 @@ impl<'tcx> TyCtxt<'tcx> {
             global_ctxt: untracked_resolutions,
             ast_lowering: untracked_resolver_for_lowering,
         } = resolver_outputs;
-        let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
+        let data_layout = s.target.parse_data_layout().unwrap_or_else(|err| {
             s.emit_fatal(err);
         });
         let interners = CtxtInterners::new(arena);
@@ -2244,7 +2244,7 @@ direct_interners! {
     region: mk_region(RegionKind<'tcx>): Region -> Region<'tcx>,
     const_: mk_const_internal(ConstS<'tcx>): Const -> Const<'tcx>,
     const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
-    layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
+    layout: intern_layout(LayoutS<VariantIdx>): Layout -> Layout<'tcx>,
     adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
 }
 
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index c74d6bc3774..488fd567846 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,8 +1,6 @@
 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use crate::ty::normalize_erasing_regions::NormalizationError;
 use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable};
-use rustc_ast as ast;
-use rustc_attr as attr;
 use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
@@ -20,7 +18,6 @@ use std::ops::Bound;
 
 pub trait IntegerExt {
     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
-    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
     fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
     fn repr_discr<'tcx>(
@@ -49,22 +46,6 @@ impl IntegerExt for Integer {
         }
     }
 
-    /// Gets the Integer type from an attr::IntType.
-    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
-        let dl = cx.data_layout();
-
-        match ity {
-            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
-            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
-            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
-            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
-            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
-            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
-                dl.ptr_sized_integer()
-            }
-        }
-    }
-
     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
         match ity {
             ty::IntTy::I8 => I8,
@@ -237,6 +218,18 @@ pub struct LayoutCx<'tcx, C> {
     pub param_env: ty::ParamEnv<'tcx>,
 }
 
+impl<'tcx> LayoutCalculator for LayoutCx<'tcx, TyCtxt<'tcx>> {
+    type TargetDataLayoutRef = &'tcx TargetDataLayout;
+
+    fn delay_bug(&self, txt: &str) {
+        self.tcx.sess.delay_span_bug(DUMMY_SP, txt);
+    }
+
+    fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
+        &self.tcx.data_layout
+    }
+}
+
 /// Type size "skeleton", i.e., the only information determining a type's size.
 /// While this is conservative, (aside from constant sizes, only pointers,
 /// newtypes thereof and null pointer optimized enums are allowed), it is
@@ -610,7 +603,7 @@ where
                 })
             }
 
-            Variants::Multiple { ref variants, .. } => variants[variant_index],
+            Variants::Multiple { ref variants, .. } => cx.tcx().intern_layout(variants[variant_index].clone()),
         };
 
         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 0458c4abd3d..9d778ff2fb6 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -48,7 +48,8 @@ use rustc_session::cstore::CrateStoreDyn;
 use rustc_span::hygiene::MacroKind;
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
 use rustc_span::{ExpnId, Span};
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::{Align, Integer, IntegerType, VariantIdx};
+pub use rustc_target::abi::{ReprFlags, ReprOptions};
 pub use subst::*;
 pub use vtable::*;
 
@@ -1994,163 +1995,6 @@ impl Hash for FieldDef {
     }
 }
 
-bitflags! {
-    #[derive(TyEncodable, TyDecodable, Default, HashStable)]
-    pub struct ReprFlags: u8 {
-        const IS_C               = 1 << 0;
-        const IS_SIMD            = 1 << 1;
-        const IS_TRANSPARENT     = 1 << 2;
-        // Internal only for now. If true, don't reorder fields.
-        const IS_LINEAR          = 1 << 3;
-        // If true, the type's layout can be randomized using
-        // the seed stored in `ReprOptions.layout_seed`
-        const RANDOMIZE_LAYOUT   = 1 << 4;
-        // Any of these flags being set prevent field reordering optimisation.
-        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
-                                 | ReprFlags::IS_SIMD.bits
-                                 | ReprFlags::IS_LINEAR.bits;
-    }
-}
-
-/// Represents the repr options provided by the user,
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
-pub struct ReprOptions {
-    pub int: Option<attr::IntType>,
-    pub align: Option<Align>,
-    pub pack: Option<Align>,
-    pub flags: ReprFlags,
-    /// The seed to be used for randomizing a type's layout
-    ///
-    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
-    /// be the "most accurate" hash as it'd encompass the item and crate
-    /// hash without loss, but it does pay the price of being larger.
-    /// Everything's a tradeoff, a `u64` seed should be sufficient for our
-    /// purposes (primarily `-Z randomize-layout`)
-    pub field_shuffle_seed: u64,
-}
-
-impl ReprOptions {
-    pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
-        let mut flags = ReprFlags::empty();
-        let mut size = None;
-        let mut max_align: Option<Align> = None;
-        let mut min_pack: Option<Align> = None;
-
-        // Generate a deterministically-derived seed from the item's path hash
-        // to allow for cross-crate compilation to actually work
-        let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
-
-        // If the user defined a custom seed for layout randomization, xor the item's
-        // path hash with the user defined seed, this will allowing determinism while
-        // still allowing users to further randomize layout generation for e.g. fuzzing
-        if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
-            field_shuffle_seed ^= user_seed;
-        }
-
-        for attr in tcx.get_attrs(did, sym::repr) {
-            for r in attr::parse_repr_attr(&tcx.sess, attr) {
-                flags.insert(match r {
-                    attr::ReprC => ReprFlags::IS_C,
-                    attr::ReprPacked(pack) => {
-                        let pack = Align::from_bytes(pack as u64).unwrap();
-                        min_pack = Some(if let Some(min_pack) = min_pack {
-                            min_pack.min(pack)
-                        } else {
-                            pack
-                        });
-                        ReprFlags::empty()
-                    }
-                    attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
-                    attr::ReprSimd => ReprFlags::IS_SIMD,
-                    attr::ReprInt(i) => {
-                        size = Some(i);
-                        ReprFlags::empty()
-                    }
-                    attr::ReprAlign(align) => {
-                        max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
-                        ReprFlags::empty()
-                    }
-                });
-            }
-        }
-
-        // If `-Z randomize-layout` was enabled for the type definition then we can
-        // consider performing layout randomization
-        if tcx.sess.opts.unstable_opts.randomize_layout {
-            flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
-        }
-
-        // This is here instead of layout because the choice must make it into metadata.
-        if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
-            flags.insert(ReprFlags::IS_LINEAR);
-        }
-
-        Self { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
-    }
-
-    #[inline]
-    pub fn simd(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_SIMD)
-    }
-
-    #[inline]
-    pub fn c(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_C)
-    }
-
-    #[inline]
-    pub fn packed(&self) -> bool {
-        self.pack.is_some()
-    }
-
-    #[inline]
-    pub fn transparent(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_TRANSPARENT)
-    }
-
-    #[inline]
-    pub fn linear(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_LINEAR)
-    }
-
-    /// Returns the discriminant type, given these `repr` options.
-    /// This must only be called on enums!
-    pub fn discr_type(&self) -> attr::IntType {
-        self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
-    /// layout" optimizations, such as representing `Foo<&T>` as a
-    /// single pointer.
-    pub fn inhibit_enum_layout_opt(&self) -> bool {
-        self.c() || self.int.is_some()
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
-    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
-    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
-        if let Some(pack) = self.pack {
-            if pack.bytes() == 1 {
-                return true;
-            }
-        }
-
-        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
-    }
-
-    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
-    /// was enabled for its declaration crate
-    pub fn can_randomize_type_layout(&self) -> bool {
-        !self.inhibit_struct_field_reordering_opt()
-            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
-    pub fn inhibit_union_abi_opt(&self) -> bool {
-        self.c()
-    }
-}
-
 impl<'tcx> FieldDef {
     /// Returns the type of this field. The resulting type is not normalized. The `subst` is
     /// typically obtained via the second field of [`TyKind::Adt`].
@@ -2218,6 +2062,81 @@ impl<'tcx> TyCtxt<'tcx> {
             .filter(move |item| item.kind == AssocKind::Fn && item.defaultness(self).has_value())
     }
 
+    pub fn repr_options_of_def(self, did: DefId) -> ReprOptions {
+        let mut flags = ReprFlags::empty();
+        let mut size = None;
+        let mut max_align: Option<Align> = None;
+        let mut min_pack: Option<Align> = None;
+
+        // Generate a deterministically derived seed from the item's path hash,
+        // so that cross-crate compilation works: every crate derives the same seed.
+        let mut field_shuffle_seed = self.def_path_hash(did).0.to_smaller_hash();
+
+        // If the user defined a custom seed for layout randomization, xor the item's
+        // path hash with the user-defined seed. This keeps layouts deterministic while
+        // still letting users further randomize layout generation, e.g. for fuzzing.
+        if let Some(user_seed) = self.sess.opts.unstable_opts.layout_seed {
+            field_shuffle_seed ^= user_seed;
+        }
+
+        for attr in self.get_attrs(did, sym::repr) {
+            for r in attr::parse_repr_attr(&self.sess, attr) {
+                flags.insert(match r {
+                    attr::ReprC => ReprFlags::IS_C,
+                    attr::ReprPacked(pack) => {
+                        let pack = Align::from_bytes(pack as u64).unwrap();
+                        min_pack = Some(if let Some(min_pack) = min_pack {
+                            min_pack.min(pack)
+                        } else {
+                            pack
+                        });
+                        ReprFlags::empty()
+                    }
+                    attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+                    attr::ReprSimd => ReprFlags::IS_SIMD,
+                    attr::ReprInt(i) => {
+                        size = Some(match i {
+                            attr::IntType::SignedInt(x) => match x {
+                                ast::IntTy::Isize => IntegerType::Pointer(true),
+                                ast::IntTy::I8 => IntegerType::Fixed(Integer::I8, true),
+                                ast::IntTy::I16 => IntegerType::Fixed(Integer::I16, true),
+                                ast::IntTy::I32 => IntegerType::Fixed(Integer::I32, true),
+                                ast::IntTy::I64 => IntegerType::Fixed(Integer::I64, true),
+                                ast::IntTy::I128 => IntegerType::Fixed(Integer::I128, true),
+                            },
+                            attr::IntType::UnsignedInt(x) => match x {
+                                ast::UintTy::Usize => IntegerType::Pointer(false),
+                                ast::UintTy::U8 => IntegerType::Fixed(Integer::I8, false),
+                                ast::UintTy::U16 => IntegerType::Fixed(Integer::I16, false),
+                                ast::UintTy::U32 => IntegerType::Fixed(Integer::I32, false),
+                                ast::UintTy::U64 => IntegerType::Fixed(Integer::I64, false),
+                                ast::UintTy::U128 => IntegerType::Fixed(Integer::I128, false),
+                            },
+                        });
+                        ReprFlags::empty()
+                    }
+                    attr::ReprAlign(align) => {
+                        max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+                        ReprFlags::empty()
+                    }
+                });
+            }
+        }
+
+        // If `-Z randomize-layout` was enabled for the type definition, then we can
+        // consider performing layout randomization.
+        if self.sess.opts.unstable_opts.randomize_layout {
+            flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
+        }
+
+        // This is here instead of in the layout code because the choice must make it into metadata.
+        if !self.consider_optimizing(|| format!("Reorder fields of {:?}", self.def_path_str(did))) {
+            flags.insert(ReprFlags::IS_LINEAR);
+        }
+
+        ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
+    }
+
     /// Look up the name of a definition across crates. This does not look at HIR.
     pub fn opt_item_name(self, def_id: DefId) -> Option<Symbol> {
         if let Some(cnum) = def_id.as_crate_root() {
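
The `IntegerType` matched on above is new in `rustc_abi`: it folds the old `attr::IntType` split between pointer-sized and fixed-width discriminants into one enum. A toy, self-contained sketch of that mapping (re-declarations for illustration, not the `rustc_abi` definitions):

    // Toy mirror of the match above: #[repr(i16)] becomes Fixed(I16, true),
    // #[repr(usize)] becomes Pointer(false).
    #[derive(Debug, PartialEq)]
    enum Integer { I8, I16, I32, I64, I128 }

    #[derive(Debug, PartialEq)]
    enum IntegerType {
        Pointer(bool),        // isize/usize; the bool is signedness
        Fixed(Integer, bool), // fixed width; the bool is signedness
    }

    fn main() {
        let repr_i16 = IntegerType::Fixed(Integer::I16, true);
        let repr_usize = IntegerType::Pointer(false);
        assert_ne!(repr_i16, repr_usize);
        println!("{:?} / {:?}", repr_i16, repr_usize);
    }
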
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index f72e236eda1..6561c4c278d 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -8,8 +8,6 @@ use crate::ty::{
 };
 use crate::ty::{GenericArgKind, SubstsRef};
 use rustc_apfloat::Float as _;
-use rustc_ast as ast;
-use rustc_attr::{self as attr, SignedInt, UnsignedInt};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_errors::ErrorGuaranteed;
@@ -19,7 +17,7 @@ use rustc_hir::def_id::DefId;
 use rustc_index::bit_set::GrowableBitSet;
 use rustc_macros::HashStable;
 use rustc_span::{sym, DUMMY_SP};
-use rustc_target::abi::{Integer, Size, TargetDataLayout};
+use rustc_target::abi::{Integer, IntegerType, Size, TargetDataLayout};
 use rustc_target::spec::abi::Abi;
 use smallvec::SmallVec;
 use std::{fmt, iter};
@@ -104,21 +102,12 @@ pub trait IntTypeExt {
     fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
 }
 
-impl IntTypeExt for attr::IntType {
+impl IntTypeExt for IntegerType {
     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
-        match *self {
-            SignedInt(ast::IntTy::I8) => tcx.types.i8,
-            SignedInt(ast::IntTy::I16) => tcx.types.i16,
-            SignedInt(ast::IntTy::I32) => tcx.types.i32,
-            SignedInt(ast::IntTy::I64) => tcx.types.i64,
-            SignedInt(ast::IntTy::I128) => tcx.types.i128,
-            SignedInt(ast::IntTy::Isize) => tcx.types.isize,
-            UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
-            UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
-            UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
-            UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
-            UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
-            UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
+        match self {
+            IntegerType::Pointer(true) => tcx.types.isize,
+            IntegerType::Pointer(false) => tcx.types.usize,
+            IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
         }
     }
 
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 96ea15f1b80..be0aa0fc4c1 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -65,7 +65,7 @@ fn variant_discriminants<'tcx>(
         Variants::Multiple { variants, .. } => variants
             .iter_enumerated()
             .filter_map(|(idx, layout)| {
-                (layout.abi() != Abi::Uninhabited)
+                (layout.abi != Abi::Uninhabited)
                     .then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
             })
             .collect(),
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index 1ce3a613dc7..3b1b33aa095 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -11,7 +11,7 @@ use crate::{lint, HashStableContext};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 
 use rustc_data_structures::stable_hasher::ToStableHashKey;
-use rustc_target::abi::{Align, TargetDataLayout};
+use rustc_target::abi::Align;
 use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo};
 use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS};
 
@@ -900,7 +900,7 @@ fn default_configuration(sess: &Session) -> CrateConfig {
     let min_atomic_width = sess.target.min_atomic_width();
     let max_atomic_width = sess.target.max_atomic_width();
     let atomic_cas = sess.target.atomic_cas;
-    let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
+    let layout = sess.target.parse_data_layout().unwrap_or_else(|err| {
         sess.emit_fatal(err);
     });
 
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index fc37fdb1c43..568c916a163 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
 bitflags = "1.2.1"
 tracing = "0.1"
 serde_json = "1.0.59"
+rustc_abi = { path = "../rustc_abi" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_feature = { path = "../rustc_feature" }
 rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 0c559ec04a4..a5ffaebea0b 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -262,7 +262,7 @@ impl CastTarget {
         let mut size = self.rest.total;
         for i in 0..self.prefix.iter().count() {
             match self.prefix[i] {
-                Some(v) => size += Size { raw: v.size.bytes() },
+                Some(v) => size += v.size,
                 None => {}
             }
         }
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 1b74959ad17..ec8f20fe692 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -87,8 +87,8 @@ where
         _ => {}
     }
 
-    if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
-        offset.raw += 4 - (offset.raw % 4);
+    if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
+        offset += Size::from_bytes(4 - (offset.bytes() % 4));
     }
     data = arg_scalar(cx, &scalar2, offset, data);
     return data;
@@ -169,14 +169,14 @@ where
                     has_float: false,
                     arg_attribute: ArgAttribute::default(),
                 },
-                Size { raw: 0 },
+                Size::ZERO,
             );
 
             if data.has_float {
                 // Structure { float, int, int } doesn't like to be handled like
                 // { float, long int }. Other way around it doesn't mind.
                 if data.last_offset < arg.layout.size
-                    && (data.last_offset.raw % 8) != 0
+                    && (data.last_offset.bytes() % 8) != 0
                     && data.prefix_index < data.prefix.len()
                 {
                     data.prefix[data.prefix_index] = Some(Reg::i32());
@@ -185,7 +185,7 @@ where
                 }
 
                 let mut rest_size = arg.layout.size - data.last_offset;
-                if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+                if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
                     data.prefix[data.prefix_index] = Some(Reg::i32());
                     rest_size = rest_size - Reg::i32().size;
                 }
@@ -214,13 +214,13 @@ where
     C: HasDataLayout,
 {
     if !fn_abi.ret.is_ignore() {
-        classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+        classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
     }
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg(cx, arg, Size { raw: 16 });
+        classify_arg(cx, arg, Size::from_bytes(16));
     }
 }
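
All the sparc64 changes replace direct pokes at the now-private `raw` field with `Size::from_bytes`, `Size::bytes`, and `+=`. The padding step itself is the usual round-up-to-alignment computation; a quick standalone check of that equivalence on plain `u64` values (not the `Size` type itself):

    // The `4 - (offset % 4)` padding above equals rounding up with a
    // power-of-two mask, which is how Size::align_to is implemented.
    fn align_up(offset: u64, align: u64) -> u64 {
        let mask = align - 1; // align must be a power of two
        (offset + mask) & !mask
    }

    fn main() {
        for offset in [0u64, 1, 3, 4, 5, 7, 8, 9] {
            let padded =
                if offset % 4 != 0 { offset + (4 - offset % 4) } else { offset };
            assert_eq!(padded, align_up(offset, 4));
        }
        println!("both round-up formulas agree");
    }
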
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index decbefc2f7c..53c9878ab87 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -2,284 +2,16 @@ pub use Integer::*;
 pub use Primitive::*;
 
 use crate::json::{Json, ToJson};
-use crate::spec::Target;
 
-use std::convert::{TryFrom, TryInto};
 use std::fmt;
-use std::iter::Step;
-use std::num::{NonZeroUsize, ParseIntError};
-use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
-use std::str::FromStr;
+use std::ops::Deref;
 
 use rustc_data_structures::intern::Interned;
-use rustc_index::vec::{Idx, IndexVec};
 use rustc_macros::HashStable_Generic;
 
 pub mod call;
 
-/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
-/// for a target, which contains everything needed to compute layouts.
-pub struct TargetDataLayout {
-    pub endian: Endian,
-    pub i1_align: AbiAndPrefAlign,
-    pub i8_align: AbiAndPrefAlign,
-    pub i16_align: AbiAndPrefAlign,
-    pub i32_align: AbiAndPrefAlign,
-    pub i64_align: AbiAndPrefAlign,
-    pub i128_align: AbiAndPrefAlign,
-    pub f32_align: AbiAndPrefAlign,
-    pub f64_align: AbiAndPrefAlign,
-    pub pointer_size: Size,
-    pub pointer_align: AbiAndPrefAlign,
-    pub aggregate_align: AbiAndPrefAlign,
-
-    /// Alignments for vector types.
-    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
-
-    pub instruction_address_space: AddressSpace,
-
-    /// Minimum size of #[repr(C)] enums (default: `I32`, i.e. 32 bits).
-    pub c_enum_min_size: Integer,
-}
-
-impl Default for TargetDataLayout {
-    /// Creates an instance of `TargetDataLayout`.
-    fn default() -> TargetDataLayout {
-        let align = |bits| Align::from_bits(bits).unwrap();
-        TargetDataLayout {
-            endian: Endian::Big,
-            i1_align: AbiAndPrefAlign::new(align(8)),
-            i8_align: AbiAndPrefAlign::new(align(8)),
-            i16_align: AbiAndPrefAlign::new(align(16)),
-            i32_align: AbiAndPrefAlign::new(align(32)),
-            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
-            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
-            f32_align: AbiAndPrefAlign::new(align(32)),
-            f64_align: AbiAndPrefAlign::new(align(64)),
-            pointer_size: Size::from_bits(64),
-            pointer_align: AbiAndPrefAlign::new(align(64)),
-            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
-            vector_align: vec![
-                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
-                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
-            ],
-            instruction_address_space: AddressSpace::DATA,
-            c_enum_min_size: Integer::I32,
-        }
-    }
-}
-
-pub enum TargetDataLayoutErrors<'a> {
-    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
-    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
-    MissingAlignment { cause: &'a str },
-    InvalidAlignment { cause: &'a str, err: String },
-    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
-    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
-    InvalidBitsSize { err: String },
-}
-
-impl TargetDataLayout {
-    pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
-        // Parse an address space index from a string.
-        let parse_address_space = |s: &'a str, cause: &'a str| {
-            s.parse::<u32>().map(AddressSpace).map_err(|err| {
-                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
-            })
-        };
-
-        // Parse a bit count from a string.
-        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
-            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
-                kind,
-                bit: s,
-                cause,
-                err,
-            })
-        };
-
-        // Parse a size string.
-        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
-
-        // Parse an alignment string.
-        let align = |s: &[&'a str], cause: &'a str| {
-            if s.is_empty() {
-                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
-            }
-            let align_from_bits = |bits| {
-                Align::from_bits(bits)
-                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
-            };
-            let abi = parse_bits(s[0], "alignment", cause)?;
-            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
-            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
-        };
-
-        let mut dl = TargetDataLayout::default();
-        let mut i128_align_src = 64;
-        for spec in target.data_layout.split('-') {
-            let spec_parts = spec.split(':').collect::<Vec<_>>();
-
-            match &*spec_parts {
-                ["e"] => dl.endian = Endian::Little,
-                ["E"] => dl.endian = Endian::Big,
-                [p] if p.starts_with('P') => {
-                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
-                }
-                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
-                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
-                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
-                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
-                    dl.pointer_size = size(s, p)?;
-                    dl.pointer_align = align(a, p)?;
-                }
-                [s, ref a @ ..] if s.starts_with('i') => {
-                    let Ok(bits) = s[1..].parse::<u64>() else {
-                        size(&s[1..], "i")?; // For the user error.
-                        continue;
-                    };
-                    let a = align(a, s)?;
-                    match bits {
-                        1 => dl.i1_align = a,
-                        8 => dl.i8_align = a,
-                        16 => dl.i16_align = a,
-                        32 => dl.i32_align = a,
-                        64 => dl.i64_align = a,
-                        _ => {}
-                    }
-                    if bits >= i128_align_src && bits <= 128 {
-                        // Default alignment for i128 is decided by taking the alignment of
-                        // the largest-sized i{64..=128}.
-                        i128_align_src = bits;
-                        dl.i128_align = a;
-                    }
-                }
-                [s, ref a @ ..] if s.starts_with('v') => {
-                    let v_size = size(&s[1..], "v")?;
-                    let a = align(a, s)?;
-                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
-                        v.1 = a;
-                        continue;
-                    }
-                    // No existing entry, add a new one.
-                    dl.vector_align.push((v_size, a));
-                }
-                _ => {} // Ignore everything else.
-            }
-        }
-
-        // Perform consistency checks against the Target information.
-        if dl.endian != target.endian {
-            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
-                dl: dl.endian.as_str(),
-                target: target.endian.as_str(),
-            });
-        }
-
-        let target_pointer_width: u64 = target.pointer_width.into();
-        if dl.pointer_size.bits() != target_pointer_width {
-            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
-                pointer_size: dl.pointer_size.bits(),
-                target: target.pointer_width,
-            });
-        }
-
-        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
-            Ok(bits) => bits,
-            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
-        };
-
-        Ok(dl)
-    }
-
-    /// Returns exclusive upper bound on object size.
-    ///
-    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
-    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
-    /// index every address within an object along with one byte past the end, while also allowing
-    /// `isize` to store the difference between any two pointers into an object.
-    ///
-    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
-    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
-    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
-    /// address space on 64-bit ARMv8 and x86_64.
-    #[inline]
-    pub fn obj_size_bound(&self) -> u64 {
-        match self.pointer_size.bits() {
-            16 => 1 << 15,
-            32 => 1 << 31,
-            64 => 1 << 47,
-            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
-        }
-    }
-
-    #[inline]
-    pub fn ptr_sized_integer(&self) -> Integer {
-        match self.pointer_size.bits() {
-            16 => I16,
-            32 => I32,
-            64 => I64,
-            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
-        }
-    }
-
-    #[inline]
-    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
-        for &(size, align) in &self.vector_align {
-            if size == vec_size {
-                return align;
-            }
-        }
-        // Default to natural alignment, which is what LLVM does.
-        // That is, use the size, rounded up to a power of 2.
-        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
-    }
-}
-
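
The parser removed here reappears verbatim as `Target::parse_data_layout` at the end of this diff. For orientation, an LLVM data-layout string decomposes into the dash-separated specs the `match` handles; the sample string below is x86_64-unknown-linux-gnu's (an illustrative assumption, not taken from this diff):

    fn main() {
        let dl = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128";
        for spec in dl.split('-') {
            // e.g. ["e"] -> little endian, ["i64", "64"] -> 64-bit alignment for i64
            let parts: Vec<&str> = spec.split(':').collect();
            println!("{:?}", parts);
        }
    }
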
-pub trait HasDataLayout {
-    fn data_layout(&self) -> &TargetDataLayout;
-}
-
-impl HasDataLayout for TargetDataLayout {
-    #[inline]
-    fn data_layout(&self) -> &TargetDataLayout {
-        self
-    }
-}
-
-/// Endianness of the target, which must match cfg(target-endian).
-#[derive(Copy, Clone, PartialEq)]
-pub enum Endian {
-    Little,
-    Big,
-}
-
-impl Endian {
-    pub fn as_str(&self) -> &'static str {
-        match self {
-            Self::Little => "little",
-            Self::Big => "big",
-        }
-    }
-}
-
-impl fmt::Debug for Endian {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.write_str(self.as_str())
-    }
-}
-
-impl FromStr for Endian {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "little" => Ok(Self::Little),
-            "big" => Ok(Self::Big),
-            _ => Err(format!(r#"unknown endian: "{}""#, s)),
-        }
-    }
-}
+pub use rustc_abi::*;
 
 impl ToJson for Endian {
     fn to_json(&self) -> Json {
@@ -287,1030 +19,15 @@ impl ToJson for Endian {
     }
 }
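
The single `pub use rustc_abi::*;` above is what keeps every existing `rustc_target::abi::...` import compiling after the move. The same facade pattern in miniature, with made-up module names:

    mod new_home {
        pub struct Size(pub u64); // the moved definition
    }

    mod abi {
        // The old path re-exports everything from the new home, so callers
        // that name `abi::Size` keep working unchanged.
        pub use crate::new_home::*;
    }

    fn main() {
        let s = abi::Size(8);
        println!("{} bytes", s.0);
    }
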
 
-/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Size {
-    raw: u64,
-}
-
-// This is debug-printed a lot in larger structs; don't waste too much space there.
-impl fmt::Debug for Size {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Size({} bytes)", self.bytes())
-    }
-}
-
-impl Size {
-    pub const ZERO: Size = Size { raw: 0 };
-
-    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
-    /// not a multiple of 8.
-    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
-        let bits = bits.try_into().ok().unwrap();
-        // Avoid potential overflow from `bits + 7`.
-        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
-    }
-
-    #[inline]
-    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
-        let bytes: u64 = bytes.try_into().ok().unwrap();
-        Size { raw: bytes }
-    }
-
-    #[inline]
-    pub fn bytes(self) -> u64 {
-        self.raw
-    }
-
-    #[inline]
-    pub fn bytes_usize(self) -> usize {
-        self.bytes().try_into().unwrap()
-    }
-
-    #[inline]
-    pub fn bits(self) -> u64 {
-        #[cold]
-        fn overflow(bytes: u64) -> ! {
-            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
-        }
-
-        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
-    }
-
-    #[inline]
-    pub fn bits_usize(self) -> usize {
-        self.bits().try_into().unwrap()
-    }
-
-    #[inline]
-    pub fn align_to(self, align: Align) -> Size {
-        let mask = align.bytes() - 1;
-        Size::from_bytes((self.bytes() + mask) & !mask)
-    }
-
-    #[inline]
-    pub fn is_aligned(self, align: Align) -> bool {
-        let mask = align.bytes() - 1;
-        self.bytes() & mask == 0
-    }
-
-    #[inline]
-    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
-        let dl = cx.data_layout();
-
-        let bytes = self.bytes().checked_add(offset.bytes())?;
-
-        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
-    }
-
-    #[inline]
-    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
-        let dl = cx.data_layout();
-
-        let bytes = self.bytes().checked_mul(count)?;
-        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
-    }
-
-    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
-    /// (i.e., if it is negative, fill with 1's on the left).
-    #[inline]
-    pub fn sign_extend(self, value: u128) -> u128 {
-        let size = self.bits();
-        if size == 0 {
-            // Truncated until nothing is left.
-            return 0;
-        }
-        // Sign-extend it.
-        let shift = 128 - size;
-        // Shift the unsigned value to the left, then shift back to the right as signed
-        // (essentially fills with sign bit on the left).
-        (((value << shift) as i128) >> shift) as u128
-    }
-
-    /// Truncates `value` to `self` bits.
-    #[inline]
-    pub fn truncate(self, value: u128) -> u128 {
-        let size = self.bits();
-        if size == 0 {
-            // Truncated until nothing is left.
-            return 0;
-        }
-        let shift = 128 - size;
-        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
-        (value << shift) >> shift
-    }
-
-    #[inline]
-    pub fn signed_int_min(&self) -> i128 {
-        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
-    }
-
-    #[inline]
-    pub fn signed_int_max(&self) -> i128 {
-        i128::MAX >> (128 - self.bits())
-    }
-
-    #[inline]
-    pub fn unsigned_int_max(&self) -> u128 {
-        u128::MAX >> (128 - self.bits())
-    }
-}
-
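
The shift trick in `sign_extend` and `truncate` is easy to verify in isolation. A standalone re-derivation using a plain bit count instead of `Size`:

    // value << shift pushes the size's top bit to bit 127; shifting back as
    // i128 drags the sign bit down, while u128 fills with zeroes instead.
    fn sign_extend(bits: u32, value: u128) -> u128 {
        if bits == 0 { return 0; }
        let shift = 128 - bits;
        (((value << shift) as i128) >> shift) as u128
    }

    fn truncate(bits: u32, value: u128) -> u128 {
        if bits == 0 { return 0; }
        let shift = 128 - bits;
        (value << shift) >> shift
    }

    fn main() {
        assert_eq!(truncate(8, 0x1_23), 0x23);           // high bits dropped
        assert_eq!(sign_extend(8, 0xfe), u128::MAX - 1); // 0xfe is -2 as an i8
        assert_eq!(sign_extend(8, 0x7f), 0x7f);          // positive stays put
    }
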
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid these during layout computation; return `LayoutError` instead.
-
-impl Add for Size {
-    type Output = Size;
-    #[inline]
-    fn add(self, other: Size) -> Size {
-        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
-            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
-        }))
-    }
-}
-
-impl Sub for Size {
-    type Output = Size;
-    #[inline]
-    fn sub(self, other: Size) -> Size {
-        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
-            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
-        }))
-    }
-}
-
-impl Mul<Size> for u64 {
-    type Output = Size;
-    #[inline]
-    fn mul(self, size: Size) -> Size {
-        size * self
-    }
-}
-
-impl Mul<u64> for Size {
-    type Output = Size;
-    #[inline]
-    fn mul(self, count: u64) -> Size {
-        match self.bytes().checked_mul(count) {
-            Some(bytes) => Size::from_bytes(bytes),
-            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
-        }
-    }
-}
-
-impl AddAssign for Size {
-    #[inline]
-    fn add_assign(&mut self, other: Size) {
-        *self = *self + other;
-    }
-}
-
-impl Step for Size {
-    #[inline]
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
-        u64::steps_between(&start.bytes(), &end.bytes())
-    }
-
-    #[inline]
-    fn forward_checked(start: Self, count: usize) -> Option<Self> {
-        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
-    }
-
-    #[inline]
-    fn forward(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::forward(start.bytes(), count))
-    }
-
-    #[inline]
-    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
-    }
-
-    #[inline]
-    fn backward_checked(start: Self, count: usize) -> Option<Self> {
-        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
-    }
-
-    #[inline]
-    fn backward(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::backward(start.bytes(), count))
-    }
-
-    #[inline]
-    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
-        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
-    }
-}
-
-/// Alignment of a type in bytes (always a power of two).
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-#[derive(HashStable_Generic)]
-pub struct Align {
-    pow2: u8,
-}
-
-// This is debug-printed a lot in larger structs; don't waste too much space there.
-impl fmt::Debug for Align {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Align({} bytes)", self.bytes())
-    }
-}
-
-impl Align {
-    pub const ONE: Align = Align { pow2: 0 };
-    pub const MAX: Align = Align { pow2: 29 };
-
-    #[inline]
-    pub fn from_bits(bits: u64) -> Result<Align, String> {
-        Align::from_bytes(Size::from_bits(bits).bytes())
-    }
-
-    #[inline]
-    pub fn from_bytes(align: u64) -> Result<Align, String> {
-        // Treat an alignment of 0 bytes like 1-byte alignment.
-        if align == 0 {
-            return Ok(Align::ONE);
-        }
-
-        #[cold]
-        fn not_power_of_2(align: u64) -> String {
-            format!("`{}` is not a power of 2", align)
-        }
-
-        #[cold]
-        fn too_large(align: u64) -> String {
-            format!("`{}` is too large", align)
-        }
-
-        let mut bytes = align;
-        let mut pow2: u8 = 0;
-        while (bytes & 1) == 0 {
-            pow2 += 1;
-            bytes >>= 1;
-        }
-        if bytes != 1 {
-            return Err(not_power_of_2(align));
-        }
-        if pow2 > Self::MAX.pow2 {
-            return Err(too_large(align));
-        }
-
-        Ok(Align { pow2 })
-    }
-
-    #[inline]
-    pub fn bytes(self) -> u64 {
-        1 << self.pow2
-    }
-
-    #[inline]
-    pub fn bits(self) -> u64 {
-        self.bytes() * 8
-    }
-
-    /// Computes the best alignment possible for the given offset
-    /// (the largest power of two that the offset is a multiple of).
-    ///
-    /// N.B., for an offset of `0`, this happens to return `2^64`.
-    #[inline]
-    pub fn max_for_offset(offset: Size) -> Align {
-        Align { pow2: offset.bytes().trailing_zeros() as u8 }
-    }
-
-    /// Lower the alignment, if necessary, such that the given offset
-    /// is aligned to it (the offset is a multiple of the alignment).
-    #[inline]
-    pub fn restrict_for_offset(self, offset: Size) -> Align {
-        self.min(Align::max_for_offset(offset))
-    }
-}
-
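
Because `Align` stores only the base-2 exponent, its conversions reduce to bit tricks. A sketch of the two interesting ones (not the rustc API; the real `from_bytes` additionally maps an alignment of 0 to `Align::ONE`):

    // from_bytes boils down to a power-of-two check plus trailing_zeros;
    // max_for_offset is trailing_zeros directly.
    fn pow2_exponent(align_bytes: u64) -> Option<u8> {
        align_bytes
            .is_power_of_two()
            .then(|| align_bytes.trailing_zeros() as u8)
    }

    fn main() {
        assert_eq!(pow2_exponent(8), Some(3));   // Align { pow2: 3 }
        assert_eq!(pow2_exponent(12), None);     // "not a power of 2"
        // max_for_offset(Size::from_bytes(24)): 24 = 0b11000, so 8-byte aligned.
        assert_eq!(24u64.trailing_zeros(), 3);
    }
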
-/// A pair of alignments, ABI-mandated and preferred.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub struct AbiAndPrefAlign {
-    pub abi: Align,
-    pub pref: Align,
-}
-
-impl AbiAndPrefAlign {
-    #[inline]
-    pub fn new(align: Align) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: align, pref: align }
-    }
-
-    #[inline]
-    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
-    }
-
-    #[inline]
-    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
-        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
-    }
-}
-
-/// Integers, also used for enum discriminants.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
-pub enum Integer {
-    I8,
-    I16,
-    I32,
-    I64,
-    I128,
-}
-
-impl Integer {
-    #[inline]
-    pub fn size(self) -> Size {
-        match self {
-            I8 => Size::from_bytes(1),
-            I16 => Size::from_bytes(2),
-            I32 => Size::from_bytes(4),
-            I64 => Size::from_bytes(8),
-            I128 => Size::from_bytes(16),
-        }
-    }
-
-    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
-        let dl = cx.data_layout();
-
-        match self {
-            I8 => dl.i8_align,
-            I16 => dl.i16_align,
-            I32 => dl.i32_align,
-            I64 => dl.i64_align,
-            I128 => dl.i128_align,
-        }
-    }
-
-    /// Finds the smallest Integer type which can represent the signed value.
-    #[inline]
-    pub fn fit_signed(x: i128) -> Integer {
-        match x {
-            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
-            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
-            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
-            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
-            _ => I128,
-        }
-    }
-
-    /// Finds the smallest Integer type which can represent the unsigned value.
-    #[inline]
-    pub fn fit_unsigned(x: u128) -> Integer {
-        match x {
-            0..=0x0000_0000_0000_00ff => I8,
-            0..=0x0000_0000_0000_ffff => I16,
-            0..=0x0000_0000_ffff_ffff => I32,
-            0..=0xffff_ffff_ffff_ffff => I64,
-            _ => I128,
-        }
-    }
-
-    /// Finds the smallest integer with the given alignment.
-    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
-        let dl = cx.data_layout();
-
-        for candidate in [I8, I16, I32, I64, I128] {
-            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
-                return Some(candidate);
-            }
-        }
-        None
-    }
-
-    /// Find the largest integer with the given alignment or less.
-    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
-        let dl = cx.data_layout();
-
-        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
-        for candidate in [I64, I32, I16] {
-            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
-                return candidate;
-            }
-        }
-        I8
-    }
-
-    // FIXME(eddyb) consolidate this and other methods that find the appropriate
-    // `Integer` given some requirements.
-    #[inline]
-    fn from_size(size: Size) -> Result<Self, String> {
-        match size.bits() {
-            8 => Ok(Integer::I8),
-            16 => Ok(Integer::I16),
-            32 => Ok(Integer::I32),
-            64 => Ok(Integer::I64),
-            128 => Ok(Integer::I128),
-            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
-        }
-    }
-}
-
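
The `fit_signed` boundaries are the usual two's-complement ranges, and an off-by-one here would change discriminant sizes, so they are worth spot-checking. A plain reimplementation with edge cases:

    // Returns the bit width of the smallest signed integer that holds x
    // (mirrors the ranges in fit_signed above; not rustc code).
    fn fit_signed_bits(x: i128) -> u32 {
        match x {
            -0x80..=0x7f => 8,
            -0x8000..=0x7fff => 16,
            -0x8000_0000..=0x7fff_ffff => 32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => 64,
            _ => 128,
        }
    }

    fn main() {
        assert_eq!(fit_signed_bits(127), 8);  // i8::MAX fits in I8
        assert_eq!(fit_signed_bits(128), 16); // one past it needs I16
        assert_eq!(fit_signed_bits(-129), 16);
    }
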
-/// Fundamental unit of memory access and layout.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Primitive {
-    /// The `bool` is the signedness of the `Integer` type.
-    ///
-    /// One would think we would not care about such details this low down,
-    /// but some ABIs are described in terms of C types and ISAs where the
-    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
-    /// a negative integer passed by zero-extension will appear positive in
-    /// the callee, and most operations on it will produce the wrong values.
-    Int(Integer, bool),
-    F32,
-    F64,
-    Pointer,
-}
-
-impl Primitive {
-    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
-        let dl = cx.data_layout();
-
-        match self {
-            Int(i, _) => i.size(),
-            F32 => Size::from_bits(32),
-            F64 => Size::from_bits(64),
-            Pointer => dl.pointer_size,
-        }
-    }
-
-    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
-        let dl = cx.data_layout();
-
-        match self {
-            Int(i, _) => i.align(dl),
-            F32 => dl.f32_align,
-            F64 => dl.f64_align,
-            Pointer => dl.pointer_align,
-        }
-    }
-
-    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
-    #[inline]
-    pub fn is_float(self) -> bool {
-        matches!(self, F32 | F64)
-    }
-
-    // FIXME(eddyb) remove, it's completely unused.
-    #[inline]
-    pub fn is_int(self) -> bool {
-        matches!(self, Int(..))
-    }
-
-    #[inline]
-    pub fn is_ptr(self) -> bool {
-        matches!(self, Pointer)
-    }
-}
-
-/// Inclusive wrap-around range of valid values, that is, if
-/// start > end, it represents `start..=MAX`,
-/// followed by `0..=end`.
-///
-/// That is, for an i8 primitive, a range of `254..=2` means the following
-/// sequence:
-///
-///    254 (-2), 255 (-1), 0, 1, 2
-///
-/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-#[derive(HashStable_Generic)]
-pub struct WrappingRange {
-    pub start: u128,
-    pub end: u128,
-}
-
-impl WrappingRange {
-    pub fn full(size: Size) -> Self {
-        Self { start: 0, end: size.unsigned_int_max() }
-    }
-
-    /// Returns `true` if `v` is contained in the range.
-    #[inline(always)]
-    pub fn contains(&self, v: u128) -> bool {
-        if self.start <= self.end {
-            self.start <= v && v <= self.end
-        } else {
-            self.start <= v || v <= self.end
-        }
-    }
-
-    /// Returns `self` with replaced `start`
-    #[inline(always)]
-    pub fn with_start(mut self, start: u128) -> Self {
-        self.start = start;
-        self
-    }
-
-    /// Returns `self` with replaced `end`
-    #[inline(always)]
-    pub fn with_end(mut self, end: u128) -> Self {
-        self.end = end;
-        self
-    }
-
-    /// Returns `true` if `size` completely fills the range.
-    #[inline]
-    pub fn is_full_for(&self, size: Size) -> bool {
-        let max_value = size.unsigned_int_max();
-        debug_assert!(self.start <= max_value && self.end <= max_value);
-        self.start == (self.end.wrapping_add(1) & max_value)
-    }
-}
-
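
The wrap-around membership test is small enough to copy out and poke at directly. For an i8-sized scalar, `254..=2` is the sequence from the doc comment above:

    struct WrappingRange { start: u128, end: u128 }

    impl WrappingRange {
        // Identical logic to `contains` above: a wrapped range (start > end)
        // is the union of start..=MAX and 0..=end.
        fn contains(&self, v: u128) -> bool {
            if self.start <= self.end {
                self.start <= v && v <= self.end
            } else {
                self.start <= v || v <= self.end
            }
        }
    }

    fn main() {
        let r = WrappingRange { start: 254, end: 2 };
        assert!(r.contains(255) && r.contains(0) && r.contains(2));
        assert!(!r.contains(100));
    }
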
-impl fmt::Debug for WrappingRange {
-    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if self.start > self.end {
-            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
-        } else {
-            write!(fmt, "{}..={}", self.start, self.end)?;
-        }
-        Ok(())
-    }
-}
-
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[derive(HashStable_Generic)]
-pub enum Scalar {
-    Initialized {
-        value: Primitive,
-
-        // FIXME(eddyb) always use the shortest range, e.g., by finding
-        // the largest space between two consecutive valid values and
-        // taking everything else as the (shortest) valid range.
-        valid_range: WrappingRange,
-    },
-    Union {
-        /// Even for unions, we need to use the correct registers for the kind of
-        /// values inside the union, so we keep the `Primitive` type around. We
-        /// also use it to compute the size of the scalar.
-        /// However, unions never have niches and even allow undef,
-        /// so there is no `valid_range`.
-        value: Primitive,
-    },
-}
-
-impl Scalar {
-    #[inline]
-    pub fn is_bool(&self) -> bool {
-        matches!(
-            self,
-            Scalar::Initialized {
-                value: Int(I8, false),
-                valid_range: WrappingRange { start: 0, end: 1 }
-            }
-        )
-    }
-
-    /// Get the primitive representation of this type, ignoring the valid range and whether the
-    /// value is allowed to be undefined (due to being a union).
-    pub fn primitive(&self) -> Primitive {
-        match *self {
-            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
-        }
-    }
-
-    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
-        self.primitive().align(cx)
-    }
-
-    pub fn size(self, cx: &impl HasDataLayout) -> Size {
-        self.primitive().size(cx)
-    }
-
-    #[inline]
-    pub fn to_union(&self) -> Self {
-        Self::Union { value: self.primitive() }
-    }
-
-    #[inline]
-    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
-        match *self {
-            Scalar::Initialized { valid_range, .. } => valid_range,
-            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
-        }
-    }
-
-    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
-    #[inline]
-    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
-        match self {
-            Scalar::Initialized { valid_range, .. } => valid_range,
-            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
-        }
-    }
-
-    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole layout.
-    #[inline]
-    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
-        match *self {
-            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
-            Scalar::Union { .. } => true,
-        }
-    }
-
-    /// Returns `true` if this type can be left uninit.
-    #[inline]
-    pub fn is_uninit_valid(&self) -> bool {
-        match *self {
-            Scalar::Initialized { .. } => false,
-            Scalar::Union { .. } => true,
-        }
-    }
-}
-
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum FieldsShape {
-    /// Scalar primitives and `!`, which never have fields.
-    Primitive,
-
-    /// All fields start at no offset. The `usize` is the field count.
-    Union(NonZeroUsize),
-
-    /// Array/vector-like placement, with all fields of identical types.
-    Array { stride: Size, count: u64 },
-
-    /// Struct-like placement, with precomputed offsets.
-    ///
-    /// Fields are guaranteed to not overlap, but note that gaps
-    /// before, between and after all the fields are NOT always
-    /// padding, and as such their contents may not be discarded.
-    /// For example, enum variants leave a gap at the start,
-    /// where the discriminant field in the enum layout goes.
-    Arbitrary {
-        /// Offsets for the first byte of each field,
-        /// ordered to match the source definition order.
-        /// This vector does not go in increasing order.
-        // FIXME(eddyb) use small vector optimization for the common case.
-        offsets: Vec<Size>,
-
-        /// Maps source order field indices to memory order indices,
-        /// depending on how the fields were reordered (if at all).
-        /// This is a permutation, with both the source order and the
-        /// memory order using the same (0..n) index ranges.
-        ///
-        /// Note that during computation of `memory_index`, sometimes
-        /// it is easier to operate on the inverse mapping (that is,
-        /// from memory order to source order), and that is usually
-        /// named `inverse_memory_index`.
-        ///
-        // FIXME(eddyb) build a better abstraction for permutations, if possible.
-        // FIXME(camlorn) also consider small vector optimization here.
-        memory_index: Vec<u32>,
-    },
-}
-
-impl FieldsShape {
-    #[inline]
-    pub fn count(&self) -> usize {
-        match *self {
-            FieldsShape::Primitive => 0,
-            FieldsShape::Union(count) => count.get(),
-            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
-        }
-    }
-
-    #[inline]
-    pub fn offset(&self, i: usize) -> Size {
-        match *self {
-            FieldsShape::Primitive => {
-                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
-            }
-            FieldsShape::Union(count) => {
-                assert!(
-                    i < count.get(),
-                    "tried to access field {} of union with {} fields",
-                    i,
-                    count
-                );
-                Size::ZERO
-            }
-            FieldsShape::Array { stride, count } => {
-                let i = u64::try_from(i).unwrap();
-                assert!(i < count);
-                stride * i
-            }
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
-        }
-    }
-
-    #[inline]
-    pub fn memory_index(&self, i: usize) -> usize {
-        match *self {
-            FieldsShape::Primitive => {
-                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
-            }
-            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
-        }
-    }
-
-    /// Gets source indices of the fields by increasing offsets.
-    #[inline]
-    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
-        let mut inverse_small = [0u8; 64];
-        let mut inverse_big = vec![];
-        let use_small = self.count() <= inverse_small.len();
-
-        // We have to write this logic twice in order to keep the array small.
-        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
-            if use_small {
-                for i in 0..self.count() {
-                    inverse_small[memory_index[i] as usize] = i as u8;
-                }
-            } else {
-                inverse_big = vec![0; self.count()];
-                for i in 0..self.count() {
-                    inverse_big[memory_index[i] as usize] = i as u32;
-                }
-            }
-        }
-
-        (0..self.count()).map(move |i| match *self {
-            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { .. } => {
-                if use_small {
-                    inverse_small[i] as usize
-                } else {
-                    inverse_big[i] as usize
-                }
-            }
-        })
-    }
-}
-
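
The `memory_index` permutation and its inverse are a recurring source of confusion, so here is the mapping worked out on a concrete three-field struct (toy arrays, not the `FieldsShape` API):

    fn main() {
        // memory_index maps source order -> memory order: source field 0 sits
        // in memory slot 1, field 1 in slot 2, field 2 in slot 0.
        let memory_index = [1usize, 2, 0];

        // index_by_increasing_offset needs the inverse: memory order -> source.
        let mut inverse = [0usize; 3];
        for (src, &mem) in memory_index.iter().enumerate() {
            inverse[mem] = src;
        }
        assert_eq!(inverse, [2, 0, 1]); // walking memory yields fields 2, 0, 1
    }
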
-/// An identifier that specifies the address space that some operation
-/// should operate on. Special address spaces have an effect on code generation,
-/// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct AddressSpace(pub u32);
-
-impl AddressSpace {
-    /// The default address space, corresponding to data space.
-    pub const DATA: Self = AddressSpace(0);
-}
-
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Abi {
-    Uninhabited,
-    Scalar(Scalar),
-    ScalarPair(Scalar, Scalar),
-    Vector {
-        element: Scalar,
-        count: u64,
-    },
-    Aggregate {
-        /// If true, the size is exact, otherwise it's only a lower bound.
-        sized: bool,
-    },
-}
-
-impl Abi {
-    /// Returns `true` if the layout corresponds to an unsized type.
-    #[inline]
-    pub fn is_unsized(&self) -> bool {
-        match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Aggregate { sized } => !sized,
-        }
-    }
-
-    #[inline]
-    pub fn is_sized(&self) -> bool {
-        !self.is_unsized()
-    }
-
-    /// Returns `true` if this is a single signed integer scalar
-    #[inline]
-    pub fn is_signed(&self) -> bool {
-        match self {
-            Abi::Scalar(scal) => match scal.primitive() {
-                Primitive::Int(_, signed) => signed,
-                _ => false,
-            },
-            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
-        }
-    }
-
-    /// Returns `true` if this is an uninhabited type
-    #[inline]
-    pub fn is_uninhabited(&self) -> bool {
-        matches!(*self, Abi::Uninhabited)
-    }
-
-    /// Returns `true` if this is a scalar type.
-    #[inline]
-    pub fn is_scalar(&self) -> bool {
-        matches!(*self, Abi::Scalar(_))
-    }
-}
-
 rustc_index::newtype_index! {
     pub struct VariantIdx {
         derive [HashStable_Generic]
     }
 }
 
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Variants<'a> {
-    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
-    Single { index: VariantIdx },
-
-    /// Enum-likes with more than one inhabited variant: each variant comes with
-    /// a *discriminant* (usually the same as the variant index but the user can
-    /// assign explicit discriminant values).  That discriminant is encoded
-    /// as a *tag* on the machine.  The layout of each variant is
-    /// a struct, and they all have space reserved for the tag.
-    /// For enums, the tag is the sole field of the layout.
-    Multiple {
-        tag: Scalar,
-        tag_encoding: TagEncoding,
-        tag_field: usize,
-        variants: IndexVec<VariantIdx, Layout<'a>>,
-    },
-}
-
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum TagEncoding {
-    /// The tag directly stores the discriminant, but possibly with a smaller layout
-    /// (so converting the tag to the discriminant can require sign extension).
-    Direct,
-
-    /// Niche (values invalid for a type) encoding the discriminant:
-    /// Discriminant and variant index coincide.
-    /// The variant `untagged_variant` contains a niche at an arbitrary
-    /// offset (field `tag_field` of the enum), which for a variant with
-    /// discriminant `d` is set to
-    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
-    ///
-    /// For example, `Option<(usize, &T)>` is represented such that
-    /// `None` has a null pointer for the second tuple field, and
-    /// `Some` is the identity function (with a non-null reference).
-    Niche {
-        untagged_variant: VariantIdx,
-        niche_variants: RangeInclusive<VariantIdx>,
-        niche_start: u128,
-    },
-}
-
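
The encoding formula in the `Niche` variant's doc comment is easiest to see on the `Option<&T>` example it cites. Assuming `None` is variant 0, `niche_variants = 0..=0`, and `niche_start = 0` (values inferred from the doc comment, not spelled out in this diff), the formula sends `None` to the null pointer:

    // tag = (d - niche_variants.start).wrapping_add(niche_start)
    fn niche_tag(d: u128, niche_variants_start: u128, niche_start: u128) -> u128 {
        d.wrapping_sub(niche_variants_start).wrapping_add(niche_start)
    }

    fn main() {
        // Option<&T>: the only niched variant is None (discriminant 0) and the
        // niche starts at the invalid pointer value 0.
        assert_eq!(niche_tag(0, 0, 0), 0); // None <-> null pointer
    }
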
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub struct Niche {
-    pub offset: Size,
-    pub value: Primitive,
-    pub valid_range: WrappingRange,
-}
-
-impl Niche {
-    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
-        let Scalar::Initialized { value, valid_range } = scalar else { return None };
-        let niche = Niche { offset, value, valid_range };
-        if niche.available(cx) > 0 { Some(niche) } else { None }
-    }
-
-    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
-        let Self { value, valid_range: v, .. } = *self;
-        let size = value.size(cx);
-        assert!(size.bits() <= 128);
-        let max_value = size.unsigned_int_max();
-
-        // Find out how many values are outside the valid range.
-        let niche = v.end.wrapping_add(1)..v.start;
-        niche.end.wrapping_sub(niche.start) & max_value
-    }
-
-    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
-        assert!(count > 0);
-
-        let Self { value, valid_range: v, .. } = *self;
-        let size = value.size(cx);
-        assert!(size.bits() <= 128);
-        let max_value = size.unsigned_int_max();
-
-        let niche = v.end.wrapping_add(1)..v.start;
-        let available = niche.end.wrapping_sub(niche.start) & max_value;
-        if count > available {
-            return None;
-        }
-
-        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
-        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
-        // This is accomplished by preferring enums with 2 variants (`count==1`) and always taking the shortest path to niche zero.
-        // Having `None` in niche zero can enable some special optimizations.
-        //
-        // Bound selection criteria:
-        // 1. Select closest to zero given wrapping semantics.
-        // 2. Avoid moving past zero if possible.
-        //
-        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
-        // If niche zero is already reserved, the selection of bounds is of little interest.
-        let move_start = |v: WrappingRange| {
-            let start = v.start.wrapping_sub(count) & max_value;
-            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
-        };
-        let move_end = |v: WrappingRange| {
-            let start = v.end.wrapping_add(1) & max_value;
-            let end = v.end.wrapping_add(count) & max_value;
-            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
-        };
-        let distance_end_zero = max_value - v.end;
-        if v.start > v.end {
-            // zero is unavailable because wrapping occurs
-            move_end(v)
-        } else if v.start <= distance_end_zero {
-            if count <= v.start {
-                move_start(v)
-            } else {
-                // moved past zero, use other bound
-                move_end(v)
-            }
-        } else {
-            let end = v.end.wrapping_add(count) & max_value;
-            let overshot_zero = (1..=v.end).contains(&end);
-            if overshot_zero {
-                // moved past zero, use other bound
-                move_start(v)
-            } else {
-                move_end(v)
-            }
-        }
-    }
-}
-
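
`Niche::available` counts values outside the valid range, with wrapping. Recomputed by hand for a bool-like scalar (8 bits, valid range `0..=1`), which is why a niche-filling `Option<bool>` stays one byte:

    fn available(valid_start: u128, valid_end: u128, bits: u32) -> u128 {
        let max_value = u128::MAX >> (128 - bits);
        // Same wrap-around subtraction as Niche::available above.
        let niche_start = valid_end.wrapping_add(1);
        valid_start.wrapping_sub(niche_start) & max_value
    }

    fn main() {
        assert_eq!(available(0, 1, 8), 254); // values 2..=255 are free for variants
    }
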
-#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
-pub struct LayoutS<'a> {
-    /// Says where the fields are located within the layout.
-    pub fields: FieldsShape,
-
-    /// Encodes information about multi-variant layouts.
-    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
-    /// shared between all variants. One of them will be the discriminant,
-    /// but e.g. generators can have more.
-    ///
-    /// To access all fields of this layout, both `fields` and the fields of the active variant
-    /// must be taken into account.
-    pub variants: Variants<'a>,
-
-    /// The `abi` defines how this data is passed between functions, and it defines
-    /// value restrictions via `valid_range`.
-    ///
-    /// Note that this is entirely orthogonal to the recursive structure defined by
-    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
-    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
-    /// have to be taken into account to find all fields of this layout.
-    pub abi: Abi,
-
-    /// The leaf scalar with the largest number of invalid values
-    /// (i.e. outside of its `valid_range`), if it exists.
-    pub largest_niche: Option<Niche>,
-
-    pub align: AbiAndPrefAlign,
-    pub size: Size,
-}
-
-impl<'a> LayoutS<'a> {
-    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.size(cx);
-        let align = scalar.align(cx);
-        LayoutS {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            abi: Abi::Scalar(scalar),
-            largest_niche,
-            size,
-            align,
-        }
-    }
-}
-
-impl<'a> fmt::Debug for LayoutS<'a> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // This is how `Layout` used to print before it became
-        // `Interned<LayoutS>`. We print it like this to avoid having to update
-        // expected output in a lot of tests.
-        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
-        f.debug_struct("Layout")
-            .field("size", size)
-            .field("align", align)
-            .field("abi", abi)
-            .field("fields", fields)
-            .field("largest_niche", largest_niche)
-            .field("variants", variants)
-            .finish()
-    }
-}
-
 #[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
 #[rustc_pass_by_value]
-pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+pub struct Layout<'a>(pub Interned<'a, LayoutS<VariantIdx>>);
 
 impl<'a> fmt::Debug for Layout<'a> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -1324,7 +41,7 @@ impl<'a> Layout<'a> {
         &self.0.0.fields
     }
 
-    pub fn variants(self) -> &'a Variants<'a> {
+    pub fn variants(self) -> &'a Variants<VariantIdx> {
         &self.0.0.variants
     }
 
@@ -1359,47 +76,12 @@ pub struct TyAndLayout<'a, Ty> {
 }
 
 impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
-    type Target = &'a LayoutS<'a>;
-    fn deref(&self) -> &&'a LayoutS<'a> {
+    type Target = &'a LayoutS<VariantIdx>;
+    fn deref(&self) -> &&'a LayoutS<VariantIdx> {
         &self.layout.0.0
     }
 }
 
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum PointerKind {
-    /// Most general case: we know of no restrictions to tell LLVM about.
-    SharedMutable,
-
-    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
-    Frozen,
-
-    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
-    UniqueBorrowed,
-
-    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
-    UniqueBorrowedPinned,
-
-    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
-    /// nor `dereferenceable`.
-    UniqueOwned,
-}
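To make the distinctions above concrete, a minimal sketch of the LLVM parameter
attributes each kind permits; the helper is hypothetical, not part of this crate:

    // Hypothetical summary of the attribute sets implied by the doc comments above.
    fn llvm_attrs(kind: PointerKind) -> &'static str {
        match kind {
            PointerKind::SharedMutable => "",
            PointerKind::Frozen => "noalias readonly dereferenceable",
            PointerKind::UniqueBorrowed => "noalias dereferenceable",
            PointerKind::UniqueBorrowedPinned => "dereferenceable",
            PointerKind::UniqueOwned => "noalias",
        }
    }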
-
-#[derive(Copy, Clone, Debug)]
-pub struct PointeeInfo {
-    pub size: Size,
-    pub align: Align,
-    pub safe: Option<PointerKind>,
-    pub address_space: AddressSpace,
-}
-
-/// Used in `might_permit_raw_init` to indicate the kind of initialisation
-/// that is checked to be valid.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum InitKind {
-    Zero,
-    UninitMitigated0x01Fill,
-}
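A minimal sketch of how the two kinds differ when checking a scalar; this is not
the `might_permit_raw_init` implementation itself, just the byte patterns it
validates against a scalar's valid range:

    // Sketch: the fill pattern each InitKind asks about.
    fn init_pattern(kind: InitKind, size_in_bytes: u64) -> u128 {
        match kind {
            InitKind::Zero => 0,
            // 0x01 repeated in every byte of the value.
            InitKind::UninitMitigated0x01Fill => {
                (0..size_in_bytes).fold(0u128, |v, _| (v << 8) | 0x01)
            }
        }
    }
    // A scalar then permits the init kind only if the pattern is a valid value,
    // e.g. scalar.valid_range(cx).contains(init_pattern(kind, size.bytes())).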
-
 /// Trait that needs to be implemented by the higher-level type representation
 /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
 pub trait TyAbiInterface<'a, C>: Sized {
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index aaba0d7f093..b69a0a645a4 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -35,10 +35,7 @@ pub mod spec;
 #[cfg(test)]
 mod tests;
 
-/// Requirements for a `StableHashingContext` to be used in this crate.
-/// This is a hack to allow using the `HashStable_Generic` derive macro
-/// instead of implementing everything in `rustc_middle`.
-pub trait HashStableContext {}
+pub use rustc_abi::HashStableContext;
 
 /// The name of rustc's own place to organize libraries.
 ///
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index c633ef1e761..bd5b10d6aa7 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -35,7 +35,10 @@
 //! to the list specified by the target, rather than replace.
 
 use crate::abi::call::Conv;
-use crate::abi::Endian;
+use crate::abi::{
+    AbiAndPrefAlign, AddressSpace, Align, Endian, Integer, Size, TargetDataLayout,
+    TargetDataLayoutErrors,
+};
 use crate::json::{Json, ToJson};
 use crate::spec::abi::{lookup as lookup_abi, Abi};
 use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
@@ -1317,6 +1320,120 @@ pub struct Target {
     pub options: TargetOptions,
 }
 
+impl Target {
+    pub fn parse_data_layout<'a>(&'a self) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
+        // Parse an address space index from a string.
+        let parse_address_space = |s: &'a str, cause: &'a str| {
+            s.parse::<u32>().map(AddressSpace).map_err(|err| {
+                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
+            })
+        };
+
+        // Parse a bit count from a string.
+        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
+            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
+                kind,
+                bit: s,
+                cause,
+                err,
+            })
+        };
+
+        // Parse a size string.
+        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
+
+        // Parse an alignment string.
+        let align = |s: &[&'a str], cause: &'a str| {
+            if s.is_empty() {
+                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
+            }
+            let align_from_bits = |bits| {
+                Align::from_bits(bits)
+                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
+            };
+            let abi = parse_bits(s[0], "alignment", cause)?;
+            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
+            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
+        };
+
+        let mut dl = TargetDataLayout::default();
+        let mut i128_align_src = 64;
+        for spec in self.data_layout.split('-') {
+            let spec_parts = spec.split(':').collect::<Vec<_>>();
+
+            match &*spec_parts {
+                ["e"] => dl.endian = Endian::Little,
+                ["E"] => dl.endian = Endian::Big,
+                [p] if p.starts_with('P') => {
+                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
+                }
+                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
+                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
+                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
+                    dl.pointer_size = size(s, p)?;
+                    dl.pointer_align = align(a, p)?;
+                }
+                [s, ref a @ ..] if s.starts_with('i') => {
+                    let Ok(bits) = s[1..].parse::<u64>() else {
+                        size(&s[1..], "i")?; // For the user error.
+                        continue;
+                    };
+                    let a = align(a, s)?;
+                    match bits {
+                        1 => dl.i1_align = a,
+                        8 => dl.i8_align = a,
+                        16 => dl.i16_align = a,
+                        32 => dl.i32_align = a,
+                        64 => dl.i64_align = a,
+                        _ => {}
+                    }
+                    if bits >= i128_align_src && bits <= 128 {
+                        // Default alignment for i128 is decided by taking the alignment of
+                        // the largest-sized i{64..=128}.
+                        i128_align_src = bits;
+                        dl.i128_align = a;
+                    }
+                }
+                [s, ref a @ ..] if s.starts_with('v') => {
+                    let v_size = size(&s[1..], "v")?;
+                    let a = align(a, s)?;
+                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
+                        v.1 = a;
+                        continue;
+                    }
+                    // No existing entry, add a new one.
+                    dl.vector_align.push((v_size, a));
+                }
+                _ => {} // Ignore everything else.
+            }
+        }
+
+        // Perform consistency checks against the Target information.
+        if dl.endian != self.endian {
+            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
+                dl: dl.endian.as_str(),
+                target: self.endian.as_str(),
+            });
+        }
+
+        let target_pointer_width: u64 = self.pointer_width.into();
+        if dl.pointer_size.bits() != target_pointer_width {
+            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
+                pointer_size: dl.pointer_size.bits(),
+                target: self.pointer_width,
+            });
+        }
+
+        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(self.c_enum_min_bits)) {
+            Ok(bits) => bits,
+            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
+        };
+
+        Ok(dl)
+    }
+}
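A usage sketch for the new `parse_data_layout` method; the data-layout string
and the panic message are illustrative, not taken from the compiler:

    // Sketch: every Target carries an LLVM-style data-layout string such as
    // "e-m:e-i64:64-f80:128-n8:16:32:64-S128"; parsing it yields sizes/alignments.
    let dl = target.parse_data_layout().expect("malformed data-layout string");
    assert_eq!(dl.pointer_size.bits(), u64::from(target.pointer_width));
    assert_eq!(dl.endian, target.endian); // also enforced by the method itself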
+
 pub trait HasTargetSpec {
     fn target_spec(&self) -> &Target;
 }
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index 9474e6df567..a432498abcc 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -12,6 +12,7 @@ rustc_hir = { path = "../rustc_hir" }
 rustc_index = { path = "../rustc_index" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
 chalk-ir = "0.87.0"
 chalk-engine = "0.87.0"
 chalk-solve = "0.87.0"
diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs
index d15707e5ced..344c8b93c17 100644
--- a/compiler/rustc_traits/src/chalk/db.rs
+++ b/compiler/rustc_traits/src/chalk/db.rs
@@ -9,9 +9,9 @@
 use rustc_middle::traits::ChalkRustInterner as RustInterner;
 use rustc_middle::ty::{self, AssocKind, EarlyBinder, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable};
 use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_target::abi::{Integer, IntegerType};
 
 use rustc_ast::ast;
-use rustc_attr as attr;
 
 use rustc_hir::def_id::DefId;
 
@@ -218,21 +218,21 @@ impl<'tcx> chalk_solve::RustIrDatabase<RustInterner<'tcx>> for RustIrDatabase<'t
             c: adt_def.repr().c(),
             packed: adt_def.repr().packed(),
             int: adt_def.repr().int.map(|i| match i {
-                attr::IntType::SignedInt(ty) => match ty {
-                    ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
-                    ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
-                    ast::IntTy::I16 => int(chalk_ir::IntTy::I16),
-                    ast::IntTy::I32 => int(chalk_ir::IntTy::I32),
-                    ast::IntTy::I64 => int(chalk_ir::IntTy::I64),
-                    ast::IntTy::I128 => int(chalk_ir::IntTy::I128),
+                IntegerType::Pointer(true) => int(chalk_ir::IntTy::Isize),
+                IntegerType::Pointer(false) => uint(chalk_ir::UintTy::Usize),
+                IntegerType::Fixed(i, true) => match i {
+                    Integer::I8 => int(chalk_ir::IntTy::I8),
+                    Integer::I16 => int(chalk_ir::IntTy::I16),
+                    Integer::I32 => int(chalk_ir::IntTy::I32),
+                    Integer::I64 => int(chalk_ir::IntTy::I64),
+                    Integer::I128 => int(chalk_ir::IntTy::I128),
                 },
-                attr::IntType::UnsignedInt(ty) => match ty {
-                    ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
-                    ast::UintTy::U8 => uint(chalk_ir::UintTy::U8),
-                    ast::UintTy::U16 => uint(chalk_ir::UintTy::U16),
-                    ast::UintTy::U32 => uint(chalk_ir::UintTy::U32),
-                    ast::UintTy::U64 => uint(chalk_ir::UintTy::U64),
-                    ast::UintTy::U128 => uint(chalk_ir::UintTy::U128),
+                IntegerType::Fixed(i, false) => match i {
+                    Integer::I8 => uint(chalk_ir::UintTy::U8),
+                    Integer::I16 => uint(chalk_ir::UintTy::U16),
+                    Integer::I32 => uint(chalk_ir::UintTy::U32),
+                    Integer::I64 => uint(chalk_ir::UintTy::U64),
+                    Integer::I128 => uint(chalk_ir::UintTy::U128),
                 },
             }),
         })
diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml
index 5e4ba473061..52fbd3ae047 100644
--- a/compiler/rustc_ty_utils/Cargo.toml
+++ b/compiler/rustc_ty_utils/Cargo.toml
@@ -4,8 +4,6 @@ version = "0.0.0"
 edition = "2021"
 
 [dependencies]
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
 tracing = "0.1"
 rustc_middle = { path = "../rustc_middle" }
 rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 07af3dc5164..7a1cc1e9e6d 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -13,13 +13,8 @@ use rustc_span::symbol::Symbol;
 use rustc_span::DUMMY_SP;
 use rustc_target::abi::*;
 
-use std::cmp::{self, Ordering};
+use std::fmt::Debug;
 use std::iter;
-use std::num::NonZeroUsize;
-use std::ops::Bound;
-
-use rand::{seq::SliceRandom, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
 
 use crate::layout_sanity_check::sanity_check_layout;
 
@@ -66,16 +61,6 @@ fn layout_of<'tcx>(
     Ok(layout)
 }
 
-#[derive(Copy, Clone, Debug)]
-enum StructKind {
-    /// A tuple, closure, or univariant which cannot be coerced to unsized.
-    AlwaysSized,
-    /// A univariant, the last field of which may be coerced to unsized.
-    MaybeUnsized,
-    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
-    Prefixed(Size, Align),
-}
-
 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
 // This is used to go between `memory_index` (source field order to memory order)
 // and `inverse_memory_index` (memory order to source field order).
@@ -89,40 +74,13 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
     inverse
 }
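// Worked example: if map = [1, 2, 0] (field 0 lands in memory slot 1, field 1
// in slot 2, field 2 in slot 0), then invert_mapping(&map) == [2, 0, 1]:
// memory slot 0 holds field 2, slot 1 holds field 0, slot 2 holds field 1.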
 
-fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
-    let dl = cx.data_layout();
-    let b_align = b.align(dl);
-    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-    let b_offset = a.size(dl).align_to(b_align.abi);
-    let size = (b_offset + b.size(dl)).align_to(align.abi);
-
-    // HACK(nox): We iterate over `b` and then `a` because `max_by_key`
-    // returns the last maximum.
-    let largest_niche = Niche::from_scalar(dl, b_offset, b)
-        .into_iter()
-        .chain(Niche::from_scalar(dl, Size::ZERO, a))
-        .max_by_key(|niche| niche.available(dl));
-
-    LayoutS {
-        variants: Variants::Single { index: VariantIdx::new(0) },
-        fields: FieldsShape::Arbitrary {
-            offsets: vec![Size::ZERO, b_offset],
-            memory_index: vec![0, 1],
-        },
-        abi: Abi::ScalarPair(a, b),
-        largest_niche,
-        align,
-        size,
-    }
-}
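// Worked example of the arithmetic above, assuming an aggregate ABI alignment
// of at most 4 bytes (as on common 64-bit targets): for a = i8 (size 1, align 1)
// and b = u32 (size 4, align 4), align = 4, b_offset = 1 rounded up to 4 = 4,
// and size = (4 + 4) rounded up to 4 = 8, i.e. [a, 3 bytes padding, b].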
-
 fn univariant_uninterned<'tcx>(
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     ty: Ty<'tcx>,
     fields: &[TyAndLayout<'_>],
     repr: &ReprOptions,
     kind: StructKind,
-) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
     let dl = cx.data_layout();
     let pack = repr.pack;
     if pack.is_some() && repr.align.is_some() {
@@ -130,226 +88,7 @@ fn univariant_uninterned<'tcx>(
         return Err(LayoutError::Unknown(ty));
     }
 
-    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
-    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
-    let optimize = !repr.inhibit_struct_field_reordering_opt();
-    if optimize {
-        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-        let optimizing = &mut inverse_memory_index[..end];
-        let effective_field_align = |f: &TyAndLayout<'_>| {
-            if let Some(pack) = pack {
-                // return the packed alignment in bytes
-                f.align.abi.min(pack).bytes()
-            } else {
-                // returns log2(effective-align).
-                // This is ok since `pack` applies to all fields equally.
-                // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-                //
-                // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-                f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
-            }
-        };
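// Worked example of the log2 key: a field of align 2 and size 6 (e.g. [u8; 6]
// with align-2) yields max(2, 6).trailing_zeros() = 1, i.e. effective align
// 2^1 = 2, so it sorts with the align-2 group rather than a higher one.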
-
-        // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
-        // the field ordering to try to catch code that makes assumptions about layouts
-        // we don't guarantee.
-        if repr.can_randomize_type_layout() {
-            // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use
-            // to randomize the field ordering with.
-            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
-            // Shuffle the ordering of the fields
-            optimizing.shuffle(&mut rng);
-
-            // Otherwise we just leave things alone and actually optimize the type's fields
-        } else {
-            match kind {
-                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                    optimizing.sort_by_key(|&x| {
-                        // Place ZSTs first to avoid "interesting offsets",
-                        // especially with only one or two non-ZST fields.
-                        // Then place the largest alignments first, and the largest niches within an alignment group last.
-                        let f = &fields[x as usize];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
-                        (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
-                    });
-                }
-
-                StructKind::Prefixed(..) => {
-                    // Sort in ascending alignment so that the layout stays optimal
-                    // regardless of the prefix.
-                    // And put the largest niche in an alignment group at the end
-                    // so it can be used as a discriminant in jagged enums.
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x as usize];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
-                        (effective_field_align(f), niche_size)
-                    });
-                }
-            }
-
-            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-            //                 regardless of the status of `-Z randomize-layout`
-        }
-    }
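// Putting the sort keys together: for struct S { a: u8, b: u32, c: (), d: u16 }
// under StructKind::AlwaysSized, the memory order becomes c (ZST first), then
// b, d, a in descending alignment groups, giving size 8 (one trailing pad byte).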
-
-    // inverse_memory_index holds field indices by increasing memory offset.
-    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-    // We now write field offsets to the corresponding offset slot;
-    // field 5 with offset 0 puts 0 in offsets[5].
-    // At the bottom of this function, we invert `inverse_memory_index` to
-    // produce `memory_index` (see `invert_mapping`).
-
-    let mut sized = true;
-    let mut offsets = vec![Size::ZERO; fields.len()];
-    let mut offset = Size::ZERO;
-    let mut largest_niche = None;
-    let mut largest_niche_available = 0;
-
-    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-        let prefix_align =
-            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-        align = align.max(AbiAndPrefAlign::new(prefix_align));
-        offset = prefix_size.align_to(prefix_align);
-    }
-
-    for &i in &inverse_memory_index {
-        let field = fields[i as usize];
-        if !sized {
-            cx.tcx.sess.delay_span_bug(
-                DUMMY_SP,
-                &format!(
-                    "univariant: field #{} of `{}` comes after unsized field",
-                    offsets.len(),
-                    ty
-                ),
-            );
-        }
-
-        if field.is_unsized() {
-            sized = false;
-        }
-
-        // Invariant: offset < dl.obj_size_bound() <= 1<<61
-        let field_align = if let Some(pack) = pack {
-            field.align.min(AbiAndPrefAlign::new(pack))
-        } else {
-            field.align
-        };
-        offset = offset.align_to(field_align.abi);
-        align = align.max(field_align);
-
-        debug!("univariant offset: {:?} field: {:#?}", offset, field);
-        offsets[i as usize] = offset;
-
-        if let Some(mut niche) = field.largest_niche {
-            let available = niche.available(dl);
-            if available > largest_niche_available {
-                largest_niche_available = available;
-                niche.offset += offset;
-                largest_niche = Some(niche);
-            }
-        }
-
-        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
-    }
-
-    if let Some(repr_align) = repr.align {
-        align = align.max(AbiAndPrefAlign::new(repr_align));
-    }
-
-    debug!("univariant min_size: {:?}", offset);
-    let min_size = offset;
-
-    // As stated above, inverse_memory_index holds field indices by increasing offset.
-    // This makes it an already-sorted view of the offsets vec.
-    // To invert it, consider:
-    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-    // Field 5 would be the first element, so memory_index[5] would be 0.
-    // Note: if we didn't optimize, it's already right.
-
-    let memory_index =
-        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
-    let size = min_size.align_to(align.abi);
-    let mut abi = Abi::Aggregate { sized };
-
-    // Unpack newtype ABIs and find scalar pairs.
-    if sized && size.bytes() > 0 {
-        // All other fields must be ZSTs.
-        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
-        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-            // We have exactly one non-ZST field.
-            (Some((i, field)), None, None) => {
-                // Field fills the struct and it has a scalar or scalar pair ABI.
-                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
-                    match field.abi {
-                        // For plain scalars, or vectors of them, we can't unpack
-                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                            abi = field.abi;
-                        }
-                        // But scalar pairs are Rust-specific and get
-                        // treated as aggregates by C ABIs anyway.
-                        Abi::ScalarPair(..) => {
-                            abi = field.abi;
-                        }
-                        _ => {}
-                    }
-                }
-            }
-
-            // Two non-ZST fields, and they're both scalars.
-            (Some((i, a)), Some((j, b)), None) => {
-                match (a.abi, b.abi) {
-                    (Abi::Scalar(a), Abi::Scalar(b)) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = scalar_pair(cx, a, b);
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-                    _ => {}
-                }
-            }
-
-            _ => {}
-        }
-    }
-
-    if fields.iter().any(|f| f.abi.is_uninhabited()) {
-        abi = Abi::Uninhabited;
-    }
-
-    Ok(LayoutS {
-        variants: Variants::Single { index: VariantIdx::new(0) },
-        fields: FieldsShape::Arbitrary { offsets, memory_index },
-        abi,
-        largest_niche,
-        align,
-        size,
-    })
+    cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
 }
 
 fn layout_of_uncached<'tcx>(
@@ -400,14 +139,7 @@ fn layout_of_uncached<'tcx>(
         }
 
         // The never type.
-        ty::Never => tcx.intern_layout(LayoutS {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            abi: Abi::Uninhabited,
-            largest_niche: None,
-            align: dl.i8_align,
-            size: Size::ZERO,
-        }),
+        ty::Never => tcx.intern_layout(cx.layout_of_never_type()),
 
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
@@ -436,7 +168,7 @@ fn layout_of_uncached<'tcx>(
             };
 
             // Effectively a (ptr, meta) tuple.
-            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
+            tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
         }
 
         ty::Dynamic(_, _, ty::DynStar) => {
@@ -444,7 +176,7 @@ fn layout_of_uncached<'tcx>(
             data.valid_range_mut().start = 0;
             let mut vtable = scalar_unit(Pointer);
             vtable.valid_range_mut().start = 1;
-            tcx.intern_layout(scalar_pair(cx, data, vtable))
+            tcx.intern_layout(cx.scalar_pair(data, vtable))
         }
 
         // Arrays and slices.
@@ -673,681 +405,41 @@ fn layout_of_uncached<'tcx>(
                     return Err(LayoutError::Unknown(ty));
                 }
 
-                let mut align =
-                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
-                if let Some(repr_align) = def.repr().align {
-                    align = align.max(AbiAndPrefAlign::new(repr_align));
-                }
-
-                let optimize = !def.repr().inhibit_union_abi_opt();
-                let mut size = Size::ZERO;
-                let mut abi = Abi::Aggregate { sized: true };
-                let index = VariantIdx::new(0);
-                for field in &variants[index] {
-                    assert!(field.is_sized());
-                    align = align.max(field.align);
-
-                    // If all non-ZST fields have the same ABI, forward this ABI
-                    if optimize && !field.is_zst() {
-                        // Discard valid range information and allow undef
-                        let field_abi = match field.abi {
-                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
-                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
-                            Abi::Vector { element: x, count } => {
-                                Abi::Vector { element: x.to_union(), count }
-                            }
-                            Abi::Uninhabited | Abi::Aggregate { .. } => {
-                                Abi::Aggregate { sized: true }
-                            }
-                        };
-
-                        if size == Size::ZERO {
-                            // First non-ZST field: initialize `abi`.
-                            abi = field_abi;
-                        } else if abi != field_abi {
-                            // different fields have different ABI: reset to Aggregate
-                            abi = Abi::Aggregate { sized: true };
-                        }
-                    }
-
-                    size = cmp::max(size, field.size);
-                }
-
-                if let Some(pack) = def.repr().pack {
-                    align = align.min(AbiAndPrefAlign::new(pack));
-                }
-
-                return Ok(tcx.intern_layout(LayoutS {
-                    variants: Variants::Single { index },
-                    fields: FieldsShape::Union(
-                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
-                    ),
-                    abi,
-                    largest_niche: None,
-                    align,
-                    size: size.align_to(align.abi),
-                }));
-            }
-
-            // A variant is absent if it's uninhabited and only has ZST fields.
-            // Present uninhabited variants only require space for their fields,
-            // but *not* an encoding of the discriminant (e.g., a tag value).
-            // See issue #49298 for more details on the need to leave space
-            // for non-ZST uninhabited data (mostly partial initialization).
-            let absent = |fields: &[TyAndLayout<'_>]| {
-                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
-                let is_zst = fields.iter().all(|f| f.is_zst());
-                uninhabited && is_zst
-            };
-            let (present_first, present_second) = {
-                let mut present_variants = variants
-                    .iter_enumerated()
-                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
-                (present_variants.next(), present_variants.next())
-            };
-            let present_first = match present_first {
-                Some(present_first) => present_first,
-                // Uninhabited because it has no variants, or only absent ones.
-                None if def.is_enum() => {
-                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
-                }
-                // If it's a struct, still compute a layout so that we can still compute the
-                // field offsets.
-                None => VariantIdx::new(0),
-            };
-
-            let is_struct = !def.is_enum() ||
-                    // Only one variant is present.
-                    (present_second.is_none() &&
-                        // Representation optimizations are allowed.
-                        !def.repr().inhibit_enum_layout_opt());
-            if is_struct {
-                // Struct, or univariant enum equivalent to a struct.
-                // (Typechecking will reject discriminant-sizing attrs.)
-
-                let v = present_first;
-                let kind = if def.is_enum() || variants[v].is_empty() {
-                    StructKind::AlwaysSized
-                } else {
-                    let param_env = tcx.param_env(def.did());
-                    let last_field = def.variant(v).fields.last().unwrap();
-                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
-                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
-                };
-
-                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
-                st.variants = Variants::Single { index: v };
-
-                if def.is_unsafe_cell() {
-                    let hide_niches = |scalar: &mut _| match scalar {
-                        Scalar::Initialized { value, valid_range } => {
-                            *valid_range = WrappingRange::full(value.size(dl))
-                        }
-                        // Already doesn't have any niches
-                        Scalar::Union { .. } => {}
-                    };
-                    match &mut st.abi {
-                        Abi::Uninhabited => {}
-                        Abi::Scalar(scalar) => hide_niches(scalar),
-                        Abi::ScalarPair(a, b) => {
-                            hide_niches(a);
-                            hide_niches(b);
-                        }
-                        Abi::Vector { element, count: _ } => hide_niches(element),
-                        Abi::Aggregate { sized: _ } => {}
-                    }
-                    st.largest_niche = None;
-                    return Ok(tcx.intern_layout(st));
-                }
-
-                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
-                match st.abi {
-                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
-                        // the asserts ensure that we are not using the
-                        // `#[rustc_layout_scalar_valid_range(n)]`
-                        // attribute to widen the range of anything as that would probably
-                        // result in UB somewhere
-                        // FIXME(eddyb) the asserts are probably not needed,
-                        // as larger validity ranges would result in missed
-                        // optimizations, *not* wrongly assuming the inner
-                        // value is valid. e.g. unions enlarge validity ranges,
-                        // because the values may be uninitialized.
-                        if let Bound::Included(start) = start {
-                            // FIXME(eddyb) this might be incorrect - it doesn't
-                            // account for wrap-around (end < start) ranges.
-                            let valid_range = scalar.valid_range_mut();
-                            assert!(valid_range.start <= start);
-                            valid_range.start = start;
-                        }
-                        if let Bound::Included(end) = end {
-                            // FIXME(eddyb) this might be incorrect - it doesn't
-                            // account for wrap-around (end < start) ranges.
-                            let valid_range = scalar.valid_range_mut();
-                            assert!(valid_range.end >= end);
-                            valid_range.end = end;
-                        }
-
-                        // Update `largest_niche` if we have introduced a larger niche.
-                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
-                        if let Some(niche) = niche {
-                            match st.largest_niche {
-                                Some(largest_niche) => {
-                                    // Replace the existing niche even if they're equal,
-                                    // because this one is at a lower offset.
-                                    if largest_niche.available(dl) <= niche.available(dl) {
-                                        st.largest_niche = Some(niche);
-                                    }
-                                }
-                                None => st.largest_niche = Some(niche),
-                            }
-                        }
-                    }
-                    _ => assert!(
-                        start == Bound::Unbounded && end == Bound::Unbounded,
-                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
-                        def,
-                        st,
-                    ),
-                }
-
-                return Ok(tcx.intern_layout(st));
-            }
-
-            // At this point, we have handled all unions and
-            // structs. (We have also handled univariant enums
-            // that allow representation optimization.)
-            assert!(def.is_enum());
-
-            // Until we've decided whether to use the tagged or
-            // niche-filling LayoutS, we don't want to intern the
-            // variant layouts, so we can't store them in the
-            // overall LayoutS. Store the overall LayoutS
-            // and the variant LayoutSs here until then.
-            struct TmpLayout<'tcx> {
-                layout: LayoutS<'tcx>,
-                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
+                return Ok(tcx.intern_layout(
+                    cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
+                ));
             }
 
-            let calculate_niche_filling_layout =
-                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
-                    // The current code for niche-filling relies on variant indices
-                    // instead of actual discriminants, so enums with
-                    // explicit discriminants (RFC #2363) would misbehave.
-                    if def.repr().inhibit_enum_layout_opt()
+            tcx.intern_layout(
+                cx.layout_of_struct_or_enum(
+                    &def.repr(),
+                    &variants,
+                    def.is_enum(),
+                    def.is_unsafe_cell(),
+                    tcx.layout_scalar_valid_range(def.did()),
+                    |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
+                    def.is_enum()
+                        .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
+                        .into_iter()
+                        .flatten(),
+                    def.repr().inhibit_enum_layout_opt()
                         || def
                             .variants()
                             .iter_enumerated()
-                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
-                    {
-                        return Ok(None);
-                    }
-
-                    if variants.len() < 2 {
-                        return Ok(None);
-                    }
-
-                    let mut align = dl.aggregate_align;
-                    let mut variant_layouts = variants
-                        .iter_enumerated()
-                        .map(|(j, v)| {
-                            let mut st = univariant_uninterned(
-                                cx,
-                                ty,
-                                v,
-                                &def.repr(),
-                                StructKind::AlwaysSized,
-                            )?;
-                            st.variants = Variants::Single { index: j };
-
-                            align = align.max(st.align);
-
-                            Ok(st)
-                        })
-                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-                    let largest_variant_index = match variant_layouts
-                        .iter_enumerated()
-                        .max_by_key(|(_i, layout)| layout.size.bytes())
-                        .map(|(i, _layout)| i)
+                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
                     {
-                        None => return Ok(None),
-                        Some(i) => i,
-                    };
-
-                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
-                    let needs_disc = |index: VariantIdx| {
-                        index != largest_variant_index && !absent(&variants[index])
-                    };
-                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
-                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
-
-                    let count = niche_variants.size_hint().1.unwrap() as u128;
-
-                    // Find the field with the largest niche
-                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
-                        [largest_variant_index]
-                        .iter()
-                        .enumerate()
-                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
-                        .max_by_key(|(_, niche)| niche.available(dl))
-                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
-                    {
-                        None => return Ok(None),
-                        Some(x) => x,
-                    };
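// Example: for Option<bool>, the largest variant Some carries a bool niche with
// 254 available values (2..=255); count is 1 (just None), so reserve succeeds
// and hands back niche_start = 2 as the encoding for the dataless variant.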
-
-                    let niche_offset = niche.offset
-                        + variant_layouts[largest_variant_index].fields.offset(field_index);
-                    let niche_size = niche.value.size(dl);
-                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
-                    let all_variants_fit =
-                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
-                            if i == largest_variant_index {
-                                return true;
-                            }
-
-                            layout.largest_niche = None;
-
-                            if layout.size <= niche_offset {
-                                // This variant will fit before the niche.
-                                return true;
-                            }
-
-                            // Determine if it'll fit after the niche.
-                            let this_align = layout.align.abi;
-                            let this_offset = (niche_offset + niche_size).align_to(this_align);
-
-                            if this_offset + layout.size > size {
-                                return false;
-                            }
-
-                            // It'll fit, but we need to make some adjustments.
-                            match layout.fields {
-                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                                    for (j, offset) in offsets.iter_mut().enumerate() {
-                                        if !variants[i][j].is_zst() {
-                                            *offset += this_offset;
-                                        }
-                                    }
-                                }
-                                _ => {
-                                    panic!("Layout of fields should be Arbitrary for variants")
+                        let param_env = tcx.param_env(def.did());
+                        def.is_struct()
+                            && match def.variants().iter().next().and_then(|x| x.fields.last()) {
+                                Some(last_field) => {
+                                    tcx.type_of(last_field.did).is_sized(tcx, param_env)
                                 }
+                                None => false,
                             }
-
-                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
-                            if !layout.abi.is_uninhabited() {
-                                layout.abi = Abi::Aggregate { sized: true };
-                            }
-                            layout.size += this_offset;
-
-                            true
-                        });
-
-                    if !all_variants_fit {
-                        return Ok(None);
-                    }
-
-                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
-                    let others_zst = variant_layouts
-                        .iter_enumerated()
-                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
-                    let same_size = size == variant_layouts[largest_variant_index].size;
-                    let same_align = align == variant_layouts[largest_variant_index].align;
-
-                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-                        Abi::Uninhabited
-                    } else if same_size && same_align && others_zst {
-                        match variant_layouts[largest_variant_index].abi {
-                            // When the total alignment and size match, we can use the
-                            // same ABI as the scalar variant with the reserved niche.
-                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                            Abi::ScalarPair(first, second) => {
-                                // Only the niche is guaranteed to be initialised,
-                                // so use union layouts for the other primitive.
-                                if niche_offset == Size::ZERO {
-                                    Abi::ScalarPair(niche_scalar, second.to_union())
-                                } else {
-                                    Abi::ScalarPair(first.to_union(), niche_scalar)
-                                }
-                            }
-                            _ => Abi::Aggregate { sized: true },
-                        }
-                    } else {
-                        Abi::Aggregate { sized: true }
-                    };
-
-                    let layout = LayoutS {
-                        variants: Variants::Multiple {
-                            tag: niche_scalar,
-                            tag_encoding: TagEncoding::Niche {
-                                untagged_variant: largest_variant_index,
-                                niche_variants,
-                                niche_start,
-                            },
-                            tag_field: 0,
-                            variants: IndexVec::new(),
-                        },
-                        fields: FieldsShape::Arbitrary {
-                            offsets: vec![niche_offset],
-                            memory_index: vec![0],
-                        },
-                        abi,
-                        largest_niche,
-                        size,
-                        align,
-                    };
-
-                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
-                };
-
-            let niche_filling_layout = calculate_niche_filling_layout()?;
-
-            let (mut min, mut max) = (i128::MAX, i128::MIN);
-            let discr_type = def.repr().discr_type();
-            let bits = Integer::from_attr(cx, discr_type).size().bits();
-            for (i, discr) in def.discriminants(tcx) {
-                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
-                    continue;
-                }
-                let mut x = discr.val as i128;
-                if discr_type.is_signed() {
-                    // sign extend the raw representation to be an i128
-                    x = (x << (128 - bits)) >> (128 - bits);
-                }
-                if x < min {
-                    min = x;
-                }
-                if x > max {
-                    max = x;
-                }
-            }
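// Sign-extension example: with #[repr(i8)], a raw discriminant value of 0xFF
// has bits = 8, so x = (0xFF << 120) >> 120 = -1 as an i128; min/max are thus
// computed over signed values (min = -1) rather than over 255.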
-            // We might have no inhabited variants, so pretend there's at least one.
-            if (min, max) == (i128::MAX, i128::MIN) {
-                min = 0;
-                max = 0;
-            }
-            assert!(min <= max, "discriminant range is {}...{}", min, max);
-            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
-
-            let mut align = dl.aggregate_align;
-            let mut size = Size::ZERO;
-
-            // We're interested in the smallest alignment, so start large.
-            let mut start_align = Align::from_bytes(256).unwrap();
-            assert_eq!(Integer::for_align(dl, start_align), None);
-
-            // repr(C) on an enum tells us to make a (tag, union) layout,
-            // so we need to grow the prefix alignment to be at least
-            // the alignment of the union. (This value is used both for
-            // determining the alignment of the overall enum, and for
-            // determining the alignment of the payload after the tag.)
-            let mut prefix_align = min_ity.align(dl).abi;
-            if def.repr().c() {
-                for fields in &variants {
-                    for field in fields {
-                        prefix_align = prefix_align.max(field.align.abi);
-                    }
-                }
-            }
-
-            // Create the set of structs that represent each variant.
-            let mut layout_variants = variants
-                .iter_enumerated()
-                .map(|(i, field_layouts)| {
-                    let mut st = univariant_uninterned(
-                        cx,
-                        ty,
-                        &field_layouts,
-                        &def.repr(),
-                        StructKind::Prefixed(min_ity.size(), prefix_align),
-                    )?;
-                    st.variants = Variants::Single { index: i };
-                    // Find the first field we can't move later
-                    // to make room for a larger discriminant.
-                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
-                        if !field.is_zst() || field.align.abi.bytes() != 1 {
-                            start_align = start_align.min(field.align.abi);
-                            break;
-                        }
-                    }
-                    size = cmp::max(size, st.size);
-                    align = align.max(st.align);
-                    Ok(st)
-                })
-                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-            // Align the maximum variant size to the largest alignment.
-            size = size.align_to(align.abi);
-
-            if size.bytes() >= dl.obj_size_bound() {
-                return Err(LayoutError::SizeOverflow(ty));
-            }
-
-            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
-            if typeck_ity < min_ity {
-                // It is a bug if layout decided on a larger discriminant size than typeck
-                // did at this point (based on the values the discriminant can take on),
-                // because the discriminant will be loaded and then stored into a variable
-                // of the type typeck computed. Consider this (buggy) case: typeck decided
-                // on a byte-sized discriminant, but layout thinks we need 16 bits to store
-                // all discriminant values. Then, in codegen, storing this 16-bit
-                // discriminant into an 8-bit temporary would have to discard some of the
-                // space necessary to represent it (or layout is wrong in thinking it
-                // needs 16 bits).
-                bug!(
-                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
-                    min_ity,
-                    typeck_ity
-                );
-                // However, it is fine to make the discr type arbitrarily large (as an
-                // optimisation) after this point – we'll just truncate the value we load
-                // in codegen.
-            }
-
-            // Check to see if we should use a different type for the
-            // discriminant. We can safely use a type with the same size
-            // as the alignment of the first field of each variant.
-            // We increase the size of the discriminant to avoid LLVM copying
-            // padding when it doesn't need to; copying that padding normally causes
-            // unaligned loads/stores and excessive memcpy/memset operations. By using a
-            // bigger integer size, LLVM can be sure about its contents and
-            // won't be so conservative.
-
-            // Use the initial field alignment
-            let mut ity = if def.repr().c() || def.repr().int.is_some() {
-                min_ity
-            } else {
-                Integer::for_align(dl, start_align).unwrap_or(min_ity)
-            };
-
-            // If the alignment is not larger than the chosen discriminant size,
-            // don't use the alignment as the final size.
-            if ity <= min_ity {
-                ity = min_ity;
-            } else {
-                // Patch up the variants' first few fields.
-                let old_ity_size = min_ity.size();
-                let new_ity_size = ity.size();
-                for variant in &mut layout_variants {
-                    match variant.fields {
-                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                            for i in offsets {
-                                if *i <= old_ity_size {
-                                    assert_eq!(*i, old_ity_size);
-                                    *i = new_ity_size;
-                                }
-                            }
-                            // We might be making the struct larger.
-                            if variant.size <= old_ity_size {
-                                variant.size = new_ity_size;
-                            }
-                        }
-                        _ => bug!(),
-                    }
-                }
-            }
-
-            let tag_mask = ity.size().unsigned_int_max();
-            let tag = Scalar::Initialized {
-                value: Int(ity, signed),
-                valid_range: WrappingRange {
-                    start: (min as u128 & tag_mask),
-                    end: (max as u128 & tag_mask),
-                },
-            };
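// Tag range example: discriminants -1..=2 over a signed I8 tag give
// start = (-1 as u128) & 0xFF = 255 and end = 2, i.e. the wrapping range
// 255..=2; values 3..=254 remain invalid and stay usable as a niche.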
-            let mut abi = Abi::Aggregate { sized: true };
-
-            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-                abi = Abi::Uninhabited;
-            } else if tag.size(dl) == size {
-                // Make sure we only use scalar layout when the enum is entirely its
-                // own tag (i.e. it has no padding nor any non-ZST variant fields).
-                abi = Abi::Scalar(tag);
-            } else {
-                // Try to use a ScalarPair for all tagged enums.
-                let mut common_prim = None;
-                let mut common_prim_initialized_in_all_variants = true;
-                for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
-                    let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
-                            bug!();
-                        };
-                    let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
-                    let (field, offset) = match (fields.next(), fields.next()) {
-                        (None, None) => {
-                            common_prim_initialized_in_all_variants = false;
-                            continue;
-                        }
-                        (Some(pair), None) => pair,
-                        _ => {
-                            common_prim = None;
-                            break;
-                        }
-                    };
-                    let prim = match field.abi {
-                        Abi::Scalar(scalar) => {
-                            common_prim_initialized_in_all_variants &=
-                                matches!(scalar, Scalar::Initialized { .. });
-                            scalar.primitive()
-                        }
-                        _ => {
-                            common_prim = None;
-                            break;
-                        }
-                    };
-                    if let Some(pair) = common_prim {
-                        // This is pretty conservative. We could go fancier
-                        // by conflating things like i32 and u32, or even
-                        // realising that (u8, u8) could just cohabit with
-                        // u16 or even u32.
-                        if pair != (prim, offset) {
-                            common_prim = None;
-                            break;
-                        }
-                    } else {
-                        common_prim = Some((prim, offset));
-                    }
-                }
-                if let Some((prim, offset)) = common_prim {
-                    let prim_scalar = if common_prim_initialized_in_all_variants {
-                        scalar_unit(prim)
-                    } else {
-                        // Common prim might be uninit.
-                        Scalar::Union { value: prim }
-                    };
-                    let pair = scalar_pair(cx, tag, prim_scalar);
-                    let pair_offsets = match pair.fields {
-                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                            assert_eq!(memory_index, &[0, 1]);
-                            offsets
-                        }
-                        _ => bug!(),
-                    };
-                    if pair_offsets[0] == Size::ZERO
-                        && pair_offsets[1] == *offset
-                        && align == pair.align
-                        && size == pair.size
-                    {
-                        // We can use `ScalarPair` only when it matches our
-                        // already computed layout (including `#[repr(C)]`).
-                        abi = pair.abi;
-                    }
-                }
-            }
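// Common-primitive example: enum E { A(u32), B(u32) } has one non-ZST u32 at
// the same offset in both variants, so common_prim is Some((u32, offset)) and
// the enum as a whole can become Abi::ScalarPair(tag, u32).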
-
-            // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
-            // variants to ensure they are consistent. This is because a downcast is
-            // semantically a NOP, and thus should not affect layout.
-            if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
-                for variant in &mut layout_variants {
-                    // We only do this for variants with fields; the others are not accessed anyway.
-                    // Also do not overwrite any already existing "clever" ABIs.
-                    if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                        variant.abi = abi;
-                        // Also need to bump up the size and alignment, so that the entire value fits in here.
-                        variant.size = cmp::max(variant.size, size);
-                        variant.align.abi = cmp::max(variant.align.abi, align.abi);
-                    }
-                }
-            }
-
-            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
-            let tagged_layout = LayoutS {
-                variants: Variants::Multiple {
-                    tag,
-                    tag_encoding: TagEncoding::Direct,
-                    tag_field: 0,
-                    variants: IndexVec::new(),
-                },
-                fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
-                largest_niche,
-                abi,
-                align,
-                size,
-            };
-
-            let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
-            let mut best_layout = match (tagged_layout, niche_filling_layout) {
-                (tl, Some(nl)) => {
-                    // Pick the smaller layout; otherwise,
-                    // pick the layout with the larger niche; otherwise,
-                    // pick tagged as it has simpler codegen.
-                    use Ordering::*;
-                    let niche_size = |tmp_l: &TmpLayout<'_>| {
-                        tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
-                    };
-                    match (
-                        tl.layout.size.cmp(&nl.layout.size),
-                        niche_size(&tl).cmp(&niche_size(&nl)),
-                    ) {
-                        (Greater, _) => nl,
-                        (Equal, Less) => nl,
-                        _ => tl,
-                    }
-                }
-                (tl, None) => tl,
-            };
-
-            // Now we can intern the variant layouts and store them in the enum layout.
-            best_layout.layout.variants = match best_layout.layout.variants {
-                Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
-                    tag,
-                    tag_encoding,
-                    tag_field,
-                    variants: best_layout
-                        .variants
-                        .into_iter()
-                        .map(|layout| tcx.intern_layout(layout))
-                        .collect(),
-                },
-                _ => bug!(),
-            };
-
-            tcx.intern_layout(best_layout.layout)
+                    },
+                )
+                .ok_or(LayoutError::SizeOverflow(ty))?,
+            )
         }
 
         // Types with no meaningful known layout.
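Two of the removed steps above are worth spelling out. First, the ScalarPair search: every variant that has any non-ZST field must have exactly one, and all of those fields must agree on both primitive and offset. A minimal sketch of that scan, using a hypothetical `Prim` enum and plain byte offsets in place of rustc's `Scalar` and `Size` (the `Scalar::Union` tracking for possibly-uninitialized variants is omitted):

    // Hypothetical stand-ins: `Prim` for rustc's Primitive, u64 byte
    // offsets for Size, and a bool flag for "this field is a ZST".
    #[derive(Clone, Copy, PartialEq)]
    enum Prim { Int, Float, Pointer }

    // Returns Some((primitive, offset)) if every variant with a non-ZST
    // field has exactly one, at a shared primitive and offset.
    fn common_prim(variants: &[Vec<(Prim, u64, bool)>]) -> Option<(Prim, u64)> {
        let mut common: Option<(Prim, u64)> = None;
        for fields in variants {
            // Zero-sized fields impose no constraint.
            let mut nonzst = fields.iter().filter(|f| !f.2);
            let (prim, offset) = match (nonzst.next(), nonzst.next()) {
                (None, None) => continue, // fieldless variant: no constraint
                (Some(&(p, o, _)), None) => (p, o),
                _ => return None, // more than one non-ZST field
            };
            match common {
                Some(pair) if pair != (prim, offset) => return None,
                _ => common = Some((prim, offset)),
            }
        }
        common
    }

The real code additionally requires the surviving field's ABI to be `Abi::Scalar`, and as the original comment says the check is deliberately conservative: it will not conflate i32 with u32, nor let `(u8, u8)` cohabit with a u16.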
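Second, the tagged-versus-niche decision reduces to a two-key comparison: pick the strictly smaller layout, break size ties in favor of the larger niche, and otherwise keep the tagged encoding for its simpler codegen. A standalone sketch, with `(size, niche_size)` pairs of plain integers standing in for `TmpLayout` (an assumption; rustc compares `Size` values and `Niche::available`):

    use std::cmp::Ordering::*;

    // One (size, largest_niche_size) pair per candidate layout.
    fn pick_tagged(tagged: (u64, u64), niche: Option<(u64, u64)>) -> bool {
        // No niche-filling candidate: tagged wins by default.
        let Some(nl) = niche else { return true };
        match (tagged.0.cmp(&nl.0), tagged.1.cmp(&nl.1)) {
            (Greater, _) => false,  // the niche layout is strictly smaller
            (Equal, Less) => false, // same size, but the niche layout has a bigger niche
            _ => true,              // otherwise keep tagged: simpler codegen
        }
    }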
@@ -1657,13 +749,13 @@ fn generator_layout<'tcx>(
 
             size = size.max(variant.size);
             align = align.max(variant.align);
-            Ok(tcx.intern_layout(variant))
+            Ok(variant)
         })
         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
     size = size.align_to(align.abi);
 
-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
         Abi::Uninhabited
     } else {
         Abi::Aggregate { sized: true }
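The generator hunk drops the per-variant `tcx.intern_layout` call, so callers now read `variant.abi` as a plain field instead of going through the interned handle's `abi()` accessor. The uninhabited-ABI rule itself is unchanged; a stand-in sketch (hypothetical `Abi` enum, not rustc's):

    // Hypothetical Abi enum, not rustc's.
    enum Abi { Uninhabited, Aggregate { sized: bool } }

    fn generator_abi(prefix_uninhabited: bool, variant_abis: &[Abi]) -> Abi {
        // Uninhabited only if the prefix is, or if every variant is
        // (`all` on an empty iterator is true, matching the real code).
        if prefix_uninhabited || variant_abis.iter().all(|a| matches!(a, Abi::Uninhabited)) {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        }
    }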
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index ee5e7bc2359..9eb8f684bdb 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -249,27 +249,27 @@ pub(super) fn sanity_check_layout<'tcx>(
         if let Variants::Multiple { variants, .. } = &layout.variants {
             for variant in variants.iter() {
                 // No nested "multiple".
-                assert!(matches!(variant.variants(), Variants::Single { .. }));
+                assert!(matches!(variant.variants, Variants::Single { .. }));
                 // Variants should have the same or a smaller size as the full thing,
                 // and same for alignment.
-                if variant.size() > layout.size {
+                if variant.size > layout.size {
                     bug!(
                         "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
                         layout.size.bytes(),
-                        variant.size().bytes(),
+                        variant.size.bytes(),
                     )
                 }
-                if variant.align().abi > layout.align.abi {
+                if variant.align.abi > layout.align.abi {
                     bug!(
                         "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
                         layout.align.abi.bytes(),
-                        variant.align().abi.bytes(),
+                        variant.align.abi.bytes(),
                     )
                 }
                 // Skip empty variants.
-                if variant.size() == Size::ZERO
-                    || variant.fields().count() == 0
-                    || variant.abi().is_uninhabited()
+                if variant.size == Size::ZERO
+                    || variant.fields.count() == 0
+                    || variant.abi.is_uninhabited()
                 {
                     // These are never actually accessed anyway, so we can skip the coherence check
                     // for them. They also fail that check, since they have
@@ -282,7 +282,7 @@ pub(super) fn sanity_check_layout<'tcx>(
                 let scalar_coherent = |s1: Scalar, s2: Scalar| {
                     s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
                 };
-                let abi_coherent = match (layout.abi, variant.abi()) {
+                let abi_coherent = match (layout.abi, variant.abi) {
                     (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
                     (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
                         scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
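The same accessor-to-field change drives the sanity-check hunk above. The invariants it enforces are easy to state in isolation; a sketch with plain integers standing in for `Size` and `Align` (an assumption, to keep it self-contained):

    // Plain integers stand in for Size/Align; `uninhabited` for abi.is_uninhabited().
    struct VariantInfo { size: u64, align: u64, field_count: usize, uninhabited: bool }

    fn check_variant(whole_size: u64, whole_align: u64, v: &VariantInfo) {
        // A variant may never be larger or more aligned than the whole enum.
        assert!(v.size <= whole_size, "variant larger than the enclosing layout");
        assert!(v.align <= whole_align, "variant overaligned vs. the enclosing layout");
        // Empty variants are never actually accessed, so the ABI coherence
        // check (which they would fail) is skipped for them.
        if v.size == 0 || v.field_count == 0 || v.uninhabited {
            return;
        }
        // ...scalar/scalar-pair coherence against the whole layout goes here.
    }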
diff --git a/src/librustdoc/html/render/print_item.rs b/src/librustdoc/html/render/print_item.rs
index 0f9b3b15c77..acbe3f22889 100644
--- a/src/librustdoc/html/render/print_item.rs
+++ b/src/librustdoc/html/render/print_item.rs
@@ -10,7 +10,7 @@ use rustc_middle::ty::layout::LayoutError;
 use rustc_middle::ty::{self, Adt, TyCtxt};
 use rustc_span::hygiene::MacroKind;
 use rustc_span::symbol::{kw, sym, Symbol};
-use rustc_target::abi::{Layout, Primitive, TagEncoding, Variants};
+use rustc_target::abi::{LayoutS, Primitive, TagEncoding, VariantIdx, Variants};
 use std::cmp::Ordering;
 use std::fmt;
 use std::rc::Rc;
@@ -1892,11 +1892,11 @@ fn document_non_exhaustive(w: &mut Buffer, item: &clean::Item) {
 }
 
 fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
-    fn write_size_of_layout(w: &mut Buffer, layout: Layout<'_>, tag_size: u64) {
-        if layout.abi().is_unsized() {
+    fn write_size_of_layout(w: &mut Buffer, layout: &LayoutS<VariantIdx>, tag_size: u64) {
+        if layout.abi.is_unsized() {
             write!(w, "(unsized)");
         } else {
-            let bytes = layout.size().bytes() - tag_size;
+            let bytes = layout.size.bytes() - tag_size;
             write!(w, "{size} byte{pl}", size = bytes, pl = if bytes == 1 { "" } else { "s" },);
         }
     }
@@ -1927,7 +1927,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
                  chapter for details on type layout guarantees.</p></div>"
             );
             w.write_str("<p><strong>Size:</strong> ");
-            write_size_of_layout(w, ty_layout.layout, 0);
+            write_size_of_layout(w, &ty_layout.layout.0, 0);
             writeln!(w, "</p>");
             if let Variants::Multiple { variants, tag, tag_encoding, .. } =
                 &ty_layout.layout.variants()
@@ -1953,7 +1953,7 @@ fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
                     for (index, layout) in variants.iter_enumerated() {
                         let name = adt.variant(index).name;
                         write!(w, "<li><code>{name}</code>: ", name = name);
-                        write_size_of_layout(w, *layout, tag_size);
+                        write_size_of_layout(w, layout, tag_size);
                         writeln!(w, "</li>");
                     }
                     w.write_str("</ul>");
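For rustdoc, `write_size_of_layout` now borrows the uninterned `LayoutS<VariantIdx>` (note the `&ty_layout.layout.0` at the call site) and reads `abi` and `size` as fields. The size arithmetic is unchanged; a sketch of it as a free function (hypothetical name, plain `u64` in place of `Size`):

    // Hypothetical free-function rendering of the same arithmetic.
    fn size_string(size_bytes: u64, tag_size: u64, is_unsized: bool) -> String {
        if is_unsized {
            "(unsized)".to_string()
        } else {
            // Variants share the enum's tag, so its bytes are not re-counted.
            let bytes = size_bytes - tag_size;
            format!("{bytes} byte{}", if bytes == 1 { "" } else { "s" })
        }
    }

The `tag_size` of 0 passed for the type as a whole matches the diff: only per-variant rows subtract the tag.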
diff --git a/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs b/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
index 88deb4565eb..adbcfd3189b 100644
--- a/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
+++ b/src/tools/clippy/clippy_lints/src/casts/cast_possible_truncation.rs
@@ -2,12 +2,11 @@ use clippy_utils::consts::{constant, Constant};
 use clippy_utils::diagnostics::span_lint;
 use clippy_utils::expr_or_init;
 use clippy_utils::ty::{get_discriminant_value, is_isize_or_usize};
-use rustc_ast::ast;
-use rustc_attr::IntType;
 use rustc_hir::def::{DefKind, Res};
 use rustc_hir::{BinOpKind, Expr, ExprKind};
 use rustc_lint::LateContext;
 use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_target::abi::IntegerType;
 
 use super::{utils, CAST_ENUM_TRUNCATION, CAST_POSSIBLE_TRUNCATION};
 
@@ -122,7 +121,7 @@ pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>,
             let cast_from_ptr_size = def.repr().int.map_or(true, |ty| {
                 matches!(
                     ty,
-                    IntType::SignedInt(ast::IntTy::Isize) | IntType::UnsignedInt(ast::UintTy::Usize)
+                    IntegerType::Pointer(_),
                 )
             });
             let suffix = match (cast_from_ptr_size, is_isize_or_usize(cast_to)) {
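The clippy hunk swaps the two `ast`-based patterns for the new `rustc_target::abi::IntegerType`, whose `Pointer` variant carries signedness, so isize and usize collapse into a single arm. A hypothetical mirror of the type's shape (the real `Fixed` variant holds rustc's `Integer` enum, not a `u8`):

    // Hypothetical mirror of the new type's shape; the bool is signedness.
    enum IntegerType { Pointer(bool), Fixed(u8, bool) }

    fn is_ptr_sized(ty: &IntegerType) -> bool {
        // Pointer(true) ~ isize, Pointer(false) ~ usize: one arm covers both.
        matches!(ty, IntegerType::Pointer(_))
    }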
diff --git a/src/tools/clippy/clippy_lints/src/lib.rs b/src/tools/clippy/clippy_lints/src/lib.rs
index b481314abed..601990cd6a3 100644
--- a/src/tools/clippy/clippy_lints/src/lib.rs
+++ b/src/tools/clippy/clippy_lints/src/lib.rs
@@ -26,7 +26,6 @@
 extern crate rustc_arena;
 extern crate rustc_ast;
 extern crate rustc_ast_pretty;
-extern crate rustc_attr;
 extern crate rustc_data_structures;
 extern crate rustc_driver;
 extern crate rustc_errors;