-rw-r--r--  compiler/rustc_abi/src/layout.rs               75
-rw-r--r--  compiler/rustc_abi/src/lib.rs                  23
-rw-r--r--  compiler/rustc_index/src/vec.rs                43
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs          3
-rw-r--r--  compiler/rustc_mir_transform/src/generator.rs   2
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs          87
-rw-r--r--  compiler/rustc_ty_utils/src/lib.rs              1
7 files changed, 133 insertions(+), 101 deletions(-)
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index aea59ee6aea..c863acde7b0 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -8,19 +8,6 @@ use rand_xoshiro::Xoshiro128StarStar;
 
 use tracing::debug;
 
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
-    let mut inverse = vec![0; map.len()];
-    for i in 0..map.len() {
-        inverse[map[i] as usize] = i as u32;
-    }
-    inverse
-}
-
 pub trait LayoutCalculator {
     type TargetDataLayoutRef: Borrow<TargetDataLayout>;
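
For context, the helper removed above inverts a permutation, and this commit replaces it (and a duplicate copy in rustc_ty_utils) with the type-safe `IndexSlice::invert_bijective_mapping` added in rustc_index below. A minimal standalone sketch of the operation, with a worked example:

    // Sketch of the inversion being centralized:
    // if map[x] = y, then invert(map)[y] = x.
    fn invert(map: &[u32]) -> Vec<u32> {
        let mut inverse = vec![0; map.len()];
        for (x, &y) in map.iter().enumerate() {
            inverse[y as usize] = x as u32;
        }
        inverse
    }

    fn main() {
        // memory_index [2, 0, 1]: source field 0 sits 3rd in memory,
        // field 1 sits 1st, field 2 sits 2nd. Inverting it lists source
        // fields by increasing memory position: [1, 2, 0].
        assert_eq!(invert(&[2, 0, 1]), vec![1, 2, 0]);
    }
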
 
@@ -45,8 +32,8 @@ pub trait LayoutCalculator {
         LayoutS {
             variants: Variants::Single { index: FIRST_VARIANT },
             fields: FieldsShape::Arbitrary {
-                offsets: vec![Size::ZERO, b_offset],
-                memory_index: vec![0, 1],
+                offsets: [Size::ZERO, b_offset].into(),
+                memory_index: [0, 1].into(),
             },
             abi: Abi::ScalarPair(a, b),
             largest_niche,
@@ -58,18 +45,18 @@ pub trait LayoutCalculator {
     fn univariant(
         &self,
         dl: &TargetDataLayout,
-        fields: &[Layout<'_>],
+        fields: &IndexSlice<FieldIdx, Layout<'_>>,
         repr: &ReprOptions,
         kind: StructKind,
     ) -> Option<LayoutS> {
         let pack = repr.pack;
         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
             let end =
                 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-            let optimizing = &mut inverse_memory_index[..end];
+            let optimizing = &mut inverse_memory_index.raw[..end];
             let effective_field_align = |layout: Layout<'_>| {
                 if let Some(pack) = pack {
                     // return the packed alignment in bytes
@@ -105,7 +92,7 @@ pub trait LayoutCalculator {
                             // Place ZSTs first to avoid "interesting offsets",
                             // especially with only one or two non-ZST fields.
                             // Then place largest alignments first, largest niches within an alignment group last
-                            let f = fields[x as usize];
+                            let f = fields[x];
                             let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                             (!f.0.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
                         });
@@ -117,7 +104,7 @@ pub trait LayoutCalculator {
                         // And put the largest niche in an alignment group at the end
                         // so it can be used as discriminant in jagged enums
                         optimizing.sort_by_key(|&x| {
-                            let f = fields[x as usize];
+                            let f = fields[x];
                             let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                             (effective_field_align(f), niche_size)
                         });
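
Both sort branches above use composite keys, and tuples compare lexicographically: `(!is_zst, Reverse(align), niche_size)` places ZSTs first, then larger alignments, and within an alignment group moves larger niches to the end. A toy demonstration of that key shape (the field data here is made up):

    use std::cmp::Reverse;

    fn main() {
        // (is_zst, align, niche_size) per field, in source order 0..4.
        let fields = [(false, 4u64, 0u64), (true, 1, 0), (false, 8, 2), (false, 8, 0)];
        let mut order: Vec<usize> = (0..fields.len()).collect();
        order.sort_by_key(|&i| {
            let (zst, align, niche) = fields[i];
            // ZSTs first, big alignments next, big niches last.
            (!zst, Reverse(align), niche)
        });
        assert_eq!(order, vec![1, 3, 2, 0]);
    }
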
@@ -135,7 +122,7 @@ pub trait LayoutCalculator {
         // At the bottom of this function, we invert `inverse_memory_index` to
         // produce `memory_index` (see `invert_bijective_mapping`).
         let mut sized = true;
-        let mut offsets = vec![Size::ZERO; fields.len()];
+        let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
         let mut offset = Size::ZERO;
         let mut largest_niche = None;
         let mut largest_niche_available = 0;
@@ -146,7 +133,7 @@ pub trait LayoutCalculator {
             offset = prefix_size.align_to(prefix_align);
         }
         for &i in &inverse_memory_index {
-            let field = &fields[i as usize];
+            let field = &fields[i];
             if !sized {
                 self.delay_bug(&format!(
                     "univariant: field #{} comes after unsized field",
@@ -168,7 +155,7 @@ pub trait LayoutCalculator {
             align = align.max(field_align);
 
             debug!("univariant offset: {:?} field: {:#?}", offset, field);
-            offsets[i as usize] = offset;
+            offsets[i] = offset;
 
             if let Some(mut niche) = field.largest_niche() {
                 let available = niche.available(dl);
@@ -192,14 +179,18 @@ pub trait LayoutCalculator {
         // If field 5 is first in memory (offset 0), inverse_memory_index[0] is 5, and memory_index[5] should be 0.
         // In general, memory_index[inverse_memory_index[i]] = i, so the two mappings are inverses:
         // Note: if we didn't optimize, it's already right.
-        let memory_index =
-            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+        let memory_index = if optimize {
+            inverse_memory_index.invert_bijective_mapping()
+        } else {
+            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
+            inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+        };
         let size = min_size.align_to(align.abi);
         let mut abi = Abi::Aggregate { sized };
         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
             // All other fields must be ZSTs.
-            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.0.is_zst());
+            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
 
             match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                 // We have exactly one non-ZST field.
@@ -238,13 +229,13 @@ pub trait LayoutCalculator {
                             let pair = self.scalar_pair(a, b);
                             let pair_offsets = match pair.fields {
                                 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                    assert_eq!(memory_index, &[0, 1]);
+                                    assert_eq!(memory_index.raw, [0, 1]);
                                     offsets
                                 }
                                 _ => panic!(),
                             };
-                            if offsets[i] == pair_offsets[0]
-                                && offsets[j] == pair_offsets[1]
+                            if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
+                                && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
                                 && align == pair.align
                                 && size == pair.size
                             {
@@ -289,7 +280,7 @@ pub trait LayoutCalculator {
     fn layout_of_struct_or_enum(
         &self,
         repr: &ReprOptions,
-        variants: &IndexSlice<VariantIdx, Vec<Layout<'_>>>,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
         is_enum: bool,
         is_unsafe_cell: bool,
         scalar_valid_range: (Bound<u128>, Bound<u128>),
@@ -312,7 +303,7 @@ pub trait LayoutCalculator {
         // but *not* an encoding of the discriminant (e.g., a tag value).
         // See issue #49298 for more details on the need to leave space
         // for non-ZST uninhabited data (mostly partial initialization).
-        let absent = |fields: &[Layout<'_>]| {
+        let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
             let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
             let is_zst = fields.iter().all(|f| f.0.is_zst());
             uninhabited && is_zst
@@ -510,7 +501,7 @@ pub trait LayoutCalculator {
                 // It'll fit, but we need to make some adjustments.
                 match layout.fields {
                     FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                        for (j, offset) in offsets.iter_mut().enumerate() {
+                        for (j, offset) in offsets.iter_enumerated_mut() {
                             if !variants[i][j].0.is_zst() {
                                 *offset += this_offset;
                             }
@@ -577,8 +568,8 @@ pub trait LayoutCalculator {
                     variants: IndexVec::new(),
                 },
                 fields: FieldsShape::Arbitrary {
-                    offsets: vec![niche_offset],
-                    memory_index: vec![0],
+                    offsets: [niche_offset].into(),
+                    memory_index: [0].into(),
                 },
                 abi,
                 largest_niche,
@@ -651,7 +642,8 @@ pub trait LayoutCalculator {
                 st.variants = Variants::Single { index: i };
                 // Find the first field we can't move later
                 // to make room for a larger discriminant.
-                for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
+                for field_idx in st.fields.index_by_increasing_offset() {
+                    let field = &field_layouts[FieldIdx::from_usize(field_idx)];
                     if !field.0.is_zst() || field.align().abi.bytes() != 1 {
                         start_align = start_align.min(field.align().abi);
                         break;
@@ -802,13 +794,13 @@ pub trait LayoutCalculator {
                 let pair = self.scalar_pair(tag, prim_scalar);
                 let pair_offsets = match pair.fields {
                     FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                        assert_eq!(memory_index, &[0, 1]);
+                        assert_eq!(memory_index.raw, [0, 1]);
                         offsets
                     }
                     _ => panic!(),
                 };
-                if pair_offsets[0] == Size::ZERO
-                    && pair_offsets[1] == *offset
+                if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
+                    && pair_offsets[FieldIdx::from_u32(1)] == *offset
                     && align == pair.align
                     && size == pair.size
                 {
@@ -844,7 +836,10 @@ pub trait LayoutCalculator {
                 tag_field: 0,
                 variants: IndexVec::new(),
             },
-            fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
             largest_niche,
             abi,
             align,
@@ -883,7 +878,7 @@ pub trait LayoutCalculator {
     fn layout_of_union(
         &self,
         repr: &ReprOptions,
-        variants: &IndexSlice<VariantIdx, Vec<Layout<'_>>>,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
     ) -> Option<LayoutS> {
         let dl = self.current_data_layout();
         let dl = dl.borrow();
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 7b5732b488b..da91776ef67 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1108,7 +1108,7 @@ pub enum FieldsShape {
         /// ordered to match the source definition order.
         /// This vector does not go in increasing order.
         // FIXME(eddyb) use small vector optimization for the common case.
-        offsets: Vec<Size>,
+        offsets: IndexVec<FieldIdx, Size>,
 
         /// Maps source order field indices to memory order indices,
         /// depending on how the fields were reordered (if at all).
@@ -1122,7 +1122,7 @@ pub enum FieldsShape {
         ///
         // FIXME(eddyb) build a better abstraction for permutations, if possible.
         // FIXME(camlorn) also consider small vector optimization here.
-        memory_index: Vec<u32>,
+        memory_index: IndexVec<FieldIdx, u32>,
     },
 }
 
@@ -1157,7 +1157,7 @@ impl FieldsShape {
                 assert!(i < count);
                 stride * i
             }
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
         }
     }
 
@@ -1168,7 +1168,9 @@ impl FieldsShape {
                 unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
             }
             FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+            FieldsShape::Arbitrary { ref memory_index, .. } => {
+                memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
+            }
         }
     }
 
@@ -1176,20 +1178,17 @@ impl FieldsShape {
     #[inline]
     pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
         let mut inverse_small = [0u8; 64];
-        let mut inverse_big = vec![];
+        let mut inverse_big = IndexVec::new();
         let use_small = self.count() <= inverse_small.len();
 
         // We have to write this logic twice in order to keep the array small.
         if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
             if use_small {
-                for i in 0..self.count() {
-                    inverse_small[memory_index[i] as usize] = i as u8;
+                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
+                    inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
                 }
             } else {
-                inverse_big = vec![0; self.count()];
-                for i in 0..self.count() {
-                    inverse_big[memory_index[i] as usize] = i as u32;
-                }
+                inverse_big = memory_index.invert_bijective_mapping();
             }
         }
 
@@ -1199,7 +1198,7 @@ impl FieldsShape {
                 if use_small {
                     inverse_small[i] as usize
                 } else {
-                    inverse_big[i] as usize
+                    inverse_big[i as u32].as_usize()
                 }
             }
         })
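
The small/large split in `index_by_increasing_offset` exists to avoid a heap allocation in the common case of at most 64 fields. A standalone sketch of the small path, with plain integers standing in for the real index types:

    // Invert a small permutation into a fixed stack buffer instead of a Vec.
    fn by_increasing_offset(memory_index: &[u8]) -> Vec<usize> {
        let mut inverse_small = [0u8; 64];
        assert!(memory_index.len() <= inverse_small.len());
        for (field_idx, &mem_idx) in memory_index.iter().enumerate() {
            inverse_small[mem_idx as usize] = field_idx as u8;
        }
        (0..memory_index.len()).map(|i| inverse_small[i] as usize).collect()
    }

    fn main() {
        // Fields 0, 1, 2 sit 3rd, 1st, 2nd in memory; visiting by
        // increasing offset yields field order 1, 2, 0.
        assert_eq!(by_increasing_offset(&[2, 0, 1]), vec![1, 2, 0]);
    }
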
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
index 5945de2302a..45dcb25cea0 100644
--- a/compiler/rustc_index/src/vec.rs
+++ b/compiler/rustc_index/src/vec.rs
@@ -24,6 +24,7 @@ pub trait Idx: Copy + 'static + Eq + PartialEq + Debug + Hash {
     }
 
     #[inline]
+    #[must_use = "Use `increment_by` if you wanted to update the index in-place"]
     fn plus(self, amount: usize) -> Self {
         Self::new(self.index() + amount)
     }
@@ -284,6 +285,11 @@ impl<I: Idx, T: Clone> ToOwned for IndexSlice<I, T> {
 
 impl<I: Idx, T> IndexSlice<I, T> {
     #[inline]
+    pub fn empty() -> &'static Self {
+        Default::default()
+    }
+
+    #[inline]
     pub fn from_raw(raw: &[T]) -> &Self {
         let ptr: *const [T] = raw;
         // SAFETY: `IndexSlice` is `repr(transparent)` over a normal slice
@@ -398,6 +404,36 @@ impl<I: Idx, T> IndexSlice<I, T> {
     }
 }
 
+impl<I: Idx, J: Idx> IndexSlice<I, J> {
+    /// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`,
+    /// assuming the values in `self` are a permutation of `0..self.len()`.
+    ///
+    /// This is used to go between `memory_index` (source field order to memory order)
+    /// and `inverse_memory_index` (memory order to source field order).
+    /// See also `FieldsShape::Arbitrary::memory_index` for more details.
+    // FIXME(eddyb) build a better abstraction for permutations, if possible.
+    pub fn invert_bijective_mapping(&self) -> IndexVec<J, I> {
+        debug_assert_eq!(
+            self.iter().map(|x| x.index() as u128).sum::<u128>(),
+            (0..self.len() as u128).sum::<u128>(),
+            "The values aren't 0..N in input {self:?}",
+        );
+
+        let mut inverse = IndexVec::from_elem_n(Idx::new(0), self.len());
+        for (i1, &i2) in self.iter_enumerated() {
+            inverse[i2] = i1;
+        }
+
+        debug_assert_eq!(
+            inverse.iter().map(|x| x.index() as u128).sum::<u128>(),
+            (0..inverse.len() as u128).sum::<u128>(),
+            "The values aren't 0..N in result {self:?}",
+        );
+
+        inverse
+    }
+}
+
 /// `IndexVec` is often used as a map, so it provides some map-like APIs.
 impl<I: Idx, T> IndexVec<I, Option<T>> {
     #[inline]
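
One caveat on `invert_bijective_mapping`: the two `debug_assert_eq!`s compare only sums, a cheap necessary condition rather than a full permutation check, so a degenerate input can slip past the first assertion (the second then usually catches it after inversion). For example:

    fn main() {
        // [1, 1, 1] has the same sum as 0..3 but is not a permutation.
        let not_a_permutation = [1u32, 1, 1];
        assert_eq!(not_a_permutation.iter().sum::<u32>(), (0..3u32).sum::<u32>());
    }
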
@@ -502,6 +538,13 @@ impl<I: Idx, T> FromIterator<T> for IndexVec<I, T> {
     }
 }
 
+impl<I: Idx, T, const N: usize> From<[T; N]> for IndexVec<I, T> {
+    #[inline]
+    fn from(array: [T; N]) -> Self {
+        IndexVec::from_raw(array.into())
+    }
+}
+
 impl<I: Idx, T> IntoIterator for IndexVec<I, T> {
     type Item = T;
     type IntoIter = vec::IntoIter<T>;
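
The `From<[T; N]>` impl above is what enables the `[Size::ZERO, b_offset].into()` and `[0, 1].into()` calls in rustc_abi earlier in this diff. A toy stand-in showing the same conversion pattern (the real `IndexVec` additionally carries a `PhantomData` for its index type):

    // Transparent Vec wrapper, standing in for IndexVec<I, T>.
    struct MyIndexVec<T>(Vec<T>);

    impl<T, const N: usize> From<[T; N]> for MyIndexVec<T> {
        fn from(array: [T; N]) -> Self {
            // [T; N] -> Vec<T> via the std From impl.
            MyIndexVec(array.into())
        }
    }

    fn main() {
        let v: MyIndexVec<u32> = [0, 1].into();
        assert_eq!(v.0, vec![0, 1]);
    }
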
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 0f70b315aa6..8af9acfadde 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -5,6 +5,7 @@ use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
 use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
 use rustc_session::config::OptLevel;
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::{Span, DUMMY_SP};
@@ -635,7 +636,7 @@ where
                     variants: Variants::Single { index: variant_index },
                     fields: match NonZeroUsize::new(fields) {
                         Some(fields) => FieldsShape::Union(fields),
-                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
+                        None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
                     },
                     abi: Abi::Uninhabited,
                     largest_niche: None,
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index af6422c7246..8f29066b6e2 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -287,7 +287,7 @@ impl<'tcx> TransformVisitor<'tcx> {
         statements.push(Statement {
             kind: StatementKind::Assign(Box::new((
                 Place::return_place(),
-                Rvalue::Aggregate(Box::new(kind), IndexVec::from_iter([val])),
+                Rvalue::Aggregate(Box::new(kind), [val].into()),
             ))),
             source_info,
         });
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 73f86f74d14..d4420ec88db 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -1,7 +1,7 @@
 use hir::def_id::DefId;
 use rustc_hir as hir;
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
+use rustc_index::vec::{IndexSlice, IndexVec};
 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
 use rustc_middle::ty::layout::{
     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
@@ -62,23 +62,10 @@ fn layout_of<'tcx>(
     Ok(layout)
 }
 
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
-    let mut inverse = vec![0; map.len()];
-    for i in 0..map.len() {
-        inverse[map[i] as usize] = i as u32;
-    }
-    inverse
-}
-
 fn univariant_uninterned<'tcx>(
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     ty: Ty<'tcx>,
-    fields: &[Layout<'_>],
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
     repr: &ReprOptions,
     kind: StructKind,
 ) -> Result<LayoutS, LayoutError<'tcx>> {
@@ -106,7 +93,7 @@ fn layout_of_uncached<'tcx>(
     };
     let scalar = |value: Primitive| tcx.mk_layout(LayoutS::scalar(cx, scalar_unit(value)));
 
-    let univariant = |fields: &[Layout<'_>], repr: &ReprOptions, kind| {
+    let univariant = |fields: &IndexSlice<FieldIdx, Layout<'_>>, repr: &ReprOptions, kind| {
         Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
     };
     debug_assert!(!ty.has_non_region_infer());
@@ -256,12 +243,14 @@ fn layout_of_uncached<'tcx>(
         }),
 
         // Odd unit types.
-        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+        ty::FnDef(..) => {
+            univariant(IndexSlice::empty(), &ReprOptions::default(), StructKind::AlwaysSized)?
+        }
         ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
             let mut unit = univariant_uninterned(
                 cx,
                 ty,
-                &[],
+                IndexSlice::empty(),
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?;
@@ -277,7 +266,7 @@ fn layout_of_uncached<'tcx>(
         ty::Closure(_, ref substs) => {
             let tys = substs.as_closure().upvar_tys();
             univariant(
-                &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).collect::<Result<Vec<_>, _>>()?,
+                &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?
@@ -288,7 +277,7 @@ fn layout_of_uncached<'tcx>(
                 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
 
             univariant(
-                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).collect::<Result<Vec<_>, _>>()?,
+                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 kind,
             )?
@@ -393,7 +382,7 @@ fn layout_of_uncached<'tcx>(
 
             // Compute the placement of the vector fields:
             let fields = if is_array {
-                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
+                FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() }
             } else {
                 FieldsShape::Array { stride: e_ly.size, count: e_len }
             };
@@ -418,9 +407,9 @@ fn layout_of_uncached<'tcx>(
                     v.fields
                         .iter()
                         .map(|field| Ok(cx.layout_of(field.ty(tcx, substs))?.layout))
-                        .collect::<Result<Vec<_>, _>>()
+                        .try_collect::<IndexVec<_, _>>()
                 })
-                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+                .try_collect::<IndexVec<VariantIdx, _>>()?;
 
             if def.is_union() {
                 if def.repr().pack.is_some() && def.repr().align.is_some() {
@@ -492,8 +481,7 @@ fn layout_of_uncached<'tcx>(
 enum SavedLocalEligibility {
     Unassigned,
     Assigned(VariantIdx),
-    // FIXME: Use newtype_index so we aren't wasting bytes
-    Ineligible(Option<u32>),
+    Ineligible(Option<FieldIdx>),
 }
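
The removed FIXME is resolved because newtype indices declared via rustc_index's `newtype_index!` reserve a niche (their MAX is below `u32::MAX`), so `Option<FieldIdx>` packs into 4 bytes where `Option<u32>` needs 8. The same effect can be seen in std with `NonZeroU32`:

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // u32 uses all 2^32 bit patterns, so Option's discriminant needs extra space.
        assert_eq!(size_of::<Option<u32>>(), 8);
        // A forbidden value (here 0) gives Option a free niche.
        assert_eq!(size_of::<Option<NonZeroU32>>(), 4);
    }
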
 
 // When laying out generators, we divide our saved local fields into two
@@ -522,7 +510,7 @@ fn generator_saved_local_eligibility(
     use SavedLocalEligibility::*;
 
     let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
-        IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+        IndexVec::from_elem(Unassigned, &info.field_tys);
 
     // The saved locals not eligible for overlap. These will get
     // "promoted" to the prefix of our generator.
@@ -605,7 +593,7 @@ fn generator_saved_local_eligibility(
     // Write down the order of our locals that will be promoted to the prefix.
     {
         for (idx, local) in ineligible_locals.iter().enumerate() {
-            assignments[local] = Ineligible(Some(idx as u32));
+            assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
         }
     }
     debug!("generator saved local assignments: {:?}", assignments);
@@ -654,7 +642,7 @@ fn generator_layout<'tcx>(
         .map(|ty| Ok(cx.layout_of(ty)?.layout))
         .chain(iter::once(Ok(tag_layout)))
         .chain(promoted_layouts)
-        .collect::<Result<Vec<_>, _>>()?;
+        .try_collect::<IndexVec<_, _>>()?;
     let prefix = univariant_uninterned(
         cx,
         ty,
@@ -672,26 +660,28 @@ fn generator_layout<'tcx>(
     debug!("prefix = {:#?}", prefix);
     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
         FieldsShape::Arbitrary { mut offsets, memory_index } => {
-            let mut inverse_memory_index = invert_mapping(&memory_index);
+            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
 
             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
             // "outer" and "promoted" fields respectively.
-            let b_start = (tag_index + 1) as u32;
-            let offsets_b = offsets.split_off(b_start as usize);
+            let b_start = FieldIdx::from_usize(tag_index + 1);
+            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
             let offsets_a = offsets;
 
             // Disentangle the "a" and "b" components of `inverse_memory_index`
             // by preserving the order but keeping only one disjoint "half" each.
             // FIXME(eddyb) build a better abstraction for permutations, if possible.
-            let inverse_memory_index_b: Vec<_> =
-                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
-            inverse_memory_index.retain(|&i| i < b_start);
+            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
+                .iter()
+                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
+                .collect();
+            inverse_memory_index.raw.retain(|&i| i < b_start);
             let inverse_memory_index_a = inverse_memory_index;
 
             // Since `inverse_memory_index_{a,b}` each only refer to their
             // respective fields, they can be safely inverted
-            let memory_index_a = invert_mapping(&inverse_memory_index_a);
-            let memory_index_b = invert_mapping(&inverse_memory_index_b);
+            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
+            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
 
             let outer_fields =
                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
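
The disentangling above splits one permutation into two independent ones: entries below `b_start` form the "outer" half, entries at or above it are rebased downward to form the "promoted" half, and because relative order is preserved each half is itself a bijective mapping that can be inverted. A plain-integer sketch of the same filtering:

    fn main() {
        // inverse_memory_index over 5 fields, with b_start = 3.
        let inverse: Vec<u32> = vec![4, 0, 3, 1, 2];
        let b_start = 3u32;
        let b: Vec<u32> = inverse.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
        let a: Vec<u32> = inverse.iter().copied().filter(|&i| i < b_start).collect();
        assert_eq!(b, vec![1, 0]);    // fields 4 and 3, rebased into the "b" space
        assert_eq!(a, vec![0, 1, 2]); // fields below b_start, order preserved
    }
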
@@ -722,7 +712,7 @@ fn generator_layout<'tcx>(
                 ty,
                 &variant_only_tys
                     .map(|ty| Ok(cx.layout_of(ty)?.layout))
-                    .collect::<Result<Vec<_>, _>>()?,
+                    .try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::Prefixed(prefix_size, prefix_align.abi),
             )?;
@@ -741,13 +731,16 @@ fn generator_layout<'tcx>(
             // promoted fields were being used, but leave the elements not in the
             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
             // obtain a valid (bijective) mapping.
-            const INVALID_FIELD_IDX: u32 = !0;
-            let mut combined_inverse_memory_index =
-                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
+            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);
+
+            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
+                INVALID_FIELD_IDX,
+                promoted_memory_index.len() + memory_index.len(),
+            );
             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
             let combined_offsets = variant_fields
-                .iter()
-                .enumerate()
+                .iter_enumerated()
                 .map(|(i, local)| {
                     let (offset, memory_index) = match assignments[*local] {
                         Unassigned => bug!(),
@@ -756,19 +749,19 @@ fn generator_layout<'tcx>(
                             (offset, promoted_memory_index.len() as u32 + memory_index)
                         }
                         Ineligible(field_idx) => {
-                            let field_idx = field_idx.unwrap() as usize;
+                            let field_idx = field_idx.unwrap();
                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                         }
                     };
-                    combined_inverse_memory_index[memory_index as usize] = i as u32;
+                    combined_inverse_memory_index[memory_index] = i;
                     offset
                 })
                 .collect();
 
             // Remove the unused slots and invert the mapping to obtain the
             // combined `memory_index` (also see previous comment).
-            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
-            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
+            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
 
             variant.fields = FieldsShape::Arbitrary {
                 offsets: combined_offsets,
@@ -779,7 +772,7 @@ fn generator_layout<'tcx>(
             align = align.max(variant.align);
             Ok(variant)
         })
-        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+        .try_collect::<IndexVec<VariantIdx, _>>()?;
 
     size = size.align_to(align.abi);
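
The `INVALID_FIELD_IDX` sentinel above builds an inverse mapping over a sparse subset of memory slots: prefill with an invalid marker, write only the slots actually used, then `retain` away the markers to leave a dense permutation that can be inverted. A plain-integer sketch of the compaction step:

    fn main() {
        const INVALID: u32 = !0;
        // Four memory slots, of which only positions 0 and 2 are used.
        let mut combined = vec![INVALID; 4];
        combined[2] = 0; // field 0 lands at memory position 2
        combined[0] = 1; // field 1 lands at memory position 0
        combined.retain(|&i| i != INVALID);
        // A dense inverse mapping remains, ready for a final inversion.
        assert_eq!(combined, vec![1, 0]);
    }
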
 
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index 3865a8f3223..9195964a2f3 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -5,6 +5,7 @@
 //! This API is completely unstable and subject to change.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(iterator_try_collect)]
 #![feature(let_chains)]
 #![feature(never_type)]
 #![feature(box_patterns)]
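
The new `iterator_try_collect` gate enables the nightly `Iterator::try_collect` used throughout the layout code above in place of the `collect::<Result<Vec<_>, _>>()` pattern. A minimal nightly-only example:

    #![feature(iterator_try_collect)]

    fn main() {
        // Collects Ok values into a Vec, short-circuiting on the first Err.
        let ok: Result<Vec<i32>, _> =
            ["1", "2", "3"].iter().map(|s| s.parse::<i32>()).try_collect();
        assert_eq!(ok.unwrap(), vec![1, 2, 3]);

        let err: Result<Vec<i32>, _> =
            ["1", "x"].iter().map(|s| s.parse::<i32>()).try_collect();
        assert!(err.is_err());
    }
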