Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_abi/src/layout.rs | 581
-rw-r--r--  compiler/rustc_feature/src/accepted.rs | 2
-rw-r--r--  compiler/rustc_feature/src/active.rs | 6
-rw-r--r--  compiler/rustc_feature/src/removed.rs | 2
-rw-r--r--  compiler/rustc_metadata/src/creader.rs | 10
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs | 15
-rw-r--r--  compiler/rustc_resolve/src/late.rs | 6
-rw-r--r--  compiler/rustc_trait_selection/src/solve/eval_ctxt.rs | 19
-rw-r--r--  compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs | 58
-rw-r--r--  compiler/rustc_trait_selection/src/solve/mod.rs | 22
10 files changed, 473 insertions, 248 deletions
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index f3af031ade4..b4597d5bc78 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,4 +1,5 @@
 use super::*;
+use std::fmt::Write;
 use std::{borrow::Borrow, cmp, iter, ops::Bound};
 
 #[cfg(feature = "randomize")]
@@ -49,220 +50,60 @@ pub trait LayoutCalculator {
         repr: &ReprOptions,
         kind: StructKind,
     ) -> Option<LayoutS> {
-        let pack = repr.pack;
-        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
-        let optimize = !repr.inhibit_struct_field_reordering_opt();
-        if optimize {
-            let end =
-                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-            let optimizing = &mut inverse_memory_index.raw[..end];
-            let effective_field_align = |layout: Layout<'_>| {
-                if let Some(pack) = pack {
-                    // return the packed alignment in bytes
-                    layout.align().abi.min(pack).bytes()
-                } else {
-                    // returns log2(effective-align).
-                    // This is ok since `pack` applies to all fields equally.
-                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-                    //
-                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-                    layout.align().abi.bytes().max(layout.size().bytes()).trailing_zeros() as u64
-                }
-            };
-
-            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
-            // the field ordering to try and catch some code making assumptions about layouts
-            // we don't guarantee
-            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
-                #[cfg(feature = "randomize")]
-                {
-                    // `ReprOptions.layout_seed` is a deterministic seed that we can use to
-                    // randomize field ordering with
-                    let mut rng =
-                        Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
-
-                    // Shuffle the ordering of the fields
-                    optimizing.shuffle(&mut rng);
-                }
-                // Otherwise we just leave things alone and actually optimize the type's fields
-            } else {
-                match kind {
-                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                        optimizing.sort_by_key(|&x| {
-                            // Place ZSTs first to avoid "interesting offsets",
-                            // especially with only one or two non-ZST fields.
-                            // Then place largest alignments first, largest niches within an alignment group last
-                            let f = fields[x];
-                            let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
-                            (!f.0.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
-                        });
-                    }
-
-                    StructKind::Prefixed(..) => {
-                        // Sort in ascending alignment so that the layout stays optimal
-                        // regardless of the prefix.
-                        // And put the largest niche in an alignment group at the end
-                        // so it can be used as discriminant in jagged enums
-                        optimizing.sort_by_key(|&x| {
-                            let f = fields[x];
-                            let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
-                            (effective_field_align(f), niche_size)
-                        });
+        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
+        // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
+        // data-carrying variants can be packed into the space after/before the niche.
+        // If the default field ordering does not give us a niche at the front then we do a second
+        // run that biases niches towards the end, and keep whichever of the two layouts places the
+        // niche closer to one of the struct's edges.
+        if let Some(layout) = &layout {
+            if let Some(niche) = layout.largest_niche {
+                let head_space = niche.offset.bytes();
+                let niche_length = niche.value.size(dl).bytes();
+                let tail_space = layout.size.bytes() - head_space - niche_length;
+
+                // This may end up doing redundant work if the niche is already in the last field
+                // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
+                // the unpadded size so we try anyway.
+                if fields.len() > 1 && head_space != 0 && tail_space > 0 {
+                    let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
+                        .expect("alt layout should always work");
+                    let niche = alt_layout
+                        .largest_niche
+                        .expect("alt layout should have a niche like the regular one");
+                    let alt_head_space = niche.offset.bytes();
+                    let alt_niche_len = niche.value.size(dl).bytes();
+                    let alt_tail_space = alt_layout.size.bytes() - alt_head_space - alt_niche_len;
+
+                    debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());
+
+                    let prefer_alt_layout =
+                        alt_head_space > head_space && alt_head_space > tail_space;
+
+                    debug!(
+                        "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
+                        layout: {}\n\
+                        alt_layout: {}\n",
+                        layout.size.bytes(),
+                        head_space,
+                        niche_length,
+                        tail_space,
+                        alt_head_space,
+                        alt_niche_len,
+                        alt_tail_space,
+                        layout.fields.count(),
+                        prefer_alt_layout,
+                        format_field_niches(&layout, &fields, &dl),
+                        format_field_niches(&alt_layout, &fields, &dl),
+                    );
+
+                    if prefer_alt_layout {
+                        return Some(alt_layout);
                     }
                 }
-
-                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-                //                 regardless of the status of `-Z randomize-layout`
             }
         }
-        // inverse_memory_index holds field indices by increasing memory offset.
-        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-        // We now write field offsets to the corresponding offset slot;
-        // field 5 with offset 0 puts 0 in offsets[5].
-        // At the bottom of this function, we invert `inverse_memory_index` to
-        // produce `memory_index` (see `invert_mapping`).
-        let mut sized = true;
-        let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
-        let mut offset = Size::ZERO;
-        let mut largest_niche = None;
-        let mut largest_niche_available = 0;
-        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-            let prefix_align =
-                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-            align = align.max(AbiAndPrefAlign::new(prefix_align));
-            offset = prefix_size.align_to(prefix_align);
-        }
-        for &i in &inverse_memory_index {
-            let field = &fields[i];
-            if !sized {
-                self.delay_bug(&format!(
-                    "univariant: field #{} comes after unsized field",
-                    offsets.len(),
-                ));
-            }
-
-            if field.0.is_unsized() {
-                sized = false;
-            }
-
-            // Invariant: offset < dl.obj_size_bound() <= 1<<61
-            let field_align = if let Some(pack) = pack {
-                field.align().min(AbiAndPrefAlign::new(pack))
-            } else {
-                field.align()
-            };
-            offset = offset.align_to(field_align.abi);
-            align = align.max(field_align);
-
-            debug!("univariant offset: {:?} field: {:#?}", offset, field);
-            offsets[i] = offset;
-
-            if let Some(mut niche) = field.largest_niche() {
-                let available = niche.available(dl);
-                if available > largest_niche_available {
-                    largest_niche_available = available;
-                    niche.offset += offset;
-                    largest_niche = Some(niche);
-                }
-            }
-
-            offset = offset.checked_add(field.size(), dl)?;
-        }
-        if let Some(repr_align) = repr.align {
-            align = align.max(AbiAndPrefAlign::new(repr_align));
-        }
-        debug!("univariant min_size: {:?}", offset);
-        let min_size = offset;
-        // As stated above, inverse_memory_index holds field indices by increasing offset.
-        // This makes it an already-sorted view of the offsets vec.
-        // To invert it, consider:
-        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-        // Field 5 would be the first element, so memory_index is i:
-        // Note: if we didn't optimize, it's already right.
-        let memory_index = if optimize {
-            inverse_memory_index.invert_bijective_mapping()
-        } else {
-            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
-            inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
-        };
-        let size = min_size.align_to(align.abi);
-        let mut abi = Abi::Aggregate { sized };
-        // Unpack newtype ABIs and find scalar pairs.
-        if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs.
-            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
-
-            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                // We have exactly one non-ZST field.
-                (Some((i, field)), None, None) => {
-                    // Field fills the struct and it has a scalar or scalar pair ABI.
-                    if offsets[i].bytes() == 0
-                        && align.abi == field.align().abi
-                        && size == field.size()
-                    {
-                        match field.abi() {
-                            // For plain scalars, or vectors of them, we can't unpack
-                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                abi = field.abi();
-                            }
-                            // But scalar pairs are Rust-specific and get
-                            // treated as aggregates by C ABIs anyway.
-                            Abi::ScalarPair(..) => {
-                                abi = field.abi();
-                            }
-                            _ => {}
-                        }
-                    }
-                }
-
-                // Two non-ZST fields, and they're both scalars.
-                (Some((i, a)), Some((j, b)), None) => {
-                    match (a.abi(), b.abi()) {
-                        (Abi::Scalar(a), Abi::Scalar(b)) => {
-                            // Order by the memory placement, not source order.
-                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                                ((i, a), (j, b))
-                            } else {
-                                ((j, b), (i, a))
-                            };
-                            let pair = self.scalar_pair(a, b);
-                            let pair_offsets = match pair.fields {
-                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                    assert_eq!(memory_index.raw, [0, 1]);
-                                    offsets
-                                }
-                                _ => panic!(),
-                            };
-                            if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
-                                && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
-                                && align == pair.align
-                                && size == pair.size
-                            {
-                                // We can use `ScalarPair` only when it matches our
-                                // already computed layout (including `#[repr(C)]`).
-                                abi = pair.abi;
-                            }
-                        }
-                        _ => {}
-                    }
-                }
-
-                _ => {}
-            }
-        }
-        if fields.iter().any(|f| f.abi().is_uninhabited()) {
-            abi = Abi::Uninhabited;
-        }
-        Some(LayoutS {
-            variants: Variants::Single { index: FIRST_VARIANT },
-            fields: FieldsShape::Arbitrary { offsets, memory_index },
-            abi,
-            largest_niche,
-            align,
-            size,
-        })
+        layout
     }
 
     fn layout_of_never_type(&self) -> LayoutS {
@@ -934,3 +775,323 @@ pub trait LayoutCalculator {
         })
     }
 }
+
+/// Determines towards which end of a struct the layout optimizations will try to place the best niches.
+enum NicheBias {
+    Start,
+    End,
+}
+
+fn univariant(
+    this: &(impl LayoutCalculator + ?Sized),
+    dl: &TargetDataLayout,
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
+    repr: &ReprOptions,
+    kind: StructKind,
+    niche_bias: NicheBias,
+) -> Option<LayoutS> {
+    let pack = repr.pack;
+    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
+    let optimize = !repr.inhibit_struct_field_reordering_opt();
+    if optimize && fields.len() > 1 {
+        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+        let optimizing = &mut inverse_memory_index.raw[..end];
+
+        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+        // the field ordering to try and catch some code making assumptions about layouts
+        // we don't guarantee
+        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+            #[cfg(feature = "randomize")]
+            {
+                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+                // randomize field ordering with
+                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
+
+                // Shuffle the ordering of the fields
+                optimizing.shuffle(&mut rng);
+            }
+            // Otherwise we just leave things alone and actually optimize the type's fields
+        } else {
+            let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+            let largest_niche_size = fields
+                .iter()
+                .filter_map(|f| f.largest_niche())
+                .map(|n| n.available(dl))
+                .max()
+                .unwrap_or(0);
+
+            // Calculates a sort key to group fields by their alignment or possibly some size-derived
+            // pseudo-alignment.
+            let alignment_group_key = |layout: Layout<'_>| {
+                if let Some(pack) = pack {
+                    // return the packed alignment in bytes
+                    layout.align().abi.min(pack).bytes()
+                } else {
+                    // returns log2(effective-align).
+                    // This is ok since `pack` applies to all fields equally.
+                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+                    //
+                    let align = layout.align().abi.bytes();
+                    let size = layout.size().bytes();
+                    let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
+                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+                    let size_as_align = align.max(size).trailing_zeros();
+                    let size_as_align = if largest_niche_size > 0 {
+                        match niche_bias {
+                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
+                            // to the front in the first case (for aligned loads) but keep the bool in front
+                            // in the second case for its niches.
+                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
+                            // When moving niches towards the end of the struct then for
+                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
+                            // in the align-1 group because its bool can be moved closer to the end.
+                            NicheBias::End if niche_size == largest_niche_size => {
+                                align.trailing_zeros()
+                            }
+                            NicheBias::End => size_as_align,
+                        }
+                    } else {
+                        size_as_align
+                    };
+                    size_as_align as u64
+                }
+            };
+
+            match kind {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
+                    // to get one niche into the preferred position. If it ever supported multiple niches
+                    // then a more advanced pick-and-pack approach could provide better results.
+                    // But even for the single-niche case it's not optimal. E.g. for
+                    // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
+                    // but it would require packing the tuple together with the u16 to build a 4-byte
+                    // group so that the u32 can be placed after it without padding. This kind
+                    // of packing can't be achieved by sorting.
+                    optimizing.sort_by_key(|&x| {
+                        let f = fields[x];
+                        let field_size = f.size().bytes();
+                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+                        let niche_size_key = match niche_bias {
+                            // large niche first
+                            NicheBias::Start => !niche_size,
+                            // large niche last
+                            NicheBias::End => niche_size,
+                        };
+                        let inner_niche_offset_key = match niche_bias {
+                            NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
+                            NicheBias::End => f.largest_niche().map_or(0, |n| {
+                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
+                            }),
+                        };
+
+                        (
+                            // Place ZSTs first to avoid "interesting offsets", especially with only one
+                            // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
+                            !f.0.is_zst(),
+                            // Then place largest alignments first.
+                            cmp::Reverse(alignment_group_key(f)),
+                            // Then prioritize niche placement within alignment group according to
+                            // `niche_bias`.
+                            niche_size_key,
+                            // Then among fields with equally-sized niches prefer the ones whose
+                            // niche is closer to the start/end of the field.
+                            inner_niche_offset_key,
+                        )
+                    });
+                }
+
+                StructKind::Prefixed(..) => {
+                    // Sort in ascending alignment so that the layout stays optimal
+                    // regardless of the prefix.
+                    // And put the largest niche in an alignment group at the end
+                    // so it can be used as discriminant in jagged enums
+                    optimizing.sort_by_key(|&x| {
+                        let f = fields[x];
+                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+                        (alignment_group_key(f), niche_size)
+                    });
+                }
+            }
+
+            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+            //                 regardless of the status of `-Z randomize-layout`
+        }
+    }
+    // inverse_memory_index holds field indices by increasing memory offset.
+    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+    // We now write field offsets to the corresponding offset slot;
+    // field 5 with offset 0 puts 0 in offsets[5].
+    // At the bottom of this function, we invert `inverse_memory_index` to
+    // produce `memory_index` (see `invert_mapping`).
+    let mut sized = true;
+    let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
+    let mut offset = Size::ZERO;
+    let mut largest_niche = None;
+    let mut largest_niche_available = 0;
+    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+        let prefix_align =
+            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+        align = align.max(AbiAndPrefAlign::new(prefix_align));
+        offset = prefix_size.align_to(prefix_align);
+    }
+    for &i in &inverse_memory_index {
+        let field = &fields[i];
+        if !sized {
+            this.delay_bug(&format!(
+                "univariant: field #{} comes after unsized field",
+                offsets.len(),
+            ));
+        }
+
+        if field.0.is_unsized() {
+            sized = false;
+        }
+
+        // Invariant: offset < dl.obj_size_bound() <= 1<<61
+        let field_align = if let Some(pack) = pack {
+            field.align().min(AbiAndPrefAlign::new(pack))
+        } else {
+            field.align()
+        };
+        offset = offset.align_to(field_align.abi);
+        align = align.max(field_align);
+
+        debug!("univariant offset: {:?} field: {:#?}", offset, field);
+        offsets[i] = offset;
+
+        if let Some(mut niche) = field.largest_niche() {
+            let available = niche.available(dl);
+            // Pick up larger niches.
+            let prefer_new_niche = match niche_bias {
+                NicheBias::Start => available > largest_niche_available,
+                // if there are several niches of the same size then pick the last one
+                NicheBias::End => available >= largest_niche_available,
+            };
+            if prefer_new_niche {
+                largest_niche_available = available;
+                niche.offset += offset;
+                largest_niche = Some(niche);
+            }
+        }
+
+        offset = offset.checked_add(field.size(), dl)?;
+    }
+    if let Some(repr_align) = repr.align {
+        align = align.max(AbiAndPrefAlign::new(repr_align));
+    }
+    debug!("univariant min_size: {:?}", offset);
+    let min_size = offset;
+    // As stated above, inverse_memory_index holds field indices by increasing offset.
+    // This makes it an already-sorted view of the offsets vec.
+    // To invert it, consider:
+    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+    // Field 5 would be the first element, so memory_index is i:
+    // Note: if we didn't optimize, it's already right.
+    let memory_index = if optimize {
+        inverse_memory_index.invert_bijective_mapping()
+    } else {
+        debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
+        inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+    };
+    let size = min_size.align_to(align.abi);
+    let mut abi = Abi::Aggregate { sized };
+    // Unpack newtype ABIs and find scalar pairs.
+    if sized && size.bytes() > 0 {
+        // All other fields must be ZSTs.
+        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
+
+        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+            // We have exactly one non-ZST field.
+            (Some((i, field)), None, None) => {
+                // Field fills the struct and it has a scalar or scalar pair ABI.
+                if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
+                {
+                    match field.abi() {
+                        // For plain scalars, or vectors of them, we can't unpack
+                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                            abi = field.abi();
+                        }
+                        // But scalar pairs are Rust-specific and get
+                        // treated as aggregates by C ABIs anyway.
+                        Abi::ScalarPair(..) => {
+                            abi = field.abi();
+                        }
+                        _ => {}
+                    }
+                }
+            }
+
+            // Two non-ZST fields, and they're both scalars.
+            (Some((i, a)), Some((j, b)), None) => {
+                match (a.abi(), b.abi()) {
+                    (Abi::Scalar(a), Abi::Scalar(b)) => {
+                        // Order by the memory placement, not source order.
+                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                            ((i, a), (j, b))
+                        } else {
+                            ((j, b), (i, a))
+                        };
+                        let pair = this.scalar_pair(a, b);
+                        let pair_offsets = match pair.fields {
+                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                assert_eq!(memory_index.raw, [0, 1]);
+                                offsets
+                            }
+                            _ => panic!(),
+                        };
+                        if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
+                            && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
+                            && align == pair.align
+                            && size == pair.size
+                        {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+                    _ => {}
+                }
+            }
+
+            _ => {}
+        }
+    }
+    if fields.iter().any(|f| f.abi().is_uninhabited()) {
+        abi = Abi::Uninhabited;
+    }
+    Some(LayoutS {
+        variants: Variants::Single { index: FIRST_VARIANT },
+        fields: FieldsShape::Arbitrary { offsets, memory_index },
+        abi,
+        largest_niche,
+        align,
+        size,
+    })
+}
+
+fn format_field_niches(
+    layout: &LayoutS,
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
+    dl: &TargetDataLayout,
+) -> String {
+    let mut s = String::new();
+    for i in layout.fields.index_by_increasing_offset() {
+        let offset = layout.fields.offset(i);
+        let f = fields[i.into()];
+        write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
+        if let Some(n) = f.largest_niche() {
+            write!(
+                s,
+                " n{}b{}s{}",
+                n.offset.bytes(),
+                n.available(dl).ilog2(),
+                n.value.size(dl).bytes()
+            )
+            .unwrap();
+        }
+        write!(s, "] ").unwrap();
+    }
+    s
+}
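
As a rough illustration of the niche-placement idea in the layout.rs change above (a standalone sketch, not part of this commit; `Inner` and `Outer` are made-up types): a field with invalid bit patterns, such as a bool, provides a niche, and enum layout can encode a discriminant in that niche instead of adding a tag byte. The second univariant pass exists to leave such a niche as close as possible to one of the struct's edges.

use std::mem::size_of;

// The only niche in this struct comes from the trailing bool (254 invalid values).
#[allow(dead_code)]
struct Inner {
    data: [u8; 16],
    flag: bool,
}

#[allow(dead_code)]
enum Outer {
    A(Inner),
    B, // dataless variant that can be encoded in Inner's niche
}

fn main() {
    // repr(Rust) layout is unspecified, so we only observe the sizes here.
    // With the bool's niche available, Option<Inner> and Outer typically need
    // no separate tag and stay the same size as Inner.
    println!("Inner:         {}", size_of::<Inner>());
    println!("Option<Inner>: {}", size_of::<Option<Inner>>());
    println!("Outer:         {}", size_of::<Outer>());
}
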
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index 3d644de1665..3b9fc5e9a51 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -239,7 +239,7 @@ declare_features! (
     /// Allows using `Self` and associated types in struct expressions and patterns.
     (accepted, more_struct_aliases, "1.16.0", Some(37544), None),
     /// Allows using the MOVBE target feature.
-    (accepted, movbe_target_feature, "CURRENT_RUSTC_VERSION", Some(44839), None),
+    (accepted, movbe_target_feature, "1.70.0", Some(44839), None),
     /// Allows patterns with concurrent by-move and by-ref bindings.
     /// For example, you can write `Foo(a, ref b)` where `a` is by-move and `b` is by-ref.
     (accepted, move_ref_pattern, "1.49.0", Some(68354), None),
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index 48f5bd1cb50..052d312d9a0 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -417,7 +417,7 @@ declare_features! (
     /// Allows `if let` guard in match arms.
     (active, if_let_guard, "1.47.0", Some(51114), None),
     /// Allows `impl Trait` to be used inside associated types (RFC 2515).
-    (active, impl_trait_in_assoc_type, "CURRENT_RUSTC_VERSION", Some(63063), None),
+    (active, impl_trait_in_assoc_type, "1.70.0", Some(63063), None),
     /// Allows `impl Trait` as output type in `Fn` traits in return position of functions.
     (active, impl_trait_in_fn_trait_return, "1.64.0", Some(99697), None),
     /// Allows referencing `Self` and projections in impl-trait.
@@ -498,7 +498,7 @@ declare_features! (
     /// Allows return-position `impl Trait` in traits.
     (incomplete, return_position_impl_trait_in_trait, "1.65.0", Some(91611), None),
     /// Allows bounding the return type of AFIT/RPITIT.
-    (incomplete, return_type_notation, "CURRENT_RUSTC_VERSION", Some(109417), None),
+    (incomplete, return_type_notation, "1.70.0", Some(109417), None),
     /// Allows `extern "rust-cold"`.
     (active, rust_cold_cc, "1.63.0", Some(97544), None),
     /// Allows the use of SIMD types in functions declared in `extern` blocks.
@@ -521,7 +521,7 @@ declare_features! (
     /// Dyn upcasting is casting, e.g., `dyn Foo -> dyn Bar` where `Foo: Bar`.
     (active, trait_upcasting, "1.56.0", Some(65991), None),
     /// Allows for transmuting between arrays with sizes that contain generic consts.
-    (active, transmute_generic_consts, "CURRENT_RUSTC_VERSION", Some(109929), None),
+    (active, transmute_generic_consts, "1.70.0", Some(109929), None),
     /// Allows #[repr(transparent)] on unions (RFC 2645).
     (active, transparent_unions, "1.37.0", Some(60405), None),
     /// Allows inconsistent bounds in where clauses.
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index 876a31abdf8..8bca24b2bf0 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -53,7 +53,7 @@ declare_features! (
     (removed, await_macro, "1.38.0", Some(50547), None,
      Some("subsumed by `.await` syntax")),
     /// Allows using the `box $expr` syntax.
-    (removed, box_syntax, "CURRENT_RUSTC_VERSION", Some(49733), None, Some("replaced with `#[rustc_box]`")),
+    (removed, box_syntax, "1.70.0", Some(49733), None, Some("replaced with `#[rustc_box]`")),
     /// Allows capturing disjoint fields in a closure/generator (RFC 2229).
     (removed, capture_disjoint_fields, "1.49.0", Some(53488), None, Some("stabilized in Rust 2021")),
     /// Allows comparing raw pointers during const eval.
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index 179453238f2..01b69966ca9 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -27,6 +27,7 @@ use rustc_span::{Span, DUMMY_SP};
 use rustc_target::spec::{PanicStrategy, TargetTriple};
 
 use proc_macro::bridge::client::ProcMacro;
+use std::error::Error;
 use std::ops::Fn;
 use std::path::Path;
 use std::time::Duration;
@@ -1094,5 +1095,12 @@ fn load_dylib(path: &Path, max_attempts: usize) -> Result<libloading::Library, S
     }
 
     debug!("Failed to load proc-macro `{}` even after {} attempts.", path.display(), max_attempts);
-    Err(format!("{} (retried {} times)", last_error.unwrap(), max_attempts))
+
+    let last_error = last_error.unwrap();
+    let message = if let Some(src) = last_error.source() {
+        format!("{last_error} ({src}) (retried {max_attempts} times)")
+    } else {
+        format!("{last_error} (retried {max_attempts} times)")
+    };
+    Err(message)
 }
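
The creader.rs change above appends the underlying cause, when `std::error::Error::source()` reports one, to the proc-macro load failure message. A minimal sketch of that pattern with stand-in types (`LoadError` and its `io::Error` cause are assumptions, not libloading's real error type):

use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct LoadError {
    cause: std::io::Error,
}

impl fmt::Display for LoadError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "failed to load dylib")
    }
}

impl Error for LoadError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.cause)
    }
}

fn main() {
    let max_attempts = 5;
    let last_error = LoadError {
        cause: std::io::Error::new(std::io::ErrorKind::NotFound, "no such file"),
    };
    // Same shape as the new code in load_dylib: include the source if there is one.
    let message = if let Some(src) = last_error.source() {
        format!("{last_error} ({src}) (retried {max_attempts} times)")
    } else {
        format!("{last_error} (retried {max_attempts} times)")
    };
    println!("{message}");
}
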
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 1e115be2c2a..443f469ce52 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -1,7 +1,7 @@
 use crate::deref_separator::deref_finder;
 use crate::MirPass;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
 use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
@@ -84,12 +84,13 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
 
             let reachable = traversal::reachable_as_bitset(body);
 
+            let drop_flags = IndexVec::from_elem(None, &env.move_data.move_paths);
             ElaborateDropsCtxt {
                 tcx,
                 body,
                 env: &env,
                 init_data: InitializationData { inits, uninits },
-                drop_flags: Default::default(),
+                drop_flags,
                 patch: MirPatch::new(body),
                 un_derefer: un_derefer,
                 reachable,
@@ -293,7 +294,7 @@ struct ElaborateDropsCtxt<'a, 'tcx> {
     body: &'a Body<'tcx>,
     env: &'a MoveDataParamEnv<'tcx>,
     init_data: InitializationData<'a, 'tcx>,
-    drop_flags: FxHashMap<MovePathIndex, Local>,
+    drop_flags: IndexVec<MovePathIndex, Option<Local>>,
     patch: MirPatch<'tcx>,
     un_derefer: UnDerefer<'tcx>,
     reachable: BitSet<BasicBlock>,
@@ -312,11 +313,11 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
         let tcx = self.tcx;
         let patch = &mut self.patch;
         debug!("create_drop_flag({:?})", self.body.span);
-        self.drop_flags.entry(index).or_insert_with(|| patch.new_internal(tcx.types.bool, span));
+        self.drop_flags[index].get_or_insert_with(|| patch.new_internal(tcx.types.bool, span));
     }
 
     fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
-        self.drop_flags.get(&index).map(|t| Place::from(*t))
+        self.drop_flags[index].map(Place::from)
     }
 
     /// create a patch that elaborates all drops in the input
@@ -463,7 +464,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     }
 
     fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
-        if let Some(&flag) = self.drop_flags.get(&path) {
+        if let Some(flag) = self.drop_flags[path] {
             let span = self.patch.source_info_for_location(self.body, loc).span;
             let val = self.constant_bool(span, val.value());
             self.patch.add_assign(loc, Place::from(flag), val);
@@ -474,7 +475,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
         let loc = Location::START;
         let span = self.patch.source_info_for_location(self.body, loc).span;
         let false_ = self.constant_bool(span, false);
-        for flag in self.drop_flags.values() {
+        for flag in self.drop_flags.iter().flatten() {
             self.patch.add_assign(loc, Place::from(*flag), false_.clone());
         }
     }
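
The elaborate_drops.rs change above replaces an `FxHashMap<MovePathIndex, Local>` with an `IndexVec<MovePathIndex, Option<Local>>`, which works because move-path indices are small and dense: lookups become plain indexing and iteration order is deterministic. A simplified sketch of the same pattern with a plain `Vec<Option<_>>` (the names and element type are stand-ins, not the compiler's):

fn main() {
    let num_move_paths = 4;
    // drop_flags keyed by move-path index, all unset to start with.
    let mut drop_flags: Vec<Option<u32>> = vec![None; num_move_paths];

    // Analogue of create_drop_flag: allocate a flag local on first use.
    let mut next_local = 0u32;
    drop_flags[2].get_or_insert_with(|| {
        let local = next_local;
        next_local += 1;
        local
    });

    // Analogue of the `.iter().flatten()` loop above: visit only the paths
    // that actually have a flag, skipping the `None` entries.
    for flag in drop_flags.iter().flatten() {
        println!("flag stored in local {flag}");
    }
}
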
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index a97857e05e2..5c02e7193a2 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -859,13 +859,9 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
                             sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
                             &sig.decl.output,
                         );
-
-                        this.record_lifetime_params_for_async(
-                            fn_id,
-                            sig.header.asyncness.opt_return_id(),
-                        );
                     },
                 );
+                self.record_lifetime_params_for_async(fn_id, sig.header.asyncness.opt_return_id());
                 return;
             }
             FnKind::Fn(..) => {
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
index bd52957d162..63a73f8d50d 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
@@ -3,7 +3,8 @@ use rustc_infer::infer::at::ToTrace;
 use rustc_infer::infer::canonical::CanonicalVarValues;
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_infer::infer::{
-    DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, TyCtxtInferExt,
+    DefineOpaqueTypes, InferCtxt, InferOk, LateBoundRegionConversionTime, RegionVariableOrigin,
+    TyCtxtInferExt,
 };
 use rustc_infer::traits::query::NoSolution;
 use rustc_infer::traits::ObligationCause;
@@ -223,18 +224,20 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
         {
             debug!("rerunning goal to check result is stable");
             let (_orig_values, canonical_goal) = self.canonicalize_goal(goal);
-            let canonical_response =
+            let new_canonical_response =
                 EvalCtxt::evaluate_canonical_goal(self.tcx(), self.search_graph, canonical_goal)?;
-            if !canonical_response.value.var_values.is_identity() {
+            if !new_canonical_response.value.var_values.is_identity() {
                 bug!(
                     "unstable result: re-canonicalized goal={canonical_goal:#?} \
-                     response={canonical_response:#?}"
+                    first_response={canonical_response:#?} \
+                    second_response={new_canonical_response:#?}"
                 );
             }
-            if certainty != canonical_response.value.certainty {
+            if certainty != new_canonical_response.value.certainty {
                 bug!(
                     "unstable certainty: {certainty:#?} re-canonicalized goal={canonical_goal:#?} \
-                     response={canonical_response:#?}"
+                     first_response={canonical_response:#?} \
+                     second_response={new_canonical_response:#?}"
                 );
             }
         }
@@ -434,6 +437,10 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
         })
     }
 
+    pub(super) fn next_region_infer(&self) -> ty::Region<'tcx> {
+        self.infcx.next_region_var(RegionVariableOrigin::MiscVariable(DUMMY_SP))
+    }
+
     pub(super) fn next_const_infer(&self, ty: Ty<'tcx>) -> ty::Const<'tcx> {
         self.infcx.next_const_var(
             ty,
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
index 226d29687e3..67ad7fb4bd2 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
@@ -16,7 +16,7 @@ use rustc_infer::infer::canonical::query_response::make_query_region_constraints
 use rustc_infer::infer::canonical::CanonicalVarValues;
 use rustc_infer::infer::canonical::{CanonicalExt, QueryRegionConstraints};
 use rustc_middle::traits::query::NoSolution;
-use rustc_middle::traits::solve::{ExternalConstraints, ExternalConstraintsData};
+use rustc_middle::traits::solve::{ExternalConstraints, ExternalConstraintsData, MaybeCause};
 use rustc_middle::ty::{self, BoundVar, GenericArgKind};
 use rustc_span::DUMMY_SP;
 use std::iter;
@@ -60,9 +60,27 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
 
         let certainty = certainty.unify_with(goals_certainty);
 
-        let external_constraints = self.compute_external_query_constraints()?;
+        let response = match certainty {
+            Certainty::Yes | Certainty::Maybe(MaybeCause::Ambiguity) => {
+                let external_constraints = self.compute_external_query_constraints()?;
+                Response { var_values: self.var_values, external_constraints, certainty }
+            }
+            Certainty::Maybe(MaybeCause::Overflow) => {
+                // If we have overflow, it's probable that we're substituting a type
+                // into itself infinitely and any partial substitutions in the query
+                // response are probably not useful anyway, so just return an empty
+                // query response.
+                //
+                // This may prevent us from potentially useful inference, e.g.
+                // 2 candidates, one ambiguous and one overflow, which both
+                // have the same inference constraints.
+                //
+                // Changing this to retain some constraints in the future
+                // won't be a breaking change, so this is good enough for now.
+                return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow));
+            }
+        };
 
-        let response = Response { var_values: self.var_values, external_constraints, certainty };
         let canonical = Canonicalizer::canonicalize(
             self.infcx,
             CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
@@ -72,6 +90,40 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
         Ok(canonical)
     }
 
+    /// Constructs a totally unconstrained, ambiguous response to a goal.
+    ///
+    /// Take care when using this, since often it's useful to respond with
+    /// ambiguity but return constrained variables to guide inference.
+    pub(in crate::solve) fn make_ambiguous_response_no_constraints(
+        &self,
+        maybe_cause: MaybeCause,
+    ) -> CanonicalResponse<'tcx> {
+        let unconstrained_response = Response {
+            var_values: CanonicalVarValues {
+                var_values: self.tcx().mk_substs_from_iter(self.var_values.var_values.iter().map(
+                    |arg| -> ty::GenericArg<'tcx> {
+                        match arg.unpack() {
+                            GenericArgKind::Lifetime(_) => self.next_region_infer().into(),
+                            GenericArgKind::Type(_) => self.next_ty_infer().into(),
+                            GenericArgKind::Const(ct) => self.next_const_infer(ct.ty()).into(),
+                        }
+                    },
+                )),
+            },
+            external_constraints: self
+                .tcx()
+                .mk_external_constraints(ExternalConstraintsData::default()),
+            certainty: Certainty::Maybe(maybe_cause),
+        };
+
+        Canonicalizer::canonicalize(
+            self.infcx,
+            CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
+            &mut Default::default(),
+            unconstrained_response,
+        )
+    }
+
     #[instrument(level = "debug", skip(self), ret)]
     fn compute_external_query_constraints(&self) -> Result<ExternalConstraints<'tcx>, NoSolution> {
         // Cannot use `take_registered_region_obligations` as we may compute the response
diff --git a/compiler/rustc_trait_selection/src/solve/mod.rs b/compiler/rustc_trait_selection/src/solve/mod.rs
index 19bcbd46144..d94679fef28 100644
--- a/compiler/rustc_trait_selection/src/solve/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/mod.rs
@@ -340,17 +340,17 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
         if responses.is_empty() {
             return Err(NoSolution);
         }
-        let certainty = responses.iter().fold(Certainty::AMBIGUOUS, |certainty, response| {
-            certainty.unify_with(response.value.certainty)
-        });
-
-        let response = self.evaluate_added_goals_and_make_canonical_response(certainty);
-        if let Ok(response) = response {
-            assert!(response.has_no_inference_or_external_constraints());
-            Ok(response)
-        } else {
-            bug!("failed to make floundered response: {responses:?}");
-        }
+
+        let Certainty::Maybe(maybe_cause) = responses.iter().fold(
+            Certainty::AMBIGUOUS,
+            |certainty, response| {
+                certainty.unify_with(response.value.certainty)
+            },
+        ) else {
+            bug!("expected flounder response to be ambiguous")
+        };
+
+        Ok(self.make_ambiguous_response_no_constraints(maybe_cause))
     }
 }
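
For the flounder change in solve/mod.rs above, the key invariant is that folding the candidates' certainties starting from `Certainty::AMBIGUOUS` can only produce a `Certainty::Maybe(..)`, which is what the `let ... else` relies on before calling `make_ambiguous_response_no_constraints`. A self-contained model of that fold (the enums and `unify_with` here are simplified stand-ins that only approximate the solver's real semantics):

#[derive(Clone, Copy, Debug, PartialEq)]
enum MaybeCause {
    Ambiguity,
    Overflow,
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Certainty {
    Yes,
    Maybe(MaybeCause),
}

impl Certainty {
    const AMBIGUOUS: Certainty = Certainty::Maybe(MaybeCause::Ambiguity);

    // Assumed merge rules: overflow is sticky, and anything short of `Yes`
    // on either side keeps the result at `Maybe`.
    fn unify_with(self, other: Certainty) -> Certainty {
        match (self, other) {
            (Certainty::Yes, Certainty::Yes) => Certainty::Yes,
            (Certainty::Maybe(MaybeCause::Overflow), _)
            | (_, Certainty::Maybe(MaybeCause::Overflow)) => {
                Certainty::Maybe(MaybeCause::Overflow)
            }
            _ => Certainty::Maybe(MaybeCause::Ambiguity),
        }
    }
}

fn main() {
    let responses = [Certainty::Yes, Certainty::Maybe(MaybeCause::Overflow)];
    // Starting the fold at AMBIGUOUS means the result is always Maybe(..),
    // never Yes, no matter what the individual responses say.
    let merged = responses
        .iter()
        .fold(Certainty::AMBIGUOUS, |certainty, &response| certainty.unify_with(response));
    assert_eq!(merged, Certainty::Maybe(MaybeCause::Overflow));
    println!("{merged:?}");
}
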