Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs        |  2
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs          | 66
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs         | 17
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs               |  2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs           |  8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs               |  2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs                |  2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs                 |  2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs           |  2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs               |  5
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs             |  9
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs   | 25
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs |  2
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs       |  2
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs             | 82
-rw-r--r--  compiler/rustc_const_eval/src/util/call_kind.rs                 |  2
16 files changed, 140 insertions(+), 90 deletions(-)
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index df809e82701..946546263ea 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -38,7 +38,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
             || matches!(
                 ecx.tcx.def_kind(cid.instance.def_id()),
                 DefKind::Const
-                    | DefKind::Static
+                    | DefKind::Static(_)
                     | DefKind::ConstParam
                     | DefKind::AnonConst
                     | DefKind::InlineConst
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index 05fbbf45d7c..19a543ae777 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -1,7 +1,8 @@
 use rustc_hir as hir;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{DefIdTree, TyCtxt};
 use rustc_span::symbol::Symbol;
 use rustc_target::spec::abi::Abi;
 
@@ -16,44 +17,47 @@ pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
 }
 
 pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-    let parent_id = tcx.hir().get_parent_node(hir_id);
-    matches!(
-        tcx.hir().get(parent_id),
-        hir::Node::Item(hir::Item {
-            kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
-            ..
-        })
-    )
+    let parent_id = tcx.local_parent(def_id).unwrap();
+    tcx.def_kind(parent_id) == DefKind::Impl
+        && tcx.impl_constness(parent_id) == hir::Constness::Const
 }
 
 /// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether
 /// said intrinsic has a `rustc_const_{un,}stable` attribute.
-fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+fn impl_constness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Constness {
     let def_id = def_id.expect_local();
     let node = tcx.hir().get_by_def_id(def_id);
 
-    if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) =
-        node
-    {
-        // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
-        // foreign items cannot be evaluated at compile-time.
-        let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-        if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) {
-            tcx.lookup_const_stability(def_id).is_some()
-        } else {
-            false
-        }
-    } else if let Some(fn_kind) = node.fn_kind() {
-        if fn_kind.constness() == hir::Constness::Const {
-            return true;
+    match node {
+        hir::Node::Ctor(_) => hir::Constness::Const,
+        hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+        hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
+            // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+            // foreign items cannot be evaluated at compile-time.
+            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+            let is_const = if let Abi::RustIntrinsic | Abi::PlatformIntrinsic =
+                tcx.hir().get_foreign_abi(hir_id)
+            {
+                tcx.lookup_const_stability(def_id).is_some()
+            } else {
+                false
+            };
+            if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
         }
+        _ => {
+            if let Some(fn_kind) = node.fn_kind() {
+                if fn_kind.constness() == hir::Constness::Const {
+                    return hir::Constness::Const;
+                }
 
-        // If the function itself is not annotated with `const`, it may still be a `const fn`
-        // if it resides in a const trait impl.
-        is_parent_const_impl_raw(tcx, def_id)
-    } else {
-        matches!(node, hir::Node::Ctor(_))
+                // If the function itself is not annotated with `const`, it may still be a `const fn`
+                // if it resides in a const trait impl.
+                let is_const = is_parent_const_impl_raw(tcx, def_id);
+                if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+            } else {
+                hir::Constness::NotConst
+            }
+        }
     }
 }
 
@@ -77,5 +81,5 @@ fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
 }
 
 pub fn provide(providers: &mut Providers) {
-    *providers = Providers { is_const_fn_raw, is_promotable_const_fn, ..*providers };
+    *providers = Providers { impl_constness, is_promotable_const_fn, ..*providers };
 }
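
For illustration, a minimal sketch of how a caller is expected to consume the new query: instead of a dedicated boolean query, callers compare the returned `hir::Constness`, exactly as `is_parent_const_impl_raw` does above. The wrapper name below is hypothetical and assumes the usual `rustc_middle`/`rustc_hir` imports are in scope:

    // Hypothetical helper, sketched against the query this file now provides:
    // "is this definition a const fn?" becomes a comparison on Constness.
    fn is_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
        tcx.impl_constness(def_id) == hir::Constness::Const
    }
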
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index d78c7a9fad9..1b8186b5aad 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -23,8 +23,8 @@ use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayou
 
 use super::{
     AllocCheck, AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine,
-    MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance,
-    Scalar, ScalarMaybeUninit, StackPopJump,
+    MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, Pointer,
+    PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit, StackPopJump,
 };
 use crate::transform::validate::equal_up_to_regions;
 
@@ -444,6 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match scalar.try_to_int() {
             Ok(int) => int.is_null(),
             Err(_) => {
+                // Can only happen during CTFE.
                 let ptr = self.scalar_to_ptr(scalar);
                 match self.memory.ptr_try_get_alloc(ptr) {
                     Ok((alloc_id, offset, _)) => {
@@ -455,7 +456,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                         offset > size
                     }
-                    Err(offset) => offset == 0,
+                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                 }
             }
         }
@@ -677,7 +678,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let size = size.align_to(align);
 
                 // Check if this brought us over the size limit.
-                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
+                if size > self.max_size_of_val() {
                     throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                 }
                 Ok(Some((size, align)))
@@ -693,9 +694,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let elem = layout.field(self, 0);
 
                 // Make sure the slice is not too big.
-                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
-                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
-                })?;
+                let size = elem.size.bytes().saturating_mul(len); // we rely on `max_size_of_val` being smaller than `u64::MAX`.
+                let size = Size::from_bytes(size);
+                if size > self.max_size_of_val() {
+                    throw_ub!(InvalidMeta("slice is bigger than largest supported object"));
+                }
                 Ok(Some((size, elem.align.abi)))
             }
 
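
For illustration, a standalone sketch (plain `u64`s, not the interpreter's `Size` type) of why `saturating_mul` plus an explicit bound check is enough here, assuming the maximum object size is strictly below `u64::MAX`, as the inline comment relies on:

    // `max` stands in for max_size_of_val(); any overflow saturates to
    // u64::MAX, which necessarily exceeds `max`, so it is still rejected.
    fn slice_size(elem_size: u64, len: u64, max: u64) -> Result<u64, &'static str> {
        let size = elem_size.saturating_mul(len);
        if size > max {
            return Err("slice is bigger than largest supported object");
        }
        Ok(size)
    }

    fn main() {
        assert_eq!(slice_size(8, 4, 1 << 47), Ok(32));
        assert!(slice_size(8, u64::MAX / 2, 1 << 47).is_err()); // saturates, rejected
    }
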
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 43ab74f4b88..f1acb9e41c4 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -73,7 +73,7 @@ struct IsStaticOrFn;
 
 /// Intern an allocation without looking at its children.
 /// `mode` is the mode of the environment where we found this pointer.
-/// `mutablity` is the mutability of the place to be interned; even if that says
+/// `mutability` is the mutability of the place to be interned; even if that says
 /// `immutable` things might become mutable if `ty` is not frozen.
 /// `ty` can be `None` if there is no potential interior mutability
 /// to account for (e.g. for vtables).
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 5eff7d693c5..c80d7d71787 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -531,7 +531,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
         // We cannot overflow i64 as a type's size must be <= isize::MAX.
         let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
-        // The computed offset, in bytes, cannot overflow an isize.
+        // The computed offset, in bytes, must not overflow an isize.
+        // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
+        // the difference to be noticeable.
         let offset_bytes =
             offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
         // The offset being in bounds cannot rely on "wrapping around" the address space.
@@ -563,6 +565,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let count = self.read_scalar(&count)?.to_machine_usize(self)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
+        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+        // but no actual allocation can be big enough for the difference to be noticeable.
         let size = size.checked_mul(count, self).ok_or_else(|| {
             err_ub_format!(
                 "overflow computing total size of `{}`",
@@ -588,6 +592,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let byte = self.read_scalar(&byte)?.to_u8()?;
         let count = self.read_scalar(&count)?.to_machine_usize(self)?;
 
+        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+        // but no actual allocation can be big enough for the difference to be noticeable.
         let len = layout
             .size
             .checked_mul(count, self)
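
For illustration, a standalone sketch (plain integers, not the interpreter's `Size` type) of the guard pattern these comments describe: multiply, then reject on overflow or on exceeding a conservative bound, accepting that the bound may be slightly smaller than strictly necessary:

    // `bound` stands in for whatever limit the checked multiplication enforces;
    // as the comments above note, real allocations never reach the gap between
    // this conservative bound and the theoretical maximum.
    fn total_size(elem_size: u64, count: u64, bound: u64) -> Result<u64, &'static str> {
        match elem_size.checked_mul(count) {
            Some(size) if size <= bound => Ok(size),
            _ => Err("overflow computing total size"),
        }
    }

    fn main() {
        assert_eq!(total_size(4, 10, 1 << 47), Ok(40));
        assert!(total_size(u64::MAX, 2, 1 << 47).is_err());
    }
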
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 871b7578abd..4a235809119 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -1011,7 +1011,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let src_parts = self.get_ptr_access(src, size, src_align)?;
         let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
 
-        // FIXME: we look up both allocations twice here, once ebfore for the `check_ptr_access`
+        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
         // and once below to get the underlying `&[mut] Allocation`.
 
         // Source alloc preparations and access hooks.
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index b1784b12c65..5b8d74b4307 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -876,7 +876,7 @@ where
         if src.layout.size != dest.layout.size {
             // FIXME: This should be an assert instead of an error, but if we transmute within an
             // array length computation, `typeck` may not have yet been run and errored out. In fact
-            // most likey we *are* running `typeck` right now. Investigate whether we can bail out
+            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
             // on `typeck_results().has_errors` at all const eval entry points.
             debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
             self.tcx.sess.delay_span_bug(
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 0701e0ded97..4272bfd5d6c 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -39,7 +39,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ///
     /// This is used by [priroda](https://github.com/oli-obk/priroda)
     ///
-    /// This is marked `#inline(always)` to work around adverserial codegen when `opt-level = 3`
+    /// This is marked `#inline(always)` to work around adversarial codegen when `opt-level = 3`
     #[inline(always)]
     pub fn step(&mut self) -> InterpResult<'tcx, bool> {
         if self.stack().is_empty() {
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 57a93ed4d55..d33358499e2 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -329,7 +329,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
                 // Compute callee information using the `instance` returned by
                 // `find_mir_or_eval_fn`.
-                // FIXME: for variadic support, do we have to somehow determine calle's extra_args?
+                // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
                 let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
 
                 if callee_fn_abi.c_variadic != caller_fn_abi.c_variadic {
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index 131674decc9..fc60a40e2ad 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -110,16 +110,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
             .check_init()?;
         let size = size.to_machine_usize(self)?;
+        let size = Size::from_bytes(size);
         let align = vtable
             .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
             .check_init()?;
         let align = align.to_machine_usize(self)?;
         let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;
 
-        if size >= self.tcx.data_layout.obj_size_bound() {
+        if size > self.max_size_of_val() {
             throw_ub!(InvalidVtableSize);
         }
-        Ok((Size::from_bytes(size), align))
+        Ok((size, align))
     }
 
     pub fn read_new_vtable_after_trait_upcasting_from_vtable(
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8bdafa87623..9da7f5e30cb 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -21,7 +21,7 @@ use std::hash::Hash;
 
 use super::{
     alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
-    MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
+    MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
@@ -521,8 +521,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // NOTE: Keep this in sync with the array optimization for int/float
                 // types below!
                 if M::enforce_number_validity(self.ecx) {
-                    // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
-                    let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
+                    // Integers/floats with number validity: Must be scalar bits, pointers are dangerous.
+                    // As a special exception we *do* match on a `Scalar` here, since we truly want
+                    // to know its underlying representation (and *not* cast it to an integer).
+                    let is_bits =
+                        value.check_init().map_or(false, |v| matches!(v, Scalar::Int(..)));
                     if !is_bits {
                         throw_validation_failure!(self.path,
                             { "{:x}", value } expected { "initialized plain (non-pointer) bytes" }
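
For illustration, a toy model (a made-up two-variant scalar, not the real `Scalar`) of why the check above names the representation it wants instead of going through a conversion: a conversion-style accessor could in principle hand back something for a pointer, whereas matching on the variant cannot. The real interpreter's conversion does not behave like this toy's; the toy only exaggerates the distinction.

    enum ToyScalar {
        Int(u128),
        Ptr { addr: u64 },
    }

    impl ToyScalar {
        // A conversion-style accessor that (in this toy) also hands out
        // pointer addresses, letting a pointer slip past a "plain bytes" check.
        fn try_to_int(&self) -> Option<u128> {
            match self {
                ToyScalar::Int(bits) => Some(*bits),
                ToyScalar::Ptr { addr } => Some(u128::from(*addr)),
            }
        }
    }

    fn main() {
        let v = ToyScalar::Ptr { addr: 0x1000 };
        assert!(v.try_to_int().is_some()); // a conversion may "succeed"
        assert!(!matches!(v, ToyScalar::Int(..))); // the representation check does not
    }
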
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index eb01e261c1a..e203c79030d 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::cast::CastTy;
 use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
 use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
-use rustc_middle::ty::{Binder, TraitPredicate, TraitRef};
+use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeFoldable};
 use rustc_mir_dataflow::{self, Analysis};
 use rustc_span::{sym, Span, Symbol};
 use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
@@ -47,7 +47,10 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
         location: Location,
     ) -> bool {
         let ty = ccx.body.local_decls[local].ty;
-        if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !NeedsDrop::in_any_value_of_ty(ccx, ty) {
             return false;
         }
 
@@ -101,7 +104,10 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
         location: Location,
     ) -> bool {
         let ty = ccx.body.local_decls[local].ty;
-        if !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !HasMutInterior::in_any_value_of_ty(ccx, ty) {
             return false;
         }
 
@@ -148,7 +154,12 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
 
             // If we know that all values of the return type are structurally matchable, there's no
             // need to run dataflow.
-            _ if !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) => false,
+            // Opaque types do not participate in const generics or pattern matching, so we can safely count them out.
+            _ if ccx.body.return_ty().has_opaque_types()
+                || !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) =>
+            {
+                false
+            }
 
             hir::ConstContext::Const | hir::ConstContext::Static(_) => {
                 let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
@@ -255,8 +266,8 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
         // "secondary" errors if they occurred.
         let secondary_errors = mem::take(&mut self.secondary_errors);
         if self.error_emitted.is_none() {
-            for error in secondary_errors {
-                self.tcx.sess.diagnostic().emit_diagnostic(&error);
+            for mut error in secondary_errors {
+                self.tcx.sess.diagnostic().emit_diagnostic(&mut error);
             }
         } else {
             assert!(self.tcx.sess.has_errors().is_some());
@@ -942,7 +953,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 if callee_is_unstable_unmarked {
                     trace!("callee_is_unstable_unmarked");
                     // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
-                    // `extern` funtions, and these have no way to get marked `const`. So instead we
+                    // `extern` functions, and these have no way to get marked `const`. So instead we
                     // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
                     if self.ccx.is_const_stable_const_fn() || is_intrinsic {
                         self.check_op(ops::FnCallUnstable(callee, None));
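
For illustration, a hypothetical shape of code that motivates the opaque-type guards added above (illustrative only, not taken from the commit or its tests; the exact feature gates depend on the toolchain of this era). The hidden type behind the opaque type is inferred from the very body being const-checked, so a type-based qualif question about it could cycle back into this check:

    #![feature(type_alias_impl_trait)]

    type Answer = impl Sized;

    // The hidden type of `Answer` comes from this body; computing "does any
    // value of the return type need drop / have interior mutability?" while
    // this body is still being checked is the cycle the comments above avoid.
    const fn answer() -> Answer {
        42u8
    }

    fn main() {
        let _ = answer();
    }
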
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 1e02129855e..9fd94dc334f 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -56,7 +56,7 @@ pub trait Qualif {
     /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
     ///
     /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
-    /// propagation is context-insenstive, this includes function arguments and values returned
+    /// propagation is context-insensitive, this includes function arguments and values returned
     /// from a call to another function.
     ///
     /// It also determines the `Qualif`s for primitive types.
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index 30764f689c9..faea2111d92 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -42,7 +42,7 @@ pub struct PromoteTemps<'tcx> {
 
 impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
     fn phase_change(&self) -> Option<MirPhase> {
-        Some(MirPhase::ConstPromotion)
+        Some(MirPhase::ConstsPromoted)
     }
 
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index cf15fc4ddc3..263959f3cb3 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -79,7 +79,6 @@ pub fn equal_up_to_regions<'tcx>(
     }
 
     // Normalize lifetimes away on both sides, then compare.
-    let param_env = param_env.with_reveal_all_normalized(tcx);
     let normalize = |ty: Ty<'tcx>| {
         tcx.normalize_erasing_regions(
             param_env,
@@ -170,9 +169,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
             // Equal types, all is good.
             return true;
         }
+        // Normalization reveals opaque types, but we may be validating MIR while computing
+        // said opaque types, causing cycles.
+        if (src, dest).has_opaque_types() {
+            return true;
+        }
         // Normalize projections and things like that.
-        // FIXME: We need to reveal_all, as some optimizations change types in ways
-        // that require unfolding opaque types.
         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
         let src = self.tcx.normalize_erasing_regions(param_env, src);
         let dest = self.tcx.normalize_erasing_regions(param_env, dest);
@@ -266,22 +268,15 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             );
                         }
                     }
-                    // The deaggregator currently does not deaggreagate arrays.
-                    // So for now, we ignore them here.
-                    Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
-                    // All other aggregates must be gone after some phases.
-                    Rvalue::Aggregate(box kind, _) => {
-                        if self.mir_phase > MirPhase::DropLowering
-                            && !matches!(kind, AggregateKind::Generator(..))
-                        {
-                            // Generators persist until the state machine transformation, but all
-                            // other aggregates must have been lowered.
-                            self.fail(
-                                location,
-                                format!("{:?} have been lowered to field assignments", rvalue),
-                            )
-                        } else if self.mir_phase > MirPhase::GeneratorLowering {
-                            // No more aggregates after drop and generator lowering.
+                    Rvalue::Aggregate(agg_kind, _) => {
+                        let disallowed = match **agg_kind {
+                            AggregateKind::Array(..) => false,
+                            AggregateKind::Generator(..) => {
+                                self.mir_phase >= MirPhase::GeneratorsLowered
+                            }
+                            _ => self.mir_phase >= MirPhase::Deaggregated,
+                        };
+                        if disallowed {
                             self.fail(
                                 location,
                                 format!("{:?} have been lowered to field assignments", rvalue),
@@ -289,7 +284,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                         }
                     }
                     Rvalue::Ref(_, BorrowKind::Shallow, _) => {
-                        if self.mir_phase > MirPhase::DropLowering {
+                        if self.mir_phase >= MirPhase::DropsLowered {
                             self.fail(
                                 location,
                                 "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
@@ -300,7 +295,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             StatementKind::AscribeUserType(..) => {
-                if self.mir_phase > MirPhase::DropLowering {
+                if self.mir_phase >= MirPhase::DropsLowered {
                     self.fail(
                         location,
                         "`AscribeUserType` should have been removed after drop lowering phase",
@@ -308,7 +303,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             StatementKind::FakeRead(..) => {
-                if self.mir_phase > MirPhase::DropLowering {
+                if self.mir_phase >= MirPhase::DropsLowered {
                     self.fail(
                         location,
                         "`FakeRead` should have been removed after drop lowering phase",
@@ -351,10 +346,18 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
                 }
             }
-            StatementKind::SetDiscriminant { .. }
-            | StatementKind::StorageLive(..)
+            StatementKind::SetDiscriminant { .. } => {
+                if self.mir_phase < MirPhase::DropsLowered {
+                    self.fail(location, "`SetDiscriminant` is not allowed until drop elaboration");
+                }
+            }
+            StatementKind::Retag(_, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+            }
+            StatementKind::StorageLive(..)
             | StatementKind::StorageDead(..)
-            | StatementKind::Retag(_, _)
             | StatementKind::Coverage(_)
             | StatementKind::Nop => {}
         }
@@ -424,10 +427,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             TerminatorKind::DropAndReplace { target, unwind, .. } => {
-                if self.mir_phase > MirPhase::DropLowering {
+                if self.mir_phase >= MirPhase::DropsLowered {
                     self.fail(
                         location,
-                        "`DropAndReplace` is not permitted to exist after drop elaboration",
+                        "`DropAndReplace` should have been removed during drop elaboration",
                     );
                 }
                 self.check_edge(location, *target, EdgeKind::Normal);
@@ -494,7 +497,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             TerminatorKind::Yield { resume, drop, .. } => {
-                if self.mir_phase > MirPhase::GeneratorLowering {
+                if self.mir_phase >= MirPhase::GeneratorsLowered {
                     self.fail(location, "`Yield` should have been replaced by generator lowering");
                 }
                 self.check_edge(location, *resume, EdgeKind::Normal);
@@ -503,10 +506,22 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`FalseEdge` should have been removed after drop elaboration",
+                    );
+                }
                 self.check_edge(location, *real_target, EdgeKind::Normal);
                 self.check_edge(location, *imaginary_target, EdgeKind::Normal);
             }
             TerminatorKind::FalseUnwind { real_target, unwind } => {
+                if self.mir_phase >= MirPhase::DropsLowered {
+                    self.fail(
+                        location,
+                        "`FalseUnwind` should have been removed after drop elaboration",
+                    );
+                }
                 self.check_edge(location, *real_target, EdgeKind::Normal);
                 if let Some(unwind) = unwind {
                     self.check_edge(location, *unwind, EdgeKind::Unwind);
@@ -520,12 +535,19 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.check_edge(location, *cleanup, EdgeKind::Unwind);
                 }
             }
+            TerminatorKind::GeneratorDrop => {
+                if self.mir_phase >= MirPhase::GeneratorsLowered {
+                    self.fail(
+                        location,
+                        "`GeneratorDrop` should have been replaced by generator lowering",
+                    );
+                }
+            }
             // Nothing to validate for these.
             TerminatorKind::Resume
             | TerminatorKind::Abort
             | TerminatorKind::Return
-            | TerminatorKind::Unreachable
-            | TerminatorKind::GeneratorDrop => {}
+            | TerminatorKind::Unreachable => {}
         }
 
         self.super_terminator(terminator, location);
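
For illustration, a toy model of the phase ordering these checks now lean on: with the renamed phases, `>=` reads as "at or after this phase ran", and the comparison works because the ordering is derived from declaration order, as in this reduced stand-in (not the real `MirPhase`):

    // Reduced stand-in: the derived order follows declaration order, so
    // `phase >= Phase::DropsLowered` means drop elaboration has already run.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
    enum Phase {
        Built,
        ConstsPromoted,
        DropsLowered,
        Deaggregated,
        GeneratorsLowered,
        Optimized,
    }

    fn fake_read_allowed(phase: Phase) -> bool {
        // Mirrors the `FakeRead` rule above: only allowed before drop lowering.
        phase < Phase::DropsLowered
    }

    fn main() {
        assert!(fake_read_allowed(Phase::ConstsPromoted));
        assert!(!fake_read_allowed(Phase::GeneratorsLowered));
    }
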
diff --git a/compiler/rustc_const_eval/src/util/call_kind.rs b/compiler/rustc_const_eval/src/util/call_kind.rs
index 2165989b398..60b45856f51 100644
--- a/compiler/rustc_const_eval/src/util/call_kind.rs
+++ b/compiler/rustc_const_eval/src/util/call_kind.rs
@@ -45,7 +45,7 @@ pub enum CallKind<'tcx> {
     },
     /// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
     FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
-    /// A call to an operator trait, desuraged from operator syntax (e.g. `a << b`)
+    /// A call to an operator trait, desugared from operator syntax (e.g. `a << b`)
     Operator { self_arg: Option<Ident>, trait_id: DefId, self_ty: Ty<'tcx> },
     DerefCoercion {
         /// The `Span` of the `Target` associated type