Diffstat (limited to 'compiler/rustc_const_eval/src')
 compiler/rustc_const_eval/src/check_consts/check.rs | 282
 compiler/rustc_const_eval/src/check_consts/mod.rs | 30
 compiler/rustc_const_eval/src/check_consts/ops.rs | 132
 compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs | 67
 compiler/rustc_const_eval/src/check_consts/qualifs.rs | 151
 compiler/rustc_const_eval/src/check_consts/resolver.rs | 6
 compiler/rustc_const_eval/src/const_eval/dummy_machine.rs | 6
 compiler/rustc_const_eval/src/const_eval/error.rs | 20
 compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 78
 compiler/rustc_const_eval/src/const_eval/fn_queries.rs | 35
 compiler/rustc_const_eval/src/const_eval/machine.rs | 28
 compiler/rustc_const_eval/src/const_eval/mod.rs | 12
 compiler/rustc_const_eval/src/const_eval/valtrees.rs | 34
 compiler/rustc_const_eval/src/errors.rs | 12
 compiler/rustc_const_eval/src/interpret/call.rs | 6
 compiler/rustc_const_eval/src/interpret/cast.rs | 6
 compiler/rustc_const_eval/src/interpret/discriminant.rs | 68
 compiler/rustc_const_eval/src/interpret/eval_context.rs | 89
 compiler/rustc_const_eval/src/interpret/intern.rs | 2
 compiler/rustc_const_eval/src/interpret/intrinsics.rs | 16
 compiler/rustc_const_eval/src/interpret/machine.rs | 16
 compiler/rustc_const_eval/src/interpret/memory.rs | 178
 compiler/rustc_const_eval/src/interpret/mod.rs | 2
 compiler/rustc_const_eval/src/interpret/operand.rs | 54
 compiler/rustc_const_eval/src/interpret/operator.rs | 8
 compiler/rustc_const_eval/src/interpret/place.rs | 59
 compiler/rustc_const_eval/src/interpret/projection.rs | 2
 compiler/rustc_const_eval/src/interpret/stack.rs | 21
 compiler/rustc_const_eval/src/interpret/step.rs | 18
 compiler/rustc_const_eval/src/interpret/util.rs | 34
 compiler/rustc_const_eval/src/interpret/validity.rs | 105
 compiler/rustc_const_eval/src/interpret/visitor.rs | 4
 compiler/rustc_const_eval/src/lib.rs | 8
 compiler/rustc_const_eval/src/util/alignment.rs | 6
 compiler/rustc_const_eval/src/util/caller_location.rs | 13
 compiler/rustc_const_eval/src/util/check_validity_requirement.rs | 11
 compiler/rustc_const_eval/src/util/compare_types.rs | 13
 37 files changed, 854 insertions(+), 778 deletions(-)
diff --git a/compiler/rustc_const_eval/src/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs
index aea3d5bd3e7..f4257ad9671 100644
--- a/compiler/rustc_const_eval/src/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/check_consts/check.rs
@@ -3,9 +3,10 @@
 use std::assert_matches::assert_matches;
 use std::borrow::Cow;
 use std::mem;
+use std::num::NonZero;
 use std::ops::Deref;
 
-use rustc_attr::{ConstStability, StabilityLevel};
+use rustc_attr_parsing::{ConstStability, StabilityLevel};
 use rustc_errors::{Diag, ErrorGuaranteed};
 use rustc_hir::def_id::DefId;
 use rustc_hir::{self as hir, LangItem};
@@ -15,15 +16,14 @@ use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::*;
 use rustc_middle::span_bug;
 use rustc_middle::ty::adjustment::PointerCoercion;
-use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TypeVisitableExt};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_mir_dataflow::Analysis;
-use rustc_mir_dataflow::impls::MaybeStorageLive;
-use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::impls::{MaybeStorageLive, always_storage_live_locals};
 use rustc_span::{Span, Symbol, sym};
 use rustc_trait_selection::traits::{
     Obligation, ObligationCause, ObligationCauseCode, ObligationCtxt,
 };
-use tracing::{debug, instrument, trace};
+use tracing::{instrument, trace};
 
 use super::ops::{self, NonConstOp, Status};
 use super::qualifs::{self, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
@@ -46,7 +46,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
     /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
     ///
     /// Only updates the cursor if absolutely necessary
-    fn needs_drop(
+    pub(crate) fn needs_drop(
         &mut self,
         ccx: &'mir ConstCx<'mir, 'tcx>,
         local: Local,
@@ -271,9 +271,18 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
     /// context.
     pub fn check_op_spanned<O: NonConstOp<'tcx>>(&mut self, op: O, span: Span) {
         let gate = match op.status_in_item(self.ccx) {
-            Status::Unstable { gate, safe_to_expose_on_stable, is_function_call }
-                if self.tcx.features().enabled(gate) =>
-            {
+            Status::Unstable {
+                gate,
+                safe_to_expose_on_stable,
+                is_function_call,
+                gate_already_checked,
+            } if gate_already_checked || self.tcx.features().enabled(gate) => {
+                if gate_already_checked {
+                    assert!(
+                        !safe_to_expose_on_stable,
+                        "setting `gate_already_checked` without `safe_to_expose_on_stable` makes no sense"
+                    );
+                }
                 // Generally this is allowed since the feature gate is enabled -- except
                 // if this function wants to be safe-to-expose-on-stable.
                 if !safe_to_expose_on_stable
@@ -361,34 +370,24 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
         !is_transient
     }
 
+    /// Returns whether there are const-conditions.
     fn revalidate_conditional_constness(
         &mut self,
         callee: DefId,
         callee_args: ty::GenericArgsRef<'tcx>,
-        call_source: CallSource,
         call_span: Span,
-    ) {
+    ) -> bool {
         let tcx = self.tcx;
         if !tcx.is_conditionally_const(callee) {
-            return;
+            return false;
         }
 
         let const_conditions = tcx.const_conditions(callee).instantiate(tcx, callee_args);
-        // If there are any const conditions on this fn and `const_trait_impl`
-        // is not enabled, simply bail. We shouldn't be able to call conditionally
-        // const functions on stable.
-        if !const_conditions.is_empty() && !tcx.features().const_trait_impl() {
-            self.check_op(ops::FnCallNonConst {
-                callee,
-                args: callee_args,
-                span: call_span,
-                call_source,
-                feature: Some(sym::const_trait_impl),
-            });
-            return;
+        if const_conditions.is_empty() {
+            return false;
         }
 
-        let infcx = tcx.infer_ctxt().build(self.body.typing_mode(tcx));
+        let (infcx, param_env) = tcx.infer_ctxt().build_with_typing_env(self.body.typing_env(tcx));
         let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
 
         let body_id = self.body.source.def_id().expect_local();
@@ -398,11 +397,8 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
                 ty::BoundConstness::Const
             }
         };
-        let const_conditions = ocx.normalize(
-            &ObligationCause::misc(call_span, body_id),
-            self.param_env,
-            const_conditions,
-        );
+        let const_conditions =
+            ocx.normalize(&ObligationCause::misc(call_span, body_id), param_env, const_conditions);
         ocx.register_obligations(const_conditions.into_iter().map(|(trait_ref, span)| {
             Obligation::new(
                 tcx,
@@ -411,7 +407,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
                     body_id,
                     ObligationCauseCode::WhereClause(callee, span),
                 ),
-                self.param_env,
+                param_env,
                 trait_ref.to_host_effect_clause(tcx, host_polarity),
             )
         }));
@@ -421,6 +417,45 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
             tcx.dcx()
                 .span_delayed_bug(call_span, "this should have reported a ~const error in HIR");
         }
+
+        true
+    }
+
+    pub fn check_drop_terminator(
+        &mut self,
+        dropped_place: Place<'tcx>,
+        location: Location,
+        terminator_span: Span,
+    ) {
+        let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+
+        let needs_drop = if let Some(local) = dropped_place.as_local() {
+            self.qualifs.needs_drop(self.ccx, local, location)
+        } else {
+            qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place)
+        };
+        // If this type doesn't need a drop at all, then there's nothing to enforce.
+        if !needs_drop {
+            return;
+        }
+
+        let mut err_span = self.span;
+        let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
+            // Use the span where the local was declared as the span of the drop error.
+            err_span = self.body.local_decls[local].source_info.span;
+            self.qualifs.needs_non_const_drop(self.ccx, local, location)
+        } else {
+            qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place)
+        };
+
+        self.check_op_spanned(
+            ops::LiveDrop {
+                dropped_at: terminator_span,
+                dropped_ty: ty_of_dropped_place,
+                needs_non_const_drop,
+            },
+            err_span,
+        );
     }
 }
 
@@ -538,12 +573,27 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
             ) => {}
             Rvalue::ShallowInitBox(_, _) => {}
 
-            Rvalue::UnaryOp(_, operand) => {
+            Rvalue::UnaryOp(op, operand) => {
                 let ty = operand.ty(self.body, self.tcx);
-                if is_int_bool_float_or_char(ty) {
-                    // Int, bool, float, and char operations are fine.
-                } else {
-                    span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+                match op {
+                    UnOp::Not | UnOp::Neg => {
+                        if is_int_bool_float_or_char(ty) {
+                            // Int, bool, float, and char operations are fine.
+                        } else {
+                            span_bug!(
+                                self.span,
+                                "non-primitive type in `Rvalue::UnaryOp{op:?}`: {ty:?}",
+                            );
+                        }
+                    }
+                    UnOp::PtrMetadata => {
+                        if !ty.is_ref() && !ty.is_unsafe_ptr() {
+                            span_bug!(
+                                self.span,
+                                "non-pointer type in `Rvalue::UnaryOp({op:?})`: {ty:?}",
+                            );
+                        }
+                    }
                 }
             }
 
@@ -610,6 +660,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
             | StatementKind::Coverage(..)
             | StatementKind::Intrinsic(..)
             | StatementKind::ConstEvalCounter
+            | StatementKind::BackwardIncompatibleDropHint { .. }
             | StatementKind::Nop => {}
         }
     }
@@ -627,11 +678,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     _ => unreachable!(),
                 };
 
-                let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+                let ConstCx { tcx, body, .. } = *self.ccx;
 
                 let fn_ty = func.ty(body, tcx);
 
-                let (mut callee, mut fn_args) = match *fn_ty.kind() {
+                let (callee, fn_args) = match *fn_ty.kind() {
                     ty::FnDef(def_id, fn_args) => (def_id, fn_args),
 
                     ty::FnPtr(..) => {
@@ -645,57 +696,38 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     }
                 };
 
-                self.revalidate_conditional_constness(callee, fn_args, call_source, *fn_span);
+                let has_const_conditions =
+                    self.revalidate_conditional_constness(callee, fn_args, *fn_span);
 
-                let mut is_trait = false;
                 // Attempting to call a trait method?
                 if let Some(trait_did) = tcx.trait_of_item(callee) {
-                    trace!("attempting to call a trait method");
+                    // We can't determine the actual callee here, so we have to do different checks
+                    // than usual.
 
+                    trace!("attempting to call a trait method");
                     let trait_is_const = tcx.is_const_trait(trait_did);
-                    // trait method calls are only permitted when `effects` is enabled.
-                    // typeck ensures the conditions for calling a const trait method are met,
-                    // so we only error if the trait isn't const. We try to resolve the trait
-                    // into the concrete method, and uses that for const stability checks.
-                    // FIXME(const_trait_impl) we might consider moving const stability checks
-                    // to typeck as well.
-                    if tcx.features().const_trait_impl() && trait_is_const {
-                        // This skips the check below that ensures we only call `const fn`.
-                        is_trait = true;
-
-                        if let Ok(Some(instance)) =
-                            Instance::try_resolve(tcx, param_env, callee, fn_args)
-                            && let InstanceKind::Item(def) = instance.def
-                        {
-                            // Resolve a trait method call to its concrete implementation, which may be in a
-                            // `const` trait impl. This is only used for the const stability check below, since
-                            // we want to look at the concrete impl's stability.
-                            fn_args = instance.args;
-                            callee = def;
-                        }
+
+                    if trait_is_const {
+                        // Trait calls are always conditionally-const.
+                        self.check_op(ops::ConditionallyConstCall { callee, args: fn_args });
+                        // FIXME(const_trait_impl): do a more fine-grained check whether this
+                        // particular trait can be const-stably called.
                     } else {
-                        // if the trait is const but the user has not enabled the feature(s),
-                        // suggest them.
-                        let feature = if trait_is_const {
-                            Some(if tcx.features().const_trait_impl() {
-                                sym::effects
-                            } else {
-                                sym::const_trait_impl
-                            })
-                        } else {
-                            None
-                        };
+                        // Not even a const trait.
                         self.check_op(ops::FnCallNonConst {
                             callee,
                             args: fn_args,
                             span: *fn_span,
                             call_source,
-                            feature,
                         });
-                        // If we allowed this, we're in miri-unleashed mode, so we might
-                        // as well skip the remaining checks.
-                        return;
                     }
+                    // That's all we can check here.
+                    return;
+                }
+
+                // Even if we know the callee, ensure we can use conditionally-const calls.
+                if has_const_conditions {
+                    self.check_op(ops::ConditionallyConstCall { callee, args: fn_args });
                 }
 
                 // At this point, we are calling a function, `callee`, whose `DefId` is known...
@@ -736,6 +768,13 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
                 // Intrinsics are language primitives, not regular calls, so treat them separately.
                 if let Some(intrinsic) = tcx.intrinsic(callee) {
+                    if !tcx.is_const_fn(callee) {
+                        // Non-const intrinsic.
+                        self.check_op(ops::IntrinsicNonConst { name: intrinsic.name });
+                        // If we allowed this, we're in miri-unleashed mode, so we might
+                        // as well skip the remaining checks.
+                        return;
+                    }
                     // We use `intrinsic.const_stable` to determine if this can be safely exposed to
                     // stable code, rather than `const_stable_indirect`. This is to make
                     // `#[rustc_const_stable_indirect]` an attribute that is always safe to add.
@@ -743,17 +782,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     // fallback body is safe to expose on stable.
                     let is_const_stable = intrinsic.const_stable
                         || (!intrinsic.must_be_overridden
-                            && tcx.is_const_fn(callee)
                             && is_safe_to_expose_on_stable_const_fn(tcx, callee));
                     match tcx.lookup_const_stability(callee) {
                         None => {
-                            // Non-const intrinsic.
-                            self.check_op(ops::IntrinsicNonConst { name: intrinsic.name });
-                        }
-                        Some(ConstStability { feature: None, .. }) => {
-                            // Intrinsic does not need a separate feature gate (we rely on the
-                            // regular stability checker). However, we have to worry about recursive
-                            // const stability.
+                            // This doesn't need a separate const-stability check -- const-stability equals
+                            // regular stability, and regular stability is checked separately.
+                            // However, we *do* have to worry about *recursive* const stability.
                             if !is_const_stable && self.enforce_recursive_const_stability() {
                                 self.dcx().emit_err(errors::UnmarkedIntrinsicExposed {
                                     span: self.span,
@@ -762,20 +796,20 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                             }
                         }
                         Some(ConstStability {
-                            feature: Some(feature),
                             level: StabilityLevel::Unstable { .. },
+                            feature,
                             ..
                         }) => {
                             self.check_op(ops::IntrinsicUnstable {
                                 name: intrinsic.name,
                                 feature,
-                                const_stable: is_const_stable,
+                                const_stable_indirect: is_const_stable,
                             });
                         }
                         Some(ConstStability { level: StabilityLevel::Stable { .. }, .. }) => {
                             // All good. Note that a `#[rustc_const_stable]` intrinsic (meaning it
                             // can be *directly* invoked from stable const code) does not always
-                            // have the `#[rustc_const_stable_intrinsic]` attribute (which controls
+                            // have the `#[rustc_intrinsic_const_stable_indirect]` attribute (which controls
                             // exposing an intrinsic indirectly); we accept this call anyway.
                         }
                     }
@@ -783,14 +817,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     return;
                 }
 
-                // Trait functions are not `const fn` so we have to skip them here.
-                if !tcx.is_const_fn(callee) && !is_trait {
+                if !tcx.is_const_fn(callee) {
                     self.check_op(ops::FnCallNonConst {
                         callee,
                         args: fn_args,
                         span: *fn_span,
                         call_source,
-                        feature: None,
                     });
                     // If we allowed this, we're in miri-unleashed mode, so we might
                     // as well skip the remaining checks.
@@ -802,7 +834,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     Some(ConstStability { level: StabilityLevel::Stable { .. }, .. }) => {
                         // All good.
                     }
-                    None | Some(ConstStability { feature: None, .. }) => {
+                    None => {
                         // This doesn't need a separate const-stability check -- const-stability equals
                         // regular stability, and regular stability is checked separately.
                         // However, we *do* have to worry about *recursive* const stability.
@@ -816,8 +848,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                         }
                     }
                     Some(ConstStability {
-                        feature: Some(feature),
-                        level: StabilityLevel::Unstable { implied_by: implied_feature, .. },
+                        level: StabilityLevel::Unstable { implied_by: implied_feature, issue, .. },
+                        feature,
                         ..
                     }) => {
                         // An unstable const fn with a feature gate.
@@ -826,6 +858,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
                         // We only honor `span.allows_unstable` aka `#[allow_internal_unstable]` if
                         // the callee is safe to expose, to avoid bypassing recursive stability.
+                        // This is not ideal since it means the user sees an error, not the macro
+                        // author, but that's also the case if one forgets to set
+                        // `#[allow_internal_unstable]` in the first place. Note that this cannot be
+                        // integrated in the check below since we want to enforce
+                        // `callee_safe_to_expose_on_stable` even if
+                        // `!self.enforce_recursive_const_stability()`.
                         if (self.span.allows_unstable(feature)
                             || implied_feature.is_some_and(|f| self.span.allows_unstable(f)))
                             && callee_safe_to_expose_on_stable
@@ -839,16 +877,30 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                         // to allow this.
                         let feature_enabled = callee.is_local()
                             || tcx.features().enabled(feature)
-                            || implied_feature.is_some_and(|f| tcx.features().enabled(f));
-                        // We do *not* honor this if we are in the "danger zone": we have to enforce
-                        // recursive const-stability and the callee is not safe-to-expose. In that
-                        // case we need `check_op` to do the check.
-                        let danger_zone = !callee_safe_to_expose_on_stable
-                            && self.enforce_recursive_const_stability();
-                        if danger_zone || !feature_enabled {
+                            || implied_feature.is_some_and(|f| tcx.features().enabled(f))
+                            || {
+                                // When we're compiling the compiler itself we may pull in
+                                // crates from crates.io, but those crates may depend on other
+                                // crates also pulled in from crates.io. We want to ideally be
+                                // able to compile everything without requiring upstream
+                                // modifications, so in the case that this looks like a
+                                // `rustc_private` crate (e.g., a compiler crate) and we also have
+                                // the `-Z force-unstable-if-unmarked` flag present (we're
+                                // compiling a compiler crate), then let this missing feature
+                                // annotation slide.
+                                // This matches what we do in `eval_stability_allow_unstable` for
+                                // regular stability.
+                                feature == sym::rustc_private
+                                    && issue == NonZero::new(27812)
+                                    && self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked
+                            };
+                        // Even if the feature is enabled, we still need check_op to double-check
+                        // this if the callee is not safe to expose on stable.
+                        if !feature_enabled || !callee_safe_to_expose_on_stable {
                             self.check_op(ops::FnCallUnstable {
                                 def_id: callee,
                                 feature,
+                                feature_enabled,
                                 safe_to_expose_on_stable: callee_safe_to_expose_on_stable,
                             });
                         }
@@ -865,35 +917,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     return;
                 }
 
-                let mut err_span = self.span;
-                let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
-
-                let ty_needs_non_const_drop =
-                    qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
-
-                debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
-
-                if !ty_needs_non_const_drop {
-                    return;
-                }
-
-                let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
-                    // Use the span where the local was declared as the span of the drop error.
-                    err_span = self.body.local_decls[local].source_info.span;
-                    self.qualifs.needs_non_const_drop(self.ccx, local, location)
-                } else {
-                    true
-                };
-
-                if needs_non_const_drop {
-                    self.check_op_spanned(
-                        ops::LiveDrop {
-                            dropped_at: Some(terminator.source_info.span),
-                            dropped_ty: ty_of_dropped_place,
-                        },
-                        err_span,
-                    );
-                }
+                self.check_drop_terminator(*dropped_place, location, terminator.source_info.span);
             }
 
             TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
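
For orientation before the next file's diff: a minimal sketch (not from this commit; the trait and function names are made up, and the nightly `~const` syntax may have changed since) of the kind of call site that the new `ConditionallyConstCall` op reports behind the `const_trait_impl` gate:

    #![feature(const_trait_impl)]

    #[const_trait]
    trait Step {
        fn step(&self) -> u32;
    }

    // `T: ~const Step` makes `advance` conditionally-const: whether `x.step()` may
    // run in const context depends on how `T` is instantiated, so at this point the
    // const checker can only record it as a conditionally-const call.
    const fn advance<T: ~const Step>(x: &T) -> u32 {
        x.step()
    }
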
diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs
index dcdaafaecc2..ab68691f1b9 100644
--- a/compiler/rustc_const_eval/src/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/check_consts/mod.rs
@@ -9,7 +9,7 @@ use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::ty::{self, PolyFnSig, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::Symbol;
-use {rustc_attr as attr, rustc_hir as hir};
+use {rustc_attr_parsing as attr, rustc_hir as hir};
 
 pub use self::qualifs::Qualif;
 
@@ -24,17 +24,15 @@ mod resolver;
 pub struct ConstCx<'mir, 'tcx> {
     pub body: &'mir mir::Body<'tcx>,
     pub tcx: TyCtxt<'tcx>,
-    pub param_env: ty::ParamEnv<'tcx>,
+    pub typing_env: ty::TypingEnv<'tcx>,
     pub const_kind: Option<hir::ConstContext>,
 }
 
 impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
     pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
-        let def_id = body.source.def_id().expect_local();
-        let param_env = tcx.param_env(def_id);
-
+        let typing_env = body.typing_env(tcx);
         let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
-        ConstCx { body, tcx, param_env, const_kind }
+        ConstCx { body, tcx, typing_env, const_kind }
     }
 
     pub(crate) fn dcx(&self) -> DiagCtxtHandle<'tcx> {
@@ -53,10 +51,11 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
     }
 
     pub fn enforce_recursive_const_stability(&self) -> bool {
-        // We can skip this if `staged_api` is not enabled, since in such crates
-        // `lookup_const_stability` will always be `None`.
+        // We can skip this if neither `staged_api` nor `-Zforce-unstable-if-unmarked` are enabled,
+        // since in such crates `lookup_const_stability` will always be `None`.
         self.const_kind == Some(hir::ConstContext::ConstFn)
-            && self.tcx.features().staged_api()
+            && (self.tcx.features().staged_api()
+                || self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked)
             && is_safe_to_expose_on_stable_const_fn(self.tcx, self.def_id().to_def_id())
     }
 
@@ -109,14 +108,15 @@ pub fn is_safe_to_expose_on_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> b
 
     match tcx.lookup_const_stability(def_id) {
         None => {
-            // Only marked functions can be trusted. Note that this may be a function in a
-            // non-staged-API crate where no recursive checks were done!
-            false
+            // In a `staged_api` crate, we do enforce recursive const stability for all unmarked
+            // functions, so we can trust local functions. But in another crate we don't know which
+            // rules were applied, so we can't trust that.
+            def_id.is_local() && tcx.features().staged_api()
         }
         Some(stab) => {
-            // We consider things safe-to-expose if they are stable, if they don't have any explicit
-            // const stability attribute, or if they are marked as `const_stable_indirect`.
-            stab.is_const_stable() || stab.feature.is_none() || stab.const_stable_indirect
+            // We consider things safe-to-expose if they are stable or if they are marked as
+            // `const_stable_indirect`.
+            stab.is_const_stable() || stab.const_stable_indirect
         }
     }
 }
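
To make the safe-to-expose rules above concrete, an illustrative sketch of recursive const stability in a `staged_api` crate (hypothetical item and feature names; the exact attribute spelling may differ on current nightlies):

    #![feature(staged_api)]
    #![stable(feature = "rust1", since = "1.0.0")]

    #[unstable(feature = "helper_fn", issue = "none")]
    #[rustc_const_unstable(feature = "helper_fn", issue = "none")]
    pub const fn helper() {}

    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "exposed_fn", since = "1.0.0")]
    pub const fn exposed() {
        // Rejected by recursive const stability: `exposed` can be called from stable
        // const code, so everything it calls must itself be safe to expose on stable,
        // i.e. const-stable or marked `#[rustc_const_stable_indirect]`.
        helper();
    }
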
diff --git a/compiler/rustc_const_eval/src/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs
index 2931159842f..afb7900c4b0 100644
--- a/compiler/rustc_const_eval/src/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/check_consts/ops.rs
@@ -15,8 +15,7 @@ use rustc_middle::ty::{
     suggest_constraining_type_param,
 };
 use rustc_middle::util::{CallDesugaringKind, CallKind, call_kind};
-use rustc_span::symbol::sym;
-use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_span::{BytePos, Pos, Span, Symbol, sym};
 use rustc_trait_selection::traits::SelectionContext;
 use tracing::debug;
 
@@ -28,6 +27,9 @@ pub enum Status {
     Unstable {
         /// The feature that must be enabled to use this operation.
         gate: Symbol,
+        /// Whether the feature gate was already checked (because the logic is a bit more
+        /// complicated than just checking a single gate).
+        gate_already_checked: bool,
         /// Whether it is allowed to use this operation from stable `const fn`.
         /// This will usually be `false`.
         safe_to_expose_on_stable: bool,
@@ -70,6 +72,38 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
     }
 }
 
+/// A call to a function that is in a trait, or has trait bounds that make it conditionally-const.
+#[derive(Debug)]
+pub(crate) struct ConditionallyConstCall<'tcx> {
+    pub callee: DefId,
+    pub args: GenericArgsRef<'tcx>,
+}
+
+impl<'tcx> NonConstOp<'tcx> for ConditionallyConstCall<'tcx> {
+    fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+        // We use the `const_trait_impl` gate for all conditionally-const calls.
+        Status::Unstable {
+            gate: sym::const_trait_impl,
+            gate_already_checked: false,
+            safe_to_expose_on_stable: false,
+            // We don't want the "mark the callee as `#[rustc_const_stable_indirect]`" hint
+            is_function_call: false,
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.tcx.sess.create_feature_err(
+            errors::ConditionallyConstCall {
+                span,
+                def_path_str: ccx.tcx.def_path_str_with_args(self.callee, self.args),
+                def_descr: ccx.tcx.def_descr(self.callee),
+                kind: ccx.const_kind(),
+            },
+            sym::const_trait_impl,
+        )
+    }
+}
+
 /// A function call where the callee is not marked as `const`.
 #[derive(Debug, Clone, Copy)]
 pub(crate) struct FnCallNonConst<'tcx> {
@@ -77,7 +111,6 @@ pub(crate) struct FnCallNonConst<'tcx> {
     pub args: GenericArgsRef<'tcx>,
     pub span: Span,
     pub call_source: CallSource,
-    pub feature: Option<Symbol>,
 }
 
 impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
@@ -85,8 +118,8 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
     #[allow(rustc::diagnostic_outside_of_impl)]
     #[allow(rustc::untranslatable_diagnostic)]
     fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> {
-        let FnCallNonConst { callee, args, span, call_source, feature } = *self;
-        let ConstCx { tcx, param_env, .. } = *ccx;
+        let FnCallNonConst { callee, args, span, call_source } = *self;
+        let ConstCx { tcx, typing_env, .. } = *ccx;
         let caller = ccx.def_id();
 
         let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
@@ -106,19 +139,17 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
                             err,
                             param_ty.name.as_str(),
                             &constraint,
-                            None,
+                            Some(trait_ref.def_id),
                             None,
                         );
                     }
                 }
                 ty::Adt(..) => {
+                    let (infcx, param_env) = tcx.infer_ctxt().build_with_typing_env(typing_env);
                     let obligation =
                         Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
-
-                    let infcx = tcx.infer_ctxt().build(ccx.body.typing_mode(tcx));
                     let mut selcx = SelectionContext::new(&infcx);
                     let implsrc = selcx.select(&obligation);
-
                     if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
                         // FIXME(const_trait_impl) revisit this
                         if !tcx.is_const_trait_impl(data.impl_def_id) {
@@ -132,7 +163,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
         };
 
         let call_kind =
-            call_kind(tcx, ccx.param_env, callee, args, span, call_source.from_hir_call(), None);
+            call_kind(tcx, ccx.typing_env, callee, args, span, call_source.from_hir_call(), None);
 
         debug!(?call_kind);
 
@@ -148,8 +179,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
                     };
                 }
 
-                let mut err = match kind {
-                    CallDesugaringKind::ForLoopIntoIter => {
+                // Don't point at the trait if this is a desugaring...
+                // FIXME(const_trait_impl): we could perhaps do this for `Iterator`.
+                match kind {
+                    CallDesugaringKind::ForLoopIntoIter | CallDesugaringKind::ForLoopNext => {
                         error!(NonConstForLoopIntoIter)
                     }
                     CallDesugaringKind::QuestionBranch => {
@@ -164,10 +197,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
                     CallDesugaringKind::Await => {
                         error!(NonConstAwait)
                     }
-                };
-
-                diag_trait(&mut err, self_ty, kind.trait_def_id(tcx));
-                err
+                }
             }
             CallKind::FnCall { fn_trait_id, self_ty } => {
                 let note = match self_ty.kind() {
@@ -285,14 +315,6 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
             ccx.const_kind(),
         ));
 
-        if let Some(feature) = feature {
-            ccx.tcx.disabled_nightly_features(
-                &mut err,
-                Some(ccx.tcx.local_def_id_to_hir_id(caller)),
-                [(String::new(), feature)],
-            );
-        }
-
         if let ConstContext::Static(_) = ccx.const_kind() {
             err.note(fluent_generated::const_eval_lazy_lock);
         }
@@ -308,6 +330,9 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
 pub(crate) struct FnCallUnstable {
     pub def_id: DefId,
     pub feature: Symbol,
+    /// If this is true, then the feature is enabled, but we need to still check if it is safe to
+    /// expose on stable.
+    pub feature_enabled: bool,
     pub safe_to_expose_on_stable: bool,
 }
 
@@ -315,12 +340,14 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
     fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
         Status::Unstable {
             gate: self.feature,
+            gate_already_checked: self.feature_enabled,
             safe_to_expose_on_stable: self.safe_to_expose_on_stable,
             is_function_call: true,
         }
     }
 
     fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        assert!(!self.feature_enabled);
         let mut err = ccx.dcx().create_err(errors::UnstableConstFn {
             span,
             def_path: ccx.tcx.def_path_str(self.def_id),
@@ -354,14 +381,15 @@ impl<'tcx> NonConstOp<'tcx> for IntrinsicNonConst {
 pub(crate) struct IntrinsicUnstable {
     pub name: Symbol,
     pub feature: Symbol,
-    pub const_stable: bool,
+    pub const_stable_indirect: bool,
 }
 
 impl<'tcx> NonConstOp<'tcx> for IntrinsicUnstable {
     fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
         Status::Unstable {
             gate: self.feature,
-            safe_to_expose_on_stable: self.const_stable,
+            gate_already_checked: false,
+            safe_to_expose_on_stable: self.const_stable_indirect,
             // We do *not* want to suggest to mark the intrinsic as `const_stable_indirect`,
             // that's not a trivial change!
             is_function_call: false,
@@ -388,6 +416,7 @@ impl<'tcx> NonConstOp<'tcx> for Coroutine {
         {
             Status::Unstable {
                 gate: sym::const_async_blocks,
+                gate_already_checked: false,
                 safe_to_expose_on_stable: false,
                 is_function_call: false,
             }
@@ -398,15 +427,8 @@ impl<'tcx> NonConstOp<'tcx> for Coroutine {
 
     fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
         let msg = format!("{:#}s are not allowed in {}s", self.0, ccx.const_kind());
-        if let hir::CoroutineKind::Desugared(
-            hir::CoroutineDesugaring::Async,
-            hir::CoroutineSource::Block,
-        ) = self.0
-        {
-            ccx.tcx.sess.create_feature_err(
-                errors::UnallowedOpInConstContext { span, msg },
-                sym::const_async_blocks,
-            )
+        if let Status::Unstable { gate, .. } = self.status_in_item(ccx) {
+            ccx.tcx.sess.create_feature_err(errors::UnallowedOpInConstContext { span, msg }, gate)
         } else {
             ccx.dcx().create_err(errors::UnallowedOpInConstContext { span, msg })
         }
@@ -435,17 +457,43 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm {
 
 #[derive(Debug)]
 pub(crate) struct LiveDrop<'tcx> {
-    pub dropped_at: Option<Span>,
+    pub dropped_at: Span,
     pub dropped_ty: Ty<'tcx>,
+    pub needs_non_const_drop: bool,
 }
 impl<'tcx> NonConstOp<'tcx> for LiveDrop<'tcx> {
+    fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+        if self.needs_non_const_drop {
+            Status::Forbidden
+        } else {
+            Status::Unstable {
+                gate: sym::const_destruct,
+                gate_already_checked: false,
+                safe_to_expose_on_stable: false,
+                is_function_call: false,
+            }
+        }
+    }
+
     fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
-        ccx.dcx().create_err(errors::LiveDrop {
-            span,
-            dropped_ty: self.dropped_ty,
-            kind: ccx.const_kind(),
-            dropped_at: self.dropped_at,
-        })
+        if self.needs_non_const_drop {
+            ccx.dcx().create_err(errors::LiveDrop {
+                span,
+                dropped_ty: self.dropped_ty,
+                kind: ccx.const_kind(),
+                dropped_at: self.dropped_at,
+            })
+        } else {
+            ccx.tcx.sess.create_feature_err(
+                errors::LiveDrop {
+                    span,
+                    dropped_ty: self.dropped_ty,
+                    kind: ccx.const_kind(),
+                    dropped_at: self.dropped_at,
+                },
+                sym::const_destruct,
+            )
+        }
     }
 }
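
A sketch (not part of the diff) of the two `LiveDrop` outcomes distinguished above:

    struct Guard;
    impl Drop for Guard {
        fn drop(&mut self) {}
    }

    const fn drops_a_guard(flag: bool) {
        let g = Guard;
        if flag {
            return; // `g` is dropped here while still live
        }
        // ... and also dropped at the end of the function.
    }

    // `Guard` has a non-const `Drop`, so `needs_non_const_drop` is true and the error
    // is hard (`Status::Forbidden`). If the drop could instead be const (for example
    // under `feature(const_destruct)` with a `~const Destruct` bound), the same drop
    // is reported as a feature-gated error via `create_feature_err`.
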
 
diff --git a/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs
index 0173a528c22..16e142a85ee 100644
--- a/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs
@@ -1,14 +1,11 @@
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::{self, BasicBlock, Location};
-use rustc_middle::ty::{Ty, TyCtxt};
-use rustc_span::Span;
-use rustc_span::symbol::sym;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::sym;
 use tracing::trace;
 
 use super::ConstCx;
-use super::check::Qualifs;
-use super::ops::{self, NonConstOp};
-use super::qualifs::{NeedsNonConstDrop, Qualif};
+use crate::check_consts::check::Checker;
 use crate::check_consts::rustc_allow_const_fn_unstable;
 
 /// Returns `true` if we should use the more precise live drop checker that runs after drop
@@ -32,44 +29,29 @@ pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
 /// This is separate from the rest of the const checking logic because it must run after drop
 /// elaboration.
 pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
-    let def_id = body.source.def_id().expect_local();
-    let const_kind = tcx.hir().body_const_context(def_id);
-    if const_kind.is_none() {
+    let ccx = ConstCx::new(tcx, body);
+    if ccx.const_kind.is_none() {
         return;
     }
 
-    if tcx.has_attr(def_id, sym::rustc_do_not_const_check) {
+    if tcx.has_attr(body.source.def_id(), sym::rustc_do_not_const_check) {
         return;
     }
 
-    let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
     if !checking_enabled(&ccx) {
         return;
     }
 
-    let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+    // I know it's not great to be creating a new const checker, but I'd
+    // rather use it so we can deduplicate the error emitting logic that
+    // it contains.
+    let mut visitor = CheckLiveDrops { checker: Checker::new(&ccx) };
 
     visitor.visit_body(body);
 }
 
 struct CheckLiveDrops<'mir, 'tcx> {
-    ccx: &'mir ConstCx<'mir, 'tcx>,
-    qualifs: Qualifs<'mir, 'tcx>,
-}
-
-// So we can access `body` and `tcx`.
-impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
-    type Target = ConstCx<'mir, 'tcx>;
-
-    fn deref(&self) -> &Self::Target {
-        self.ccx
-    }
-}
-
-impl<'tcx> CheckLiveDrops<'_, 'tcx> {
-    fn check_live_drop(&self, span: Span, dropped_ty: Ty<'tcx>) {
-        ops::LiveDrop { dropped_at: None, dropped_ty }.build_error(self.ccx, span).emit();
-    }
+    checker: Checker<'mir, 'tcx>,
 }
 
 impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
@@ -89,28 +71,11 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
 
         match &terminator.kind {
             mir::TerminatorKind::Drop { place: dropped_place, .. } => {
-                let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
-
-                if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
-                    // Instead of throwing a bug, we just return here. This is because we have to
-                    // run custom `const Drop` impls.
-                    return;
-                }
-
-                if dropped_place.is_indirect() {
-                    self.check_live_drop(terminator.source_info.span, dropped_ty);
-                    return;
-                }
-
-                // Drop elaboration is not precise enough to accept code like
-                // `tests/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
-                // initialized with `None` and never changed, it still emits drop glue.
-                // Hence we additionally check the qualifs here to allow more code to pass.
-                if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
-                    // Use the span where the dropped local was declared for the error.
-                    let span = self.body.local_decls[dropped_place.local].source_info.span;
-                    self.check_live_drop(span, dropped_ty);
-                }
+                self.checker.check_drop_terminator(
+                    *dropped_place,
+                    location,
+                    terminator.source_info.span,
+                );
             }
 
             mir::TerminatorKind::UnwindTerminate(_)
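
Routing this through `Checker::check_drop_terminator` preserves the behavior the removed comment described: drop elaboration still emits drop glue for locals that never actually hold a value needing drop, so the qualif-based check is what lets such code pass. Roughly the pattern from `tests/ui/consts/control-flow/drop-pass.rs` (a sketch, not part of the diff):

    const fn no_real_drop(flag: bool) {
        // `Option<Vec<u32>>` needs drop in general, and drop elaboration keeps a
        // (conditional) drop for `v`...
        let v: Option<Vec<u32>> = None;
        if flag {
            // ...but the dataflow qualif analysis sees that `v` never holds a value
            // that needs dropping, so `needs_non_const_drop` is false and no
            // live-drop error is emitted after drop elaboration.
        }
    }
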
diff --git a/compiler/rustc_const_eval/src/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/check_consts/qualifs.rs
index 29a08579175..e244b50a4b5 100644
--- a/compiler/rustc_const_eval/src/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/check_consts/qualifs.rs
@@ -2,11 +2,14 @@
 //!
 //! See the `Qualif` trait for more info.
 
+// FIXME(const_trait_impl): This API should be really reworked. It's dangerously general for
+// having basically only two use-cases that act in different ways.
+
 use rustc_errors::ErrorGuaranteed;
 use rustc_hir::LangItem;
 use rustc_infer::infer::TyCtxtInferExt;
 use rustc_middle::mir::*;
-use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
+use rustc_middle::ty::{self, AdtDef, Ty};
 use rustc_middle::{bug, mir};
 use rustc_trait_selection::traits::{Obligation, ObligationCause, ObligationCtxt};
 use tracing::instrument;
@@ -59,26 +62,12 @@ pub trait Qualif {
     /// It also determines the `Qualif`s for primitive types.
     fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
 
-    /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
-    ///
-    /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
-    /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
-    /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
-    /// with a custom `Drop` impl is inherently `NeedsDrop`.
-    ///
-    /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
-    fn in_adt_inherently<'tcx>(
-        cx: &ConstCx<'_, 'tcx>,
-        adt: AdtDef<'tcx>,
-        args: GenericArgsRef<'tcx>,
-    ) -> bool;
-
-    /// Returns `true` if this `Qualif` behaves sructurally for pointers and references:
-    /// the pointer/reference qualifies if and only if the pointee qualifies.
+    /// Returns `true` if the `Qualif` is structural in an ADT's fields, i.e. if we may
+    /// recurse into an operand *value* to determine whether it has this `Qualif`.
     ///
-    /// (This is currently `false` for all our instances, but that may change in the future. Also,
-    /// by keeping it abstract, the handling of `Deref` in `in_place` becomes more clear.)
-    fn deref_structural<'tcx>(cx: &ConstCx<'_, 'tcx>) -> bool;
+    /// If this returns false, `in_any_value_of_ty` will be invoked to determine the
+    /// final qualif for this ADT.
+    fn is_structural_in_adt_value<'tcx>(cx: &ConstCx<'_, 'tcx>, adt: AdtDef<'tcx>) -> bool;
 }
 
 /// Constant containing interior mutability (`UnsafeCell<T>`).
@@ -101,42 +90,43 @@ impl Qualif for HasMutInterior {
             return false;
         }
 
+        // Avoid selecting for `UnsafeCell` either.
+        if ty.ty_adt_def().is_some_and(|adt| adt.is_unsafe_cell()) {
+            return true;
+        }
+
         // We do not use `ty.is_freeze` here, because that requires revealing opaque types, which
         // requires borrowck, which in turn will invoke mir_const_qualifs again, causing a cycle error.
         // Instead we invoke an obligation context manually, and provide the opaque type inference settings
         // that allow the trait solver to just error out instead of cycling.
         let freeze_def_id = cx.tcx.require_lang_item(LangItem::Freeze, Some(cx.body.span));
-
+        // FIXME(#132279): Once we've got a typing mode which reveals opaque types using the HIR
+        // typeck results without causing query cycles, we should use this here instead of defining
+        // opaque types.
+        let typing_env = ty::TypingEnv {
+            typing_mode: ty::TypingMode::analysis_in_body(
+                cx.tcx,
+                cx.body.source.def_id().expect_local(),
+            ),
+            param_env: cx.typing_env.param_env,
+        };
+        let (infcx, param_env) = cx.tcx.infer_ctxt().build_with_typing_env(typing_env);
+        let ocx = ObligationCtxt::new(&infcx);
         let obligation = Obligation::new(
             cx.tcx,
             ObligationCause::dummy_with_span(cx.body.span),
-            cx.param_env,
+            param_env,
             ty::TraitRef::new(cx.tcx, freeze_def_id, [ty::GenericArg::from(ty)]),
         );
-
-        // FIXME(#132279): This should eventually use the already defined hidden types.
-        let infcx = cx.tcx.infer_ctxt().build(ty::TypingMode::analysis_in_body(
-            cx.tcx,
-            cx.body.source.def_id().expect_local(),
-        ));
-        let ocx = ObligationCtxt::new(&infcx);
         ocx.register_obligation(obligation);
         let errors = ocx.select_all_or_error();
         !errors.is_empty()
     }
 
-    fn in_adt_inherently<'tcx>(
-        _cx: &ConstCx<'_, 'tcx>,
-        adt: AdtDef<'tcx>,
-        _: GenericArgsRef<'tcx>,
-    ) -> bool {
+    fn is_structural_in_adt_value<'tcx>(_cx: &ConstCx<'_, 'tcx>, adt: AdtDef<'tcx>) -> bool {
         // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
         // It arises structurally for all other types.
-        adt.is_unsafe_cell()
-    }
-
-    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
-        false
+        !adt.is_unsafe_cell()
     }
 }
 
@@ -150,25 +140,18 @@ pub struct NeedsDrop;
 impl Qualif for NeedsDrop {
     const ANALYSIS_NAME: &'static str = "flow_needs_drop";
     const IS_CLEARED_ON_MOVE: bool = true;
+    const ALLOW_PROMOTED: bool = true;
 
     fn in_qualifs(qualifs: &ConstQualifs) -> bool {
         qualifs.needs_drop
     }
 
     fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
-        ty.needs_drop(cx.tcx, cx.param_env)
+        ty.needs_drop(cx.tcx, cx.typing_env)
     }
 
-    fn in_adt_inherently<'tcx>(
-        cx: &ConstCx<'_, 'tcx>,
-        adt: AdtDef<'tcx>,
-        _: GenericArgsRef<'tcx>,
-    ) -> bool {
-        adt.has_dtor(cx.tcx)
-    }
-
-    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
-        false
+    fn is_structural_in_adt_value<'tcx>(cx: &ConstCx<'_, 'tcx>, adt: AdtDef<'tcx>) -> bool {
+        !adt.has_dtor(cx.tcx)
     }
 }
 
@@ -187,25 +170,46 @@ impl Qualif for NeedsNonConstDrop {
 
     #[instrument(level = "trace", skip(cx), ret)]
     fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
-        // Avoid selecting for simple cases, such as builtin types.
-        if ty::util::is_trivially_const_drop(ty) {
+        // If this doesn't need drop at all, then don't select `~const Destruct`.
+        if !ty.needs_drop(cx.tcx, cx.typing_env) {
             return false;
         }
 
-        // FIXME(const_trait_impl): Reimplement const drop checking.
-        NeedsDrop::in_any_value_of_ty(cx, ty)
-    }
-
-    fn in_adt_inherently<'tcx>(
-        cx: &ConstCx<'_, 'tcx>,
-        adt: AdtDef<'tcx>,
-        _: GenericArgsRef<'tcx>,
-    ) -> bool {
-        adt.has_non_const_dtor(cx.tcx)
+        // We check that the type is `~const Destruct` since that will verify that
+        // the type is both `~const Drop` (if a drop impl exists for the adt), *and*
+        // that the components of this type are also `~const Destruct`. This
+        // amounts to verifying that there are no values in this ADT that may have
+        // a non-const drop.
+        let destruct_def_id = cx.tcx.require_lang_item(LangItem::Destruct, Some(cx.body.span));
+        let (infcx, param_env) = cx.tcx.infer_ctxt().build_with_typing_env(cx.typing_env);
+        let ocx = ObligationCtxt::new(&infcx);
+        ocx.register_obligation(Obligation::new(
+            cx.tcx,
+            ObligationCause::misc(cx.body.span, cx.def_id()),
+            param_env,
+            ty::Binder::dummy(ty::TraitRef::new(cx.tcx, destruct_def_id, [ty]))
+                .to_host_effect_clause(cx.tcx, match cx.const_kind() {
+                    rustc_hir::ConstContext::ConstFn => ty::BoundConstness::Maybe,
+                    rustc_hir::ConstContext::Static(_) | rustc_hir::ConstContext::Const { .. } => {
+                        ty::BoundConstness::Const
+                    }
+                }),
+        ));
+        !ocx.select_all_or_error().is_empty()
     }
 
-    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
-        false
+    fn is_structural_in_adt_value<'tcx>(cx: &ConstCx<'_, 'tcx>, adt: AdtDef<'tcx>) -> bool {
+        // As soon as an ADT has a destructor, then the drop becomes non-structural
+        // in its value since:
+        // 1. The destructor may have `~const` bounds which are not present on the type.
+        //   Someone needs to check that those are satisfied.
+        //   While this could be instead satisfied by checking that the `~const Drop`
+        //   impl holds (i.e. replicating part of the `in_any_value_of_ty` logic above),
+        //   even in this case, we have another problem, which is,
+        // 2. The destructor may *modify* the operand being dropped, so even if we
+        //   did recurse on the components of the operand, we may not be even dropping
+        //   the same values that were present before the custom destructor was invoked.
+        !adt.has_dtor(cx.tcx)
     }
 }
 
@@ -257,14 +261,15 @@ where
         Rvalue::Aggregate(kind, operands) => {
             // Return early if we know that the struct or enum being constructed is always
             // qualified.
-            if let AggregateKind::Adt(adt_did, _, args, ..) = **kind {
+            if let AggregateKind::Adt(adt_did, ..) = **kind {
                 let def = cx.tcx.adt_def(adt_did);
-                if Q::in_adt_inherently(cx, def, args) {
-                    return true;
-                }
                 // Don't do any value-based reasoning for unions.
-                if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
-                    return true;
+                // Also, if the ADT is not structural in its fields,
+                // then we cannot recurse on its fields. Instead,
+                // we fall back to checking the qualif for *any* value
+                // of the ADT.
+                if def.is_union() || !Q::is_structural_in_adt_value(cx, def) {
+                    return Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx));
                 }
             }
 
@@ -301,7 +306,11 @@ where
             return false;
         }
 
-        if matches!(elem, ProjectionElem::Deref) && !Q::deref_structural(cx) {
+        // `Deref` currently unconditionally "qualifies" if `in_any_value_of_ty` returns true,
+        // i.e., we treat all qualifs as non-structural for deref projections. Generally,
+        // we can say very little about `*ptr` even if we know that `ptr` satisfies all
+        // sorts of properties.
+        if matches!(elem, ProjectionElem::Deref) {
             // We have to assume that this qualifies.
             return true;
         }
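
A hedged, user-level illustration of the remark about `*ptr` (not part of this patch): once a value
sits behind a reference, only the pointee's type is visible, so the conservative
`in_any_value_of_ty` answer is the best that can be given for the dereference.

    use std::cell::Cell;

    // For a locally built aggregate, each operand can be inspected directly.
    const A: (i32, i32) = (1, 2);

    // Through a reference only the pointee type is known; whether the value behind
    // `r` actually contains interior mutability is no longer tracked.
    const fn first(r: &(i32, Cell<i32>)) -> i32 {
        r.0
    }

    fn main() {
        let pair = (7, Cell::new(0));
        assert_eq!(first(&pair), 7);
        assert_eq!(A.0 + A.1, 3);
    }
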
diff --git a/compiler/rustc_const_eval/src/check_consts/resolver.rs b/compiler/rustc_const_eval/src/check_consts/resolver.rs
index 74eb6b37fbb..763c37a41af 100644
--- a/compiler/rustc_const_eval/src/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/check_consts/resolver.rs
@@ -120,7 +120,7 @@ where
     ///
     /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
     fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
-        !place.ty(self.ccx.body, self.ccx.tcx).ty.is_freeze(self.ccx.tcx, self.ccx.param_env)
+        !place.ty(self.ccx.body, self.ccx.tcx).ty.is_freeze(self.ccx.tcx, self.ccx.typing_env)
     }
 }
 
@@ -329,7 +329,7 @@ where
         self.transfer_function(state).initialize_state();
     }
 
-    fn apply_statement_effect(
+    fn apply_primary_statement_effect(
         &mut self,
         state: &mut Self::Domain,
         statement: &mir::Statement<'tcx>,
@@ -338,7 +338,7 @@ where
         self.transfer_function(state).visit_statement(statement, location);
     }
 
-    fn apply_terminator_effect<'mir>(
+    fn apply_primary_terminator_effect<'mir>(
         &mut self,
         state: &mut Self::Domain,
         terminator: &'mir mir::Terminator<'tcx>,
diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
index e49d702127d..817acfcca74 100644
--- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
@@ -168,9 +168,9 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
         })
     }
 
-    fn expose_ptr(
-        _ecx: &mut InterpCx<'tcx, Self>,
-        _ptr: interpret::Pointer<Self::Provenance>,
+    fn expose_provenance(
+        _ecx: &InterpCx<'tcx, Self>,
+        _provenance: Self::Provenance,
     ) -> interpret::InterpResult<'tcx> {
         unimplemented!()
     }
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 6686413bf02..fee7287951d 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -139,12 +139,14 @@ where
     match error {
         // Don't emit a new diagnostic for these errors, they are already reported elsewhere or
         // should remain silent.
+        err_inval!(AlreadyReported(info)) => ErrorHandled::Reported(info, span),
         err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
             ErrorHandled::TooGeneric(span)
         }
-        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span),
         err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
-            ErrorHandled::Reported(ReportedErrorInfo::tainted_by_errors(guar), span)
+            // This can occur in infallible promoteds, e.g. when a non-existent type or field is
+            // encountered.
+            ErrorHandled::Reported(ReportedErrorInfo::allowed_in_infallible(guar), span)
         }
         // Report remaining errors.
         _ => {
@@ -152,13 +154,25 @@ where
             let span = span.substitute_dummy(our_span);
             let err = mk(span, frames);
             let mut err = tcx.dcx().create_err(err);
+            // We allow invalid programs in infallible promoteds since invalid layouts can occur
+            // anyway (e.g. due to size overflow). And we allow OOM as that can happen any time.
+            let allowed_in_infallible = matches!(
+                error,
+                InterpErrorKind::ResourceExhaustion(_) | InterpErrorKind::InvalidProgram(_)
+            );
 
             let msg = error.diagnostic_message();
             error.add_args(&mut err);
 
             // Use *our* span to label the interp error
             err.span_label(our_span, msg);
-            ErrorHandled::Reported(err.emit().into(), span)
+            let g = err.emit();
+            let reported = if allowed_in_infallible {
+                ReportedErrorInfo::allowed_in_infallible(g)
+            } else {
+                ReportedErrorInfo::const_eval_error(g)
+            };
+            ErrorHandled::Reported(reported, span)
         }
     }
 }
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 81b9d73b952..ce8eceebdf8 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -3,14 +3,13 @@ use std::sync::atomic::Ordering::Relaxed;
 use either::{Left, Right};
 use rustc_abi::{self as abi, BackendRepr};
 use rustc_hir::def::DefKind;
-use rustc_middle::bug;
-use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
+use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo, ReportedErrorInfo};
 use rustc_middle::mir::{self, ConstAlloc, ConstValue};
 use rustc_middle::query::TyCtxtAt;
-use rustc_middle::traits::Reveal;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::{bug, throw_inval};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument, trace};
@@ -31,7 +30,6 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
     cid: GlobalId<'tcx>,
     body: &'tcx mir::Body<'tcx>,
 ) -> InterpResult<'tcx, R> {
-    trace!(?ecx.param_env);
     let tcx = *ecx.tcx;
     assert!(
         cid.promoted.is_some()
@@ -95,18 +93,18 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
     match intern_result {
         Ok(()) => {}
         Err(InternResult::FoundDanglingPointer) => {
-            return Err(ecx
-                .tcx
-                .dcx()
-                .emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
-            .into();
+            throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
+                ecx.tcx
+                    .dcx()
+                    .emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
+            )));
         }
         Err(InternResult::FoundBadMutablePointer) => {
-            return Err(ecx
-                .tcx
-                .dcx()
-                .emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
-            .into();
+            throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
+                ecx.tcx
+                    .dcx()
+                    .emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
+            )));
         }
     }
 
@@ -126,14 +124,14 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
 pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>(
     tcx: TyCtxt<'tcx>,
     root_span: Span,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
     can_access_mut_global: CanAccessMutGlobal,
 ) -> CompileTimeInterpCx<'tcx> {
-    debug!("mk_eval_cx: {:?}", param_env);
+    debug!("mk_eval_cx: {:?}", typing_env);
     InterpCx::new(
         tcx,
         root_span,
-        param_env,
+        typing_env,
         CompileTimeMachine::new(can_access_mut_global, CheckAlignment::No),
     )
 }
@@ -142,11 +140,11 @@ pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>(
 /// Returns both the context and an `OpTy` that represents the constant.
 pub fn mk_eval_cx_for_const_val<'tcx>(
     tcx: TyCtxtAt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
     val: mir::ConstValue<'tcx>,
     ty: Ty<'tcx>,
 ) -> Option<(CompileTimeInterpCx<'tcx>, OpTy<'tcx>)> {
-    let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No);
+    let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, typing_env, CanAccessMutGlobal::No);
     // FIXME: is it a problem to discard the error here?
     let op = ecx.const_val_to_op(val, ty, None).discard_err()?;
     Some((ecx, op))
@@ -221,7 +219,7 @@ pub(super) fn op_to_const<'tcx>(
                 let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap(); // `false` = no raw ptrs
                 debug_assert!(
                     matches!(
-                        ecx.tcx.struct_tail_for_codegen(pointee_ty, ecx.param_env).kind(),
+                        ecx.tcx.struct_tail_for_codegen(pointee_ty, ecx.typing_env()).kind(),
                         ty::Str | ty::Slice(..),
                     ),
                     "`ConstValue::Slice` is for slice-tailed types only, but got {}",
@@ -245,7 +243,7 @@ pub(super) fn op_to_const<'tcx>(
 pub(crate) fn turn_into_const_value<'tcx>(
     tcx: TyCtxt<'tcx>,
     constant: ConstAlloc<'tcx>,
-    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+    key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>,
 ) -> ConstValue<'tcx> {
     let cid = key.value;
     let def_id = cid.instance.def.def_id();
@@ -254,7 +252,7 @@ pub(crate) fn turn_into_const_value<'tcx>(
     let ecx = mk_eval_cx_to_read_const_val(
         tcx,
         tcx.def_span(key.value.instance.def_id()),
-        key.param_env,
+        key.typing_env,
         CanAccessMutGlobal::from(is_static),
     );
 
@@ -274,21 +272,16 @@ pub(crate) fn turn_into_const_value<'tcx>(
 #[instrument(skip(tcx), level = "debug")]
 pub fn eval_to_const_value_raw_provider<'tcx>(
     tcx: TyCtxt<'tcx>,
-    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+    key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>,
 ) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
-    // Const eval always happens in Reveal::All mode in order to be able to use the hidden types of
-    // opaque types. This is needed for trivial things like `size_of`, but also for using associated
-    // types that are not specified in the opaque type.
-    assert_eq!(key.param_env.reveal(), Reveal::All);
-
     // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
     // Catch such calls and evaluate them instead of trying to load a constant's MIR.
     if let ty::InstanceKind::Intrinsic(def_id) = key.value.instance.def {
-        let ty = key.value.instance.ty(tcx, key.param_env);
+        let ty = key.value.instance.ty(tcx, key.typing_env);
         let ty::FnDef(_, args) = ty.kind() else {
             bug!("intrinsic with type {:?}", ty);
         };
-        return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).report_err().map_err(
+        return eval_nullary_intrinsic(tcx, key.typing_env, def_id, args).report_err().map_err(
             |error| {
                 let span = tcx.def_span(def_id);
 
@@ -315,7 +308,7 @@ pub fn eval_static_initializer_provider<'tcx>(
 
     let instance = ty::Instance::mono(tcx, def_id.to_def_id());
     let cid = rustc_middle::mir::interpret::GlobalId { instance, promoted: None };
-    eval_in_interpreter(tcx, cid, ty::ParamEnv::reveal_all())
+    eval_in_interpreter(tcx, cid, ty::TypingEnv::fully_monomorphized())
 }
 
 pub trait InterpretationResult<'tcx> {
@@ -340,16 +333,14 @@ impl<'tcx> InterpretationResult<'tcx> for ConstAlloc<'tcx> {
 #[instrument(skip(tcx), level = "debug")]
 pub fn eval_to_allocation_raw_provider<'tcx>(
     tcx: TyCtxt<'tcx>,
-    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+    key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>,
 ) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
     // This shouldn't be used for statics, since statics are conceptually places,
     // not values -- so what we do here could break pointer identity.
     assert!(key.value.promoted.is_some() || !tcx.is_static(key.value.instance.def_id()));
-    // Const eval always happens in Reveal::All mode in order to be able to use the hidden types of
-    // opaque types. This is needed for trivial things like `size_of`, but also for using associated
-    // types that are not specified in the opaque type.
-
-    assert_eq!(key.param_env.reveal(), Reveal::All);
+    // Const eval always happens in PostAnalysis mode. See the comment in
+    // `InterpCx::new` for more details.
+    debug_assert_eq!(key.typing_env.typing_mode, ty::TypingMode::PostAnalysis);
     if cfg!(debug_assertions) {
         // Make sure we format the instance even if we do not print it.
         // This serves as a regression test against an ICE on printing.
@@ -360,13 +351,13 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
         trace!("const eval: {:?} ({})", key, instance);
     }
 
-    eval_in_interpreter(tcx, key.value, key.param_env)
+    eval_in_interpreter(tcx, key.value, key.typing_env)
 }
 
 fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
     tcx: TyCtxt<'tcx>,
     cid: GlobalId<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
 ) -> Result<R, ErrorHandled> {
     let def = cid.instance.def.def_id();
     let is_static = tcx.is_static(def);
@@ -374,7 +365,7 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
     let mut ecx = InterpCx::new(
         tcx,
         tcx.def_span(def),
-        param_env,
+        typing_env,
         // Statics (and promoteds inside statics) may access mutable global memory, because unlike consts
         // they do not have to behave "as if" they were evaluated at runtime.
         // For consts however we want to ensure they behave "as if" they were evaluated at runtime,
@@ -472,8 +463,9 @@ fn report_validation_error<'tcx>(
     backtrace.print_backtrace();
 
     let bytes = ecx.print_alloc_bytes_for_diagnostics(alloc_id);
-    let (size, align, _) = ecx.get_alloc_info(alloc_id);
-    let raw_bytes = errors::RawBytesNote { size: size.bytes(), align: align.bytes(), bytes };
+    let info = ecx.get_alloc_info(alloc_id);
+    let raw_bytes =
+        errors::RawBytesNote { size: info.size.bytes(), align: info.align.bytes(), bytes };
 
     crate::const_eval::report(
         *ecx.tcx,
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index beff0cd99fc..babf99c4c1f 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -4,27 +4,29 @@ use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::TyCtxt;
 
-pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+fn parent_impl_constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
     let parent_id = tcx.local_parent(def_id);
-    matches!(tcx.def_kind(parent_id), DefKind::Impl { .. })
-        && tcx.constness(parent_id) == hir::Constness::Const
+    if matches!(tcx.def_kind(parent_id), DefKind::Impl { .. })
+        && let Some(header) = tcx.impl_trait_header(parent_id)
+    {
+        header.constness
+    } else {
+        hir::Constness::NotConst
+    }
 }
 
-/// Checks whether an item is considered to be `const`. If it is a constructor, anonymous const,
-/// const block, const item or associated const, it is const. If it is a trait impl/function,
+/// Checks whether an item is considered to be `const`. If it is a constructor, it is const.
+/// If it is an assoc method or function,
 /// return if it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
-/// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
+/// has a `rustc_const_{un,}stable` attribute. Otherwise, panic.
 fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
     let node = tcx.hir_node_by_def_id(def_id);
 
     match node {
-        hir::Node::Ctor(_)
-        | hir::Node::AnonConst(_)
-        | hir::Node::ConstBlock(_)
+        hir::Node::Ctor(hir::VariantData::Tuple(..))
         | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => {
             hir::Constness::Const
         }
-        hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
         hir::Node::ForeignItem(_) => {
             // Foreign items cannot be evaluated at compile-time.
             hir::Constness::NotConst
@@ -38,10 +40,12 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
 
                 // If the function itself is not annotated with `const`, it may still be a `const fn`
                 // if it resides in a const trait impl.
-                let is_const = is_parent_const_impl_raw(tcx, def_id);
-                if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+                parent_impl_constness(tcx, def_id)
             } else {
-                hir::Constness::NotConst
+                tcx.dcx().span_bug(
+                    tcx.def_span(def_id),
+                    format!("should not be requesting the constness of items that can't be const: {node:#?}: {:?}", tcx.def_kind(def_id))
+                )
             }
         }
     }
@@ -53,9 +57,8 @@ fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
             Some(stab) => {
                 if cfg!(debug_assertions) && stab.promotable {
                     let sig = tcx.fn_sig(def_id);
-                    assert_eq!(
-                        sig.skip_binder().safety(),
-                        hir::Safety::Safe,
+                    assert!(
+                        sig.skip_binder().safety().is_safe(),
                         "don't mark const unsafe fns as promotable",
                         // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
                     );
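
For context on what "promotable" means here, a hedged user-level sketch (not part of this patch):
promotion lifts a constant expression into a `'static` temporary, and only expressions that can
never fail or run arbitrary code are eligible, which is why an unsafe const fn must never be marked
promotable.

    fn main() {
        // `1 + 2` is promoted to a 'static temporary behind the scenes.
        let r: &'static i32 = &(1 + 2);
        assert_eq!(*r, 3);
        // Function calls, by contrast, are only promoted for the few library
        // functions explicitly marked as promotable.
    }
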
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 62115aef4a7..9c660ef0b18 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -8,12 +8,12 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_hir::{self as hir, CRATE_HIR_ID, LangItem};
 use rustc_middle::mir::AssertMessage;
+use rustc_middle::mir::interpret::ReportedErrorInfo;
 use rustc_middle::query::TyCtxtAt;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::layout::{HasTypingEnv, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
-use rustc_span::Span;
-use rustc_span::symbol::{Symbol, sym};
+use rustc_span::{Span, Symbol, sym};
 use tracing::debug;
 
 use super::error::*;
@@ -21,9 +21,8 @@ use crate::errors::{LongRunning, LongRunningWarn};
 use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, GlobalAlloc, ImmTy,
-    InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, RangeSet, Scalar, compile_time_machine,
-    interp_ok, throw_exhaust, throw_inval, throw_ub, throw_ub_custom, throw_unsup,
-    throw_unsup_format,
+    InterpCx, InterpResult, MPlaceTy, OpTy, RangeSet, Scalar, compile_time_machine, interp_ok,
+    throw_exhaust, throw_inval, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -251,7 +250,7 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
             let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
             let new_instance = ty::Instance::expect_resolve(
                 *self.tcx,
-                ty::ParamEnv::reveal_all(),
+                self.typing_env(),
                 const_def_id,
                 instance.args,
                 self.cur_span(),
@@ -263,6 +262,12 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
     }
 
     /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
+    /// Returns `2` if the result is unknown.
+    /// Returns `1` if the pointers are guaranteed equal.
+    /// Returns `0` if the pointers are guaranteed unequal.
+    ///
+    /// Note that this intrinsic is exposed on stable for comparison with null. In other words, any
+    /// change to this function that affects comparison with null is insta-stable!
     fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
         interp_ok(match (a, b) {
             // Comparisons between integers are always known.
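
A hedged, user-level illustration of the stable surface mentioned above (not part of this patch):
comparisons against the null pointer must always get a definite answer, while comparisons between
arbitrary pointers may legitimately come back as "unknown" (the `2` case) during const evaluation.

    fn main() {
        let x = 5i32;
        let p: *const i32 = &x;
        let q: *const i32 = std::ptr::null();
        // At runtime these are trivially decidable; during const evaluation the
        // intrinsic above is what provides the definite 0/1 answers for cases
        // such as comparisons with null.
        assert!(!p.is_null());
        assert!(q.is_null());
    }
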
@@ -557,7 +562,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                         .tcx
                         .dcx()
                         .span_delayed_bug(span, "The deny lint should have already errored");
-                    throw_inval!(AlreadyReported(guard.into()));
+                    throw_inval!(AlreadyReported(ReportedErrorInfo::allowed_in_infallible(guard)));
                 }
             } else if new_steps > start && new_steps.is_power_of_two() {
                 // Only report after a certain number of terminators have been evaluated and the
@@ -579,7 +584,10 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     }
 
     #[inline(always)]
-    fn expose_ptr(_ecx: &mut InterpCx<'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
+    fn expose_provenance(
+        _ecx: &InterpCx<'tcx, Self>,
+        _provenance: Self::Provenance,
+    ) -> InterpResult<'tcx> {
         // This is only reachable with -Zunleash-the-miri-inside-of-you.
         throw_unsup_format!("exposing pointers is not possible at compile-time")
     }
@@ -660,7 +668,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                 .is_some_and(|p| !p.immutable())
         {
             // That next check is expensive, that's why we have all the guards above.
-            let is_immutable = ty.is_freeze(*ecx.tcx, ecx.param_env);
+            let is_immutable = ty.is_freeze(*ecx.tcx, ecx.typing_env());
             let place = ecx.ref_to_mplace(val)?;
             let new_place = if is_immutable {
                 place.map_provenance(CtfeProvenance::as_immutable)
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index d5012236c34..34f795bda75 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -2,6 +2,7 @@
 
 use rustc_abi::VariantIdx;
 use rustc_middle::query::{Key, TyCtxtAt};
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use tracing::instrument;
@@ -38,8 +39,8 @@ pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
     val: mir::ConstValue<'tcx>,
     ty: Ty<'tcx>,
 ) -> Option<mir::DestructuredConstant<'tcx>> {
-    let param_env = ty::ParamEnv::reveal_all();
-    let (ecx, op) = mk_eval_cx_for_const_val(tcx, param_env, val, ty)?;
+    let typing_env = ty::TypingEnv::fully_monomorphized();
+    let (ecx, op) = mk_eval_cx_for_const_val(tcx, typing_env, val, ty)?;
 
     // We go to `usize` as we cannot allocate anything bigger anyway.
     let (field_count, variant, down) = match ty.kind() {
@@ -76,12 +77,15 @@ pub fn tag_for_variant_provider<'tcx>(
 ) -> Option<ty::ScalarInt> {
     assert!(ty.is_enum());
 
+    // FIXME: This uses an empty `TypingEnv` even though
+    // it may be used by a generic CTFE.
     let ecx = InterpCx::new(
         tcx,
         ty.default_span(tcx),
-        ty::ParamEnv::reveal_all(),
+        ty::TypingEnv::fully_monomorphized(),
         crate::const_eval::DummyMachine,
     );
 
-    ecx.tag_for_variant(ty, variant_index).unwrap().map(|(tag, _tag_field)| tag)
+    let layout = ecx.layout_of(ty).unwrap();
+    ecx.tag_for_variant(layout, variant_index).unwrap().map(|(tag, _tag_field)| tag)
 }
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index ea88b2ed22e..6f51b09323d 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -1,6 +1,6 @@
 use rustc_abi::{BackendRepr, VariantIdx};
 use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId, ReportedErrorInfo};
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
@@ -228,16 +228,19 @@ fn create_valtree_place<'tcx>(
 /// Evaluates a constant and turns it into a type-level constant value.
 pub(crate) fn eval_to_valtree<'tcx>(
     tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
     cid: GlobalId<'tcx>,
 ) -> EvalToValTreeResult<'tcx> {
-    let const_alloc = tcx.eval_to_allocation_raw(param_env.and(cid))?;
+    // Const eval always happens in PostAnalysis mode. See the comment in
+    // `InterpCx::new` for more details.
+    debug_assert_eq!(typing_env.typing_mode, ty::TypingMode::PostAnalysis);
+    let const_alloc = tcx.eval_to_allocation_raw(typing_env.as_query_input(cid))?;
 
     // FIXME Need to provide a span to `eval_to_valtree`
     let ecx = mk_eval_cx_to_read_const_val(
         tcx,
         DUMMY_SP,
-        param_env,
+        typing_env,
         // It is absolutely crucial for soundness that
         // we do not read from mutable memory.
         CanAccessMutGlobal::No,
@@ -258,7 +261,7 @@ pub(crate) fn eval_to_valtree<'tcx>(
                 ValTreeCreationError::NodesOverflow => {
                     let handled =
                         tcx.dcx().emit_err(MaxNumNodesInConstErr { span, global_const_id });
-                    Err(handled.into())
+                    Err(ReportedErrorInfo::allowed_in_infallible(handled).into())
                 }
                 ValTreeCreationError::NonSupportedType(ty) => Ok(Err(ty)),
             }
@@ -272,7 +275,8 @@ pub(crate) fn eval_to_valtree<'tcx>(
 #[instrument(skip(tcx), level = "debug", ret)]
 pub fn valtree_to_const_value<'tcx>(
     tcx: TyCtxt<'tcx>,
-    param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+    typing_env: ty::TypingEnv<'tcx>,
+    ty: Ty<'tcx>,
     valtree: ty::ValTree<'tcx>,
 ) -> mir::ConstValue<'tcx> {
     // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
@@ -281,9 +285,6 @@ pub fn valtree_to_const_value<'tcx>(
     // the `ValTree` and using `place_projection` and `place_field` to
     // create inner `MPlace`s which are filled recursively.
     // FIXME Does this need an example?
-
-    let (param_env, ty) = param_env_ty.into_parts();
-
     match *ty.kind() {
         ty::FnDef(..) => {
             assert!(valtree.unwrap_branch().is_empty());
@@ -297,16 +298,17 @@ pub fn valtree_to_const_value<'tcx>(
                 ),
             }
         }
-        ty::Pat(ty, _) => valtree_to_const_value(tcx, param_env.and(ty), valtree),
+        ty::Pat(ty, _) => valtree_to_const_value(tcx, typing_env, ty, valtree),
         ty::Ref(_, inner_ty, _) => {
             let mut ecx =
-                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, param_env, CanAccessMutGlobal::No);
+                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, typing_env, CanAccessMutGlobal::No);
             let imm = valtree_to_ref(&mut ecx, valtree, inner_ty);
-            let imm = ImmTy::from_immediate(imm, tcx.layout_of(param_env_ty).unwrap());
+            let imm =
+                ImmTy::from_immediate(imm, tcx.layout_of(typing_env.as_query_input(ty)).unwrap());
             op_to_const(&ecx, &imm.into(), /* for diagnostics */ false)
         }
         ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
-            let layout = tcx.layout_of(param_env_ty).unwrap();
+            let layout = tcx.layout_of(typing_env.as_query_input(ty)).unwrap();
             if layout.is_zst() {
                 // Fast path to avoid some allocations.
                 return mir::ConstValue::ZeroSized;
@@ -319,16 +321,16 @@ pub fn valtree_to_const_value<'tcx>(
                 let branches = valtree.unwrap_branch();
                 // Find the non-ZST field. (There can be aligned ZST!)
                 for (i, &inner_valtree) in branches.iter().enumerate() {
-                    let field = layout.field(&LayoutCx::new(tcx, param_env), i);
+                    let field = layout.field(&LayoutCx::new(tcx, typing_env), i);
                     if !field.is_zst() {
-                        return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
+                        return valtree_to_const_value(tcx, typing_env, field.ty, inner_valtree);
                     }
                 }
                 bug!("could not find non-ZST field during in {layout:#?}");
             }
 
             let mut ecx =
-                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, param_env, CanAccessMutGlobal::No);
+                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, typing_env, CanAccessMutGlobal::No);
 
             // Need to create a place for this valtree.
             let place = create_valtree_place(&mut ecx, layout, valtree);
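
The `FIXME Does this need an example?` earlier in this file suggests one; here is a hedged sketch
of the correspondence (not part of this patch): scalar leaves map to `Scalar` values, and branches
map to aggregates that are rebuilt field by field.

    // When a constant of this shape needs a type-level (`ValTree`) representation,
    // it is stored roughly as `Branch[ Leaf(1), Branch[ Leaf(2), Leaf(3) ] ]`, and
    // `valtree_to_const_value` turns such a tree back into ordinary in-memory
    // constant data (here: a 3-byte allocation).
    const C: (u8, [u8; 2]) = (1, [2, 3]);

    fn main() {
        assert_eq!(C, (1, [2, 3]));
    }
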
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 14e8bebbb18..80236ee05b7 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -177,6 +177,16 @@ pub(crate) struct NonConstFmtMacroCall {
 }
 
 #[derive(Diagnostic)]
+#[diag(const_eval_conditionally_const_call)]
+pub(crate) struct ConditionallyConstCall {
+    #[primary_span]
+    pub span: Span,
+    pub def_path_str: String,
+    pub def_descr: &'static str,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
 #[diag(const_eval_non_const_fn_call, code = E0015)]
 pub(crate) struct NonConstFnCall {
     #[primary_span]
@@ -401,7 +411,7 @@ pub struct LiveDrop<'tcx> {
     pub kind: ConstContext,
     pub dropped_ty: Ty<'tcx>,
     #[label(const_eval_dropped_at_label)]
-    pub dropped_at: Option<Span>,
+    pub dropped_at: Span,
 }
 
 #[derive(Diagnostic)]
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index ef0902e4226..ed4a1a9e647 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -215,7 +215,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 // Even if `ty` is normalized, the search for the unsized tail will project
                 // to fields, which can yield non-normalized types. So we need to provide a
                 // normalization function.
-                let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty);
+                let normalize = |ty| self.tcx.normalize_erasing_regions(self.typing_env, ty);
                 ty.ptr_metadata_ty(*self.tcx, normalize)
             };
             return interp_ok(meta_ty(caller) == meta_ty(callee));
@@ -662,7 +662,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
                     // (For that reason we also cannot use `unpack_dyn_trait`.)
                     let receiver_tail =
-                        self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.param_env);
+                        self.tcx.struct_tail_for_codegen(receiver_place.layout.ty, self.typing_env);
                     let ty::Dynamic(receiver_trait, _, ty::Dyn) = receiver_tail.kind() else {
                         span_bug!(
                             self.cur_span(),
@@ -704,7 +704,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
                     let concrete_method = Instance::expect_resolve_for_vtable(
                         tcx,
-                        self.param_env,
+                        self.typing_env,
                         def_id,
                         instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
                         self.cur_span(),
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 49559059265..ef3e96784ce 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -83,7 +83,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     ty::FnDef(def_id, args) => {
                         let instance = ty::Instance::resolve_for_fn_ptr(
                             *self.tcx,
-                            self.param_env,
+                            self.typing_env,
                             def_id,
                             args,
                         )
@@ -238,7 +238,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let scalar = src.to_scalar();
         let ptr = scalar.to_pointer(self)?;
         match ptr.into_pointer_or_addr() {
-            Ok(ptr) => M::expose_ptr(self, ptr)?,
+            Ok(ptr) => M::expose_provenance(self, ptr.provenance)?,
             Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
         };
         interp_ok(ImmTy::from_scalar(
@@ -384,7 +384,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ) -> InterpResult<'tcx> {
         // A<Struct> -> A<Trait> conversion
         let (src_pointee_ty, dest_pointee_ty) =
-            self.tcx.struct_lockstep_tails_for_codegen(source_ty, cast_ty, self.param_env);
+            self.tcx.struct_lockstep_tails_for_codegen(source_ty, cast_ty, self.typing_env);
 
         match (src_pointee_ty.kind(), dest_pointee_ty.kind()) {
             (&ty::Array(_, length), &ty::Slice(_)) => {
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index f94d0cbb42b..2f0b1cb6d1e 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,7 +1,7 @@
 //! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
 
 use rustc_abi::{self as abi, TagEncoding, VariantIdx, Variants};
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::ty::{self, CoroutineArgsExt, ScalarInt, Ty};
 use rustc_middle::{mir, span_bug};
 use tracing::{instrument, trace};
@@ -21,17 +21,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         variant_index: VariantIdx,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        // Layout computation excludes uninhabited variants from consideration
-        // therefore there's no way to represent those variants in the given layout.
-        // Essentially, uninhabited variants do not have a tag that corresponds to their
-        // discriminant, so we cannot do anything here.
-        // When evaluating we will always error before even getting here, but ConstProp 'executes'
-        // dead code, so we cannot ICE here.
-        if dest.layout().for_variant(self, variant_index).is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantWritten(variant_index))
-        }
-
-        match self.tag_for_variant(dest.layout().ty, variant_index)? {
+        match self.tag_for_variant(dest.layout(), variant_index)? {
             Some((tag, tag_field)) => {
                 // No need to validate the discriminant here because the
                 // `TyAndLayout::for_variant()` call earlier already checks the
@@ -54,7 +44,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         }
     }
 
-    /// Read discriminant, return the runtime value as well as the variant index.
+    /// Read discriminant, return the variant index.
     /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
     ///
     /// Will never return an uninhabited variant.
@@ -75,21 +65,17 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
         // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
         let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
+            Variants::Empty => {
+                throw_ub!(UninhabitedEnumVariantRead(None));
+            }
             Variants::Single { index } => {
-                // Do some extra checks on enums.
-                if ty.is_enum() {
-                    // Hilariously, `Single` is used even for 0-variant enums.
-                    // (See https://github.com/rust-lang/rust/issues/89765).
-                    if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
-                        throw_ub!(UninhabitedEnumVariantRead(index))
-                    }
+                if op.layout().is_uninhabited() {
                     // For consistency with `write_discriminant`, and to make sure that
                     // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
-                    // for uninhabited variants.
-                    if op.layout().for_variant(self, index).is_uninhabited() {
-                        throw_ub!(UninhabitedEnumVariantRead(index))
-                    }
+                    // for uninhabited enums.
+                    throw_ub!(UninhabitedEnumVariantRead(Some(index)));
                 }
+                // Since the type is inhabited, there must be an index.
                 return interp_ok(index);
             }
             Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
@@ -188,6 +174,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                             let variants =
                                 ty.ty_adt_def().expect("tagged layout for non adt").variants();
                             assert!(variant_index < variants.next_index());
+                            if variant_index == untagged_variant {
+                                // The untagged variant can be in the niche range, but even then it
+                                // is not a valid encoding.
+                                throw_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size)))
+                            }
                             variant_index
                         } else {
                             untagged_variant
@@ -204,11 +195,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // `uninhabited_enum_branching` MIR pass. It also ensures consistency with
         // `write_discriminant`.
         if op.layout().for_variant(self, index).is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantRead(index))
+            throw_ub!(UninhabitedEnumVariantRead(Some(index)))
         }
         interp_ok(index)
     }
 
+    /// Read discriminant, return the user-visible discriminant.
+    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
     pub fn discriminant_for_variant(
         &self,
         ty: Ty<'tcx>,
@@ -236,10 +229,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ///   given field index.
     pub(crate) fn tag_for_variant(
         &self,
-        ty: Ty<'tcx>,
+        layout: TyAndLayout<'tcx>,
         variant_index: VariantIdx,
     ) -> InterpResult<'tcx, Option<(ScalarInt, usize)>> {
-        match self.layout_of(ty)?.variants {
+        // Layout computation excludes uninhabited variants from consideration.
+        // Therefore, there's no way to represent those variants in the given layout.
+        // Essentially, uninhabited variants do not have a tag that corresponds to their
+        // discriminant, so we have to bail out here.
+        if layout.for_variant(self, variant_index).is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten(variant_index))
+        }
+
+        match layout.variants {
+            abi::Variants::Empty => unreachable!("we already handled uninhabited types"),
             abi::Variants::Single { .. } => {
                 // The tag of a `Single` enum is like the tag of the niched
                 // variant: there's no tag as the discriminant is encoded
@@ -260,7 +262,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 // raw discriminants for enums are isize or bigger during
                 // their computation, but the in-memory tag is the smallest possible
                 // representation
-                let discr = self.discriminant_for_variant(ty, variant_index)?;
+                let discr = self.discriminant_for_variant(layout.ty, variant_index)?;
                 let discr_size = discr.layout.size;
                 let discr_val = discr.to_scalar().to_bits(discr_size)?;
                 let tag_size = tag_layout.size(self);
@@ -286,11 +288,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 ..
             } => {
                 assert!(variant_index != untagged_variant);
+                // We checked that this variant is inhabited, so it must be in the niche range.
+                assert!(
+                    niche_variants.contains(&variant_index),
+                    "invalid variant index for this enum"
+                );
                 let variants_start = niche_variants.start().as_u32();
-                let variant_index_relative = variant_index
-                    .as_u32()
-                    .checked_sub(variants_start)
-                    .expect("overflow computing relative variant idx");
+                let variant_index_relative = variant_index.as_u32().strict_sub(variants_start);
                 // We need to use machine arithmetic when taking into account `niche_start`:
                 // tag_val = variant_index_relative + niche_start_val
                 let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
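
A hedged worked example of the `tag_val = variant_index_relative + niche_start_val` arithmetic
above, using the layout rustc typically picks for this enum; none of the concrete numbers below are
guaranteed by the language.

    // With the usual niche optimization, `A(bool)` is the untagged variant and
    // `B`/`C` are encoded in the bool's invalid byte values:
    //   variant indices:   A = 0, B = 1, C = 2
    //   niche_variants:    B..=C   (so variants_start = 1)
    //   niche_start_val:   2       (first value outside the bool's 0/1 range)
    //   tag(B) = (1 - 1) + 2 = 2
    //   tag(C) = (2 - 1) + 2 = 3
    #[allow(dead_code)]
    enum E {
        A(bool),
        B,
        C,
    }

    fn main() {
        // The whole enum fits in the single byte of the bool payload.
        assert_eq!(std::mem::size_of::<E>(), 1);
    }
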
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index ff6d5b28b3b..95a72d3cbc1 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -1,18 +1,18 @@
+use std::assert_matches::debug_assert_matches;
+
 use either::{Left, Right};
 use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout};
 use rustc_errors::DiagCtxtHandle;
 use rustc_hir::def_id::DefId;
 use rustc_infer::infer::TyCtxtInferExt;
 use rustc_infer::infer::at::ToTrace;
-use rustc_infer::traits::{ObligationCause, Reveal};
+use rustc_infer::traits::ObligationCause;
 use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{
     self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
 };
-use rustc_middle::ty::{
-    self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypingMode, Variance,
-};
+use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeFoldable, TypingEnv, Variance};
 use rustc_middle::{mir, span_bug};
 use rustc_session::Limit;
 use rustc_span::Span;
@@ -38,8 +38,9 @@ pub struct InterpCx<'tcx, M: Machine<'tcx>> {
     /// we are evaluating (if this is CTFE).
     pub tcx: TyCtxtAt<'tcx>,
 
-    /// Bounds in scope for polymorphic evaluations.
-    pub(crate) param_env: ty::ParamEnv<'tcx>,
+    /// The typing environment in scope, in case we're evaluating in a
+    /// polymorphic context. This always uses `ty::TypingMode::PostAnalysis`.
+    pub(super) typing_env: ty::TypingEnv<'tcx>,
 
     /// The virtual memory system.
     pub memory: Memory<'tcx, M>,
@@ -65,12 +66,12 @@ where
     }
 }
 
-impl<'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'tcx, M>
+impl<'tcx, M> layout::HasTypingEnv<'tcx> for InterpCx<'tcx, M>
 where
     M: Machine<'tcx>,
 {
-    fn param_env(&self) -> ty::ParamEnv<'tcx> {
-        self.param_env
+    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
+        self.typing_env
     }
 }
 
@@ -116,8 +117,7 @@ impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
 /// This test should be symmetric, as it is primarily about layout compatibility.
 pub(super) fn mir_assign_valid_types<'tcx>(
     tcx: TyCtxt<'tcx>,
-    typing_mode: TypingMode<'tcx>,
-    param_env: ParamEnv<'tcx>,
+    typing_env: TypingEnv<'tcx>,
     src: TyAndLayout<'tcx>,
     dest: TyAndLayout<'tcx>,
 ) -> bool {
@@ -125,7 +125,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
     // all normal lifetimes are erased, higher-ranked types with their
     // late-bound lifetimes are still around and can lead to type
     // differences.
-    if util::relate_types(tcx, typing_mode, param_env, Variance::Covariant, src.ty, dest.ty) {
+    if util::relate_types(tcx, typing_env, Variance::Covariant, src.ty, dest.ty) {
         // Make sure the layout is equal, too -- just to be safe. Miri really
         // needs layout equality. For performance reason we skip this check when
         // the types are equal. Equal types *can* have different layouts when
@@ -145,8 +145,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
 #[cfg_attr(not(debug_assertions), inline(always))]
 pub(super) fn from_known_layout<'tcx>(
     tcx: TyCtxtAt<'tcx>,
-    typing_mode: TypingMode<'tcx>,
-    param_env: ParamEnv<'tcx>,
+    typing_env: TypingEnv<'tcx>,
     known_layout: Option<TyAndLayout<'tcx>>,
     compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
 ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
@@ -155,13 +154,7 @@ pub(super) fn from_known_layout<'tcx>(
         Some(known_layout) => {
             if cfg!(debug_assertions) {
                 let check_layout = compute()?;
-                if !mir_assign_valid_types(
-                    tcx.tcx,
-                    typing_mode,
-                    param_env,
-                    check_layout,
-                    known_layout,
-                ) {
+                if !mir_assign_valid_types(tcx.tcx, typing_env, check_layout, known_layout) {
                     span_bug!(
                         tcx.span,
                         "expected type differs from actual type.\nexpected: {}\nactual: {}",
@@ -199,23 +192,23 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     pub fn new(
         tcx: TyCtxt<'tcx>,
         root_span: Span,
-        param_env: ty::ParamEnv<'tcx>,
+        typing_env: ty::TypingEnv<'tcx>,
         machine: M,
     ) -> Self {
+        // Const eval always happens in `PostAnalysis` mode in order to be able to use the hidden types of
+        // opaque types. This is needed for trivial things like `size_of`, but also for using associated
+        // types that are not specified in the opaque type. We also use MIR bodies whose opaque types have
+        // already been revealed, so we'd be able to at least partially observe the hidden types anyway.
+        debug_assert_matches!(typing_env.typing_mode, ty::TypingMode::PostAnalysis);
         InterpCx {
             machine,
             tcx: tcx.at(root_span),
-            param_env,
+            typing_env,
             memory: Memory::new(),
             recursion_limit: tcx.recursion_limit(),
         }
     }
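
A brief usage sketch of the new constructor signature, mirroring the call sites changed elsewhere
in this patch (e.g. in eval_queries.rs and const_eval/mod.rs); shown as comments because it only
makes sense inside the compiler:

    // Callers that previously passed `ty::ParamEnv::reveal_all()` now pass a
    // `TypingEnv`, and query keys are built with `as_query_input`:
    //
    //     let ecx = InterpCx::new(tcx, span, ty::TypingEnv::fully_monomorphized(), machine);
    //     tcx.eval_to_allocation_raw(typing_env.as_query_input(gid))
    //
    // instead of `InterpCx::new(tcx, span, ty::ParamEnv::reveal_all(), machine)`
    // and `param_env.and(gid)`.
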
 
-    pub fn typing_mode(&self) -> TypingMode<'tcx> {
-        debug_assert_eq!(self.param_env.reveal(), Reveal::All);
-        TypingMode::PostAnalysis
-    }
-
     /// Returns the span of the currently executed statement/terminator.
     /// This is the span typically used for error reporting.
     #[inline(always)]
@@ -258,7 +251,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
     #[inline]
     pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
-        ty.is_freeze(*self.tcx, self.param_env)
+        ty.is_freeze(*self.tcx, self.typing_env)
     }
 
     pub fn load_mir(
@@ -275,7 +268,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         };
         // do not continue if typeck errors occurred (can only occur in local crate)
         if let Some(err) = body.tainted_by_errors {
-            throw_inval!(AlreadyReported(ReportedErrorInfo::tainted_by_errors(err)));
+            throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(err)));
         }
         interp_ok(body)
     }
@@ -304,31 +297,33 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             .instance
             .try_instantiate_mir_and_normalize_erasing_regions(
                 *self.tcx,
-                self.param_env,
+                self.typing_env,
                 ty::EarlyBinder::bind(value),
             )
             .map_err(|_| ErrorHandled::TooGeneric(self.cur_span()))
     }
 
-    /// The `args` are assumed to already be in our interpreter "universe" (param_env).
+    /// The `args` are assumed to already be in our interpreter "universe".
     pub(super) fn resolve(
         &self,
         def: DefId,
         args: GenericArgsRef<'tcx>,
     ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
         trace!("resolve: {:?}, {:#?}", def, args);
-        trace!("param_env: {:#?}", self.param_env);
+        trace!("typing_env: {:#?}", self.typing_env);
         trace!("args: {:#?}", args);
-        match ty::Instance::try_resolve(*self.tcx, self.param_env, def, args) {
+        match ty::Instance::try_resolve(*self.tcx, self.typing_env, def, args) {
             Ok(Some(instance)) => interp_ok(instance),
             Ok(None) => throw_inval!(TooGeneric),
 
             // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
-            Err(error_reported) => throw_inval!(AlreadyReported(error_reported.into())),
+            Err(error_guaranteed) => throw_inval!(AlreadyReported(
+                ReportedErrorInfo::non_const_eval_error(error_guaranteed)
+            )),
         }
     }
 
-    /// Check if the two things are equal in the current param_env, using an infctx to get proper
+    /// Check if the two things are equal in the current param_env, using an infcx to get proper
     /// equality checks.
     #[instrument(level = "trace", skip(self), ret)]
     pub(super) fn eq_in_param_env<T>(&self, a: T, b: T) -> bool
@@ -340,14 +335,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             return true;
         }
         // Slow path: spin up an inference context to check if these traits are sufficiently equal.
-        let infcx = self.tcx.infer_ctxt().build(self.typing_mode());
+        let (infcx, param_env) = self.tcx.infer_ctxt().build_with_typing_env(self.typing_env);
         let ocx = ObligationCtxt::new(&infcx);
         let cause = ObligationCause::dummy_with_span(self.cur_span());
         // equate the two trait refs after normalization
-        let a = ocx.normalize(&cause, self.param_env, a);
-        let b = ocx.normalize(&cause, self.param_env, b);
+        let a = ocx.normalize(&cause, param_env, a);
+        let b = ocx.normalize(&cause, param_env, b);
 
-        if let Err(terr) = ocx.eq(&cause, self.param_env, a, b) {
+        if let Err(terr) = ocx.eq(&cause, param_env, a, b) {
             trace!(?terr);
             return false;
         }
@@ -572,10 +567,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let val = if self.tcx.is_static(gid.instance.def_id()) {
             let alloc_id = self.tcx.reserve_and_set_static_alloc(gid.instance.def_id());
 
-            let ty = instance.ty(self.tcx.tcx, self.param_env);
+            let ty = instance.ty(self.tcx.tcx, self.typing_env);
             mir::ConstAlloc { alloc_id, ty }
         } else {
-            self.ctfe_query(|tcx| tcx.eval_to_allocation_raw(self.param_env.and(gid)))?
+            self.ctfe_query(|tcx| tcx.eval_to_allocation_raw(self.typing_env.as_query_input(gid)))?
         };
         self.raw_const_to_mplace(val)
     }
@@ -587,15 +582,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| {
-            let const_val = val.eval(*ecx.tcx, ecx.param_env, span).map_err(|err| {
+            let const_val = val.eval(*ecx.tcx, ecx.typing_env, span).map_err(|err| {
                 if M::ALL_CONSTS_ARE_PRECHECKED {
                     match err {
                         ErrorHandled::TooGeneric(..) => {},
                         ErrorHandled::Reported(reported, span) => {
-                            if reported.is_tainted_by_errors() {
-                                // const-eval will return "tainted" errors if e.g. the layout cannot
-                                // be computed as the type references non-existing names.
-                                // See <https://github.com/rust-lang/rust/issues/124348>.
+                            if reported.is_allowed_in_infallible() {
+                                // These errors can legitimately happen even when the expression
+                                // is nominally "infallible", e.g. when running out of memory
+                                // or when some layout could not be computed.
                             } else {
                                 // Looks like the const is not captured by `required_consts`, that's bad.
                                 span_bug!(span, "interpret const eval failure of {val:?} which is not in required_consts");
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 7a1b92601a4..0cbb3b157f3 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -165,7 +165,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval
         InternKind::Static(Mutability::Not) => {
             (
                 // Outermost allocation is mutable if `!Freeze`.
-                if ret.layout.ty.is_freeze(*ecx.tcx, ecx.param_env) {
+                if ret.layout.ty.is_freeze(*ecx.tcx, ecx.typing_env) {
                     Mutability::Not
                 } else {
                     Mutability::Mut
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index c7a56a80e81..1af8438534f 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
 use rustc_middle::ty::layout::{LayoutOf as _, TyAndLayout, ValidityRequirement};
 use rustc_middle::ty::{GenericArgsRef, Ty, TyCtxt};
 use rustc_middle::{bug, ty};
-use rustc_span::symbol::{Symbol, sym};
+use rustc_span::{Symbol, sym};
 use tracing::trace;
 
 use super::memory::MemoryKind;
@@ -34,7 +34,7 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
 /// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
 pub(crate) fn eval_nullary_intrinsic<'tcx>(
     tcx: TyCtxt<'tcx>,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
     def_id: DefId,
     args: GenericArgsRef<'tcx>,
 ) -> InterpResult<'tcx, ConstValue<'tcx>> {
@@ -48,11 +48,13 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         }
         sym::needs_drop => {
             ensure_monomorphic_enough(tcx, tp_ty)?;
-            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
+            ConstValue::from_bool(tp_ty.needs_drop(tcx, typing_env))
         }
         sym::pref_align_of => {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
-            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(*e)))?;
+            let layout = tcx
+                .layout_of(typing_env.as_query_input(tp_ty))
+                .map_err(|e| err_inval!(Layout(*e)))?;
             ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
         }
         sym::type_id => {
@@ -149,8 +151,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     sym::type_name => Ty::new_static_str(self.tcx.tcx),
                     _ => bug!(),
                 };
-                let val =
-                    self.ctfe_query(|tcx| tcx.const_eval_global_id(self.param_env, gid, tcx.span))?;
+                let val = self
+                    .ctfe_query(|tcx| tcx.const_eval_global_id(self.typing_env, gid, tcx.span))?;
                 let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                 self.copy_op(&val, dest)?;
             }
@@ -355,7 +357,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
                 let should_panic = !self
                     .tcx
-                    .check_validity_requirement((requirement, self.param_env.and(ty)))
+                    .check_validity_requirement((requirement, self.typing_env.as_query_input(ty)))
                     .map_err(|_| err_inval!(TooGeneric))?;
 
                 if should_panic {
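These hunks replace `ty::ParamEnv<'tcx>` with `ty::TypingEnv<'tcx>` wherever a layout or validity query is made: instead of bundling the type with `param_env.and(ty)`, call sites now pass `typing_env.as_query_input(ty)`. The following sketch only restates the query shape already visible in the hunks above; the helper name `layout_for` is invented, and the snippet assumes the usual `rustc_middle` context of this crate rather than being runnable on its own.

use rustc_middle::ty::layout::{LayoutError, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};

// Hypothetical helper: request a layout under an explicit typing environment.
fn layout_for<'tcx>(
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>> {
    // Before this change the call site read `tcx.layout_of(param_env.and(ty))`.
    tcx.layout_of(typing_env.as_query_input(ty))
}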
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index dbe09d55b2d..9ac2a024ccf 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -327,11 +327,11 @@ pub trait Machine<'tcx>: Sized {
         addr: u64,
     ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
 
-    /// Marks a pointer as exposed, allowing it's provenance
+    /// Marks a pointer as exposed, allowing its provenance
     /// to be recovered. "Pointer-to-int cast"
-    fn expose_ptr(
-        ecx: &mut InterpCx<'tcx, Self>,
-        ptr: Pointer<Self::Provenance>,
+    fn expose_provenance(
+        ecx: &InterpCx<'tcx, Self>,
+        provenance: Self::Provenance,
     ) -> InterpResult<'tcx>;
 
     /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
@@ -540,10 +540,14 @@ pub trait Machine<'tcx>: Sized {
         interp_ok(ReturnAction::Normal)
     }
 
-    /// Called immediately after an "immediate" local variable is read
+    /// Called immediately after an "immediate" local variable is read in a given frame
     /// (i.e., this is called for reads that do not end up accessing addressable memory).
     #[inline(always)]
-    fn after_local_read(_ecx: &InterpCx<'tcx, Self>, _local: mir::Local) -> InterpResult<'tcx> {
+    fn after_local_read(
+        _ecx: &InterpCx<'tcx, Self>,
+        _frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>,
+        _local: mir::Local,
+    ) -> InterpResult<'tcx> {
         interp_ok(())
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 1ad8ffa4b53..027ba9644cb 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -14,10 +14,9 @@ use std::{fmt, mem, ptr};
 use rustc_abi::{Align, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
-use rustc_hir::def::DefKind;
 use rustc_middle::bug;
 use rustc_middle::mir::display_allocation;
-use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use tracing::{debug, instrument, trace};
 
 use super::{
@@ -72,6 +71,21 @@ pub enum AllocKind {
     Dead,
 }
 
+/// Metadata about an `AllocId`.
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct AllocInfo {
+    pub size: Size,
+    pub align: Align,
+    pub kind: AllocKind,
+    pub mutbl: Mutability,
+}
+
+impl AllocInfo {
+    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
+        Self { size, align, kind, mutbl }
+    }
+}
+
 /// The value of a function pointer.
 #[derive(Debug, Copy, Clone)]
 pub enum FnVal<'tcx, Other> {
@@ -524,17 +538,22 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         match self.ptr_try_get_alloc_id(ptr, 0) {
             Err(addr) => is_offset_misaligned(addr, align),
             Ok((alloc_id, offset, _prov)) => {
-                let (_size, alloc_align, kind) = self.get_alloc_info(alloc_id);
-                if let Some(misalign) =
-                    M::alignment_check(self, alloc_id, alloc_align, kind, offset, align)
-                {
+                let alloc_info = self.get_alloc_info(alloc_id);
+                if let Some(misalign) = M::alignment_check(
+                    self,
+                    alloc_id,
+                    alloc_info.align,
+                    alloc_info.kind,
+                    offset,
+                    align,
+                ) {
                     Some(misalign)
                 } else if M::Provenance::OFFSET_IS_ADDR {
                     is_offset_misaligned(ptr.addr().bytes(), align)
                 } else {
                     // Check allocation alignment and offset alignment.
-                    if alloc_align.bytes() < align.bytes() {
-                        Some(Misalignment { has: alloc_align, required: align })
+                    if alloc_info.align.bytes() < align.bytes() {
+                        Some(Misalignment { has: alloc_info.align, required: align })
                     } else {
                         is_offset_misaligned(offset.bytes(), align)
                     }
@@ -818,82 +837,45 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
     /// Obtain the size and alignment of an allocation, even if that allocation has
     /// been deallocated.
-    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
+    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
         // # Regular allocations
         // Don't use `self.get_raw` here as that will
         // a) cause cycles in case `id` refers to a static
         // b) duplicate a global's allocation in miri
         if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
-            return (alloc.size(), alloc.align, AllocKind::LiveData);
+            return AllocInfo::new(
+                alloc.size(),
+                alloc.align,
+                AllocKind::LiveData,
+                alloc.mutability,
+            );
         }
 
         // # Function pointers
         // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
         if self.get_fn_alloc(id).is_some() {
-            return (Size::ZERO, Align::ONE, AllocKind::Function);
+            return AllocInfo::new(Size::ZERO, Align::ONE, AllocKind::Function, Mutability::Not);
         }
 
-        // # Statics
-        // Can't do this in the match argument, we may get cycle errors since the lock would
-        // be held throughout the match.
-        match self.tcx.try_get_global_alloc(id) {
-            Some(GlobalAlloc::Static(def_id)) => {
-                // Thread-local statics do not have a constant address. They *must* be accessed via
-                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
-                assert!(!self.tcx.is_thread_local_static(def_id));
-
-                let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else {
-                    bug!("GlobalAlloc::Static is not a static")
-                };
-
-                let (size, align) = if nested {
-                    // Nested anonymous statics are untyped, so let's get their
-                    // size and alignment from the allocation itself. This always
-                    // succeeds, as the query is fed at DefId creation time, so no
-                    // evaluation actually occurs.
-                    let alloc = self.tcx.eval_static_initializer(def_id).unwrap();
-                    (alloc.0.size(), alloc.0.align)
-                } else {
-                    // Use size and align of the type for everything else. We need
-                    // to do that to
-                    // * avoid cycle errors in case of self-referential statics,
-                    // * be able to get information on extern statics.
-                    let ty = self
-                        .tcx
-                        .type_of(def_id)
-                        .no_bound_vars()
-                        .expect("statics should not have generic parameters");
-                    let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
-                    assert!(layout.is_sized());
-                    (layout.size, layout.align.abi)
-                };
-                (size, align, AllocKind::LiveData)
-            }
-            Some(GlobalAlloc::Memory(alloc)) => {
-                // Need to duplicate the logic here, because the global allocations have
-                // different associated types than the interpreter-local ones.
-                let alloc = alloc.inner();
-                (alloc.size(), alloc.align, AllocKind::LiveData)
-            }
-            Some(GlobalAlloc::Function { .. }) => {
-                bug!("We already checked function pointers above")
-            }
-            Some(GlobalAlloc::VTable(..)) => {
-                // No data to be accessed here. But vtables are pointer-aligned.
-                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
-            }
-            // The rest must be dead.
-            None => {
-                // Deallocated pointers are allowed, we should be able to find
-                // them in the map.
-                let (size, align) = *self
-                    .memory
-                    .dead_alloc_map
-                    .get(&id)
-                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
-                (size, align, AllocKind::Dead)
-            }
+        // # Global allocations
+        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
+            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
+            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
+            let kind = match global_alloc {
+                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
+                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
+                GlobalAlloc::VTable { .. } => AllocKind::VTable,
+            };
+            return AllocInfo::new(size, align, kind, mutbl);
         }
+
+        // # Dead pointers
+        let (size, align) = *self
+            .memory
+            .dead_alloc_map
+            .get(&id)
+            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
+        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
     }
 
     /// Obtain the size and alignment of a *live* allocation.
@@ -902,11 +884,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         id: AllocId,
         msg: CheckInAllocMsg,
     ) -> InterpResult<'tcx, (Size, Align)> {
-        let (size, align, kind) = self.get_alloc_info(id);
-        if matches!(kind, AllocKind::Dead) {
+        let info = self.get_alloc_info(id);
+        if matches!(info.kind, AllocKind::Dead) {
             throw_ub!(PointerUseAfterFree(id, msg))
         }
-        interp_ok((size, align))
+        interp_ok((info.size, info.align))
     }
 
     fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
@@ -962,6 +944,52 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         interp_ok(())
     }
 
+    /// Handle the effect an FFI call might have on the state of allocations.
+    /// This overapproximates the modifications that external code might make to memory:
+    /// we set all reachable allocations as initialized, mark all provenances as exposed,
+    /// and overwrite them with `Provenance::WILDCARD`.
+    pub fn prepare_for_native_call(
+        &mut self,
+        id: AllocId,
+        initial_prov: M::Provenance,
+    ) -> InterpResult<'tcx> {
+        // Expose provenance of the root allocation.
+        M::expose_provenance(self, initial_prov)?;
+
+        let mut done = FxHashSet::default();
+        let mut todo = vec![id];
+        while let Some(id) = todo.pop() {
+            if !done.insert(id) {
+                // We already saw this allocation before, don't process it again.
+                continue;
+            }
+            let info = self.get_alloc_info(id);
+
+            // If there is no data behind this pointer, skip this.
+            if !matches!(info.kind, AllocKind::LiveData) {
+                continue;
+            }
+
+            // Expose all provenances in this allocation, and add them to `todo`.
+            let alloc = self.get_alloc_raw(id)?;
+            for prov in alloc.provenance().provenances() {
+                M::expose_provenance(self, prov)?;
+                if let Some(id) = prov.get_alloc_id() {
+                    todo.push(id);
+                }
+            }
+
+            // Prepare for possible write from native code if mutable.
+            if info.mutbl.is_mut() {
+                self.get_alloc_raw_mut(id)?
+                    .0
+                    .prepare_for_native_write()
+                    .map_err(|e| e.to_interp_error(id))?;
+            }
+        }
+        interp_ok(())
+    }
+
     /// Create a lazy debug printer that prints the given allocation and all allocations it points
     /// to, recursively.
     #[must_use]
@@ -1458,7 +1486,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let ptr = scalar.to_pointer(self)?;
                 match self.ptr_try_get_alloc_id(ptr, 0) {
                     Ok((alloc_id, offset, _)) => {
-                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
+                        let size = self.get_alloc_info(alloc_id).size;
                         // If the pointer is out-of-bounds, it may be null.
                         // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                         offset > size
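The new `prepare_for_native_call` visits every allocation reachable from a root pointer exactly once, exposing each provenance it finds and preparing mutable allocations for a possible write from foreign code. The traversal is a plain worklist over a set of visited ids. Below is a self-contained toy analogue of that reachability pass; none of the names (`Alloc`, `expose_reachable`, etc.) are the interpreter's own types, they only mirror the `done`/`todo` structure of the hunk above.

use std::collections::{HashMap, HashSet};

type AllocId = u32;

struct Alloc {
    mutable: bool,
    // Ids of other allocations this one points to (its "provenance").
    pointees: Vec<AllocId>,
}

/// Mark every allocation reachable from `root` as exposed, and collect the
/// mutable ones that would need to be prepared for a write by native code.
fn expose_reachable(
    allocs: &HashMap<AllocId, Alloc>,
    root: AllocId,
) -> (HashSet<AllocId>, Vec<AllocId>) {
    let mut exposed = HashSet::new();
    let mut needs_write_prep = Vec::new();
    let mut todo = vec![root];
    while let Some(id) = todo.pop() {
        if !exposed.insert(id) {
            // Already visited: don't process an allocation twice.
            continue;
        }
        let Some(alloc) = allocs.get(&id) else { continue };
        // Every pointer stored in this allocation becomes reachable too.
        todo.extend(alloc.pointees.iter().copied());
        if alloc.mutable {
            needs_write_prep.push(id);
        }
    }
    (exposed, needs_write_prep)
}

fn main() {
    let allocs = HashMap::from([
        (0, Alloc { mutable: true, pointees: vec![1, 2] }),
        (1, Alloc { mutable: false, pointees: vec![2] }),
        (2, Alloc { mutable: true, pointees: vec![] }),
    ]);
    let (exposed, prep) = expose_reachable(&allocs, 0);
    println!("exposed: {exposed:?}, needs write prep: {prep:?}");
}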
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 5e84626f77e..f5792aba207 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -31,7 +31,7 @@ pub use self::intern::{
 };
 pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 pub use self::machine::{AllocMap, Machine, MayLeak, ReturnAction, compile_time_machine};
-pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
+pub use self::memory::{AllocInfo, AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
 use self::operand::Operand;
 pub use self::operand::{ImmTy, Immediate, OpTy};
 pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index a130ae89bcb..b861ffb6110 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -8,16 +8,16 @@ use rustc_abi as abi;
 use rustc_abi::{BackendRepr, HasDataLayout, Size};
 use rustc_hir::def::Namespace;
 use rustc_middle::mir::interpret::ScalarSizeMismatch;
-use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
 use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, mir, span_bug, ty};
 use tracing::trace;
 
 use super::{
-    CtfeProvenance, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode,
-    PlaceTy, Pointer, Projectable, Provenance, Scalar, alloc_range, err_ub, from_known_layout,
-    interp_ok, mir_assign_valid_types, throw_ub,
+    CtfeProvenance, Frame, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta,
+    OffsetMode, PlaceTy, Pointer, Projectable, Provenance, Scalar, alloc_range, err_ub,
+    from_known_layout, interp_ok, mir_assign_valid_types, throw_ub,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -297,21 +297,27 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 
     #[inline]
     pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
-        let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
+        // Can use any typing env, since `bool` is always monomorphic.
+        let layout = tcx
+            .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(tcx.types.bool))
+            .unwrap();
         Self::from_scalar(Scalar::from_bool(b), layout)
     }
 
     #[inline]
     pub fn from_ordering(c: std::cmp::Ordering, tcx: TyCtxt<'tcx>) -> Self {
+        // Can use any typing env, since `Ordering` is always monomorphic.
         let ty = tcx.ty_ordering_enum(None);
-        let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap();
+        let layout =
+            tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ty)).unwrap();
         Self::from_scalar(Scalar::from_i8(c as i8), layout)
     }
 
-    pub fn from_pair(a: Self, b: Self, tcx: TyCtxt<'tcx>) -> Self {
-        let layout = tcx
+    pub fn from_pair(a: Self, b: Self, cx: &(impl HasTypingEnv<'tcx> + HasTyCtxt<'tcx>)) -> Self {
+        let layout = cx
+            .tcx()
             .layout_of(
-                ty::ParamEnv::reveal_all().and(Ty::new_tup(tcx, &[a.layout.ty, b.layout.ty])),
+                cx.typing_env().as_query_input(Ty::new_tup(cx.tcx(), &[a.layout.ty, b.layout.ty])),
             )
             .unwrap();
         Self::from_scalar_pair(a.to_scalar(), b.to_scalar(), layout)
@@ -341,7 +347,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 
     #[inline]
     #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-    pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>)) -> (Self, Self) {
+    pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasTypingEnv<'tcx>)) -> (Self, Self) {
         let layout = self.layout;
         let (val0, val1) = self.to_scalar_pair();
         (
@@ -702,23 +708,32 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         interp_ok(str)
     }
 
-    /// Read from a local of the current frame.
+    /// Read from a local of the current frame. Convenience method for [`InterpCx::local_at_frame_to_op`].
+    pub fn local_to_op(
+        &self,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        self.local_at_frame_to_op(self.frame(), local, layout)
+    }
+
+    /// Read from a local of a given frame.
     /// Will not access memory, instead an indirect `Operand` is returned.
     ///
-    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
-    /// OpTy from a local.
-    pub fn local_to_op(
+    /// This is public because it is used by [Aquascope](https://github.com/cognitive-engineering-lab/aquascope/)
+    /// to get an OpTy from a local.
+    pub fn local_at_frame_to_op(
         &self,
+        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
         local: mir::Local,
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let frame = self.frame();
         let layout = self.layout_of_local(frame, local, layout)?;
         let op = *frame.locals[local].access()?;
         if matches!(op, Operand::Immediate(_)) {
             assert!(!layout.is_unsized());
         }
-        M::after_local_read(self, local)?;
+        M::after_local_read(self, frame, local)?;
         interp_ok(OpTy { op, layout })
     }
 
@@ -773,8 +788,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 )?;
             if !mir_assign_valid_types(
                 *self.tcx,
-                self.typing_mode(),
-                self.param_env,
+                self.typing_env(),
                 self.layout_of(normalized_place_ty)?,
                 op.layout,
             ) {
@@ -833,9 +847,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             })
         };
         let layout =
-            from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
-                self.layout_of(ty).into()
-            })?;
+            from_known_layout(self.tcx, self.typing_env(), layout, || self.layout_of(ty).into())?;
         let imm = match val_val {
             mir::ConstValue::Indirect { alloc_id, offset } => {
                 // This is const data, no mutation allowed.
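`local_to_op` is now a thin convenience wrapper around the more general `local_at_frame_to_op`, and the `after_local_read` machine hook receives the frame the read happened in, so an external tool can observe reads against frames other than the topmost one. A self-contained toy analogue of that wrapper-plus-general-form structure follows; every name in it is illustrative and not the interpreter's API.

struct Frame {
    name: &'static str,
    locals: Vec<i64>,
}

struct Interp {
    stack: Vec<Frame>,
}

impl Interp {
    /// Read a local from an arbitrary frame (the general form).
    fn local_at_frame(&self, frame: &Frame, local: usize) -> Option<i64> {
        // A real machine hook ("after_local_read") would be notified here,
        // and would receive `frame` so it knows *which* frame was read.
        frame.locals.get(local).copied()
    }

    /// Convenience wrapper: read from the current (topmost) frame.
    fn local(&self, local: usize) -> Option<i64> {
        self.local_at_frame(self.stack.last()?, local)
    }
}

fn main() {
    let interp = Interp {
        stack: vec![
            Frame { name: "caller", locals: vec![1] },
            Frame { name: "callee", locals: vec![2, 3] },
        ],
    };
    // The wrapper reads the topmost ("callee") frame...
    println!("current frame, local 1: {:?}", interp.local(1));
    // ...while the general form can read any frame, e.g. the caller's.
    let caller = &interp.stack[0];
    println!("{} frame, local 0: {:?}", caller.name, interp.local_at_frame(caller, 0));
}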
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index fbc85d37953..5fa632fc57a 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -6,7 +6,7 @@ use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_span::symbol::sym;
+use rustc_span::sym;
 use tracing::trace;
 
 use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, interp_ok, throw_ub};
@@ -222,7 +222,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let res = ImmTy::from_scalar_int(result, left.layout);
                 return interp_ok(if with_overflow {
                     let overflow = ImmTy::from_bool(overflow, *self.tcx);
-                    ImmTy::from_pair(res, overflow, *self.tcx)
+                    ImmTy::from_pair(res, overflow, self)
                 } else {
                     res
                 });
@@ -279,7 +279,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let res = ImmTy::from_scalar_int(result, left.layout);
                 if with_overflow {
                     let overflow = ImmTy::from_bool(overflow, *self.tcx);
-                    ImmTy::from_pair(res, overflow, *self.tcx)
+                    ImmTy::from_pair(res, overflow, self)
                 } else {
                     res
                 }
@@ -533,7 +533,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             }
             OffsetOf(fields) => {
                 let val =
-                    self.tcx.offset_of_subfield(self.param_env, layout, fields.iter()).bytes();
+                    self.tcx.offset_of_subfield(self.typing_env, layout, fields.iter()).bytes();
                 ImmTy::from_uint(val, usize_layout())
             }
             UbChecks => ImmTy::from_bool(M::ub_checks(self)?, *self.tcx),
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index cc8d1db6cfb..810e9356b26 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,8 +5,7 @@
 use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
-use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
-use rustc_ast::Mutability;
+use rustc_abi::{BackendRepr, HasDataLayout, Size};
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::{bug, mir, span_bug};
@@ -540,8 +539,7 @@ where
                 )?;
             if !mir_assign_valid_types(
                 *self.tcx,
-                self.typing_mode(),
-                self.param_env,
+                self.typing_env,
                 self.layout_of(normalized_place_ty)?,
                 place.layout,
             ) {
@@ -871,13 +869,8 @@ where
     ) -> InterpResult<'tcx> {
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
-        let layout_compat = mir_assign_valid_types(
-            *self.tcx,
-            self.typing_mode(),
-            self.param_env,
-            src.layout(),
-            dest.layout(),
-        );
+        let layout_compat =
+            mir_assign_valid_types(*self.tcx, self.typing_env, src.layout(), dest.layout());
         if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
@@ -1024,29 +1017,39 @@ where
         self.allocate_dyn(layout, kind, MemPlaceMeta::None)
     }
 
-    /// Returns a wide MPlace of type `str` to a new 1-aligned allocation.
-    /// Immutable strings are deduplicated and stored in global memory.
-    pub fn allocate_str(
+    /// Allocates a sequence of bytes in the interpreter's memory with alignment 1.
+    /// This is allocated in immutable global memory and deduplicated.
+    pub fn allocate_bytes_dedup(
+        &mut self,
+        bytes: &[u8],
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let salt = M::get_global_alloc_salt(self, None);
+        let id = self.tcx.allocate_bytes_dedup(bytes, salt);
+
+        // Turn untagged "global" pointers (obtained via `tcx`) into the machine pointer to the allocation.
+        M::adjust_alloc_root_pointer(
+            &self,
+            Pointer::from(id),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+        )
+    }
+
+    /// Allocates a string in the interpreter's memory, returning it as a (wide) place.
+    /// This is allocated in immutable global memory and deduplicated.
+    pub fn allocate_str_dedup(
         &mut self,
         str: &str,
-        kind: MemoryKind<M::MemoryKind>,
-        mutbl: Mutability,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        let tcx = self.tcx.tcx;
+        let bytes = str.as_bytes();
+        let ptr = self.allocate_bytes_dedup(bytes)?;
 
-        // Use cache for immutable strings.
-        let ptr = if mutbl.is_not() {
-            // Use dedup'd allocation function.
-            let salt = M::get_global_alloc_salt(self, None);
-            let id = tcx.allocate_bytes_dedup(str.as_bytes(), salt);
+        // Create length metadata for the string.
+        let meta = Scalar::from_target_usize(u64::try_from(bytes.len()).unwrap(), self);
 
-            // Turn untagged "global" pointers (obtained via `tcx`) into the machine pointer to the allocation.
-            M::adjust_alloc_root_pointer(&self, Pointer::from(id), Some(kind))?
-        } else {
-            self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?
-        };
-        let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
+        // Get layout for Rust's str type.
         let layout = self.layout_of(self.tcx.types.str_).unwrap();
+
+        // Combine pointer and metadata into a wide pointer.
         interp_ok(self.ptr_with_meta_to_mplace(
             ptr.into(),
             MemPlaceMeta::Meta(meta),
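`allocate_str` is replaced by `allocate_bytes_dedup` plus a thin `allocate_str_dedup` wrapper: the bytes go into deduplicated, immutable global memory, and the string place is just that pointer paired with length metadata. A self-contained toy sketch of the dedup-then-wrap structure follows; the interner and the `(id, len)` pair are illustrative stand-ins, not the interpreter's real representation.

use std::collections::HashMap;

/// Toy interner: identical byte strings get the same id.
#[derive(Default)]
struct ByteInterner {
    ids: HashMap<Vec<u8>, u32>,
}

impl ByteInterner {
    fn allocate_bytes_dedup(&mut self, bytes: &[u8]) -> u32 {
        let next = self.ids.len() as u32;
        *self.ids.entry(bytes.to_vec()).or_insert(next)
    }

    /// A "wide" string value: the deduplicated allocation plus its length.
    fn allocate_str_dedup(&mut self, s: &str) -> (u32, usize) {
        let bytes = s.as_bytes();
        (self.allocate_bytes_dedup(bytes), bytes.len())
    }
}

fn main() {
    let mut interner = ByteInterner::default();
    let a = interner.allocate_str_dedup("hello");
    let b = interner.allocate_str_dedup("hello");
    let c = interner.allocate_str_dedup("world");
    // Identical strings share an allocation id; the length rides alongside it.
    assert_eq!(a, b);
    assert_ne!(a, c);
    println!("{a:?} {b:?} {c:?}");
}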
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 65a93784e2c..996142d7b03 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -325,7 +325,7 @@ where
         let actual_to = if from_end {
             if from.checked_add(to).is_none_or(|to| to > len) {
                 // This can only be reached in ConstProp and non-rustc-MIR.
-                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
+                throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
             }
             len.checked_sub(to).unwrap()
         } else {
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
index 50c0446b3cd..6512675530a 100644
--- a/compiler/rustc_const_eval/src/interpret/stack.rs
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -10,7 +10,7 @@ use rustc_index::IndexVec;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
-use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::impls::always_storage_live_locals;
 use rustc_span::Span;
 use tracing::{info_span, instrument, trace};
 
@@ -379,7 +379,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         for &const_ in body.required_consts() {
             let c =
                 self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
-            c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
+            c.eval(*self.tcx, self.typing_env, const_.span).map_err(|err| {
                 err.emit_note(*self.tcx);
                 err
             })?;
@@ -584,8 +584,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         interp_ok(())
     }
 
+    /// This is public because it is used by [Aquascope](https://github.com/cognitive-engineering-lab/aquascope/)
+    /// to analyze all the locals in a stack frame.
     #[inline(always)]
-    pub(super) fn layout_of_local(
+    pub fn layout_of_local(
         &self,
         frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
         local: mir::Local,
@@ -596,13 +598,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             return interp_ok(layout);
         }
 
-        let layout =
-            from_known_layout(self.tcx, self.typing_mode(), self.param_env, layout, || {
-                let local_ty = frame.body.local_decls[local].ty;
-                let local_ty =
-                    self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
-                self.layout_of(local_ty).into()
-            })?;
+        let layout = from_known_layout(self.tcx, self.typing_env, layout, || {
+            let local_ty = frame.body.local_decls[local].ty;
+            let local_ty =
+                self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+            self.layout_of(local_ty).into()
+        })?;
 
         // Layouts of locals are requested a lot, so we cache them.
         state.layout.set(Some(layout));
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 18cff2c5e0f..a26c2eca107 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -9,6 +9,7 @@ use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_span::source_map::Spanned;
+use rustc_span::{DesugaringKind, Span};
 use rustc_target::callconv::FnAbi;
 use tracing::{info, instrument, trace};
 
@@ -80,7 +81,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         use rustc_middle::mir::StatementKind::*;
 
         match &stmt.kind {
-            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+            Assign(box (place, rvalue)) => {
+                self.eval_rvalue_into_place(rvalue, *place, stmt.source_info.span)?
+            }
 
             SetDiscriminant { place, variant_index } => {
                 let dest = self.eval_place(**place)?;
@@ -143,6 +146,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             // Defined to do nothing. These are added by optimization passes, to avoid changing the
             // size of MIR constantly.
             Nop => {}
+
+            // Only used for temporary lifetime lints
+            BackwardIncompatibleDropHint { .. } => {}
         }
 
         interp_ok(())
@@ -156,6 +162,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         &mut self,
         rvalue: &mir::Rvalue<'tcx>,
         place: mir::Place<'tcx>,
+        span: Span,
     ) -> InterpResult<'tcx> {
         let dest = self.eval_place(place)?;
         // FIXME: ensure some kind of non-aliasing between LHS and RHS?
@@ -247,8 +254,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let src = self.eval_place(place)?;
                 let place = self.force_allocation(&src)?;
                 let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
-                if !place_base_raw {
+                if !place_base_raw
+                    && span.desugaring_kind() != Some(DesugaringKind::IndexBoundsCheckReborrow)
+                {
                     // If this was not already raw, it needs retagging.
+                    // As a special hack, we exclude the desugared `PtrMetadata(&raw const *_n)`
+                    // from indexing. (Really we should not do any retag on `&raw` but that does not
+                    // currently work with Stacked Borrows.)
                     val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
                 }
                 self.write_immediate(*val, &dest)?;
@@ -418,7 +430,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             .collect::<InterpResult<'tcx, Vec<_>>>()?;
 
         let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
-        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.typing_env, fn_sig_binder);
         let extra_args = &args[fn_sig.inputs().len()..];
         let extra_args =
             self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 8bb5f173a56..ecb7c3fc93c 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -14,10 +14,8 @@ use crate::const_eval::{CompileTimeInterpCx, CompileTimeMachine, InterpretationR
 
 /// Checks whether a type contains generic parameters which must be instantiated.
 ///
-/// In case it does, returns a `TooGeneric` const eval error. Note that due to polymorphization
-/// types may be "concrete enough" even though they still contain generic parameters in
-/// case these parameters are unused.
-pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+/// In case it does, returns a `TooGeneric` const eval error.
+pub(crate) fn ensure_monomorphic_enough<'tcx, T>(_tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
 where
     T: TypeVisitable<TyCtxt<'tcx>>,
 {
@@ -27,11 +25,9 @@ where
     }
 
     struct FoundParam;
-    struct UsedParamsNeedInstantiationVisitor<'tcx> {
-        tcx: TyCtxt<'tcx>,
-    }
+    struct UsedParamsNeedInstantiationVisitor {}
 
-    impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedInstantiationVisitor<'tcx> {
+    impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedInstantiationVisitor {
         type Result = ControlFlow<FoundParam>;
 
         fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
@@ -41,25 +37,7 @@ where
 
             match *ty.kind() {
                 ty::Param(_) => ControlFlow::Break(FoundParam),
-                ty::Closure(def_id, args)
-                | ty::CoroutineClosure(def_id, args, ..)
-                | ty::Coroutine(def_id, args, ..)
-                | ty::FnDef(def_id, args) => {
-                    let instance = ty::InstanceKind::Item(def_id);
-                    let unused_params = self.tcx.unused_generic_params(instance);
-                    for (index, arg) in args.into_iter().enumerate() {
-                        let index = index
-                            .try_into()
-                            .expect("more generic parameters than can fit into a `u32`");
-                        // Only recurse when generic parameters in fns, closures and coroutines
-                        // are used and have to be instantiated.
-                        //
-                        // Just in case there are closures or coroutines within this arg,
-                        // recurse.
-                        if unused_params.is_used(index) && arg.has_param() {
-                            return arg.visit_with(self);
-                        }
-                    }
+                ty::Closure(..) | ty::CoroutineClosure(..) | ty::Coroutine(..) | ty::FnDef(..) => {
                     ControlFlow::Continue(())
                 }
                 _ => ty.super_visit_with(self),
@@ -74,7 +52,7 @@ where
         }
     }
 
-    let mut vis = UsedParamsNeedInstantiationVisitor { tcx };
+    let mut vis = UsedParamsNeedInstantiationVisitor {};
     if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
         throw_inval!(TooGeneric);
     } else {
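With polymorphization gone, `ensure_monomorphic_enough` no longer consults `unused_generic_params`: the visitor simply walks the type and short-circuits with `ControlFlow::Break(FoundParam)` as soon as it meets a generic parameter. A self-contained toy version of that early-exit walk follows, over a made-up type tree; the `Ty` enum here is illustrative only and not rustc's `Ty`.

use std::ops::ControlFlow;

enum Ty {
    Param,
    Int,
    Tuple(Vec<Ty>),
}

struct FoundParam;

/// Walk the type and stop at the first generic parameter, like the visitor above.
fn visit(ty: &Ty) -> ControlFlow<FoundParam> {
    match ty {
        Ty::Param => ControlFlow::Break(FoundParam),
        Ty::Int => ControlFlow::Continue(()),
        Ty::Tuple(fields) => {
            for field in fields {
                if let ControlFlow::Break(found) = visit(field) {
                    return ControlFlow::Break(found);
                }
            }
            ControlFlow::Continue(())
        }
    }
}

fn is_monomorphic_enough(ty: &Ty) -> bool {
    !matches!(visit(ty), ControlFlow::Break(FoundParam))
}

fn main() {
    let concrete = Ty::Tuple(vec![Ty::Int, Ty::Tuple(vec![Ty::Int])]);
    let generic = Ty::Tuple(vec![Ty::Int, Ty::Param]);
    println!("{} {}", is_monomorphic_enough(&concrete), is_monomorphic_enough(&generic));
}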
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index cd2c1ef3613..6f101395ccf 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -26,13 +26,13 @@ use rustc_middle::mir::interpret::{
 };
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_span::symbol::{Symbol, sym};
+use rustc_span::{Symbol, sym};
 use tracing::trace;
 
 use super::machine::AllocMap;
 use super::{
-    AllocId, AllocKind, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult,
-    MPlaceTy, Machine, MemPlaceMeta, PlaceTy, Pointer, Projectable, Scalar, ValueVisitor, err_ub,
+    AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
+    Machine, MemPlaceMeta, PlaceTy, Pointer, Projectable, Scalar, ValueVisitor, err_ub,
     format_interp_error,
 };
 
@@ -302,7 +302,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                     };
                 }
             }
-            Variants::Single { .. } => {}
+            Variants::Single { .. } | Variants::Empty => {}
         }
 
         // Now we know we are projecting to a field, so figure out which one.
@@ -344,6 +344,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                         // Inside a variant
                         PathElem::Field(def.variant(index).fields[FieldIdx::from_usize(field)].name)
                     }
+                    Variants::Empty => panic!("there is no field in Variants::Empty types"),
                     Variants::Multiple { .. } => bug!("we handled variants above"),
                 }
             }
@@ -448,7 +449,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         meta: MemPlaceMeta<M::Provenance>,
         pointee: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx> {
-        let tail = self.ecx.tcx.struct_tail_for_codegen(pointee.ty, self.ecx.param_env);
+        let tail = self.ecx.tcx.struct_tail_for_codegen(pointee.ty, self.ecx.typing_env);
         match tail.kind() {
             ty::Dynamic(data, _, ty::Dyn) => {
                 let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
@@ -557,9 +558,20 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 if let Ok((alloc_id, _offset, _prov)) =
                     self.ecx.ptr_try_get_alloc_id(place.ptr(), 0)
                 {
-                    if let Some(GlobalAlloc::Static(did)) =
-                        self.ecx.tcx.try_get_global_alloc(alloc_id)
-                    {
+                    // Everything should be already interned.
+                    let Some(global_alloc) = self.ecx.tcx.try_get_global_alloc(alloc_id) else {
+                        assert!(self.ecx.memory.alloc_map.get(alloc_id).is_none());
+                        // We can't have *any* references to non-existing allocations in const-eval
+                        // as the rest of rustc isn't happy with them... so we throw an error, even
+                        // though for zero-sized references this isn't really UB.
+                        // A potential future alternative would be to resurrect this as a zero-sized allocation
+                        // (which codegen will then compile to an aligned dummy pointer anyway).
+                        throw_validation_failure!(self.path, DanglingPtrUseAfterFree { ptr_kind });
+                    };
+                    let (size, _align) =
+                        global_alloc.size_and_align(*self.ecx.tcx, self.ecx.typing_env);
+
+                    if let GlobalAlloc::Static(did) = global_alloc {
                         let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
                             bug!()
                         };
@@ -593,17 +605,6 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                         }
                     }
 
-                    // Dangling and Mutability check.
-                    let (size, _align, alloc_kind) = self.ecx.get_alloc_info(alloc_id);
-                    if alloc_kind == AllocKind::Dead {
-                        // This can happen for zero-sized references. We can't have *any* references to
-                        // non-existing allocations in const-eval though, interning rejects them all as
-                        // the rest of rustc isn't happy with them... so we throw an error, even though
-                        // this isn't really UB.
-                        // A potential future alternative would be to resurrect this as a zero-sized allocation
-                        // (which codegen will then compile to an aligned dummy pointer anyway).
-                        throw_validation_failure!(self.path, DanglingPtrUseAfterFree { ptr_kind });
-                    }
                     // If this allocation has size zero, there is no actual mutability here.
                     if size != Size::ZERO {
                         // Determine whether this pointer expects to be pointing to something mutable.
@@ -618,7 +619,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                             }
                         };
                         // Determine what it actually points to.
-                        let alloc_actual_mutbl = mutability(self.ecx, alloc_id);
+                        let alloc_actual_mutbl =
+                            global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env);
                         // Mutable pointer to immutable memory is no good.
                         if ptr_expected_mutbl == Mutability::Mut
                             && alloc_actual_mutbl == Mutability::Not
@@ -842,9 +844,16 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     }
 
     fn in_mutable_memory(&self, val: &PlaceTy<'tcx, M::Provenance>) -> bool {
+        debug_assert!(self.ctfe_mode.is_some());
         if let Some(mplace) = val.as_mplace_or_local().left() {
             if let Some(alloc_id) = mplace.ptr().provenance.and_then(|p| p.get_alloc_id()) {
-                mutability(self.ecx, alloc_id).is_mut()
+                let tcx = *self.ecx.tcx;
+                // Everything must be already interned.
+                let mutbl = tcx.global_alloc(alloc_id).mutability(tcx, self.ecx.typing_env);
+                if let Some((_, alloc)) = self.ecx.memory.alloc_map.get(alloc_id) {
+                    assert_eq!(alloc.mutability, mutbl);
+                }
+                mutbl.is_mut()
             } else {
                 // No memory at all.
                 false
@@ -947,7 +956,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     ) -> Cow<'e, RangeSet> {
         assert!(layout.ty.is_union());
         assert!(layout.is_sized(), "there are no unsized unions");
-        let layout_cx = LayoutCx::new(*ecx.tcx, ecx.param_env);
+        let layout_cx = LayoutCx::new(*ecx.tcx, ecx.typing_env);
         return M::cached_union_data_range(ecx, layout.ty, || {
             let mut out = RangeSet(Vec::new());
             union_data_range_uncached(&layout_cx, layout, Size::ZERO, &mut out);
@@ -1002,7 +1011,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
             }
             // Don't forget potential other variants.
             match &layout.variants {
-                Variants::Single { .. } => {
+                Variants::Single { .. } | Variants::Empty => {
                     // Fully handled above.
                 }
                 Variants::Multiple { variants, .. } => {
@@ -1016,53 +1025,6 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     }
 }
 
-/// Returns whether the allocation is mutable, and whether it's actually a static.
-/// For "root" statics we look at the type to account for interior
-/// mutability; for nested statics we have no type and directly use the annotated mutability.
-fn mutability<'tcx>(ecx: &InterpCx<'tcx, impl Machine<'tcx>>, alloc_id: AllocId) -> Mutability {
-    // Let's see what kind of memory this points to.
-    // We're not using `try_global_alloc` since dangling pointers have already been handled.
-    match ecx.tcx.global_alloc(alloc_id) {
-        GlobalAlloc::Static(did) => {
-            let DefKind::Static { safety: _, mutability, nested } = ecx.tcx.def_kind(did) else {
-                bug!()
-            };
-            if nested {
-                assert!(
-                    ecx.memory.alloc_map.get(alloc_id).is_none(),
-                    "allocations of nested statics are already interned: {alloc_id:?}, {did:?}"
-                );
-                // Nested statics in a `static` are never interior mutable,
-                // so just use the declared mutability.
-                mutability
-            } else {
-                let mutability = match mutability {
-                    Mutability::Not
-                        if !ecx
-                            .tcx
-                            .type_of(did)
-                            .no_bound_vars()
-                            .expect("statics should not have generic parameters")
-                            .is_freeze(*ecx.tcx, ty::ParamEnv::reveal_all()) =>
-                    {
-                        Mutability::Mut
-                    }
-                    _ => mutability,
-                };
-                if let Some((_, alloc)) = ecx.memory.alloc_map.get(alloc_id) {
-                    assert_eq!(alloc.mutability, mutability);
-                }
-                mutability
-            }
-        }
-        GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
-        GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => {
-            // These are immutable, we better don't allow mutable pointers here.
-            Mutability::Not
-        }
-    }
-}
-
 impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, 'tcx, M> {
     type V = PlaceTy<'tcx, M::Provenance>;
 
@@ -1124,7 +1086,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
     ) -> InterpResult<'tcx> {
         // Special check for CTFE validation, preventing `UnsafeCell` inside unions in immutable memory.
         if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
-            if !val.layout.is_zst() && !val.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
+            if !val.layout.is_zst() && !val.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.typing_env)
+            {
                 if !self.in_mutable_memory(val) {
                     throw_validation_failure!(self.path, UnsafeCellInImmutable);
                 }
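The per-case `mutability` helper is deleted from validity checking; both validation and `get_alloc_info` now ask the global allocation itself via `GlobalAlloc::mutability(tcx, typing_env)` (and `size_and_align`). A self-contained toy sketch of pushing that per-variant decision into a method on the enum follows; the variant and field names are invented for illustration and do not match rustc's `GlobalAlloc`.

#[derive(Clone, Copy, PartialEq, Debug)]
enum Mutability {
    Not,
    Mut,
}

enum GlobalAlloc {
    Static { declared_mut: bool, interior_mutable: bool },
    Memory { mutable: bool },
    Function,
    VTable,
}

impl GlobalAlloc {
    /// Each caller used to re-derive this; now the enum answers for itself.
    fn mutability(&self) -> Mutability {
        match self {
            GlobalAlloc::Static { declared_mut, interior_mutable } => {
                if *declared_mut || *interior_mutable { Mutability::Mut } else { Mutability::Not }
            }
            GlobalAlloc::Memory { mutable } => {
                if *mutable { Mutability::Mut } else { Mutability::Not }
            }
            // Code and vtables are never writable.
            GlobalAlloc::Function | GlobalAlloc::VTable => Mutability::Not,
        }
    }
}

fn main() {
    let allocs = [
        GlobalAlloc::Static { declared_mut: false, interior_mutable: true },
        GlobalAlloc::Memory { mutable: false },
        GlobalAlloc::Function,
    ];
    for a in &allocs {
        println!("{:?}", a.mutability());
    }
}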
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 76ab0bb544f..3647c109a6e 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -218,8 +218,8 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
                 // recurse with the inner type
                 self.visit_variant(v, idx, &inner)?;
             }
-            // For single-variant layouts, we already did anything there is to do.
-            Variants::Single { .. } => {}
+            // For single-variant layouts, we already did everything there is to do.
+            Variants::Single { .. } | Variants::Empty => {}
         }
 
         interp_ok(())
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 0490195caf4..b5adf06b300 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -10,6 +10,7 @@
 #![feature(never_type)]
 #![feature(rustdoc_internals)]
 #![feature(slice_ptr_get)]
+#![feature(strict_overflow_ops)]
 #![feature(trait_alias)]
 #![feature(try_blocks)]
 #![feature(unqualified_local_imports)]
@@ -40,14 +41,13 @@ pub fn provide(providers: &mut Providers) {
     providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
     providers.eval_static_initializer = const_eval::eval_static_initializer_provider;
     providers.hooks.const_caller_location = util::caller_location::const_caller_location_provider;
-    providers.eval_to_valtree = |tcx, param_env_and_value| {
-        let (param_env, raw) = param_env_and_value.into_parts();
-        const_eval::eval_to_valtree(tcx, param_env, raw)
+    providers.eval_to_valtree = |tcx, ty::PseudoCanonicalInput { typing_env, value }| {
+        const_eval::eval_to_valtree(tcx, typing_env, value)
     };
     providers.hooks.try_destructure_mir_constant_for_user_output =
         const_eval::try_destructure_mir_constant_for_user_output;
     providers.valtree_to_const_val = |tcx, (ty, valtree)| {
-        const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
+        const_eval::valtree_to_const_value(tcx, ty::TypingEnv::fully_monomorphized(), ty, valtree)
     };
     providers.check_validity_requirement = |tcx, (init_kind, param_env_and_ty)| {
         util::check_validity_requirement(tcx, init_kind, param_env_and_ty)
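The `eval_to_valtree` provider now destructures `ty::PseudoCanonicalInput { typing_env, value }` directly in the closure's parameter list instead of calling `.into_parts()` on a `ParamEnvAnd`. That is ordinary Rust pattern syntax; a self-contained illustration with an invented `Input` type:

// Invented stand-in for a query input carrying an environment and a value.
struct Input {
    env: &'static str,
    value: u32,
}

fn main() {
    // Struct patterns are allowed in closure parameters, so no separate
    // `into_parts()`-style destructuring step is needed.
    let provider = |Input { env, value }: Input| format!("{env}: {value}");
    println!("{}", provider(Input { env: "fully monomorphized", value: 7 }));
}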
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index 6fa7d369229..9507b24f603 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -9,7 +9,7 @@ use tracing::debug;
 pub fn is_disaligned<'tcx, L>(
     tcx: TyCtxt<'tcx>,
     local_decls: &L,
-    param_env: ty::ParamEnv<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
     place: Place<'tcx>,
 ) -> bool
 where
@@ -22,8 +22,8 @@ where
     };
 
     let ty = place.ty(local_decls, tcx).ty;
-    let unsized_tail = || tcx.struct_tail_for_codegen(ty, param_env);
-    match tcx.layout_of(param_env.and(ty)) {
+    let unsized_tail = || tcx.struct_tail_for_codegen(ty, typing_env);
+    match tcx.layout_of(typing_env.as_query_input(ty)) {
         Ok(layout)
             if layout.align.abi <= pack
                 && (layout.is_sized()
diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs
index 7f4c36835e4..6dd9447cf5a 100644
--- a/compiler/rustc_const_eval/src/util/caller_location.rs
+++ b/compiler/rustc_const_eval/src/util/caller_location.rs
@@ -1,9 +1,9 @@
 use rustc_hir::LangItem;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::ty::{self, Mutability};
+use rustc_middle::ty::{self};
 use rustc_middle::{bug, mir};
-use rustc_span::symbol::Symbol;
+use rustc_span::Symbol;
 use tracing::trace;
 
 use crate::const_eval::{CanAccessMutGlobal, CompileTimeInterpCx, mk_eval_cx_to_read_const_val};
@@ -20,12 +20,9 @@ fn alloc_caller_location<'tcx>(
     // This can fail if rustc runs out of memory right here. Trying to emit an error would be
     // pointless, since that would require allocating more memory than these short strings.
     let file = if loc_details.file {
-        ecx.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not).unwrap()
+        ecx.allocate_str_dedup(filename.as_str()).unwrap()
     } else {
-        // FIXME: This creates a new allocation each time. It might be preferable to
-        // perform this allocation only once, and re-use the `MPlaceTy`.
-        // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
-        ecx.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
+        ecx.allocate_str_dedup("<redacted>").unwrap()
     };
     let file = file.map_provenance(CtfeProvenance::as_immutable);
     let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
@@ -60,7 +57,7 @@ pub(crate) fn const_caller_location_provider(
     let mut ecx = mk_eval_cx_to_read_const_val(
         tcx.tcx,
         tcx.span,
-        ty::ParamEnv::reveal_all(),
+        ty::TypingEnv::fully_monomorphized(),
         CanAccessMutGlobal::No,
     );
 
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index f743525f359..a729d9325c8 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -3,7 +3,7 @@ use rustc_middle::bug;
 use rustc_middle::ty::layout::{
     HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
 };
-use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
+use rustc_middle::ty::{PseudoCanonicalInput, Ty, TyCtxt};
 
 use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
 use crate::interpret::{InterpCx, MemoryKind};
@@ -23,16 +23,16 @@ use crate::interpret::{InterpCx, MemoryKind};
 pub fn check_validity_requirement<'tcx>(
     tcx: TyCtxt<'tcx>,
     kind: ValidityRequirement,
-    param_env_and_ty: ParamEnvAnd<'tcx, Ty<'tcx>>,
+    input: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
 ) -> Result<bool, &'tcx LayoutError<'tcx>> {
-    let layout = tcx.layout_of(param_env_and_ty)?;
+    let layout = tcx.layout_of(input)?;
 
     // There is nothing strict or lax about inhabitedness.
     if kind == ValidityRequirement::Inhabited {
         return Ok(!layout.is_uninhabited());
     }
 
-    let layout_cx = LayoutCx::new(tcx, param_env_and_ty.param_env);
+    let layout_cx = LayoutCx::new(tcx, input.typing_env);
     if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
         check_validity_requirement_strict(layout, &layout_cx, kind)
     } else {
@@ -49,7 +49,7 @@ fn check_validity_requirement_strict<'tcx>(
 ) -> Result<bool, &'tcx LayoutError<'tcx>> {
     let machine = CompileTimeMachine::new(CanAccessMutGlobal::No, CheckAlignment::Error);
 
-    let mut cx = InterpCx::new(cx.tcx(), rustc_span::DUMMY_SP, cx.param_env, machine);
+    let mut cx = InterpCx::new(cx.tcx(), rustc_span::DUMMY_SP, cx.typing_env, machine);
 
     let allocated = cx
         .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
@@ -155,6 +155,7 @@ fn check_validity_requirement_lax<'tcx>(
     }
 
     match &this.variants {
+        Variants::Empty => return Ok(false),
         Variants::Single { .. } => {
             // All fields of this single variant have already been checked above, there is nothing
             // else to do.
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index 0cf27d30c36..9eed1a20f15 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -5,18 +5,17 @@
 
 use rustc_infer::infer::TyCtxtInferExt;
 use rustc_middle::traits::ObligationCause;
-use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, TypingMode, Variance};
+use rustc_middle::ty::{Ty, TyCtxt, TypingEnv, Variance};
 use rustc_trait_selection::traits::ObligationCtxt;
 
 /// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
 pub fn sub_types<'tcx>(
     tcx: TyCtxt<'tcx>,
-    typing_mode: TypingMode<'tcx>,
-    param_env: ParamEnv<'tcx>,
+    typing_env: TypingEnv<'tcx>,
     src: Ty<'tcx>,
     dest: Ty<'tcx>,
 ) -> bool {
-    relate_types(tcx, typing_mode, param_env, Variance::Covariant, src, dest)
+    relate_types(tcx, typing_env, Variance::Covariant, src, dest)
 }
 
 /// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
@@ -26,8 +25,7 @@ pub fn sub_types<'tcx>(
 /// because we want to check for type equality.
 pub fn relate_types<'tcx>(
     tcx: TyCtxt<'tcx>,
-    typing_mode: TypingMode<'tcx>,
-    param_env: ParamEnv<'tcx>,
+    typing_env: TypingEnv<'tcx>,
     variance: Variance,
     src: Ty<'tcx>,
     dest: Ty<'tcx>,
@@ -36,8 +34,7 @@ pub fn relate_types<'tcx>(
         return true;
     }
 
-    let mut builder = tcx.infer_ctxt().ignoring_regions();
-    let infcx = builder.build(typing_mode);
+    let (infcx, param_env) = tcx.infer_ctxt().ignoring_regions().build_with_typing_env(typing_env);
     let ocx = ObligationCtxt::new(&infcx);
     let cause = ObligationCause::dummy();
     let src = ocx.normalize(&cause, param_env, src);