Diffstat (limited to 'compiler/rustc_mir_transform/src')
 compiler/rustc_mir_transform/src/abort_unwinding_calls.rs  |   1
 compiler/rustc_mir_transform/src/const_goto.rs             | 128
 compiler/rustc_mir_transform/src/const_prop_lint.rs        |  17
 compiler/rustc_mir_transform/src/coroutine/by_move_body.rs |   7
 compiler/rustc_mir_transform/src/coverage/counters.rs      |  70
 compiler/rustc_mir_transform/src/coverage/mod.rs           |  89
 compiler/rustc_mir_transform/src/dataflow_const_prop.rs    |  52
 compiler/rustc_mir_transform/src/ffi_unwind_calls.rs       |   1
 compiler/rustc_mir_transform/src/gvn.rs                    |  87
 compiler/rustc_mir_transform/src/inline.rs                 |  25
 compiler/rustc_mir_transform/src/instsimplify.rs           |  25
 compiler/rustc_mir_transform/src/jump_threading.rs         |   2
 compiler/rustc_mir_transform/src/lib.rs                    |  11
 compiler/rustc_mir_transform/src/lower_intrinsics.rs       |  11
 compiler/rustc_mir_transform/src/pass_manager.rs           |  12
 compiler/rustc_mir_transform/src/promote_consts.rs         |   1
 compiler/rustc_mir_transform/src/separate_const_switch.rs  | 343
 17 files changed, 262 insertions(+), 620 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 451d3be255f..ba70a4453d6 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -41,6 +41,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
             ty::Closure(..) => Abi::RustCall,
             ty::CoroutineClosure(..) => Abi::RustCall,
             ty::Coroutine(..) => Abi::Rust,
+            ty::Error(_) => return,
             _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
         };
         let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
deleted file mode 100644
index cb5b66b314d..00000000000
--- a/compiler/rustc_mir_transform/src/const_goto.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-//! This pass optimizes the following sequence
-//! ```rust,ignore (example)
-//! bb2: {
-//!     _2 = const true;
-//!     goto -> bb3;
-//! }
-//!
-//! bb3: {
-//!     switchInt(_2) -> [false: bb4, otherwise: bb5];
-//! }
-//! ```
-//! into
-//! ```rust,ignore (example)
-//! bb2: {
-//!     _2 = const true;
-//!     goto -> bb5;
-//! }
-//! ```
-
-use rustc_middle::mir::*;
-use rustc_middle::ty::TyCtxt;
-use rustc_middle::{mir::visit::Visitor, ty::ParamEnv};
-
-use super::simplify::{simplify_cfg, simplify_locals};
-
-pub struct ConstGoto;
-
-impl<'tcx> MirPass<'tcx> for ConstGoto {
-    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        // This pass participates in some as-of-yet untested unsoundness found
-        // in https://github.com/rust-lang/rust/issues/112460
-        sess.mir_opt_level() >= 2 && sess.opts.unstable_opts.unsound_mir_opts
-    }
-
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        trace!("Running ConstGoto on {:?}", body.source);
-        let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
-        let mut opt_finder =
-            ConstGotoOptimizationFinder { tcx, body, optimizations: vec![], param_env };
-        opt_finder.visit_body(body);
-        let should_simplify = !opt_finder.optimizations.is_empty();
-        for opt in opt_finder.optimizations {
-            let block = &mut body.basic_blocks_mut()[opt.bb_with_goto];
-            block.statements.extend(opt.stmts_move_up);
-            let terminator = block.terminator_mut();
-            let new_goto = TerminatorKind::Goto { target: opt.target_to_use_in_goto };
-            debug!("SUCCESS: replacing `{:?}` with `{:?}`", terminator.kind, new_goto);
-            terminator.kind = new_goto;
-        }
-
-        // if we applied optimizations, we potentially have some cfg to cleanup to
-        // make it easier for further passes
-        if should_simplify {
-            simplify_cfg(body);
-            simplify_locals(body, tcx);
-        }
-    }
-}
-
-impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
-    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &BasicBlockData<'tcx>) {
-        if data.is_cleanup {
-            // Because of the restrictions around control flow in cleanup blocks, we don't perform
-            // this optimization at all in such blocks.
-            return;
-        }
-        self.super_basic_block_data(block, data);
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
-        let _: Option<_> = try {
-            let target = terminator.kind.as_goto()?;
-            // We only apply this optimization if the last statement is a const assignment
-            let last_statement = self.body.basic_blocks[location.block].statements.last()?;
-
-            if let (place, Rvalue::Use(Operand::Constant(_const))) =
-                last_statement.kind.as_assign()?
-            {
-                // We found a constant being assigned to `place`.
-                // Now check that the target of this Goto switches on this place.
-                let target_bb = &self.body.basic_blocks[target];
-
-                // The `StorageDead(..)` statement does not affect the functionality of mir.
-                // We can move this part of the statement up to the predecessor.
-                let mut stmts_move_up = Vec::new();
-                for stmt in &target_bb.statements {
-                    if let StatementKind::StorageDead(..) = stmt.kind {
-                        stmts_move_up.push(stmt.clone())
-                    } else {
-                        None?;
-                    }
-                }
-
-                let target_bb_terminator = target_bb.terminator();
-                let (discr, targets) = target_bb_terminator.kind.as_switch()?;
-                if discr.place() == Some(*place) {
-                    let switch_ty = place.ty(self.body.local_decls(), self.tcx).ty;
-                    debug_assert_eq!(switch_ty, _const.ty());
-                    // We now know that the Switch matches on the const place, and it is statementless
-                    // Now find which value in the Switch matches the const value.
-                    let const_value = _const.const_.try_eval_bits(self.tcx, self.param_env)?;
-                    let target_to_use_in_goto = targets.target_for_value(const_value);
-                    self.optimizations.push(OptimizationToApply {
-                        bb_with_goto: location.block,
-                        target_to_use_in_goto,
-                        stmts_move_up,
-                    });
-                }
-            }
-            Some(())
-        };
-
-        self.super_terminator(terminator, location);
-    }
-}
-
-struct OptimizationToApply<'tcx> {
-    bb_with_goto: BasicBlock,
-    target_to_use_in_goto: BasicBlock,
-    stmts_move_up: Vec<Statement<'tcx>>,
-}
-
-pub struct ConstGotoOptimizationFinder<'a, 'tcx> {
-    tcx: TyCtxt<'tcx>,
-    body: &'a Body<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    optimizations: Vec<OptimizationToApply<'tcx>>,
-}
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index f2448ee3d44..f8e6905282c 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -3,8 +3,9 @@
 
 use std::fmt::Debug;
 
-use rustc_const_eval::interpret::{ImmTy, Projectable};
-use rustc_const_eval::interpret::{InterpCx, InterpResult, Scalar};
+use rustc_const_eval::interpret::{
+    format_interp_error, ImmTy, InterpCx, InterpResult, Projectable, Scalar,
+};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::HirId;
@@ -246,7 +247,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 assert!(
                     !error.kind().formatted_string(),
                     "const-prop encountered formatting error: {}",
-                    self.ecx.format_error(error),
+                    format_interp_error(self.ecx.tcx.dcx(), error),
                 );
                 None
             }
@@ -541,12 +542,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
     }
 
     #[instrument(level = "trace", skip(self), ret)]
-    fn eval_rvalue(
-        &mut self,
-        rvalue: &Rvalue<'tcx>,
-        location: Location,
-        dest: &Place<'tcx>,
-    ) -> Option<()> {
+    fn eval_rvalue(&mut self, rvalue: &Rvalue<'tcx>, dest: &Place<'tcx>) -> Option<()> {
         if !dest.projection.is_empty() {
             return None;
         }
@@ -639,6 +635,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     NullOp::OffsetOf(fields) => {
                         op_layout.offset_of_subfield(self, fields.iter()).bytes()
                     }
+                    NullOp::DebugAssertions => return None,
                 };
                 ImmTy::from_scalar(Scalar::from_target_usize(val, self), layout).into()
             }
@@ -734,7 +731,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             _ if place.is_indirect() => {}
             ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
             ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => {
-                if self.eval_rvalue(rvalue, location, place).is_none() {
+                if self.eval_rvalue(rvalue, place).is_none() {
                     // Const prop failed, so erase the destination, ensuring that whatever happens
                     // from here on, does not know about the previous value.
                     // This is important in case we have
diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
index fcd4715b9e8..e40f4520671 100644
--- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
+++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
@@ -7,7 +7,7 @@ use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir as hir;
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::{self, dump_mir, MirPass};
-use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt, TypeVisitableExt};
 use rustc_target::abi::FieldIdx;
 
 pub struct ByMoveBody;
@@ -23,7 +23,10 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
             return;
         };
         let coroutine_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
-        let ty::Coroutine(_, args) = *coroutine_ty.kind() else { bug!() };
+        if coroutine_ty.references_error() {
+            return;
+        }
+        let ty::Coroutine(_, args) = *coroutine_ty.kind() else { bug!("{body:#?}") };
 
         let coroutine_kind = args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap();
         if coroutine_kind == ty::ClosureKind::FnOnce {
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 8c11dea5d4e..9a1d8bae6b4 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -1,4 +1,5 @@
-use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
@@ -38,19 +39,27 @@ impl Debug for BcbCounter {
     }
 }
 
+#[derive(Debug)]
+pub(super) enum CounterIncrementSite {
+    Node { bcb: BasicCoverageBlock },
+    Edge { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock },
+}
+
 /// Generates and stores coverage counter and coverage expression information
 /// associated with nodes/edges in the BCB graph.
 pub(super) struct CoverageCounters {
-    next_counter_id: CounterId,
+    /// List of places where a counter-increment statement should be injected
+    /// into MIR, each with its corresponding counter ID.
+    counter_increment_sites: IndexVec<CounterId, CounterIncrementSite>,
 
     /// Coverage counters/expressions that are associated with individual BCBs.
     bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
     /// Coverage counters/expressions that are associated with the control-flow
     /// edge between two BCBs.
     ///
-    /// The iteration order of this map can affect the precise contents of MIR,
-    /// so we use `FxIndexMap` to avoid query stability hazards.
-    bcb_edge_counters: FxIndexMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
+    /// We currently don't iterate over this map, but if we do in the future,
+    /// switch it back to `FxIndexMap` to avoid query stability hazards.
+    bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
     /// Tracks which BCBs have a counter associated with some incoming edge.
     /// Only used by assertions, to verify that BCBs with incoming edge
     /// counters do not have their own physical counters (expressions are allowed).
@@ -71,9 +80,9 @@ impl CoverageCounters {
         let num_bcbs = basic_coverage_blocks.num_nodes();
 
         let mut this = Self {
-            next_counter_id: CounterId::START,
+            counter_increment_sites: IndexVec::new(),
             bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
-            bcb_edge_counters: FxIndexMap::default(),
+            bcb_edge_counters: FxHashMap::default(),
             bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
             expressions: IndexVec::new(),
         };
@@ -84,8 +93,8 @@ impl CoverageCounters {
         this
     }
 
-    fn make_counter(&mut self) -> BcbCounter {
-        let id = self.next_counter();
+    fn make_counter(&mut self, site: CounterIncrementSite) -> BcbCounter {
+        let id = self.counter_increment_sites.push(site);
         BcbCounter::Counter { id }
     }
 
@@ -103,15 +112,8 @@ impl CoverageCounters {
         self.make_expression(lhs, Op::Add, rhs)
     }
 
-    /// Counter IDs start from one and go up.
-    fn next_counter(&mut self) -> CounterId {
-        let next = self.next_counter_id;
-        self.next_counter_id = self.next_counter_id + 1;
-        next
-    }
-
     pub(super) fn num_counters(&self) -> usize {
-        self.next_counter_id.as_usize()
+        self.counter_increment_sites.len()
     }
 
     #[cfg(test)]
@@ -171,22 +173,26 @@ impl CoverageCounters {
         self.bcb_counters[bcb]
     }
 
-    pub(super) fn bcb_node_counters(
+    /// Returns an iterator over all the nodes/edges in the coverage graph that
+    /// should have a counter-increment statement injected into MIR, along with
+    /// each site's corresponding counter ID.
+    pub(super) fn counter_increment_sites(
         &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, &BcbCounter)> {
-        self.bcb_counters
-            .iter_enumerated()
-            .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?)))
+    ) -> impl Iterator<Item = (CounterId, &CounterIncrementSite)> {
+        self.counter_increment_sites.iter_enumerated()
     }
 
-    /// For each edge in the BCB graph that has an associated counter, yields
-    /// that edge's *from* and *to* nodes, and its counter.
-    pub(super) fn bcb_edge_counters(
+    /// Returns an iterator over the subset of BCB nodes that have been associated
+    /// with a counter *expression*, along with the ID of that expression.
+    pub(super) fn bcb_nodes_with_coverage_expressions(
         &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, BasicCoverageBlock, &BcbCounter)> {
-        self.bcb_edge_counters
-            .iter()
-            .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind))
+    ) -> impl Iterator<Item = (BasicCoverageBlock, ExpressionId)> + Captures<'_> {
+        self.bcb_counters.iter_enumerated().filter_map(|(bcb, &counter_kind)| match counter_kind {
+            // Yield the BCB along with its associated expression ID.
+            Some(BcbCounter::Expression { id }) => Some((bcb, id)),
+            // This BCB is associated with a counter or nothing, so skip it.
+            Some(BcbCounter::Counter { .. }) | None => None,
+        })
     }
 
     pub(super) fn into_expressions(self) -> IndexVec<ExpressionId, Expression> {
@@ -339,7 +345,8 @@ impl<'a> MakeBcbCounters<'a> {
         // program results in a tight infinite loop, but it should still compile.
         let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb);
         if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
-            let counter_kind = self.coverage_counters.make_counter();
+            let counter_kind =
+                self.coverage_counters.make_counter(CounterIncrementSite::Node { bcb });
             if one_path_to_target {
                 debug!("{bcb:?} gets a new counter: {counter_kind:?}");
             } else {
@@ -401,7 +408,8 @@ impl<'a> MakeBcbCounters<'a> {
         }
 
         // Make a new counter to count this edge.
-        let counter_kind = self.coverage_counters.make_counter();
+        let counter_kind =
+            self.coverage_counters.make_counter(CounterIncrementSite::Edge { from_bcb, to_bcb });
         debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}");
         self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
     }
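
As a rough, standalone sketch of the bookkeeping change above (plain `usize` IDs stand in for the real `CounterId` and `BasicCoverageBlock` index types), a counter's ID is now simply the index of its increment site in an append-only list, replacing the old `next_counter_id` counter:

```rust
#[derive(Debug)]
enum CounterIncrementSite {
    Node { bcb: usize },
    Edge { from_bcb: usize, to_bcb: usize },
}

#[derive(Default)]
struct CoverageCounters {
    counter_increment_sites: Vec<CounterIncrementSite>,
}

impl CoverageCounters {
    fn make_counter(&mut self, site: CounterIncrementSite) -> usize {
        // The new counter's ID is the position of its site in the list.
        self.counter_increment_sites.push(site);
        self.counter_increment_sites.len() - 1
    }

    fn num_counters(&self) -> usize {
        self.counter_increment_sites.len()
    }
}

fn main() {
    let mut counters = CoverageCounters::default();
    let a = counters.make_counter(CounterIncrementSite::Node { bcb: 0 });
    let b = counters.make_counter(CounterIncrementSite::Edge { from_bcb: 0, to_bcb: 1 });
    assert_eq!((a, b, counters.num_counters()), (0, 1, 2));
}
```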
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 3aa41250fd3..4c5be0a3f4b 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -7,7 +7,7 @@ mod spans;
 #[cfg(test)]
 mod tests;
 
-use self::counters::{BcbCounter, CoverageCounters};
+use self::counters::{CounterIncrementSite, CoverageCounters};
 use self::graph::{BasicCoverageBlock, CoverageGraph};
 use self::spans::{BcbMapping, BcbMappingKind, CoverageSpans};
 
@@ -155,61 +155,52 @@ fn inject_coverage_statements<'tcx>(
     bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
     coverage_counters: &CoverageCounters,
 ) {
-    // Process the counters associated with BCB nodes.
-    for (bcb, counter_kind) in coverage_counters.bcb_node_counters() {
-        let do_inject = match counter_kind {
-            // Counter-increment statements always need to be injected.
-            BcbCounter::Counter { .. } => true,
-            // The only purpose of expression-used statements is to detect
-            // when a mapping is unreachable, so we only inject them for
-            // expressions with one or more mappings.
-            BcbCounter::Expression { .. } => bcb_has_coverage_spans(bcb),
-        };
-        if do_inject {
-            inject_statement(
-                mir_body,
-                make_mir_coverage_kind(counter_kind),
-                basic_coverage_blocks[bcb].leader_bb(),
-            );
-        }
-    }
-
-    // Process the counters associated with BCB edges.
-    for (from_bcb, to_bcb, counter_kind) in coverage_counters.bcb_edge_counters() {
-        let do_inject = match counter_kind {
-            // Counter-increment statements always need to be injected.
-            BcbCounter::Counter { .. } => true,
-            // BCB-edge expressions never have mappings, so they never need
-            // a corresponding statement.
-            BcbCounter::Expression { .. } => false,
+    // Inject counter-increment statements into MIR.
+    for (id, counter_increment_site) in coverage_counters.counter_increment_sites() {
+        // Determine the block to inject a counter-increment statement into.
+        // For BCB nodes this is just their first block, but for edges we need
+        // to create a new block between the two BCBs, and inject into that.
+        let target_bb = match *counter_increment_site {
+            CounterIncrementSite::Node { bcb } => basic_coverage_blocks[bcb].leader_bb(),
+            CounterIncrementSite::Edge { from_bcb, to_bcb } => {
+                // Create a new block between the last block of `from_bcb` and
+                // the first block of `to_bcb`.
+                let from_bb = basic_coverage_blocks[from_bcb].last_bb();
+                let to_bb = basic_coverage_blocks[to_bcb].leader_bb();
+
+                let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb);
+                debug!(
+                    "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
+                    requires a new MIR BasicBlock {new_bb:?} for counter increment {id:?}",
+                );
+                new_bb
+            }
         };
-        if !do_inject {
-            continue;
-        }
-
-        // We need to inject a coverage statement into a new BB between the
-        // last BB of `from_bcb` and the first BB of `to_bcb`.
-        let from_bb = basic_coverage_blocks[from_bcb].last_bb();
-        let to_bb = basic_coverage_blocks[to_bcb].leader_bb();
 
-        let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb);
-        debug!(
-            "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
-                requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
-        );
-
-        // Inject a counter into the newly-created BB.
-        inject_statement(mir_body, make_mir_coverage_kind(counter_kind), new_bb);
+        inject_statement(mir_body, CoverageKind::CounterIncrement { id }, target_bb);
     }
-}
 
-fn make_mir_coverage_kind(counter_kind: &BcbCounter) -> CoverageKind {
-    match *counter_kind {
-        BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
-        BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
+    // For each counter expression that is directly associated with at least one
+    // span, we inject an "expression-used" statement, so that coverage codegen
+    // can check whether the injected statement survived MIR optimization.
+    // (BCB edges can't have spans, so we only need to process BCB nodes here.)
+    //
+    // See the code in `rustc_codegen_llvm::coverageinfo::map_data` that deals
+    // with "expressions seen" and "zero terms".
+    for (bcb, expression_id) in coverage_counters
+        .bcb_nodes_with_coverage_expressions()
+        .filter(|&(bcb, _)| bcb_has_coverage_spans(bcb))
+    {
+        inject_statement(
+            mir_body,
+            CoverageKind::ExpressionUsed { id: expression_id },
+            basic_coverage_blocks[bcb].leader_bb(),
+        );
     }
 }
 
+/// Given two basic blocks that have a control-flow edge between them, creates
+/// and returns a new block that sits between those blocks.
 fn inject_edge_counter_basic_block(
     mir_body: &mut mir::Body<'_>,
     from_bb: BasicBlock,
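
The injection site for each counter follows directly from its site kind. A minimal sketch, using the same `CounterIncrementSite` shape as the previous sketch, plain `usize` block IDs, and a hypothetical `new_block_between` standing in for `inject_edge_counter_basic_block`:

```rust
enum CounterIncrementSite {
    Node { bcb: usize },
    Edge { from_bcb: usize, to_bcb: usize },
}

/// Pretend to splice a fresh MIR block onto the edge and return its ID.
fn new_block_between(next_block: &mut usize, _from_bcb: usize, _to_bcb: usize) -> usize {
    let new_bb = *next_block;
    *next_block += 1;
    new_bb
}

fn injection_target(site: &CounterIncrementSite, leader_bb: &[usize], next_block: &mut usize) -> usize {
    match *site {
        // Node counters go into the BCB's first (leader) MIR block.
        CounterIncrementSite::Node { bcb } => leader_bb[bcb],
        // Edge counters go into a new block created for that edge. (The real
        // pass splices it between `from_bcb`'s last block and `to_bcb`'s leader.)
        CounterIncrementSite::Edge { from_bcb, to_bcb } => {
            new_block_between(next_block, from_bcb, to_bcb)
        }
    }
}

fn main() {
    let leader_bb = [0, 3, 5]; // leader MIR block of each BCB
    let mut next_block = 10;
    let node = CounterIncrementSite::Node { bcb: 1 };
    let edge = CounterIncrementSite::Edge { from_bcb: 0, to_bcb: 2 };
    assert_eq!(injection_target(&node, &leader_bb, &mut next_block), 3);
    assert_eq!(injection_target(&edge, &leader_bb, &mut next_block), 10);
}
```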
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 6a37047a693..86e99a8a5b5 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -2,7 +2,9 @@
 //!
 //! Currently, this pass only propagates scalar values.
 
-use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable};
+use rustc_const_eval::interpret::{
+    ImmTy, Immediate, InterpCx, OpTy, PlaceTy, PointerArithmetic, Projectable,
+};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def::DefKind;
 use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult, Scalar};
@@ -936,12 +938,50 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
     }
 
     fn binary_ptr_op(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
-        _bin_op: BinOp,
-        _left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
-        _right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        bin_op: BinOp,
+        left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
+        right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
     ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
-        throw_machine_stop_str!("can't do pointer arithmetic");
+        use rustc_middle::mir::BinOp::*;
+        Ok(match bin_op {
+            Eq | Ne | Lt | Le | Gt | Ge => {
+                // Types can differ, e.g. fn ptrs with different `for`.
+                assert_eq!(left.layout.abi, right.layout.abi);
+                let size = ecx.pointer_size();
+                // Just compare the bits. ScalarPairs are compared lexicographically.
+                // We thus always compare pairs and simply fill scalars up with 0.
+                // If the pointer has provenance, `to_bits` will return `Err` and we bail out.
+                let left = match **left {
+                    Immediate::Scalar(l) => (l.to_bits(size)?, 0),
+                    Immediate::ScalarPair(l1, l2) => (l1.to_bits(size)?, l2.to_bits(size)?),
+                    Immediate::Uninit => panic!("we should never see uninit data here"),
+                };
+                let right = match **right {
+                    Immediate::Scalar(r) => (r.to_bits(size)?, 0),
+                    Immediate::ScalarPair(r1, r2) => (r1.to_bits(size)?, r2.to_bits(size)?),
+                    Immediate::Uninit => panic!("we should never see uninit data here"),
+                };
+                let res = match bin_op {
+                    Eq => left == right,
+                    Ne => left != right,
+                    Lt => left < right,
+                    Le => left <= right,
+                    Gt => left > right,
+                    Ge => left >= right,
+                    _ => bug!(),
+                };
+                (ImmTy::from_bool(res, *ecx.tcx), false)
+            }
+
+            // Some more operations are possible with atomics.
+            // The return value always has the provenance of the *left* operand.
+            Add | Sub | BitOr | BitAnd | BitXor => {
+                throw_machine_stop_str!("pointer arithmetic is not handled")
+            }
+
+            _ => span_bug!(ecx.cur_span(), "Invalid operator on pointers: {:?}", bin_op),
+        })
     }
 
     fn expose_ptr(
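
The comparison above works purely on the bit representation of the two immediates: a plain scalar is padded to `(bits, 0)` and a scalar pair (for example a wide pointer) becomes `(bits1, bits2)`, so both shapes compare lexicographically as tuples; if either side carries provenance, `to_bits` fails and the machine bails out. A standalone sketch of just that comparison rule:

```rust
#[derive(Clone, Copy)]
enum CmpOp { Eq, Ne, Lt, Le, Gt, Ge }

/// `left`/`right` are the raw bits of each immediate: a plain scalar is padded
/// to `(bits, 0)`, a scalar pair is `(bits1, bits2)`.
fn compare_bits(op: CmpOp, left: (u128, u128), right: (u128, u128)) -> bool {
    match op {
        CmpOp::Eq => left == right,
        CmpOp::Ne => left != right,
        CmpOp::Lt => left < right,
        CmpOp::Le => left <= right,
        CmpOp::Gt => left > right,
        CmpOp::Ge => left >= right,
    }
}

fn main() {
    // Two wide pointers with the same address: the metadata breaks the tie
    // lexicographically, just as ScalarPair comparison does above.
    assert!(compare_bits(CmpOp::Lt, (0x1000, 4), (0x1000, 8)));
    // Two thin pointers are padded with 0 and compare by address alone.
    assert!(compare_bits(CmpOp::Eq, (0x2000, 0), (0x2000, 0)));
}
```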
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index b0d758bcbfe..663abbece85 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -59,6 +59,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
         ty::Closure(..) => Abi::RustCall,
         ty::CoroutineClosure(..) => Abi::RustCall,
         ty::Coroutine(..) => Abi::Rust,
+        ty::Error(_) => return false,
         _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
     };
     let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index f9798bc4e70..2c7ae53055f 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -93,7 +93,6 @@ use rustc_index::IndexVec;
 use rustc_middle::mir::interpret::GlobalAlloc;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
-use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Ty, TyCtxt, TypeAndMut};
 use rustc_span::def_id::DefId;
@@ -489,6 +488,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     NullOp::OffsetOf(fields) => {
                         layout.offset_of_subfield(&self.ecx, fields.iter()).bytes()
                     }
+                    NullOp::DebugAssertions => return None,
                 };
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                 let imm = ImmTy::try_from_uint(val, usize_layout)?;
@@ -551,6 +551,29 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     }
                     value.offset(Size::ZERO, to, &self.ecx).ok()?
                 }
+                CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize) => {
+                    let src = self.evaluated[value].as_ref()?;
+                    let to = self.ecx.layout_of(to).ok()?;
+                    let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?;
+                    self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?;
+                    self.ecx
+                        .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
+                        .ok()?;
+                    dest.into()
+                }
+                CastKind::FnPtrToPtr
+                | CastKind::PtrToPtr
+                | CastKind::PointerCoercion(
+                    ty::adjustment::PointerCoercion::MutToConstPointer
+                    | ty::adjustment::PointerCoercion::ArrayToPointer
+                    | ty::adjustment::PointerCoercion::UnsafeFnPointer,
+                ) => {
+                    let src = self.evaluated[value].as_ref()?;
+                    let src = self.ecx.read_immediate(src).ok()?;
+                    let to = self.ecx.layout_of(to).ok()?;
+                    let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
+                    ret.into()
+                }
                 _ => return None,
             },
         };
@@ -777,18 +800,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
             // Operations.
             Rvalue::Len(ref mut place) => return self.simplify_len(place, location),
-            Rvalue::Cast(kind, ref mut value, to) => {
-                let from = value.ty(self.local_decls, self.tcx);
-                let value = self.simplify_operand(value, location)?;
-                if let CastKind::PointerCoercion(
-                    PointerCoercion::ReifyFnPointer | PointerCoercion::ClosureFnPointer(_),
-                ) = kind
-                {
-                    // Each reification of a generic fn may get a different pointer.
-                    // Do not try to merge them.
-                    return self.new_opaque();
-                }
-                Value::Cast { kind, value, from, to }
+            Rvalue::Cast(ref mut kind, ref mut value, to) => {
+                return self.simplify_cast(kind, value, to, location);
             }
             Rvalue::BinaryOp(op, box (ref mut lhs, ref mut rhs)) => {
                 let ty = lhs.ty(self.local_decls, self.tcx);
@@ -1034,6 +1047,50 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         }
     }
 
+    fn simplify_cast(
+        &mut self,
+        kind: &mut CastKind,
+        operand: &mut Operand<'tcx>,
+        to: Ty<'tcx>,
+        location: Location,
+    ) -> Option<VnIndex> {
+        use rustc_middle::ty::adjustment::PointerCoercion::*;
+        use CastKind::*;
+
+        let mut from = operand.ty(self.local_decls, self.tcx);
+        let mut value = self.simplify_operand(operand, location)?;
+        if from == to {
+            return Some(value);
+        }
+
+        if let CastKind::PointerCoercion(ReifyFnPointer | ClosureFnPointer(_)) = kind {
+            // Each reification of a generic fn may get a different pointer.
+            // Do not try to merge them.
+            return self.new_opaque();
+        }
+
+        if let PtrToPtr | PointerCoercion(MutToConstPointer) = kind
+            && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } =
+                *self.get(value)
+            && let PtrToPtr | PointerCoercion(MutToConstPointer) = inner_kind
+        {
+            from = inner_from;
+            value = inner_value;
+            *kind = PtrToPtr;
+            if inner_from == to {
+                return Some(inner_value);
+            }
+            if let Some(const_) = self.try_as_constant(value) {
+                *operand = Operand::Constant(Box::new(const_));
+            } else if let Some(local) = self.try_as_local(value, location) {
+                *operand = Operand::Copy(local.into());
+                self.reused_locals.insert(local);
+            }
+        }
+
+        Some(self.insert(Value::Cast { kind: *kind, value, from, to }))
+    }
+
     fn simplify_len(&mut self, place: &mut Place<'tcx>, location: Location) -> Option<VnIndex> {
         // Trivial case: we are fetching a statically known length.
         let place_ty = place.ty(self.local_decls, self.tcx).ty;
@@ -1231,8 +1288,8 @@ impl<'tcx> MutVisitor<'tcx> for StorageRemover<'tcx> {
 
     fn visit_operand(&mut self, operand: &mut Operand<'tcx>, _: Location) {
         if let Operand::Move(place) = *operand
-            && let Some(local) = place.as_local()
-            && self.reused_locals.contains(local)
+            && !place.is_indirect_first_projection()
+            && self.reused_locals.contains(place.local)
         {
             *operand = Operand::Copy(place);
         }
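
The new `simplify_cast` collapses chains of ptr-to-ptr casts into a single cast from the innermost type, and recognizes a round trip back to the original type as the identity. A small user-level illustration (ordinary Rust, not compiler code) of why that rewrite is value-preserving:

```rust
fn main() {
    let x: u32 = 7;
    let p: *const u32 = &x;

    // Two chained ptr-to-ptr casts carry the same value as the single cast
    // that GVN rewrites them into...
    let chained = p as *const u8 as *const u16;
    let single = p as *const u16;
    assert_eq!(chained, single);

    // ...and casting back to the original type is recognized as the identity.
    let round_trip = p as *const u8 as *const u32;
    assert_eq!(round_trip, p);
    assert_eq!(unsafe { *round_trip }, 7);
}
```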
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index e77553a03d6..be19bd8349e 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -1027,21 +1027,16 @@ fn try_instance_mir<'tcx>(
     tcx: TyCtxt<'tcx>,
     instance: InstanceDef<'tcx>,
 ) -> Result<&'tcx Body<'tcx>, &'static str> {
-    match instance {
-        ty::InstanceDef::DropGlue(_, Some(ty)) => match ty.kind() {
-            ty::Adt(def, args) => {
-                let fields = def.all_fields();
-                for field in fields {
-                    let field_ty = field.ty(tcx, args);
-                    if field_ty.has_param() && field_ty.has_projections() {
-                        return Err("cannot build drop shim for polymorphic type");
-                    }
-                }
-
-                Ok(tcx.instance_mir(instance))
+    if let ty::InstanceDef::DropGlue(_, Some(ty)) = instance
+        && let ty::Adt(def, args) = ty.kind()
+    {
+        let fields = def.all_fields();
+        for field in fields {
+            let field_ty = field.ty(tcx, args);
+            if field_ty.has_param() && field_ty.has_projections() {
+                return Err("cannot build drop shim for polymorphic type");
             }
-            _ => Ok(tcx.instance_mir(instance)),
-        },
-        _ => Ok(tcx.instance_mir(instance)),
+        }
     }
+    Ok(tcx.instance_mir(instance))
 }
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index a28db0defc9..f65eb5cbea9 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -2,10 +2,12 @@
 
 use crate::simplify::simplify_duplicate_switch_targets;
 use rustc_middle::mir::*;
+use rustc_middle::ty::layout;
 use rustc_middle::ty::layout::ValidityRequirement;
 use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt};
 use rustc_span::symbol::Symbol;
 use rustc_target::abi::FieldIdx;
+use rustc_target::spec::abi::Abi;
 
 pub struct InstSimplify;
 
@@ -38,6 +40,7 @@ impl<'tcx> MirPass<'tcx> for InstSimplify {
                 block.terminator.as_mut().unwrap(),
                 &mut block.statements,
             );
+            ctx.simplify_nounwind_call(block.terminator.as_mut().unwrap());
             simplify_duplicate_switch_targets(block.terminator.as_mut().unwrap());
         }
     }
@@ -252,6 +255,28 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         terminator.kind = TerminatorKind::Goto { target: destination_block };
     }
 
+    fn simplify_nounwind_call(&self, terminator: &mut Terminator<'tcx>) {
+        let TerminatorKind::Call { func, unwind, .. } = &mut terminator.kind else {
+            return;
+        };
+
+        let Some((def_id, _)) = func.const_fn_def() else {
+            return;
+        };
+
+        let body_ty = self.tcx.type_of(def_id).skip_binder();
+        let body_abi = match body_ty.kind() {
+            ty::FnDef(..) => body_ty.fn_sig(self.tcx).abi(),
+            ty::Closure(..) => Abi::RustCall,
+            ty::Coroutine(..) => Abi::Rust,
+            _ => bug!("unexpected body ty: {:?}", body_ty),
+        };
+
+        if !layout::fn_can_unwind(self.tcx, Some(def_id), body_abi) {
+            *unwind = UnwindAction::Unreachable;
+        }
+    }
+
     fn simplify_intrinsic_assert(
         &self,
         terminator: &mut Terminator<'tcx>,
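
The new `simplify_nounwind_call` marks the unwind edge of a call unreachable when the callee's ABI cannot unwind. A minimal sketch of that decision, with hypothetical string ABIs and a deliberately simplified stand-in for `fn_can_unwind` (the real check also accounts for `-unwind` ABI variants, attributes, and the panic strategy):

```rust
#[derive(Debug, PartialEq)]
enum UnwindAction {
    Continue,
    Unreachable,
}

/// Simplified stand-in for `rustc_middle::ty::layout::fn_can_unwind`:
/// only the plain Rust ABIs are treated as able to unwind in this sketch.
fn fn_can_unwind(callee_abi: &str) -> bool {
    matches!(callee_abi, "Rust" | "RustCall")
}

fn simplify_nounwind_call(callee_abi: &str, unwind: &mut UnwindAction) {
    // If the callee can never unwind, its unwind edge can never be taken.
    if !fn_can_unwind(callee_abi) {
        *unwind = UnwindAction::Unreachable;
    }
}

fn main() {
    let mut unwind = UnwindAction::Continue;
    simplify_nounwind_call("C", &mut unwind);
    assert_eq!(unwind, UnwindAction::Unreachable);

    let mut unwind = UnwindAction::Continue;
    simplify_nounwind_call("Rust", &mut unwind);
    assert_eq!(unwind, UnwindAction::Continue);
}
```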
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 7a70ed5cb7f..78ba166ba43 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -60,7 +60,7 @@ const MAX_PLACES: usize = 100;
 
 impl<'tcx> MirPass<'tcx> for JumpThreading {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() >= 4
+        sess.mir_opt_level() >= 2
     }
 
     #[instrument(skip_all level = "debug")]
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 031515ea958..72d9ffe8ca5 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -1,5 +1,3 @@
-#![deny(rustc::untranslatable_diagnostic)]
-#![deny(rustc::diagnostic_outside_of_impl)]
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(const_type_name)]
@@ -10,7 +8,7 @@
 #![feature(is_sorted)]
 #![feature(let_chains)]
 #![feature(map_try_insert)]
-#![feature(min_specialization)]
+#![cfg_attr(bootstrap, feature(min_specialization))]
 #![feature(never_type)]
 #![feature(option_get_or_insert_default)]
 #![feature(round_char_boundary)]
@@ -61,7 +59,6 @@ mod remove_place_mention;
 mod add_subtyping_projections;
 pub mod cleanup_post_borrowck;
 mod const_debuginfo;
-mod const_goto;
 mod const_prop;
 mod const_prop_lint;
 mod copy_prop;
@@ -105,7 +102,6 @@ mod remove_unneeded_drops;
 mod remove_zsts;
 mod required_consts;
 mod reveal_all;
-mod separate_const_switch;
 mod shim;
 mod ssa;
 // This pass is public to allow external drivers to perform MIR cleanup
@@ -592,7 +588,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
 
             // Has to run after `slice::len` lowering
             &normalize_array_len::NormalizeArrayLen,
-            &const_goto::ConstGoto,
             &ref_prop::ReferencePropagation,
             &sroa::ScalarReplacementOfAggregates,
             &match_branches::MatchBranchSimplification,
@@ -603,10 +598,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &dead_store_elimination::DeadStoreElimination::Initial,
             &gvn::GVN,
             &simplify::SimplifyLocals::AfterGVN,
-            // Perform `SeparateConstSwitch` after SSA-based analyses, as cloning blocks may
-            // destroy the SSA property. It should still happen before const-propagation, so the
-            // latter pass will leverage the created opportunities.
-            &separate_const_switch::SeparateConstSwitch,
             &dataflow_const_prop::DataflowConstProp,
             &const_debuginfo::ConstDebugInfo,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 897375e0e16..f43b85173d4 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -21,6 +21,17 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                     sym::unreachable => {
                         terminator.kind = TerminatorKind::Unreachable;
                     }
+                    sym::debug_assertions => {
+                        let target = target.unwrap();
+                        block.statements.push(Statement {
+                            source_info: terminator.source_info,
+                            kind: StatementKind::Assign(Box::new((
+                                *destination,
+                                Rvalue::NullaryOp(NullOp::DebugAssertions, tcx.types.bool),
+                            ))),
+                        });
+                        terminator.kind = TerminatorKind::Goto { target };
+                    }
                     sym::forget => {
                         if let Some(target) = *target {
                             block.statements.push(Statement {
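
The `debug_assertions` lowering above replaces the intrinsic call terminator with an assignment of `Rvalue::NullaryOp(NullOp::DebugAssertions, bool)` followed by a plain goto, so the answer stays symbolic in MIR rather than being resolved at the call site. A toy model of that rewrite, with hypothetical simplified statement and terminator types:

```rust
#[derive(Debug, PartialEq)]
enum Statement {
    AssignDebugAssertions { dest: usize },
}

#[derive(Debug, PartialEq)]
enum Terminator {
    Call { intrinsic: &'static str, dest: usize, target: usize },
    Goto { target: usize },
}

fn lower_debug_assertions(statements: &mut Vec<Statement>, terminator: &mut Terminator) {
    // A call to the intrinsic becomes one nullary-op assignment plus a goto.
    if let Terminator::Call { intrinsic: "debug_assertions", dest, target } = *terminator {
        statements.push(Statement::AssignDebugAssertions { dest });
        *terminator = Terminator::Goto { target };
    }
}

fn main() {
    let mut statements = Vec::new();
    let mut terminator = Terminator::Call { intrinsic: "debug_assertions", dest: 0, target: 2 };
    lower_debug_assertions(&mut statements, &mut terminator);
    assert_eq!(statements, vec![Statement::AssignDebugAssertions { dest: 0 }]);
    assert_eq!(terminator, Terminator::Goto { target: 2 });
}
```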
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 605e1ad46d7..77478cc741d 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -8,18 +8,10 @@ use crate::{lint::lint_body, validate, MirPass};
 pub trait MirLint<'tcx> {
     fn name(&self) -> &'static str {
         // FIXME Simplify the implementation once more `str` methods get const-stable.
+        // See copypaste in `MirPass`
         const {
             let name = std::any::type_name::<Self>();
-            let bytes = name.as_bytes();
-            let mut i = bytes.len();
-            while i > 0 && bytes[i - 1] != b':' {
-                i = i - 1;
-            }
-            let (_, bytes) = bytes.split_at(i);
-            match std::str::from_utf8(bytes) {
-                Ok(name) => name,
-                Err(_) => name,
-            }
+            rustc_middle::util::common::c_name(name)
         }
     }
 
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index c00093ea27e..577b8f2080f 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -446,6 +446,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                 NullOp::SizeOf => {}
                 NullOp::AlignOf => {}
                 NullOp::OffsetOf(_) => {}
+                NullOp::DebugAssertions => {}
             },
 
             Rvalue::ShallowInitBox(_, _) => return Err(Unpromotable),
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
deleted file mode 100644
index 7120ef72142..00000000000
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ /dev/null
@@ -1,343 +0,0 @@
-//! A pass that duplicates switch-terminated blocks
-//! into a new copy for each predecessor, provided
-//! the predecessor sets the value being switched
-//! over to a constant.
-//!
-//! The purpose of this pass is to help constant
-//! propagation passes to simplify the switch terminator
-//! of the copied blocks into gotos when some predecessors
-//! statically determine the output of switches.
-//!
-//! ```text
-//!     x = 12 ---              ---> something
-//!               \            / 12
-//!                --> switch x
-//!               /            \ otherwise
-//!     x = y  ---              ---> something else
-//! ```
-//! becomes
-//! ```text
-//!     x = 12 ---> switch x ------> something
-//!                          \ / 12
-//!                           X
-//!                          / \ otherwise
-//!     x = y  ---> switch x ------> something else
-//! ```
-//! so it can hopefully later be turned by another pass into
-//! ```text
-//!     x = 12 --------------------> something
-//!                            / 12
-//!                           /
-//!                          /   otherwise
-//!     x = y  ---- switch x ------> something else
-//! ```
-//!
-//! This optimization is meant to cover simple cases
-//! like `?` desugaring. For now, it thus focuses on
-//! simplicity rather than completeness (it notably
-//! sometimes duplicates abusively).
-
-use rustc_middle::mir::*;
-use rustc_middle::ty::TyCtxt;
-use smallvec::SmallVec;
-
-pub struct SeparateConstSwitch;
-
-impl<'tcx> MirPass<'tcx> for SeparateConstSwitch {
-    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        // This pass participates in some as-of-yet untested unsoundness found
-        // in https://github.com/rust-lang/rust/issues/112460
-        sess.mir_opt_level() >= 2 && sess.opts.unstable_opts.unsound_mir_opts
-    }
-
-    fn run_pass(&self, _: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        // If execution did something, applying a simplification layer
-        // helps later passes optimize the copy away.
-        if separate_const_switch(body) > 0 {
-            super::simplify::simplify_cfg(body);
-        }
-    }
-}
-
-/// Returns the amount of blocks that were duplicated
-pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
-    let mut new_blocks: SmallVec<[(BasicBlock, BasicBlock); 6]> = SmallVec::new();
-    let predecessors = body.basic_blocks.predecessors();
-    'block_iter: for (block_id, block) in body.basic_blocks.iter_enumerated() {
-        if let TerminatorKind::SwitchInt {
-            discr: Operand::Copy(switch_place) | Operand::Move(switch_place),
-            ..
-        } = block.terminator().kind
-        {
-            // If the block is on an unwind path, do not
-            // apply the optimization as unwind paths
-            // rely on a unique parent invariant
-            if block.is_cleanup {
-                continue 'block_iter;
-            }
-
-            // If the block has fewer than 2 predecessors, ignore it
-            // we could maybe chain blocks that have exactly one
-            // predecessor, but for now we ignore that
-            if predecessors[block_id].len() < 2 {
-                continue 'block_iter;
-            }
-
-            // First, let's find a non-const place
-            // that determines the result of the switch
-            if let Some(switch_place) = find_determining_place(switch_place, block) {
-                // We now have an input place for which it would
-                // be interesting if predecessors assigned it from a const
-
-                let mut predecessors_left = predecessors[block_id].len();
-                'predec_iter: for predecessor_id in predecessors[block_id].iter().copied() {
-                    let predecessor = &body.basic_blocks[predecessor_id];
-
-                    // First we make sure the predecessor jumps
-                    // in a reasonable way
-                    match &predecessor.terminator().kind {
-                        // The following terminators are
-                        // unconditionally valid
-                        TerminatorKind::Goto { .. } | TerminatorKind::SwitchInt { .. } => {}
-
-                        TerminatorKind::FalseEdge { real_target, .. } => {
-                            if *real_target != block_id {
-                                continue 'predec_iter;
-                            }
-                        }
-
-                        // The following terminators are not allowed
-                        TerminatorKind::UnwindResume
-                        | TerminatorKind::Drop { .. }
-                        | TerminatorKind::Call { .. }
-                        | TerminatorKind::Assert { .. }
-                        | TerminatorKind::FalseUnwind { .. }
-                        | TerminatorKind::Yield { .. }
-                        | TerminatorKind::UnwindTerminate(_)
-                        | TerminatorKind::Return
-                        | TerminatorKind::Unreachable
-                        | TerminatorKind::InlineAsm { .. }
-                        | TerminatorKind::CoroutineDrop => {
-                            continue 'predec_iter;
-                        }
-                    }
-
-                    if is_likely_const(switch_place, predecessor) {
-                        new_blocks.push((predecessor_id, block_id));
-                        predecessors_left -= 1;
-                        if predecessors_left < 2 {
-                            // If the original block only has one predecessor left,
-                            // we have nothing left to do
-                            break 'predec_iter;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    // Once the analysis is done, perform the duplication
-    let body_span = body.span;
-    let copied_blocks = new_blocks.len();
-    let blocks = body.basic_blocks_mut();
-    for (pred_id, target_id) in new_blocks {
-        let new_block = blocks[target_id].clone();
-        let new_block_id = blocks.push(new_block);
-        let terminator = blocks[pred_id].terminator_mut();
-
-        match terminator.kind {
-            TerminatorKind::Goto { ref mut target } => {
-                *target = new_block_id;
-            }
-
-            TerminatorKind::FalseEdge { ref mut real_target, .. } => {
-                if *real_target == target_id {
-                    *real_target = new_block_id;
-                }
-            }
-
-            TerminatorKind::SwitchInt { ref mut targets, .. } => {
-                targets.all_targets_mut().iter_mut().for_each(|x| {
-                    if *x == target_id {
-                        *x = new_block_id;
-                    }
-                });
-            }
-
-            TerminatorKind::UnwindResume
-            | TerminatorKind::UnwindTerminate(_)
-            | TerminatorKind::Return
-            | TerminatorKind::Unreachable
-            | TerminatorKind::CoroutineDrop
-            | TerminatorKind::Assert { .. }
-            | TerminatorKind::FalseUnwind { .. }
-            | TerminatorKind::Drop { .. }
-            | TerminatorKind::Call { .. }
-            | TerminatorKind::InlineAsm { .. }
-            | TerminatorKind::Yield { .. } => {
-                span_bug!(
-                    body_span,
-                    "basic block terminator had unexpected kind {:?}",
-                    &terminator.kind
-                )
-            }
-        }
-    }
-
-    copied_blocks
-}
-
-/// This function describes a rough heuristic guessing
-/// whether a place is last set with a const within the block.
-/// Notably, it will be overly pessimistic in cases that are already
-/// not handled by `separate_const_switch`.
-fn is_likely_const<'tcx>(mut tracked_place: Place<'tcx>, block: &BasicBlockData<'tcx>) -> bool {
-    for statement in block.statements.iter().rev() {
-        match &statement.kind {
-            StatementKind::Assign(assign) => {
-                if assign.0 == tracked_place {
-                    match assign.1 {
-                        // These rvalues are definitely constant
-                        Rvalue::Use(Operand::Constant(_))
-                        | Rvalue::Ref(_, _, _)
-                        | Rvalue::AddressOf(_, _)
-                        | Rvalue::Cast(_, Operand::Constant(_), _)
-                        | Rvalue::NullaryOp(_, _)
-                        | Rvalue::ShallowInitBox(_, _)
-                        | Rvalue::UnaryOp(_, Operand::Constant(_)) => return true,
-
-                        // These rvalues make things ambiguous
-                        Rvalue::Repeat(_, _)
-                        | Rvalue::ThreadLocalRef(_)
-                        | Rvalue::Len(_)
-                        | Rvalue::BinaryOp(_, _)
-                        | Rvalue::CheckedBinaryOp(_, _)
-                        | Rvalue::Aggregate(_, _) => return false,
-
-                        // These rvalues move the place to track
-                        Rvalue::Cast(_, Operand::Copy(place) | Operand::Move(place), _)
-                        | Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
-                        | Rvalue::CopyForDeref(place)
-                        | Rvalue::UnaryOp(_, Operand::Copy(place) | Operand::Move(place))
-                        | Rvalue::Discriminant(place) => tracked_place = place,
-                    }
-                }
-            }
-
-            // If the discriminant is set, it is always set
-            // as a constant, so the job is done.
-            // As we are **ignoring projections**, if the place
-            // we are tracking sees its discriminant be set,
-            // that means we had to be tracking the discriminant
-            // specifically (as it is impossible to switch over
-            // an enum directly, and if we were switching over
-            // its content, we would have had to at least cast it to
-            // some variant first)
-            StatementKind::SetDiscriminant { place, .. } => {
-                if **place == tracked_place {
-                    return true;
-                }
-            }
-
-            // These statements have no influence on the place
-            // we are interested in
-            StatementKind::FakeRead(_)
-            | StatementKind::Deinit(_)
-            | StatementKind::StorageLive(_)
-            | StatementKind::Retag(_, _)
-            | StatementKind::AscribeUserType(_, _)
-            | StatementKind::PlaceMention(..)
-            | StatementKind::Coverage(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Intrinsic(_)
-            | StatementKind::ConstEvalCounter
-            | StatementKind::Nop => {}
-        }
-    }
-
-    // If no good reason for the place to be const is found,
-    // give up. We could maybe go up predecessors, but in
-    // most cases giving up now should be sufficient.
-    false
-}
-
-/// Finds a unique place that entirely determines the value
-/// of `switch_place`, if it exists. This is only a heuristic.
-/// Ideally we would like to track multiple determining places
-/// for some edge cases, but one is enough for a lot of situations.
-fn find_determining_place<'tcx>(
-    mut switch_place: Place<'tcx>,
-    block: &BasicBlockData<'tcx>,
-) -> Option<Place<'tcx>> {
-    for statement in block.statements.iter().rev() {
-        match &statement.kind {
-            StatementKind::Assign(op) => {
-                if op.0 != switch_place {
-                    continue;
-                }
-
-                match op.1 {
-                    // The following rvalues move the place
-                    // that may be const in the predecessor
-                    Rvalue::Use(Operand::Move(new) | Operand::Copy(new))
-                    | Rvalue::UnaryOp(_, Operand::Copy(new) | Operand::Move(new))
-                    | Rvalue::CopyForDeref(new)
-                    | Rvalue::Cast(_, Operand::Move(new) | Operand::Copy(new), _)
-                    | Rvalue::Repeat(Operand::Move(new) | Operand::Copy(new), _)
-                    | Rvalue::Discriminant(new)
-                    => switch_place = new,
-
-                    // The following rvalues might still make the block
-                    // be valid but for now we reject them
-                    Rvalue::Len(_)
-                    | Rvalue::Ref(_, _, _)
-                    | Rvalue::BinaryOp(_, _)
-                    | Rvalue::CheckedBinaryOp(_, _)
-                    | Rvalue::Aggregate(_, _)
-
-                    // The following rvalues definitely mean we cannot
-                    // or should not apply this optimization
-                    | Rvalue::Use(Operand::Constant(_))
-                    | Rvalue::Repeat(Operand::Constant(_), _)
-                    | Rvalue::ThreadLocalRef(_)
-                    | Rvalue::AddressOf(_, _)
-                    | Rvalue::NullaryOp(_, _)
-                    | Rvalue::ShallowInitBox(_, _)
-                    | Rvalue::UnaryOp(_, Operand::Constant(_))
-                    | Rvalue::Cast(_, Operand::Constant(_), _) => return None,
-                }
-            }
-
-            // These statements have no influence on the place
-            // we are interested in
-            StatementKind::FakeRead(_)
-            | StatementKind::Deinit(_)
-            | StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Retag(_, _)
-            | StatementKind::AscribeUserType(_, _)
-            | StatementKind::PlaceMention(..)
-            | StatementKind::Coverage(_)
-            | StatementKind::Intrinsic(_)
-            | StatementKind::ConstEvalCounter
-            | StatementKind::Nop => {}
-
-            // If the discriminant is set, it is always set
-            // as a constant, so the job is already done.
-            // As we are **ignoring projections**, if the place
-            // we are tracking sees its discriminant be set,
-            // that means we had to be tracking the discriminant
-            // specifically (as it is impossible to switch over
-            // an enum directly, and if we were switching over
-            // its content, we would have had to at least cast it to
-            // some variant first)
-            StatementKind::SetDiscriminant { place, .. } => {
-                if **place == switch_place {
-                    return None;
-                }
-            }
-        }
-    }
-
-    Some(switch_place)
-}