author    The Miri Cronjob Bot <miri@cron.bot>  2023-10-24 05:17:56 +0000
committer The Miri Cronjob Bot <miri@cron.bot>  2023-10-24 05:17:56 +0000
commit    ddc76e232aac96d7ca30735aa78d9ea8ae43d721 (patch)
tree      373e61763313b41349bafb994870b8ac5d555c39 /compiler/rustc_mir_transform/src
parent    e42a8d82d6bd0913a99e91a9a64b9e0e29c1f440 (diff)
parent    f1a5ce19f5aa0cf61ed7b9f75b30e610befeed72 (diff)
Merge from rustc
Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/cost_checker.rs          98
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs      184
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                92
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs       759
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs                    3
-rw-r--r--  compiler/rustc_mir_transform/src/remove_uninit_drops.rs    7
6 files changed, 960 insertions, 183 deletions
diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs
new file mode 100644
index 00000000000..9bb26693cb2
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/cost_checker.rs
@@ -0,0 +1,98 @@
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
+
+const INSTR_COST: usize = 5;
+const CALL_PENALTY: usize = 25;
+const LANDINGPAD_PENALTY: usize = 50;
+const RESUME_PENALTY: usize = 45;
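+
+// Illustrative arithmetic with the constants above: a `Drop` or `Call` that can
+// unwind to a cleanup block costs CALL_PENALTY + LANDINGPAD_PENALTY = 25 + 50 = 75,
+// while an ordinary statement costs INSTR_COST = 5.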
+
+/// Estimates the cost of a MIR body. Used by the inliner to decide whether inlining is
+/// worthwhile, and by jump threading to bound the amount of duplicated MIR.
+#[derive(Clone)]
+pub(crate) struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    cost: usize,
+    callee_body: &'b Body<'tcx>,
+    instance: Option<ty::Instance<'tcx>>,
+}
+
+impl<'b, 'tcx> CostChecker<'b, 'tcx> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        instance: Option<ty::Instance<'tcx>>,
+        callee_body: &'b Body<'tcx>,
+    ) -> CostChecker<'b, 'tcx> {
+        CostChecker { tcx, param_env, callee_body, instance, cost: 0 }
+    }
+
+    pub fn cost(&self) -> usize {
+        self.cost
+    }
+
+    fn instantiate_ty(&self, v: Ty<'tcx>) -> Ty<'tcx> {
+        if let Some(instance) = self.instance {
+            instance.instantiate_mir(self.tcx, ty::EarlyBinder::bind(&v))
+        } else {
+            v
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.cost += INSTR_COST,
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. } => {
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = self.instantiate_ty(place.ty(self.callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) {
+                    self.cost += CALL_PENALTY;
+                    if let UnwindAction::Cleanup(_) = unwind {
+                        self.cost += LANDINGPAD_PENALTY;
+                    }
+                } else {
+                    self.cost += INSTR_COST;
+                }
+            }
+            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
+                let fn_ty = self.instantiate_ty(f.const_.ty());
+                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+                    // Don't give intrinsics the extra penalty for calls
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Assert { unwind, .. } => {
+                self.cost += CALL_PENALTY;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
+            TerminatorKind::InlineAsm { unwind, .. } => {
+                self.cost += INSTR_COST;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            _ => self.cost += INSTR_COST,
+        }
+    }
+}
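For orientation, a minimal sketch of how a pass can drive this visitor (hypothetical caller code, not part of the patch; `tcx`, `param_env`, and `body` stand for values a `MirPass` already has in scope):

    // Bring the visitor trait into scope.
    use rustc_middle::mir::visit::Visitor;

    let mut checker = CostChecker::new(tcx, param_env, None, body);
    for (bb, data) in body.basic_blocks.iter_enumerated() {
        checker.visit_basic_block_data(bb, data);
    }
    let total_cost = checker.cost();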
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index d18fdaaf22f..59156b2427c 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -9,9 +9,9 @@ use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind}
 use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
 use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
 use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use rustc_mir_dataflow::on_all_children_bits;
 use rustc_mir_dataflow::on_lookup_result_bits;
 use rustc_mir_dataflow::MoveDataParamEnv;
-use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
 use rustc_mir_dataflow::{Analysis, ResultsCursor};
 use rustc_span::Span;
 use rustc_target::abi::{FieldIdx, VariantIdx};
@@ -54,16 +54,10 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
 
         let def_id = body.source.def_id();
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
-        let move_data = match MoveData::gather_moves(body, tcx, param_env) {
-            Ok(move_data) => move_data,
-            Err((move_data, _)) => {
-                tcx.sess.delay_span_bug(
-                    body.span,
-                    "No `move_errors` should be allowed in MIR borrowck",
-                );
-                move_data
-            }
-        };
+        // For types that do not need dropping, drop elaboration is trivial, so we only need
+        // to track init/uninit for types that do need dropping.
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
         let elaborate_patch = {
             let env = MoveDataParamEnv { move_data, param_env };
 
@@ -178,13 +172,19 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
                 let mut some_live = false;
                 let mut some_dead = false;
                 let mut children_count = 0;
-                on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
-                    let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
-                    debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
-                    some_live |= live;
-                    some_dead |= dead;
-                    children_count += 1;
-                });
+                on_all_children_bits(
+                    self.tcx(),
+                    self.body(),
+                    self.ctxt.move_data(),
+                    path,
+                    |child| {
+                        let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+                        debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+                        some_live |= live;
+                        some_dead |= dead;
+                        children_count += 1;
+                    },
+                );
                 ((some_live, some_dead), children_count != 1)
             }
         };
@@ -296,26 +296,36 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     fn collect_drop_flags(&mut self) {
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             let terminator = data.terminator();
-            let place = match terminator.kind {
-                TerminatorKind::Drop { ref place, .. } => place,
-                _ => continue,
-            };
-
-            self.init_data.seek_before(self.body.terminator_loc(bb));
+            let TerminatorKind::Drop { ref place, .. } = terminator.kind else { continue };
 
             let path = self.move_data().rev_lookup.find(place.as_ref());
             debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
 
-            let path = match path {
-                LookupResult::Exact(e) => e,
-                LookupResult::Parent(None) => continue,
+            match path {
+                LookupResult::Exact(path) => {
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                        let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
+                        debug!(
+                            "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                            child,
+                            place,
+                            path,
+                            (maybe_live, maybe_dead)
+                        );
+                        if maybe_live && maybe_dead {
+                            self.create_drop_flag(child, terminator.source_info.span)
+                        }
+                    });
+                }
+                LookupResult::Parent(None) => {}
                 LookupResult::Parent(Some(parent)) => {
-                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
-
                     if self.body.local_decls[place.local].is_deref_temp() {
                         continue;
                     }
 
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
                     if maybe_dead {
                         self.tcx.sess.delay_span_bug(
                             terminator.source_info.span,
@@ -324,80 +334,74 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
                             ),
                         );
                     }
-                    continue;
                 }
             };
-
-            on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
-                let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
-                debug!(
-                    "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
-                    child,
-                    place,
-                    path,
-                    (maybe_live, maybe_dead)
-                );
-                if maybe_live && maybe_dead {
-                    self.create_drop_flag(child, terminator.source_info.span)
-                }
-            });
         }
     }
 
     fn elaborate_drops(&mut self) {
+        // This function should mirror what `collect_drop_flags` does.
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
-            let loc = Location { block: bb, statement_index: data.statements.len() };
             let terminator = data.terminator();
+            let TerminatorKind::Drop { place, target, unwind, replace } = terminator.kind else {
+                continue;
+            };
 
-            match terminator.kind {
-                TerminatorKind::Drop { place, target, unwind, replace } => {
-                    self.init_data.seek_before(loc);
-                    match self.move_data().rev_lookup.find(place.as_ref()) {
-                        LookupResult::Exact(path) => {
-                            let unwind = if data.is_cleanup {
-                                Unwind::InCleanup
-                            } else {
-                                match unwind {
-                                    UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
-                                    UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
-                                    UnwindAction::Unreachable => {
-                                        Unwind::To(self.patch.unreachable_cleanup_block())
-                                    }
-                                    UnwindAction::Terminate(reason) => {
-                                        debug_assert_ne!(
-                                            reason,
-                                            UnwindTerminateReason::InCleanup,
-                                            "we are not in a cleanup block, InCleanup reason should be impossible"
-                                        );
-                                        Unwind::To(self.patch.terminate_block(reason))
-                                    }
-                                }
-                            };
-                            elaborate_drop(
-                                &mut Elaborator { ctxt: self },
-                                terminator.source_info,
-                                place,
-                                path,
-                                target,
-                                unwind,
-                                bb,
-                            )
+            // This place does not need dropping. It does not have an associated move-path, so the
+            // match below will conservatively keep an unconditional drop. As that drop is useless,
+            // just remove it here and now.
+            if !place
+                .ty(&self.body.local_decls, self.tcx)
+                .ty
+                .needs_drop(self.tcx, self.env.param_env)
+            {
+                self.patch.patch_terminator(bb, TerminatorKind::Goto { target });
+                continue;
+            }
+
+            let path = self.move_data().rev_lookup.find(place.as_ref());
+            match path {
+                LookupResult::Exact(path) => {
+                    let unwind = match unwind {
+                        _ if data.is_cleanup => Unwind::InCleanup,
+                        UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
+                        UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
+                        UnwindAction::Unreachable => {
+                            Unwind::To(self.patch.unreachable_cleanup_block())
                         }
-                        LookupResult::Parent(..) => {
-                            if !replace {
-                                self.tcx.sess.delay_span_bug(
-                                    terminator.source_info.span,
-                                    format!("drop of untracked value {bb:?}"),
-                                );
-                            }
-                            // A drop and replace behind a pointer/array/whatever.
-                            // The borrow checker requires that these locations are initialized before the assignment,
-                            // so we just leave an unconditional drop.
-                            assert!(!data.is_cleanup);
+                        UnwindAction::Terminate(reason) => {
+                            debug_assert_ne!(
+                                reason,
+                                UnwindTerminateReason::InCleanup,
+                                "we are not in a cleanup block, InCleanup reason should be impossible"
+                            );
+                            Unwind::To(self.patch.terminate_block(reason))
                         }
+                    };
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    elaborate_drop(
+                        &mut Elaborator { ctxt: self },
+                        terminator.source_info,
+                        place,
+                        path,
+                        target,
+                        unwind,
+                        bb,
+                    )
+                }
+                LookupResult::Parent(None) => {}
+                LookupResult::Parent(Some(_)) => {
+                    if !replace {
+                        self.tcx.sess.delay_span_bug(
+                            terminator.source_info.span,
+                            format!("drop of untracked value {bb:?}"),
+                        );
                     }
+                    // A drop and replace behind a pointer/array/whatever.
+                    // The borrow checker requires that these locations are initialized before the assignment,
+                    // so we just leave an unconditional drop.
+                    assert!(!data.is_cleanup);
                 }
-                _ => continue,
             }
         }
     }
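To make the new filtering concrete, an illustrative example (not from this patch): only move paths whose type needs dropping are now tracked, so a body like the one below tracks `s` but not `n`, and `s` gets a runtime drop flag because it is only conditionally moved.

    fn example(flag: bool) {
        let s = String::new(); // `String` needs drop: tracked move path
        let n = 0u32;          // `u32` never needs drop: not tracked
        if flag {
            std::mem::forget(s); // conditional move: `s` needs a drop flag
        }
        let _ = n;
    }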
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 757b2aeca7b..8b33e00c63c 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -14,6 +14,7 @@ use rustc_session::config::OptLevel;
 use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
+use crate::cost_checker::CostChecker;
 use crate::simplify::{remove_dead_blocks, CfgSimplifier};
 use crate::util;
 use crate::MirPass;
@@ -22,11 +23,6 @@ use std::ops::{Range, RangeFrom};
 
 pub(crate) mod cycle;
 
-const INSTR_COST: usize = 5;
-const CALL_PENALTY: usize = 25;
-const LANDINGPAD_PENALTY: usize = 50;
-const RESUME_PENALTY: usize = 45;
-
 const TOP_DOWN_DEPTH_LIMIT: usize = 5;
 
 pub struct Inline;
@@ -479,13 +475,8 @@ impl<'tcx> Inliner<'tcx> {
 
         // FIXME: Give a bonus to functions with only a single caller
 
-        let mut checker = CostChecker {
-            tcx: self.tcx,
-            param_env: self.param_env,
-            instance: callsite.callee,
-            callee_body,
-            cost: 0,
-        };
+        let mut checker =
+            CostChecker::new(self.tcx, self.param_env, Some(callsite.callee), callee_body);
 
         // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
@@ -530,7 +521,7 @@ impl<'tcx> Inliner<'tcx> {
         // That attribute is often applied to very large functions that exceed LLVM's (very
         // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
         // Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
-        let cost = checker.cost;
+        let cost = checker.cost();
         if cost <= threshold {
             debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
             Ok(())
@@ -803,81 +794,6 @@ impl<'tcx> Inliner<'tcx> {
     }
 }
 
-/// Verify that the callee body is compatible with the caller.
-///
-/// This visitor mostly computes the inlining cost,
-/// but also needs to verify that types match because of normalization failure.
-struct CostChecker<'b, 'tcx> {
-    tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    cost: usize,
-    callee_body: &'b Body<'tcx>,
-    instance: ty::Instance<'tcx>,
-}
-
-impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
-        // Don't count StorageLive/StorageDead in the inlining cost.
-        match statement.kind {
-            StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Deinit(_)
-            | StatementKind::Nop => {}
-            _ => self.cost += INSTR_COST,
-        }
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
-        let tcx = self.tcx;
-        match terminator.kind {
-            TerminatorKind::Drop { ref place, unwind, .. } => {
-                // If the place doesn't actually need dropping, treat it like a regular goto.
-                let ty = self.instance.instantiate_mir(
-                    tcx,
-                    ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty),
-                );
-                if ty.needs_drop(tcx, self.param_env) {
-                    self.cost += CALL_PENALTY;
-                    if let UnwindAction::Cleanup(_) = unwind {
-                        self.cost += LANDINGPAD_PENALTY;
-                    }
-                } else {
-                    self.cost += INSTR_COST;
-                }
-            }
-            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
-                let fn_ty =
-                    self.instance.instantiate_mir(tcx, ty::EarlyBinder::bind(&f.const_.ty()));
-                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind()
-                    && tcx.is_intrinsic(def_id)
-                {
-                    // Don't give intrinsics the extra penalty for calls
-                    INSTR_COST
-                } else {
-                    CALL_PENALTY
-                };
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::Assert { unwind, .. } => {
-                self.cost += CALL_PENALTY;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
-            TerminatorKind::InlineAsm { unwind, .. } => {
-                self.cost += INSTR_COST;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            _ => self.cost += INSTR_COST,
-        }
-    }
-}
-
 /**
  * Integrator.
  *
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
new file mode 100644
index 00000000000..7b918be4474
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -0,0 +1,759 @@
+//! A jump threading optimization.
+//!
+//! This optimization seeks to replace join-then-switch control flow patterns by straight jumps
+//!    X = 0                                      X = 0
+//! ------------\      /--------              ------------
+//!    X = 1     X----X SwitchInt(X)     =>       X = 1
+//! ------------/      \--------              ------------
+//!
+//!
+//! We proceed by walking the CFG backwards starting from each `SwitchInt` terminator,
+//! looking for assignments that will turn the `SwitchInt` into a simple `Goto`.
+//!
+//! The algorithm maintains a set of replacement conditions:
+//! - `conditions[place]` contains `Condition { value, polarity: Eq, target }`
+//!   if assigning `value` to `place` turns the `SwitchInt` into `Goto { target }`.
+//! - `conditions[place]` contains `Condition { value, polarity: Ne, target }`
+//!   if assigning anything different from `value` to `place` turns the `SwitchInt`
+//!   into `Goto { target }`.
+//!
+//! In this file, we denote as `place ?= value` the existence of a replacement condition
+//! on `place` with given `value`, irrespective of the polarity and target of that
+//! replacement condition.
+//!
+//! We then walk the CFG backwards transforming the set of conditions.
+//! When we find a fulfilling assignment, we record a `ThreadingOpportunity`.
+//! All `ThreadingOpportunity`s are applied to the body, by duplicating blocks if required.
+//!
+//! The optimization search can be very heavy, as it performs a DFS on MIR starting from
+//! each `SwitchInt` terminator. To manage the complexity, we:
+//! - bound the maximum depth by a constant `MAX_BACKTRACK`;
+//! - only traverse `Goto` terminators.
+//!
+//! We try to avoid creating irreducible control-flow by not threading through a loop header.
+//!
+//! Likewise, applying the optimization can create a lot of new MIR, so we bound the instruction
+//! cost by `MAX_COST`.
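+//!
+//! As a hand-written, simplified illustration (not actual MIR output), given
+//!     bb0: _1 = 0; goto -> bb2;
+//!     bb1: _1 = 1; goto -> bb2;
+//!     bb2: switchInt(_1) -> [0: bb3, otherwise: bb4];
+//! starting at bb2 we record `_1 ?= 0` with `Eq -> bb3` and `Ne -> bb4`. Walking
+//! backwards, the assignment `_1 = 0` in bb0 fulfills the `Eq` condition, so the
+//! `bb0 -> bb2` edge can be redirected straight to bb3 (duplicating bb2 if needed).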
+
+use rustc_arena::DroplessArena;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem};
+
+use crate::cost_checker::CostChecker;
+use crate::MirPass;
+
+pub struct JumpThreading;
+
+const MAX_BACKTRACK: usize = 5;
+const MAX_COST: usize = 100;
+const MAX_PLACES: usize = 100;
+
+impl<'tcx> MirPass<'tcx> for JumpThreading {
+    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        sess.mir_opt_level() >= 4
+    }
+
+    #[instrument(skip_all, level = "debug")]
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let def_id = body.source.def_id();
+        debug!(?def_id);
+
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
+        let map = Map::new(tcx, body, Some(MAX_PLACES));
+        let loop_headers = loop_headers(body);
+
+        let arena = DroplessArena::default();
+        let mut finder = TOFinder {
+            tcx,
+            param_env,
+            body,
+            arena: &arena,
+            map: &map,
+            loop_headers: &loop_headers,
+            opportunities: Vec::new(),
+        };
+
+        for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
+            debug!(?bb, term = ?bbdata.terminator());
+            if bbdata.is_cleanup || loop_headers.contains(bb) {
+                continue;
+            }
+            let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { continue };
+            let Some(discr) = discr.place() else { continue };
+            debug!(?discr, ?bb);
+
+            let discr_ty = discr.ty(body, tcx).ty;
+            let Ok(discr_layout) = tcx.layout_of(param_env.and(discr_ty)) else { continue };
+
+            let Some(discr) = finder.map.find(discr.as_ref()) else { continue };
+            debug!(?discr);
+
+            let cost = CostChecker::new(tcx, param_env, None, body);
+
+            let mut state = State::new(ConditionSet::default(), &finder.map);
+
+            let conds = if let Some((value, then, else_)) = targets.as_static_if() {
+                let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else {
+                    continue;
+                };
+                arena.alloc_from_iter([
+                    Condition { value, polarity: Polarity::Eq, target: then },
+                    Condition { value, polarity: Polarity::Ne, target: else_ },
+                ])
+            } else {
+                arena.alloc_from_iter(targets.iter().filter_map(|(value, target)| {
+                    let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+                    Some(Condition { value, polarity: Polarity::Eq, target })
+                }))
+            };
+            let conds = ConditionSet(conds);
+            state.insert_value_idx(discr, conds, &finder.map);
+
+            finder.find_opportunity(bb, state, cost, 0);
+        }
+
+        let opportunities = finder.opportunities;
+        debug!(?opportunities);
+        if opportunities.is_empty() {
+            return;
+        }
+
+        // Verify that we do not thread through a loop header.
+        for to in opportunities.iter() {
+            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
+        }
+        OpportunitySet::new(body, opportunities).apply(body);
+    }
+}
+
+#[derive(Debug)]
+struct ThreadingOpportunity {
+    /// The list of `BasicBlock`s from the one that found the opportunity to the `SwitchInt`.
+    chain: Vec<BasicBlock>,
+    /// The `SwitchInt` will be replaced by `Goto { target }`.
+    target: BasicBlock,
+}
+
+struct TOFinder<'tcx, 'a> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &'a Body<'tcx>,
+    map: &'a Map,
+    loop_headers: &'a BitSet<BasicBlock>,
+    /// We use an arena to avoid cloning the slices when cloning `state`.
+    arena: &'a DroplessArena,
+    opportunities: Vec<ThreadingOpportunity>,
+}
+
+/// Represents the following statement: if we can prove that the current local is
+/// equal/not-equal to `value`, jump to `target`.
+#[derive(Copy, Clone, Debug)]
+struct Condition {
+    value: ScalarInt,
+    polarity: Polarity,
+    target: BasicBlock,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+enum Polarity {
+    Ne,
+    Eq,
+}
+
+impl Condition {
+    fn matches(&self, value: ScalarInt) -> bool {
+        (self.value == value) == (self.polarity == Polarity::Eq)
+    }
+
+    fn inv(mut self) -> Self {
+        self.polarity = match self.polarity {
+            Polarity::Eq => Polarity::Ne,
+            Polarity::Ne => Polarity::Eq,
+        };
+        self
+    }
+}
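+
+// Illustrative behavior (hypothetical values): a condition with value 5 and
+// polarity Eq matches only 5, while the same value with polarity Ne matches
+// every scalar except 5; `inv` swaps the two polarities.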
+
+#[derive(Copy, Clone, Debug, Default)]
+struct ConditionSet<'a>(&'a [Condition]);
+
+impl<'a> ConditionSet<'a> {
+    fn iter(self) -> impl Iterator<Item = Condition> + 'a {
+        self.0.iter().copied()
+    }
+
+    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> + 'a {
+        self.iter().filter(move |c| c.matches(value))
+    }
+
+    fn map(self, arena: &'a DroplessArena, f: impl Fn(Condition) -> Condition) -> ConditionSet<'a> {
+        ConditionSet(arena.alloc_from_iter(self.iter().map(f)))
+    }
+}
+
+impl<'tcx, 'a> TOFinder<'tcx, 'a> {
+    fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool {
+        state.all(|cs| cs.0.is_empty())
+    }
+
+    /// Recursion entry point to find threading opportunities.
+    #[instrument(level = "trace", skip(self, cost), ret)]
+    fn find_opportunity(
+        &mut self,
+        bb: BasicBlock,
+        mut state: State<ConditionSet<'a>>,
+        mut cost: CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        // Do not thread through loop headers.
+        if self.loop_headers.contains(bb) {
+            return;
+        }
+
+        debug!(cost = ?cost.cost());
+        for (statement_index, stmt) in
+            self.body.basic_blocks[bb].statements.iter().enumerate().rev()
+        {
+            if self.is_empty(&state) {
+                return;
+            }
+
+            cost.visit_statement(stmt, Location { block: bb, statement_index });
+            if cost.cost() > MAX_COST {
+                return;
+            }
+
+            // Attempt to turn the `current_condition` on `lhs` into a condition on another place.
+            self.process_statement(bb, stmt, &mut state);
+
+            // When a statement mutates a place, assignments to that place that happen
+            // above the mutation cannot fulfill a condition.
+            //   _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`.
+            //   _1 = 6
+            if let Some((lhs, tail)) = self.mutated_statement(stmt) {
+                state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::default());
+            }
+        }
+
+        if self.is_empty(&state) || depth >= MAX_BACKTRACK {
+            return;
+        }
+
+        let last_non_rec = self.opportunities.len();
+
+        let predecessors = &self.body.basic_blocks.predecessors()[bb];
+        if let &[pred] = &predecessors[..] && bb != START_BLOCK {
+            let term = self.body.basic_blocks[pred].terminator();
+            match term.kind {
+                TerminatorKind::SwitchInt { ref discr, ref targets } => {
+                    self.process_switch_int(discr, targets, bb, &mut state);
+                    self.find_opportunity(pred, state, cost, depth + 1);
+                }
+                _ => self.recurse_through_terminator(pred, &state, &cost, depth),
+            }
+        } else {
+            for &pred in predecessors {
+                self.recurse_through_terminator(pred, &state, &cost, depth);
+            }
+        }
+
+        let new_tos = &mut self.opportunities[last_non_rec..];
+        debug!(?new_tos);
+
+        // Try to deduplicate threading opportunities.
+        if new_tos.len() > 1
+            && new_tos.len() == predecessors.len()
+            && predecessors
+                .iter()
+                .zip(new_tos.iter())
+                .all(|(&pred, to)| to.chain == &[pred] && to.target == new_tos[0].target)
+        {
+            // All predecessors have a threading opportunity, and they all point to the same block.
+            debug!(?new_tos, "dedup");
+            let first = &mut new_tos[0];
+            *first = ThreadingOpportunity { chain: vec![bb], target: first.target };
+            self.opportunities.truncate(last_non_rec + 1);
+            return;
+        }
+
+        for op in self.opportunities[last_non_rec..].iter_mut() {
+            op.chain.push(bb);
+        }
+    }
+
+    /// Extract the mutated place from a statement.
+    ///
+    /// This method returns the `Place` so we can flood the state in case of a partial assignment.
+    ///     (_1 as Ok).0 = _5;
+    ///     (_1 as Err).0 = _6;
+    /// We want to ensure that a `SwitchInt((_1 as Ok).0)` does not see the first assignment, as
+    /// the value may have been mangled by the second assignment.
+    ///
+    /// In case we assign to a discriminant, we return `Some(TrackElem::Discriminant)`, so we
+    /// flood only the discriminant and preserve the variant fields.
+    ///     (_1 as Some).0 = _6;
+    ///     SetDiscriminant(_1, 1);
+    ///     switchInt((_1 as Some).0)
+    #[instrument(level = "trace", skip(self), ret)]
+    fn mutated_statement(
+        &self,
+        stmt: &Statement<'tcx>,
+    ) -> Option<(Place<'tcx>, Option<TrackElem>)> {
+        match stmt.kind {
+            StatementKind::Assign(box (place, _))
+            | StatementKind::Deinit(box place) => Some((place, None)),
+            StatementKind::SetDiscriminant { box place, variant_index: _ } => {
+                Some((place, Some(TrackElem::Discriminant)))
+            }
+            StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+                Some((Place::from(local), None))
+            }
+            StatementKind::Retag(..)
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(..))
+            // copy_nonoverlapping takes pointers and mutates the pointed-to value.
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(..))
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => None,
+        }
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_operand(
+        &mut self,
+        bb: BasicBlock,
+        lhs: PlaceIndex,
+        rhs: &Operand<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        match rhs {
+            // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
+            Operand::Constant(constant) => {
+                let conditions = state.try_get_idx(lhs, self.map)?;
+                let constant =
+                    constant.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
+                conditions.iter_matches(constant).for_each(register_opportunity);
+            }
+            // Transfer the conditions on the copied rhs.
+            Operand::Move(rhs) | Operand::Copy(rhs) => {
+                let rhs = self.map.find(rhs.as_ref())?;
+                state.insert_place_idx(rhs, lhs, self.map);
+            }
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_statement(
+        &mut self,
+        bb: BasicBlock,
+        stmt: &Statement<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        // Below, `lhs` is the return value of `mutated_statement`,
+        // the place to which `conditions` apply.
+
+        let discriminant_for_variant = |enum_ty: Ty<'tcx>, variant_index| {
+            let discr = enum_ty.discriminant_for_variant(self.tcx, variant_index)?;
+            let discr_layout = self.tcx.layout_of(self.param_env.and(discr.ty)).ok()?;
+            let scalar = ScalarInt::try_from_uint(discr.val, discr_layout.size)?;
+            Some(Operand::const_from_scalar(
+                self.tcx,
+                discr.ty,
+                scalar.into(),
+                rustc_span::DUMMY_SP,
+            ))
+        };
+
+        match &stmt.kind {
+            // If we expect `discriminant(place) ?= A`,
+            // we have an opportunity if `variant_index ?= A`.
+            StatementKind::SetDiscriminant { box place, variant_index } => {
+                let discr_target = self.map.find_discr(place.as_ref())?;
+                let enum_ty = place.ty(self.body, self.tcx).ty;
+                let discr = discriminant_for_variant(enum_ty, *variant_index)?;
+                self.process_operand(bb, discr_target, &discr, state)?;
+            }
+            // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
+                Operand::Copy(place) | Operand::Move(place),
+            )) => {
+                let conditions = state.try_get(place.as_ref(), self.map)?;
+                conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
+            }
+            StatementKind::Assign(box (lhs_place, rhs)) => {
+                if let Some(lhs) = self.map.find(lhs_place.as_ref()) {
+                    match rhs {
+                        Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
+                        // Transfer the conditions on the copy rhs.
+                        Rvalue::CopyForDeref(rhs) => {
+                            self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
+                        }
+                        Rvalue::Discriminant(rhs) => {
+                            let rhs = self.map.find_discr(rhs.as_ref())?;
+                            state.insert_place_idx(rhs, lhs, self.map);
+                        }
+                        // Process the discriminant (for enums) and each field of the aggregate.
+                        Rvalue::Aggregate(box ref kind, ref operands) => {
+                            let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
+                            let lhs = match kind {
+                                // Do not support unions.
+                                AggregateKind::Adt(.., Some(_)) => return None,
+                                AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
+                                    if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
+                                        && let Some(discr_value) = discriminant_for_variant(agg_ty, *variant_index)
+                                    {
+                                        self.process_operand(bb, discr_target, &discr_value, state);
+                                    }
+                                    self.map.apply(lhs, TrackElem::Variant(*variant_index))?
+                                }
+                                _ => lhs,
+                            };
+                            for (field_index, operand) in operands.iter_enumerated() {
+                                if let Some(field) =
+                                    self.map.apply(lhs, TrackElem::Field(field_index))
+                                {
+                                    self.process_operand(bb, field, operand, state);
+                                }
+                            }
+                        }
+                        // Transfer the conditions on the copied rhs, after inverting the polarity.
+                        Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let conds = conditions.map(self.arena, Condition::inv);
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+                        // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`.
+                        // Create a condition on `rhs ?= B`.
+                        Rvalue::BinaryOp(
+                            op,
+                            box (
+                                Operand::Move(place) | Operand::Copy(place),
+                                Operand::Constant(value),
+                            )
+                            | box (
+                                Operand::Constant(value),
+                                Operand::Move(place) | Operand::Copy(place),
+                            ),
+                        ) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let equals = match op {
+                                BinOp::Eq => ScalarInt::TRUE,
+                                BinOp::Ne => ScalarInt::FALSE,
+                                _ => return None,
+                            };
+                            let value = value
+                                .const_
+                                .normalize(self.tcx, self.param_env)
+                                .try_to_scalar_int()?;
+                            let conds = conditions.map(self.arena, |c| Condition {
+                                value,
+                                polarity: if c.matches(equals) {
+                                    Polarity::Eq
+                                } else {
+                                    Polarity::Ne
+                                },
+                                ..c
+                            });
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self, cost))]
+    fn recurse_through_terminator(
+        &mut self,
+        bb: BasicBlock,
+        state: &State<ConditionSet<'a>>,
+        cost: &CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        let term = self.body.basic_blocks[bb].terminator();
+        let place_to_flood = match term.kind {
+            // We come from a target, so those are not possible.
+            TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::CoroutineDrop => bug!("{term:?} has no successors"),
+            // Disallowed during optimizations.
+            TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Yield { .. } => bug!("{term:?} invalid"),
+            // Cannot reason about inline asm.
+            TerminatorKind::InlineAsm { .. } => return,
+            // `SwitchInt` is handled specially.
+            TerminatorKind::SwitchInt { .. } => return,
+            // We can recurse; nothing in particular to do.
+            TerminatorKind::Goto { .. } => None,
+            // Flood the overwritten place, and progress through.
+            TerminatorKind::Drop { place: destination, .. }
+            | TerminatorKind::Call { destination, .. } => Some(destination),
+            // Treat as an `assume(cond == expected)`.
+            TerminatorKind::Assert { ref cond, expected, .. } => {
+                if let Some(place) = cond.place()
+                    && let Some(conditions) = state.try_get(place.as_ref(), self.map)
+                {
+                    let expected = if expected { ScalarInt::TRUE } else { ScalarInt::FALSE };
+                    conditions.iter_matches(expected).for_each(register_opportunity);
+                }
+                None
+            }
+        };
+
+        // We can recurse through this terminator.
+        let mut state = state.clone();
+        if let Some(place_to_flood) = place_to_flood {
+            state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::default());
+        }
+        self.find_opportunity(bb, state, cost.clone(), depth + 1);
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_switch_int(
+        &mut self,
+        discr: &Operand<'tcx>,
+        targets: &SwitchTargets,
+        target_bb: BasicBlock,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        debug_assert_ne!(target_bb, START_BLOCK);
+        debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);
+
+        let discr = discr.place()?;
+        let discr_ty = discr.ty(self.body, self.tcx).ty;
+        let discr_layout = self.tcx.layout_of(self.param_env.and(discr_ty)).ok()?;
+        let conditions = state.try_get(discr.as_ref(), self.map)?;
+
+        if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);
+
+            // We are inside `target_bb`. Since we have a single predecessor, we know we passed
+            // through the `SwitchInt` before arriving here. Therefore, we know that
+            // `discr == value`. If one condition can be fulfilled by `discr == value`,
+            // that's an opportunity.
+            for c in conditions.iter_matches(value) {
+                debug!(?target_bb, ?c.target, "register");
+                self.opportunities.push(ThreadingOpportunity { chain: vec![], target: c.target });
+            }
+        } else if let Some((value, _, else_bb)) = targets.as_static_if()
+            && target_bb == else_bb
+        {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+
+            // We only know that `discr != value`. That's much weaker information than
+            // the equality we had in the previous arm. All we can conclude is that
+            // the replacement condition `discr != value` can be threaded, and nothing else.
+            for c in conditions.iter() {
+                if c.value == value && c.polarity == Polarity::Ne {
+                    debug!(?target_bb, ?c.target, "register");
+                    self.opportunities
+                        .push(ThreadingOpportunity { chain: vec![], target: c.target });
+                }
+            }
+        }
+
+        None
+    }
+}
+
+struct OpportunitySet {
+    opportunities: Vec<ThreadingOpportunity>,
+    /// For each bb, the TOs in which it appears. The pair corresponds to the index
+    /// in `opportunities` and the index in `ThreadingOpportunity::chain`.
+    involving_tos: IndexVec<BasicBlock, Vec<(usize, usize)>>,
+    /// Cache the number of predecessors for each block, as we clear the basic block cache.
+    predecessors: IndexVec<BasicBlock, usize>,
+}
+
+impl OpportunitySet {
+    fn new(body: &Body<'_>, opportunities: Vec<ThreadingOpportunity>) -> OpportunitySet {
+        let mut involving_tos = IndexVec::from_elem(Vec::new(), &body.basic_blocks);
+        for (index, to) in opportunities.iter().enumerate() {
+            for (ibb, &bb) in to.chain.iter().enumerate() {
+                involving_tos[bb].push((index, ibb));
+            }
+            involving_tos[to.target].push((index, to.chain.len()));
+        }
+        let predecessors = predecessor_count(body);
+        OpportunitySet { opportunities, involving_tos, predecessors }
+    }
+
+    /// Apply the opportunities on the graph.
+    fn apply(&mut self, body: &mut Body<'_>) {
+        for i in 0..self.opportunities.len() {
+            self.apply_once(i, body);
+        }
+    }
+
+    #[instrument(level = "trace", skip(self, body))]
+    fn apply_once(&mut self, index: usize, body: &mut Body<'_>) {
+        debug!(?self.predecessors);
+        debug!(?self.involving_tos);
+
+        // Check that `predecessors` satisfies its invariant.
+        debug_assert_eq!(self.predecessors, predecessor_count(body));
+
+        // Take the chain out of this TO so that the other TOs can be modified below.
+        let op = &mut self.opportunities[index];
+        debug!(?op);
+        let op_chain = std::mem::take(&mut op.chain);
+        let op_target = op.target;
+        debug_assert_eq!(op_chain.len(), op_chain.iter().collect::<FxHashSet<_>>().len());
+
+        let Some((current, chain)) = op_chain.split_first() else { return };
+        let basic_blocks = body.basic_blocks.as_mut();
+
+        // Invariant: the control-flow is well-formed at the end of each iteration.
+        let mut current = *current;
+        for &succ in chain {
+            debug!(?current, ?succ);
+
+            // `succ` must be a successor of `current`. If it is not, a previous TO erased this
+            // edge, so this TO is no longer satisfiable and we bail out.
+            if basic_blocks[current].terminator().successors().find(|s| *s == succ).is_none() {
+                debug!("impossible");
+                return;
+            }
+
+            // Fast path: `succ` is only used once, so we can reuse it directly.
+            if self.predecessors[succ] == 1 {
+                debug!("single");
+                current = succ;
+                continue;
+            }
+
+            let new_succ = basic_blocks.push(basic_blocks[succ].clone());
+            debug!(?new_succ);
+
+            // Replace `succ` by `new_succ` where it appears.
+            let mut num_edges = 0;
+            for s in basic_blocks[current].terminator_mut().successors_mut() {
+                if *s == succ {
+                    *s = new_succ;
+                    num_edges += 1;
+                }
+            }
+
+            // Update predecessors with the new block.
+            let _new_succ = self.predecessors.push(num_edges);
+            debug_assert_eq!(new_succ, _new_succ);
+            self.predecessors[succ] -= num_edges;
+            self.update_predecessor_count(basic_blocks[new_succ].terminator(), Update::Incr);
+
+            // Replace the `current -> succ` edge by `current -> new_succ` in all the following
+            // TOs. This is necessary to avoid trying to thread through a non-existing edge. We
+            // use `involving_tos` here to avoid traversing the full set of TOs on each iteration.
+            let mut new_involved = Vec::new();
+            for &(to_index, in_to_index) in &self.involving_tos[current] {
+                // That TO has already been applied, do nothing.
+                if to_index <= index {
+                    continue;
+                }
+
+                let other_to = &mut self.opportunities[to_index];
+                if other_to.chain.get(in_to_index) != Some(&current) {
+                    continue;
+                }
+                let s = other_to.chain.get_mut(in_to_index + 1).unwrap_or(&mut other_to.target);
+                if *s == succ {
+                    // `other_to` references the `current -> succ` edge, so replace `succ`.
+                    *s = new_succ;
+                    new_involved.push((to_index, in_to_index + 1));
+                }
+            }
+
+            // The TOs that we just updated now reference `new_succ`. Update `involving_tos`
+            // in case we need to duplicate an edge starting at `new_succ` later.
+            let _new_succ = self.involving_tos.push(new_involved);
+            debug_assert_eq!(new_succ, _new_succ);
+
+            current = new_succ;
+        }
+
+        let current = &mut basic_blocks[current];
+        self.update_predecessor_count(current.terminator(), Update::Decr);
+        current.terminator_mut().kind = TerminatorKind::Goto { target: op_target };
+        self.predecessors[op_target] += 1;
+    }
+
+    fn update_predecessor_count(&mut self, terminator: &Terminator<'_>, incr: Update) {
+        match incr {
+            Update::Incr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] += 1;
+                }
+            }
+            Update::Decr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] -= 1;
+                }
+            }
+        }
+    }
+}
+
+fn predecessor_count(body: &Body<'_>) -> IndexVec<BasicBlock, usize> {
+    let mut predecessors: IndexVec<_, _> =
+        body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect();
+    predecessors[START_BLOCK] += 1; // Account for the implicit entry edge.
+    predecessors
+}
+
+enum Update {
+    Incr,
+    Decr,
+}
+
+/// Compute the set of loop headers in the given body. We define a loop header as a block
+/// which has at least one predecessor that it dominates. This definition is only correct
+/// for reducible CFGs. But if the CFG is already irreducible, there is no point in trying
+/// much harder.
+fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> {
+    let mut loop_headers = BitSet::new_empty(body.basic_blocks.len());
+    let dominators = body.basic_blocks.dominators();
+    // Only visit reachable blocks.
+    for (bb, bbdata) in traversal::preorder(body) {
+        for succ in bbdata.terminator().successors() {
+            if dominators.dominates(succ, bb) {
+                loop_headers.insert(succ);
+            }
+        }
+    }
+    loop_headers
+}
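As an end-to-end illustration (an invented example, not part of this patch), the source-level pattern the pass targets looks like the function below; since `is_enabled` requires `mir_opt_level() >= 4`, the pass only runs under `-Zmir-opt-level=4` or higher.

    // Both arms give `x` a statically known value, so on each incoming edge the
    // later `x == 0` test is decidable and the join-then-switch can be threaded.
    fn example(c: bool) -> u32 {
        let x = if c { 0 } else { 1 };
        if x == 0 { 10 } else { 20 }
    }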
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 6c0aa51795b..9aaa54110bd 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -62,6 +62,7 @@ mod const_prop;
 mod const_prop_lint;
 mod copy_prop;
 mod coroutine;
+mod cost_checker;
 mod coverage;
 mod cross_crate_inline;
 mod ctfe_limit;
@@ -81,6 +82,7 @@ mod function_item_references;
 mod gvn;
 pub mod inline;
 mod instsimplify;
+mod jump_threading;
 mod large_enums;
 mod lower_intrinsics;
 mod lower_slice_len;
@@ -571,6 +573,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &dataflow_const_prop::DataflowConstProp,
             &const_debuginfo::ConstDebugInfo,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
+            &jump_threading::JumpThreading,
             &early_otherwise_branch::EarlyOtherwiseBranch,
             &simplify_comparison_integral::SimplifyComparisonIntegral,
             &dead_store_elimination::DeadStoreElimination,
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 26384974798..87fee2410ec 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -24,11 +24,8 @@ pub struct RemoveUninitDrops;
 impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env(body.source.def_id());
-        let Ok(move_data) = MoveData::gather_moves(body, tcx, param_env) else {
-            // We could continue if there are move errors, but there's not much point since our
-            // init data isn't complete.
-            return;
-        };
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
 
         let mdpe = MoveDataParamEnv { move_data, param_env };
         let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)