Diffstat (limited to 'compiler/rustc_mir_transform/src')
75 files changed, 1963 insertions, 1946 deletions
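Editor's note: one pattern repeats across nearly every file below — pass types lose their `pub` visibility in favor of `pub(super)`, and `impl MirPass` becomes `impl crate::MirPass` now that the trait lives in this crate rather than being imported. A minimal standalone sketch of that shape (the `Body` type and the trait's contents are simplified stand-ins, not rustc's actual definitions):

```rust
// Simplified stand-ins; not rustc's real types.
pub struct Body; // stands in for rustc_middle::mir::Body<'tcx>

// The trait is defined in (and private to) the transform crate,
// so implementations name it via its crate path.
pub(crate) trait MirPass<'tcx> {
    fn is_enabled(&self, _mir_opt_level: usize) -> bool {
        true
    }
    fn run_pass(&self, body: &mut Body);
}

mod passes {
    // `pub(super)`: visible to the pass manager in the parent module,
    // but no longer exported from the crate.
    pub(super) struct AbortUnwindingCalls;

    impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls {
        fn run_pass(&self, _body: &mut crate::Body) {
            // rewrite unwind edges in place (see the hunk below)
        }
    }
}

fn main() {}
```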
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs index edb6bc4fbea..8291df9be53 100644 --- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs +++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs @@ -20,9 +20,9 @@ use rustc_target::spec::PanicStrategy; /// This forces all unwinds, in panic=abort mode happening in foreign code, to /// trigger a process abort. #[derive(PartialEq)] -pub struct AbortUnwindingCalls; +pub(super) struct AbortUnwindingCalls; -impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls { +impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let def_id = body.source.def_id(); let kind = tcx.def_kind(def_id); @@ -50,9 +50,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls { // with a function call, and whose function we're calling may unwind. // This will filter to functions with `extern "C-unwind"` ABIs, for // example. - let mut calls_to_terminate = Vec::new(); - let mut cleanups_to_remove = Vec::new(); - for (id, block) in body.basic_blocks.iter_enumerated() { + for block in body.basic_blocks.as_mut() { if block.is_cleanup { continue; } @@ -61,7 +59,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls { let call_can_unwind = match &terminator.kind { TerminatorKind::Call { func, .. } => { - let ty = func.ty(body, tcx); + let ty = func.ty(&body.local_decls, tcx); let sig = ty.fn_sig(tcx); let fn_def_id = match ty.kind() { ty::FnPtr(..) => None, @@ -86,33 +84,22 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls { _ => continue, }; - // If this function call can't unwind, then there's no need for it - // to have a landing pad. This means that we can remove any cleanup - // registered for it. if !call_can_unwind { - cleanups_to_remove.push(id); - continue; - } - - // Otherwise if this function can unwind, then if the outer function - // can also unwind there's nothing to do. If the outer function - // can't unwind, however, we need to change the landing pad for this - // function call to one that aborts. - if !body_can_unwind { - calls_to_terminate.push(id); + // If this function call can't unwind, then there's no need for it + // to have a landing pad. This means that we can remove any cleanup + // registered for it. + let cleanup = block.terminator_mut().unwind_mut().unwrap(); + *cleanup = UnwindAction::Unreachable; + } else if !body_can_unwind { + // Otherwise if this function can unwind, then if the outer function + // can also unwind there's nothing to do. If the outer function + // can't unwind, however, we need to change the landing pad for this + // function call to one that aborts. + let cleanup = block.terminator_mut().unwind_mut().unwrap(); + *cleanup = UnwindAction::Terminate(UnwindTerminateReason::Abi); } } - for id in calls_to_terminate { - let cleanup = body.basic_blocks_mut()[id].terminator_mut().unwind_mut().unwrap(); - *cleanup = UnwindAction::Terminate(UnwindTerminateReason::Abi); - } - - for id in cleanups_to_remove { - let cleanup = body.basic_blocks_mut()[id].terminator_mut().unwind_mut().unwrap(); - *cleanup = UnwindAction::Unreachable; - } - // We may have invalidated some `cleanup` blocks so clean those up now. 
super::simplify::remove_dead_blocks(body); } diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index a47c8d94bba..24c955c0c78 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -1,13 +1,14 @@ use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::debug; #[derive(PartialEq)] -pub enum AddCallGuards { +pub(super) enum AddCallGuards { AllCallEdges, CriticalCallEdges, } -pub use self::AddCallGuards::*; +pub(super) use self::AddCallGuards::*; /** * Breaks outgoing critical edges for call terminators in the MIR. @@ -29,14 +30,8 @@ pub use self::AddCallGuards::*; * */ -impl<'tcx> MirPass<'tcx> for AddCallGuards { +impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - self.add_call_guards(body); - } -} - -impl AddCallGuards { - pub fn add_call_guards(&self, body: &mut Body<'_>) { let mut pred_count: IndexVec<_, _> = body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect(); pred_count[START_BLOCK] += 1; diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs index cd850e2d731..74df5f7479e 100644 --- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs +++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs @@ -1,6 +1,7 @@ use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::debug; use crate::util; @@ -34,40 +35,39 @@ use crate::util; /// /// The storage instructions are required to avoid stack space /// blowup. -pub struct AddMovesForPackedDrops; +pub(super) struct AddMovesForPackedDrops; -impl<'tcx> MirPass<'tcx> for AddMovesForPackedDrops { +impl<'tcx> crate::MirPass<'tcx> for AddMovesForPackedDrops { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!("add_moves_for_packed_drops({:?} @ {:?})", body.source, body.span); - add_moves_for_packed_drops(tcx, body); - } -} - -pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let patch = add_moves_for_packed_drops_patch(tcx, body); - patch.apply(body); -} -fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> MirPatch<'tcx> { - let def_id = body.source.def_id(); - let mut patch = MirPatch::new(body); - let param_env = tcx.param_env(def_id); + let def_id = body.source.def_id(); + let mut patch = MirPatch::new(body); + let param_env = tcx.param_env(def_id); - for (bb, data) in body.basic_blocks.iter_enumerated() { - let loc = Location { block: bb, statement_index: data.statements.len() }; - let terminator = data.terminator(); + for (bb, data) in body.basic_blocks.iter_enumerated() { + let loc = Location { block: bb, statement_index: data.statements.len() }; + let terminator = data.terminator(); - match terminator.kind { - TerminatorKind::Drop { place, .. } - if util::is_disaligned(tcx, body, param_env, place) => - { - add_move_for_packed_drop(tcx, body, &mut patch, terminator, loc, data.is_cleanup); + match terminator.kind { + TerminatorKind::Drop { place, .. 
} + if util::is_disaligned(tcx, body, param_env, place) => + { + add_move_for_packed_drop( + tcx, + body, + &mut patch, + terminator, + loc, + data.is_cleanup, + ); + } + _ => {} } - _ => {} } - } - patch + patch.apply(body); + } } fn add_move_for_packed_drop<'tcx>( @@ -85,7 +85,7 @@ fn add_move_for_packed_drop<'tcx>( let source_info = terminator.source_info; let ty = place.ty(body, tcx).ty; - let temp = patch.new_temp(ty, terminator.source_info.span); + let temp = patch.new_temp(ty, source_info.span); let storage_dead_block = patch.new_block(BasicBlockData { statements: vec![Statement { source_info, kind: StatementKind::StorageDead(temp) }], diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs index 12a68790374..8ad364ed055 100644 --- a/compiler/rustc_mir_transform/src/add_retag.rs +++ b/compiler/rustc_mir_transform/src/add_retag.rs @@ -8,7 +8,7 @@ use rustc_hir::LangItem; use rustc_middle::mir::*; use rustc_middle::ty::{self, Ty, TyCtxt}; -pub struct AddRetag; +pub(super) struct AddRetag; /// Determine whether this type may contain a reference (or box), and thus needs retagging. /// We will only recurse `depth` times into Tuples/ADTs to bound the cost of this. @@ -48,7 +48,7 @@ fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> b } } -impl<'tcx> MirPass<'tcx> for AddRetag { +impl<'tcx> crate::MirPass<'tcx> for AddRetag { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.opts.unstable_opts.mir_emit_retag } @@ -60,7 +60,9 @@ impl<'tcx> MirPass<'tcx> for AddRetag { let basic_blocks = body.basic_blocks.as_mut(); let local_decls = &body.local_decls; let needs_retag = |place: &Place<'tcx>| { - !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway + // We're not really interested in stores to "outside" locations, they are hard to keep + // track of anyway. + !place.is_indirect_first_projection() && may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx) && !local_decls[place.local].is_deref_temp() }; @@ -129,14 +131,16 @@ impl<'tcx> MirPass<'tcx> for AddRetag { StatementKind::Assign(box (ref place, ref rvalue)) => { let add_retag = match rvalue { // Ptr-creating operations already do their own internal retagging, no - // need to also add a retag statement. - // *Except* if we are deref'ing a Box, because those get desugared to directly working - // with the inner raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it + // need to also add a retag statement. *Except* if we are deref'ing a + // Box, because those get desugared to directly working with the inner + // raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it // a NOP when the original pointer is already raw. Rvalue::RawPtr(_mutbl, place) => { // Using `is_box_global` here is a bit sketchy: if this code is // generic over the allocator, we'll not add a retag! This is a hack // to make Stacked Borrows compatible with custom allocator code. + // It means the raw pointer inherits the tag of the box, which mostly works + // but can sometimes lead to unexpected aliasing errors. // Long-term, we'll want to move to an aliasing model where "cast to // raw pointer" is a complete NOP, and then this will no longer be // an issue. 
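Editor's note: the abort_unwinding_calls.rs hunk above replaces the collect-ids-then-patch dance (`calls_to_terminate` / `cleanups_to_remove` plus two follow-up loops) with a single mutable pass, since `basic_blocks.as_mut()` already grants the access needed to rewrite each unwind edge on the spot. A standalone sketch of the resulting control flow, using simplified stand-in types rather than rustc's:

```rust
// Simplified stand-ins for the real MIR types.
enum UnwindAction {
    Cleanup(usize),
    Unreachable,
    Terminate,
}

struct BasicBlock {
    is_cleanup: bool,
    call_can_unwind: bool,
    unwind: UnwindAction,
}

// One pass, rewriting each unwind edge in place instead of
// collecting block ids and patching them in two later loops.
fn abort_unwinding_calls(blocks: &mut [BasicBlock], body_can_unwind: bool) {
    for block in blocks {
        if block.is_cleanup {
            continue;
        }
        if !block.call_can_unwind {
            // The call cannot unwind, so it needs no landing pad;
            // any registered cleanup can be dropped.
            block.unwind = UnwindAction::Unreachable;
        } else if !body_can_unwind {
            // The call may unwind but the enclosing body must not:
            // point the landing pad at an abort instead.
            block.unwind = UnwindAction::Terminate;
        }
    }
}

fn main() {
    let mut blocks = vec![BasicBlock {
        is_cleanup: false,
        call_can_unwind: false,
        unwind: UnwindAction::Cleanup(1),
    }];
    abort_unwinding_calls(&mut blocks, false);
    assert!(matches!(blocks[0].unwind, UnwindAction::Unreachable));
}
```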
diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs index 04204c68f7b..e585e338613 100644 --- a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs +++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs @@ -1,15 +1,14 @@ -use rustc_index::IndexVec; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::visit::MutVisitor; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; -pub struct Subtyper; +pub(super) struct Subtyper; -pub struct SubTypeChecker<'a, 'tcx> { +struct SubTypeChecker<'a, 'tcx> { tcx: TyCtxt<'tcx>, patcher: MirPatch<'tcx>, - local_decls: &'a IndexVec<Local, LocalDecl<'tcx>>, + local_decls: &'a LocalDecls<'tcx>, } impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> { @@ -52,18 +51,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> { // // gets transformed to // let temp: rval_ty = rval; // let place: place_ty = temp as place_ty; -pub fn subtype_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let patch = MirPatch::new(body); - let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls }; - - for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { - checker.visit_basic_block_data(bb, data); - } - checker.patcher.apply(body); -} - -impl<'tcx> MirPass<'tcx> for Subtyper { +impl<'tcx> crate::MirPass<'tcx> for Subtyper { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - subtype_finder(tcx, body); + let patch = MirPatch::new(body); + let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls }; + + for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { + checker.visit_basic_block_data(bb, data); + } + checker.patcher.apply(body); } } diff --git a/compiler/rustc_mir_transform/src/check_alignment.rs b/compiler/rustc_mir_transform/src/check_alignment.rs index 5dfdcfc8b94..a9600f77c0b 100644 --- a/compiler/rustc_mir_transform/src/check_alignment.rs +++ b/compiler/rustc_mir_transform/src/check_alignment.rs @@ -5,10 +5,11 @@ use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceC use rustc_middle::mir::*; use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt}; use rustc_session::Session; +use tracing::{debug, trace}; -pub struct CheckAlignment; +pub(super) struct CheckAlignment; -impl<'tcx> MirPass<'tcx> for CheckAlignment { +impl<'tcx> crate::MirPass<'tcx> for CheckAlignment { fn is_enabled(&self, sess: &Session) -> bool { // FIXME(#112480) MSVC and rustc disagree on minimum stack alignment on x86 Windows if sess.target.llvm_target == "i686-pc-windows-msvc" { @@ -61,14 +62,14 @@ impl<'tcx> MirPass<'tcx> for CheckAlignment { } } -struct PointerFinder<'tcx, 'a> { +struct PointerFinder<'a, 'tcx> { tcx: TyCtxt<'tcx>, local_decls: &'a mut LocalDecls<'tcx>, param_env: ParamEnv<'tcx>, pointers: Vec<(Place<'tcx>, Ty<'tcx>)>, } -impl<'tcx, 'a> Visitor<'tcx> for PointerFinder<'tcx, 'a> { +impl<'a, 'tcx> Visitor<'tcx> for PointerFinder<'a, 'tcx> { fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) { // We want to only check reads and writes to Places, so we specifically exclude // Borrow and RawBorrow. 
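Editor's note: the add_subtyping_projections.rs hunk above inlines `subtype_finder` into `run_pass`, but keeps the two-phase shape shared by several passes here: a visitor records edits into a `MirPatch` while walking the body, and the patch is applied once afterwards so nothing is mutated mid-traversal. A standalone sketch of those mechanics under simplified stand-in types (this is not rustc's `MirPatch`/`MutVisitor` API):

```rust
// Simplified stand-ins, not rustc's MirPatch/MutVisitor.
struct Statement(String);
struct Body {
    statements: Vec<Statement>,
}

#[derive(Default)]
struct Patch {
    insertions: Vec<(usize, Statement)>, // (index, statement to insert before)
}

impl Patch {
    fn add_before(&mut self, index: usize, stmt: Statement) {
        self.insertions.push((index, stmt));
    }

    // Applied once, after the walk, so indices observed during the
    // visit stay valid until this point.
    fn apply(mut self, body: &mut Body) {
        self.insertions.sort_by_key(|&(i, _)| std::cmp::Reverse(i));
        for (i, stmt) in self.insertions {
            body.statements.insert(i, stmt);
        }
    }
}

fn run_pass(body: &mut Body) {
    let mut patch = Patch::default();
    for (i, stmt) in body.statements.iter().enumerate() {
        // Subtyper's real check is on assignment types; any predicate
        // serves to show the mechanics here.
        if stmt.0.contains("as place_ty") {
            patch.add_before(i, Statement("let temp: rval_ty = rval;".into()));
        }
    }
    patch.apply(body);
}

fn main() {
    let mut body = Body {
        statements: vec![Statement("place = temp as place_ty;".into())],
    };
    run_pass(&mut body);
    assert_eq!(body.statements.len(), 2);
}
```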
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs index 1f615c9d8d1..9c3cbbe1fc9 100644 --- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs +++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs @@ -6,11 +6,11 @@ use rustc_session::lint::builtin::CONST_ITEM_MUTATION; use rustc_span::def_id::DefId; use rustc_span::Span; -use crate::{errors, MirLint}; +use crate::errors; -pub struct CheckConstItemMutation; +pub(super) struct CheckConstItemMutation; -impl<'tcx> MirLint<'tcx> for CheckConstItemMutation { +impl<'tcx> crate::MirLint<'tcx> for CheckConstItemMutation { fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { let mut checker = ConstMutationChecker { body, tcx, target_local: None }; checker.visit_body(body); @@ -95,19 +95,19 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> { // Check for assignment to fields of a constant // Assigning directly to a constant (e.g. `FOO = true;`) is a hard error, // so emitting a lint would be redundant. - if !lhs.projection.is_empty() { - if let Some(def_id) = self.is_const_item_without_destructor(lhs.local) - && let Some((lint_root, span, item)) = - self.should_lint_const_item_usage(lhs, def_id, loc) - { - self.tcx.emit_node_span_lint( - CONST_ITEM_MUTATION, - lint_root, - span, - errors::ConstMutate::Modify { konst: item }, - ); - } + if !lhs.projection.is_empty() + && let Some(def_id) = self.is_const_item_without_destructor(lhs.local) + && let Some((lint_root, span, item)) = + self.should_lint_const_item_usage(lhs, def_id, loc) + { + self.tcx.emit_node_span_lint( + CONST_ITEM_MUTATION, + lint_root, + span, + errors::ConstMutate::Modify { konst: item }, + ); } + // We are looking for MIR of the form: // // ``` @@ -123,6 +123,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> { self.super_statement(stmt, loc); self.target_local = None; } + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, loc: Location) { if let Rvalue::Ref(_, BorrowKind::Mut { .. }, place) = rvalue { let local = place.local; diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs index 9902002580a..1922d4fef25 100644 --- a/compiler/rustc_mir_transform/src/check_packed_ref.rs +++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs @@ -3,11 +3,11 @@ use rustc_middle::mir::*; use rustc_middle::span_bug; use rustc_middle::ty::{self, TyCtxt}; -use crate::{errors, util, MirLint}; +use crate::{errors, util}; -pub struct CheckPackedRef; +pub(super) struct CheckPackedRef; -impl<'tcx> MirLint<'tcx> for CheckPackedRef { +impl<'tcx> crate::MirLint<'tcx> for CheckPackedRef { fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { let param_env = tcx.param_env(body.source.def_id()); let source_info = SourceInfo::outermost(body.span); @@ -37,24 +37,17 @@ impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> { } fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) { - if context.is_borrow() { - if util::is_disaligned(self.tcx, self.body, self.param_env, *place) { - let def_id = self.body.source.instance.def_id(); - if let Some(impl_def_id) = self.tcx.impl_of_method(def_id) - && self.tcx.is_builtin_derived(impl_def_id) - { - // If we ever reach here it means that the generated derive - // code is somehow doing an unaligned reference, which it - // shouldn't do. 
- span_bug!( - self.source_info.span, - "builtin derive created an unaligned reference" - ); - } else { - self.tcx - .dcx() - .emit_err(errors::UnalignedPackedRef { span: self.source_info.span }); - } + if context.is_borrow() && util::is_disaligned(self.tcx, self.body, self.param_env, *place) { + let def_id = self.body.source.instance.def_id(); + if let Some(impl_def_id) = self.tcx.impl_of_method(def_id) + && self.tcx.is_builtin_derived(impl_def_id) + { + // If we ever reach here it means that the generated derive + // code is somehow doing an unaligned reference, which it + // shouldn't do. + span_bug!(self.source_info.span, "builtin derive created an unaligned reference"); + } else { + self.tcx.dcx().emit_err(errors::UnalignedPackedRef { span: self.source_info.span }); } } } diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs index 08c9f9f08e6..97ce16c0661 100644 --- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs +++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs @@ -21,11 +21,9 @@ use rustc_middle::mir::{Body, BorrowKind, CastKind, Rvalue, StatementKind, Termi use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::TyCtxt; -use crate::MirPass; +pub(super) struct CleanupPostBorrowck; -pub struct CleanupPostBorrowck; - -impl<'tcx> MirPass<'tcx> for CleanupPostBorrowck { +impl<'tcx> crate::MirPass<'tcx> for CleanupPostBorrowck { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { for basic_block in body.basic_blocks.as_mut() { for statement in basic_block.statements.iter_mut() { diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs index c1f9313a377..b3db5669121 100644 --- a/compiler/rustc_mir_transform/src/copy_prop.rs +++ b/compiler/rustc_mir_transform/src/copy_prop.rs @@ -3,6 +3,7 @@ use rustc_index::IndexSlice; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::{debug, instrument}; use crate::ssa::SsaLocals; @@ -16,9 +17,9 @@ use crate::ssa::SsaLocals; /// where each of the locals is only assigned once. /// /// We want to replace all those locals by `_a`, either copied or moved. 
-pub struct CopyProp; +pub(super) struct CopyProp; -impl<'tcx> MirPass<'tcx> for CopyProp { +impl<'tcx> crate::MirPass<'tcx> for CopyProp { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 1 } @@ -26,37 +27,34 @@ impl<'tcx> MirPass<'tcx> for CopyProp { #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!(def_id = ?body.source.def_id()); - propagate_ssa(tcx, body); - } -} -fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); - let ssa = SsaLocals::new(tcx, body, param_env); + let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); + let ssa = SsaLocals::new(tcx, body, param_env); - let fully_moved = fully_moved_locals(&ssa, body); - debug!(?fully_moved); + let fully_moved = fully_moved_locals(&ssa, body); + debug!(?fully_moved); - let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size()); - for (local, &head) in ssa.copy_classes().iter_enumerated() { - if local != head { - storage_to_remove.insert(head); + let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size()); + for (local, &head) in ssa.copy_classes().iter_enumerated() { + if local != head { + storage_to_remove.insert(head); + } } - } - let any_replacement = ssa.copy_classes().iter_enumerated().any(|(l, &h)| l != h); + let any_replacement = ssa.copy_classes().iter_enumerated().any(|(l, &h)| l != h); - Replacer { - tcx, - copy_classes: ssa.copy_classes(), - fully_moved, - borrowed_locals: ssa.borrowed_locals(), - storage_to_remove, - } - .visit_body_preserves_cfg(body); + Replacer { + tcx, + copy_classes: ssa.copy_classes(), + fully_moved, + borrowed_locals: ssa.borrowed_locals(), + storage_to_remove, + } + .visit_body_preserves_cfg(body); - if any_replacement { - crate::simplify::remove_unused_definitions(body); + if any_replacement { + crate::simplify::remove_unused_definitions(body); + } } } @@ -139,7 +137,8 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> { fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) { if let Operand::Move(place) = *operand - // A move out of a projection of a copy is equivalent to a copy of the original projection. + // A move out of a projection of a copy is equivalent to a copy of the original + // projection. 
&& !place.is_indirect_first_projection() && !self.fully_moved.contains(place.local) { diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index 703339bf5bc..1fb74f5d82c 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -53,7 +53,7 @@ mod by_move_body; use std::{iter, ops}; -pub use by_move_body::ByMoveBody; +pub(super) use by_move_body::coroutine_by_move_body_def_id; use rustc_data_structures::fx::FxHashSet; use rustc_errors::pluralize; use rustc_hir as hir; @@ -63,7 +63,9 @@ use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet}; use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; -use rustc_middle::ty::{self, CoroutineArgs, CoroutineArgsExt, InstanceKind, Ty, TyCtxt}; +use rustc_middle::ty::{ + self, CoroutineArgs, CoroutineArgsExt, GenericArgsRef, InstanceKind, Ty, TyCtxt, +}; use rustc_middle::{bug, span_bug}; use rustc_mir_dataflow::impls::{ MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive, @@ -78,11 +80,12 @@ use rustc_target::spec::PanicStrategy; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; use rustc_trait_selection::infer::TyCtxtInferExt as _; use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode, ObligationCtxt}; +use tracing::{debug, instrument, trace}; use crate::deref_separator::deref_finder; use crate::{abort_unwinding_calls, errors, pass_manager as pm, simplify}; -pub struct StateTransform; +pub(super) struct StateTransform; struct RenameLocalVisitor<'tcx> { from: Local, @@ -112,47 +115,18 @@ impl<'tcx> MutVisitor<'tcx> for RenameLocalVisitor<'tcx> { } } -struct DerefArgVisitor<'tcx> { +struct SelfArgVisitor<'tcx> { tcx: TyCtxt<'tcx>, + new_base: Place<'tcx>, } -impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor<'tcx> { - fn tcx(&self) -> TyCtxt<'tcx> { - self.tcx +impl<'tcx> SelfArgVisitor<'tcx> { + fn new(tcx: TyCtxt<'tcx>, elem: ProjectionElem<Local, Ty<'tcx>>) -> Self { + Self { tcx, new_base: Place { local: SELF_ARG, projection: tcx.mk_place_elems(&[elem]) } } } - - fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) { - assert_ne!(*local, SELF_ARG); - } - - fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) { - if place.local == SELF_ARG { - replace_base( - place, - Place { - local: SELF_ARG, - projection: self.tcx().mk_place_elems(&[ProjectionElem::Deref]), - }, - self.tcx, - ); - } else { - self.visit_local(&mut place.local, context, location); - - for elem in place.projection.iter() { - if let PlaceElem::Index(local) = elem { - assert_ne!(local, SELF_ARG); - } - } - } - } -} - -struct PinArgVisitor<'tcx> { - ref_coroutine_ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, } -impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> { +impl<'tcx> MutVisitor<'tcx> for SelfArgVisitor<'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } @@ -163,17 +137,7 @@ impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> { fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) { if place.local == SELF_ARG { - replace_base( - place, - Place { - local: SELF_ARG, - projection: self.tcx().mk_place_elems(&[ProjectionElem::Field( - FieldIdx::ZERO, - self.ref_coroutine_ty, - )]), - }, - self.tcx, - ); + replace_base(place, self.new_base, self.tcx); } else { self.visit_local(&mut place.local, context, location); @@ -197,15 +161,6 @@ fn 
replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx const SELF_ARG: Local = Local::from_u32(1); -/// Coroutine has not been resumed yet. -const UNRESUMED: usize = CoroutineArgs::UNRESUMED; -/// Coroutine has returned / is completed. -const RETURNED: usize = CoroutineArgs::RETURNED; -/// Coroutine has panicked and is poisoned. -const POISONED: usize = CoroutineArgs::POISONED; -/// Number of reserved variants of coroutine state. -const RESERVED_VARIANTS: usize = CoroutineArgs::RESERVED_VARIANTS; - /// A `yield` point in the coroutine. struct SuspensionPoint<'tcx> { /// State discriminant used when suspending or resuming at this point. @@ -260,14 +215,10 @@ impl<'tcx> TransformVisitor<'tcx> { // `gen` continues return `None` CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => { let option_def_id = self.tcx.require_lang_item(LangItem::Option, None); - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - option_def_id, - VariantIdx::ZERO, - self.tcx.mk_args(&[self.old_yield_ty.into()]), - None, - None, - )), + make_aggregate_adt( + option_def_id, + VariantIdx::ZERO, + self.tcx.mk_args(&[self.old_yield_ty.into()]), IndexVec::new(), ) } @@ -316,64 +267,28 @@ impl<'tcx> TransformVisitor<'tcx> { is_return: bool, statements: &mut Vec<Statement<'tcx>>, ) { + const ZERO: VariantIdx = VariantIdx::ZERO; + const ONE: VariantIdx = VariantIdx::from_usize(1); let rvalue = match self.coroutine_kind { CoroutineKind::Desugared(CoroutineDesugaring::Async, _) => { let poll_def_id = self.tcx.require_lang_item(LangItem::Poll, None); let args = self.tcx.mk_args(&[self.old_ret_ty.into()]); - if is_return { - // Poll::Ready(val) - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - poll_def_id, - VariantIdx::ZERO, - args, - None, - None, - )), - IndexVec::from_raw(vec![val]), - ) + let (variant_idx, operands) = if is_return { + (ZERO, IndexVec::from_raw(vec![val])) // Poll::Ready(val) } else { - // Poll::Pending - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - poll_def_id, - VariantIdx::from_usize(1), - args, - None, - None, - )), - IndexVec::new(), - ) - } + (ONE, IndexVec::new()) // Poll::Pending + }; + make_aggregate_adt(poll_def_id, variant_idx, args, operands) } CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => { let option_def_id = self.tcx.require_lang_item(LangItem::Option, None); let args = self.tcx.mk_args(&[self.old_yield_ty.into()]); - if is_return { - // None - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - option_def_id, - VariantIdx::ZERO, - args, - None, - None, - )), - IndexVec::new(), - ) + let (variant_idx, operands) = if is_return { + (ZERO, IndexVec::new()) // None } else { - // Some(val) - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - option_def_id, - VariantIdx::from_usize(1), - args, - None, - None, - )), - IndexVec::from_raw(vec![val]), - ) - } + (ONE, IndexVec::from_raw(vec![val])) // Some(val) + }; + make_aggregate_adt(option_def_id, variant_idx, args, operands) } CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _) => { if is_return { @@ -399,31 +314,17 @@ impl<'tcx> TransformVisitor<'tcx> { let coroutine_state_def_id = self.tcx.require_lang_item(LangItem::CoroutineState, None); let args = self.tcx.mk_args(&[self.old_yield_ty.into(), self.old_ret_ty.into()]); - if is_return { - // CoroutineState::Complete(val) - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - coroutine_state_def_id, - VariantIdx::from_usize(1), - args, - None, - None, - )), - IndexVec::from_raw(vec![val]), - ) + let variant_idx = if is_return { + ONE // 
CoroutineState::Complete(val) } else { - // CoroutineState::Yielded(val) - Rvalue::Aggregate( - Box::new(AggregateKind::Adt( - coroutine_state_def_id, - VariantIdx::ZERO, - args, - None, - None, - )), - IndexVec::from_raw(vec![val]), - ) - } + ZERO // CoroutineState::Yielded(val) + }; + make_aggregate_adt( + coroutine_state_def_id, + variant_idx, + args, + IndexVec::from_raw(vec![val]), + ) } }; @@ -516,7 +417,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> { self.make_state(v, source_info, is_return, &mut data.statements); let state = if let Some((resume, mut resume_arg)) = resume { // Yield - let state = RESERVED_VARIANTS + self.suspension_points.len(); + let state = CoroutineArgs::RESERVED_VARIANTS + self.suspension_points.len(); // The resume arg target location might itself be remapped if its base local is // live across a yield. @@ -549,7 +450,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> { VariantIdx::new(state) } else { // Return - VariantIdx::new(RETURNED) // state for returned + VariantIdx::new(CoroutineArgs::RETURNED) // state for returned }; data.statements.push(self.set_discr(state, source_info)); data.terminator_mut().kind = TerminatorKind::Return; @@ -559,6 +460,15 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> { } } +fn make_aggregate_adt<'tcx>( + def_id: DefId, + variant_idx: VariantIdx, + args: GenericArgsRef<'tcx>, + operands: IndexVec<FieldIdx, Operand<'tcx>>, +) -> Rvalue<'tcx> { + Rvalue::Aggregate(Box::new(AggregateKind::Adt(def_id, variant_idx, args, None, None)), operands) +} + fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let coroutine_ty = body.local_decls.raw[1].ty; @@ -568,7 +478,7 @@ fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Bo body.local_decls.raw[1].ty = ref_coroutine_ty; // Add a deref to accesses of the coroutine state - DerefArgVisitor { tcx }.visit_body(body); + SelfArgVisitor::new(tcx, ProjectionElem::Deref).visit_body(body); } fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { @@ -583,7 +493,8 @@ fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body body.local_decls.raw[1].ty = pin_ref_coroutine_ty; // Add the Pin field access to accesses of the coroutine state - PinArgVisitor { ref_coroutine_ty, tcx }.visit_body(body); + SelfArgVisitor::new(tcx, ProjectionElem::Field(FieldIdx::ZERO, ref_coroutine_ty)) + .visit_body(body); } /// Allocates a new local and replaces all references of `local` with it. Returns the new local. @@ -650,8 +561,6 @@ fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let local = eliminate_get_context_call(&mut body[bb]); replace_resume_ty_local(tcx, body, local, context_mut_ref); } - } else { - continue; } } TerminatorKind::Yield { resume_arg, .. } => { @@ -664,24 +573,23 @@ fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn eliminate_get_context_call<'tcx>(bb_data: &mut BasicBlockData<'tcx>) -> Local { let terminator = bb_data.terminator.take().unwrap(); - if let TerminatorKind::Call { args, destination, target, .. 
} = terminator.kind { - let [arg] = *Box::try_from(args).unwrap(); - let local = arg.node.place().unwrap().local; - - let arg = Rvalue::Use(arg.node); - let assign = Statement { - source_info: terminator.source_info, - kind: StatementKind::Assign(Box::new((destination, arg))), - }; - bb_data.statements.push(assign); - bb_data.terminator = Some(Terminator { - source_info: terminator.source_info, - kind: TerminatorKind::Goto { target: target.unwrap() }, - }); - local - } else { + let TerminatorKind::Call { args, destination, target, .. } = terminator.kind else { bug!(); - } + }; + let [arg] = *Box::try_from(args).unwrap(); + let local = arg.node.place().unwrap().local; + + let arg = Rvalue::Use(arg.node); + let assign = Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new((destination, arg))), + }; + bb_data.statements.push(assign); + bb_data.terminator = Some(Terminator { + source_info: terminator.source_info, + kind: TerminatorKind::Goto { target: target.unwrap() }, + }); + local } #[cfg_attr(not(debug_assertions), allow(unused))] @@ -964,9 +872,9 @@ fn compute_storage_conflicts<'mir, 'tcx>( storage_conflicts } -struct StorageConflictVisitor<'mir, 'tcx, 's> { - body: &'mir Body<'tcx>, - saved_locals: &'s CoroutineSavedLocals, +struct StorageConflictVisitor<'a, 'tcx> { + body: &'a Body<'tcx>, + saved_locals: &'a CoroutineSavedLocals, // FIXME(tmandry): Consider using sparse bitsets here once we have good // benchmarks for coroutines. local_conflicts: BitMatrix<Local, Local>, @@ -974,16 +882,16 @@ struct StorageConflictVisitor<'mir, 'tcx, 's> { eligible_storage_live: BitSet<Local>, } -impl<'mir, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx, R> - for StorageConflictVisitor<'mir, 'tcx, '_> +impl<'a, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'a, 'tcx, R> + for StorageConflictVisitor<'a, 'tcx> { - type FlowState = BitSet<Local>; + type Domain = BitSet<Local>; fn visit_statement_before_primary_effect( &mut self, _results: &mut R, - state: &Self::FlowState, - _statement: &'mir Statement<'tcx>, + state: &Self::Domain, + _statement: &'a Statement<'tcx>, loc: Location, ) { self.apply_state(state, loc); @@ -992,22 +900,22 @@ impl<'mir, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx, R> fn visit_terminator_before_primary_effect( &mut self, _results: &mut R, - state: &Self::FlowState, - _terminator: &'mir Terminator<'tcx>, + state: &Self::Domain, + _terminator: &'a Terminator<'tcx>, loc: Location, ) { self.apply_state(state, loc); } } -impl StorageConflictVisitor<'_, '_, '_> { - fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) { +impl StorageConflictVisitor<'_, '_> { + fn apply_state(&mut self, state: &BitSet<Local>, loc: Location) { // Ignore unreachable blocks. if let TerminatorKind::Unreachable = self.body.basic_blocks[loc.block].terminator().kind { return; } - self.eligible_storage_live.clone_from(flow_state); + self.eligible_storage_live.clone_from(state); self.eligible_storage_live.intersect(&**self.saved_locals); for local in self.eligible_storage_live.iter() { @@ -1084,10 +992,11 @@ fn compute_layout<'tcx>( // Build the coroutine variant field list. // Create a map from local indices to coroutine struct indices. 
let mut variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, CoroutineSavedLocal>> = - iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect(); + iter::repeat(IndexVec::new()).take(CoroutineArgs::RESERVED_VARIANTS).collect(); let mut remap = IndexVec::from_elem_n(None, saved_locals.domain_size()); for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() { - let variant_index = VariantIdx::from(RESERVED_VARIANTS + suspension_point_idx); + let variant_index = + VariantIdx::from(CoroutineArgs::RESERVED_VARIANTS + suspension_point_idx); let mut fields = IndexVec::new(); for (idx, saved_local) in live_locals.iter().enumerate() { fields.push(saved_local); @@ -1182,12 +1091,10 @@ fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { source_info, kind: TerminatorKind::Drop { place, target, unwind, replace: _ }, } => { - if let Some(local) = place.as_local() { - if local == SELF_ARG { - (target, unwind, source_info) - } else { - continue; - } + if let Some(local) = place.as_local() + && local == SELF_ARG + { + (target, unwind, source_info) } else { continue; } @@ -1236,7 +1143,7 @@ fn create_coroutine_drop_shim<'tcx>( let mut cases = create_cases(&mut body, transform, Operation::Drop); - cases.insert(0, (UNRESUMED, drop_clean)); + cases.insert(0, (CoroutineArgs::UNRESUMED, drop_clean)); // The returned state and the poisoned state fall through to the default // case which is just to return @@ -1292,7 +1199,7 @@ fn insert_panic_block<'tcx>( message: AssertMessage<'tcx>, ) -> BasicBlock { let assert_block = BasicBlock::new(body.basic_blocks.len()); - let term = TerminatorKind::Assert { + let kind = TerminatorKind::Assert { cond: Operand::Constant(Box::new(ConstOperand { span: body.span, user_ty: None, @@ -1304,14 +1211,7 @@ fn insert_panic_block<'tcx>( unwind: UnwindAction::Continue, }; - let source_info = SourceInfo::outermost(body.span); - body.basic_blocks_mut().push(BasicBlockData { - statements: Vec::new(), - terminator: Some(Terminator { source_info, kind: term }), - is_cleanup: false, - }); - - assert_block + insert_term_block(body, kind) } fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { @@ -1386,7 +1286,9 @@ fn create_coroutine_resume_function<'tcx>( if can_unwind { let source_info = SourceInfo::outermost(body.span); let poison_block = body.basic_blocks_mut().push(BasicBlockData { - statements: vec![transform.set_discr(VariantIdx::new(POISONED), source_info)], + statements: vec![ + transform.set_discr(VariantIdx::new(CoroutineArgs::POISONED), source_info), + ], terminator: Some(Terminator { source_info, kind: TerminatorKind::UnwindResume }), is_cleanup: true, }); @@ -1418,13 +1320,16 @@ fn create_coroutine_resume_function<'tcx>( use rustc_middle::mir::AssertKind::{ResumedAfterPanic, ResumedAfterReturn}; // Jump to the entry point on the unresumed - cases.insert(0, (UNRESUMED, START_BLOCK)); + cases.insert(0, (CoroutineArgs::UNRESUMED, START_BLOCK)); // Panic when resumed on the returned or poisoned state if can_unwind { cases.insert( 1, - (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(transform.coroutine_kind))), + ( + CoroutineArgs::POISONED, + insert_panic_block(tcx, body, ResumedAfterPanic(transform.coroutine_kind)), + ), ); } @@ -1439,7 +1344,7 @@ fn create_coroutine_resume_function<'tcx>( transform.insert_none_ret_block(body) } }; - cases.insert(1, (RETURNED, block)); + cases.insert(1, (CoroutineArgs::RETURNED, block)); } insert_switch(body, cases, 
&transform, TerminatorKind::Unreachable); @@ -1623,7 +1528,7 @@ fn check_field_tys_sized<'tcx>( } } -impl<'tcx> MirPass<'tcx> for StateTransform { +impl<'tcx> crate::MirPass<'tcx> for StateTransform { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let Some(old_yield_ty) = body.yield_ty() else { // This only applies to coroutines diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs index 7ea36f08232..6476f3c6238 100644 --- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs +++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs @@ -69,161 +69,175 @@ //! in case precise captures (edition 2021 closure capture rules) caused the inner coroutine //! to split one field capture into two. +use rustc_data_structures::steal::Steal; use rustc_data_structures::unord::UnordMap; use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_middle::bug; use rustc_middle::hir::place::{Projection, ProjectionKind}; use rustc_middle::mir::visit::MutVisitor; -use rustc_middle::mir::{self, dump_mir, MirPass}; +use rustc_middle::mir::{self, dump_mir}; use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt, TypeVisitableExt}; +use rustc_span::symbol::kw; use rustc_target::abi::{FieldIdx, VariantIdx}; -use crate::pass_manager::validate_body; +pub(crate) fn coroutine_by_move_body_def_id<'tcx>( + tcx: TyCtxt<'tcx>, + coroutine_def_id: LocalDefId, +) -> DefId { + let body = tcx.mir_built(coroutine_def_id).borrow(); -pub struct ByMoveBody; + // If the typeck results are tainted, no need to make a by-ref body. + if body.tainted_by_errors.is_some() { + return coroutine_def_id.to_def_id(); + } -impl<'tcx> MirPass<'tcx> for ByMoveBody { - fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) { - // We only need to generate by-move coroutine bodies for coroutines that come - // from coroutine-closures. - let Some(coroutine_def_id) = body.source.def_id().as_local() else { - return; - }; - let Some(hir::CoroutineKind::Desugared(_, hir::CoroutineSource::Closure)) = - tcx.coroutine_kind(coroutine_def_id) - else { - return; - }; + let Some(hir::CoroutineKind::Desugared(_, hir::CoroutineSource::Closure)) = + tcx.coroutine_kind(coroutine_def_id) + else { + bug!("should only be invoked on coroutine-closures"); + }; - // Also, let's skip processing any bodies with errors, since there's no guarantee - // the MIR body will be constructed well. - let coroutine_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty; - if coroutine_ty.references_error() { - return; - } + // Also, let's skip processing any bodies with errors, since there's no guarantee + // the MIR body will be constructed well. + let coroutine_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty; - // We don't need to generate a by-move coroutine if the coroutine body was - // produced by the `CoroutineKindShim`, since it's already by-move. - if matches!(body.source.instance, ty::InstanceKind::CoroutineKindShim { .. 
}) { - return; - } + let ty::Coroutine(_, args) = *coroutine_ty.kind() else { + bug!("tried to create by-move body of non-coroutine receiver"); + }; + let args = args.as_coroutine(); - let ty::Coroutine(_, args) = *coroutine_ty.kind() else { bug!("{body:#?}") }; - let args = args.as_coroutine(); + let coroutine_kind = args.kind_ty().to_opt_closure_kind().unwrap(); - let coroutine_kind = args.kind_ty().to_opt_closure_kind().unwrap(); + let parent_def_id = tcx.local_parent(coroutine_def_id); + let ty::CoroutineClosure(_, parent_args) = + *tcx.type_of(parent_def_id).instantiate_identity().kind() + else { + bug!("coroutine's parent was not a coroutine-closure"); + }; + if parent_args.references_error() { + return coroutine_def_id.to_def_id(); + } - let parent_def_id = tcx.local_parent(coroutine_def_id); - let ty::CoroutineClosure(_, parent_args) = - *tcx.type_of(parent_def_id).instantiate_identity().kind() - else { - bug!(); - }; - let parent_closure_args = parent_args.as_coroutine_closure(); - let num_args = parent_closure_args - .coroutine_closure_sig() - .skip_binder() - .tupled_inputs_ty - .tuple_fields() - .len(); + let parent_closure_args = parent_args.as_coroutine_closure(); + let num_args = parent_closure_args + .coroutine_closure_sig() + .skip_binder() + .tupled_inputs_ty + .tuple_fields() + .len(); - let field_remapping: UnordMap<_, _> = ty::analyze_coroutine_closure_captures( - tcx.closure_captures(parent_def_id).iter().copied(), - tcx.closure_captures(coroutine_def_id).iter().skip(num_args).copied(), - |(parent_field_idx, parent_capture), (child_field_idx, child_capture)| { - // Store this set of additional projections (fields and derefs). - // We need to re-apply them later. - let mut child_precise_captures = child_capture.place.projections - [parent_capture.place.projections.len()..] - .to_vec(); + let field_remapping: UnordMap<_, _> = ty::analyze_coroutine_closure_captures( + tcx.closure_captures(parent_def_id).iter().copied(), + tcx.closure_captures(coroutine_def_id).iter().skip(num_args).copied(), + |(parent_field_idx, parent_capture), (child_field_idx, child_capture)| { + // Store this set of additional projections (fields and derefs). + // We need to re-apply them later. + let mut child_precise_captures = + child_capture.place.projections[parent_capture.place.projections.len()..].to_vec(); - // If the parent capture is by-ref, then we need to apply an additional - // deref before applying any further projections to this place. - if parent_capture.is_by_ref() { - child_precise_captures.insert( - 0, - Projection { ty: parent_capture.place.ty(), kind: ProjectionKind::Deref }, - ); - } - // If the child capture is by-ref, then we need to apply a "ref" - // projection (i.e. `&`) at the end. But wait! We don't have that - // as a projection kind. So instead, we can apply its dual and - // *peel* a deref off of the place when it shows up in the MIR body. - // Luckily, by construction this is always possible. - let peel_deref = if child_capture.is_by_ref() { - assert!( - parent_capture.is_by_ref() || coroutine_kind != ty::ClosureKind::FnOnce, - "`FnOnce` coroutine-closures return coroutines that capture from \ + // If the parent capture is by-ref, then we need to apply an additional + // deref before applying any further projections to this place. 
+ if parent_capture.is_by_ref() { + child_precise_captures.insert( + 0, + Projection { ty: parent_capture.place.ty(), kind: ProjectionKind::Deref }, + ); + } + // If the child capture is by-ref, then we need to apply a "ref" + // projection (i.e. `&`) at the end. But wait! We don't have that + // as a projection kind. So instead, we can apply its dual and + // *peel* a deref off of the place when it shows up in the MIR body. + // Luckily, by construction this is always possible. + let peel_deref = if child_capture.is_by_ref() { + assert!( + parent_capture.is_by_ref() || coroutine_kind != ty::ClosureKind::FnOnce, + "`FnOnce` coroutine-closures return coroutines that capture from \ their body; it will always result in a borrowck error!" - ); - true - } else { - false - }; + ); + true + } else { + false + }; - // Regarding the behavior above, you may think that it's redundant to both - // insert a deref and then peel a deref if the parent and child are both - // captured by-ref. This would be correct, except for the case where we have - // precise capturing projections, since the inserted deref is to the *beginning* - // and the peeled deref is at the *end*. I cannot seem to actually find a - // case where this happens, though, but let's keep this code flexible. + // Regarding the behavior above, you may think that it's redundant to both + // insert a deref and then peel a deref if the parent and child are both + // captured by-ref. This would be correct, except for the case where we have + // precise capturing projections, since the inserted deref is to the *beginning* + // and the peeled deref is at the *end*. I cannot seem to actually find a + // case where this happens, though, but let's keep this code flexible. - // Finally, store the type of the parent's captured place. We need - // this when building the field projection in the MIR body later on. - let mut parent_capture_ty = parent_capture.place.ty(); - parent_capture_ty = match parent_capture.info.capture_kind { - ty::UpvarCapture::ByValue => parent_capture_ty, - ty::UpvarCapture::ByRef(kind) => Ty::new_ref( - tcx, - tcx.lifetimes.re_erased, - parent_capture_ty, - kind.to_mutbl_lossy(), - ), - }; + // Finally, store the type of the parent's captured place. We need + // this when building the field projection in the MIR body later on. 
+ let mut parent_capture_ty = parent_capture.place.ty(); + parent_capture_ty = match parent_capture.info.capture_kind { + ty::UpvarCapture::ByValue => parent_capture_ty, + ty::UpvarCapture::ByRef(kind) => Ty::new_ref( + tcx, + tcx.lifetimes.re_erased, + parent_capture_ty, + kind.to_mutbl_lossy(), + ), + }; + ( + FieldIdx::from_usize(child_field_idx + num_args), ( - FieldIdx::from_usize(child_field_idx + num_args), - ( - FieldIdx::from_usize(parent_field_idx + num_args), - parent_capture_ty, - peel_deref, - child_precise_captures, - ), - ) - }, - ) - .collect(); + FieldIdx::from_usize(parent_field_idx + num_args), + parent_capture_ty, + peel_deref, + child_precise_captures, + ), + ) + }, + ) + .collect(); - if coroutine_kind == ty::ClosureKind::FnOnce { - assert_eq!(field_remapping.len(), tcx.closure_captures(parent_def_id).len()); - return; - } + if coroutine_kind == ty::ClosureKind::FnOnce { + assert_eq!(field_remapping.len(), tcx.closure_captures(parent_def_id).len()); + // The by-move body is just the body :) + return coroutine_def_id.to_def_id(); + } - let by_move_coroutine_ty = tcx - .instantiate_bound_regions_with_erased(parent_closure_args.coroutine_closure_sig()) - .to_coroutine_given_kind_and_upvars( - tcx, - parent_closure_args.parent_args(), - coroutine_def_id.to_def_id(), - ty::ClosureKind::FnOnce, - tcx.lifetimes.re_erased, - parent_closure_args.tupled_upvars_ty(), - parent_closure_args.coroutine_captures_by_ref_ty(), - ); + let by_move_coroutine_ty = tcx + .instantiate_bound_regions_with_erased(parent_closure_args.coroutine_closure_sig()) + .to_coroutine_given_kind_and_upvars( + tcx, + parent_closure_args.parent_args(), + coroutine_def_id.to_def_id(), + ty::ClosureKind::FnOnce, + tcx.lifetimes.re_erased, + parent_closure_args.tupled_upvars_ty(), + parent_closure_args.coroutine_captures_by_ref_ty(), + ); - let mut by_move_body = body.clone(); - MakeByMoveBody { tcx, field_remapping, by_move_coroutine_ty }.visit_body(&mut by_move_body); - dump_mir(tcx, false, "coroutine_by_move", &0, &by_move_body, |_, _| Ok(())); + let mut by_move_body = body.clone(); + MakeByMoveBody { tcx, field_remapping, by_move_coroutine_ty }.visit_body(&mut by_move_body); - // Let's just always validate this body. - validate_body(tcx, &mut by_move_body, "Initial coroutine_by_move body".to_string()); + // This will always be `{closure#1}`, since the original coroutine is `{closure#0}`. + let body_def = tcx.create_def(parent_def_id, kw::Empty, DefKind::SyntheticCoroutineBody); + by_move_body.source = + mir::MirSource::from_instance(InstanceKind::Item(body_def.def_id().to_def_id())); + dump_mir(tcx, false, "built", &"after", &by_move_body, |_, _| Ok(())); - // FIXME: use query feeding to generate the body right here and then only store the `DefId` of the new body. - by_move_body.source = mir::MirSource::from_instance(InstanceKind::CoroutineKindShim { - coroutine_def_id: coroutine_def_id.to_def_id(), - }); - body.coroutine.as_mut().unwrap().by_move_body = Some(by_move_body); - } + // Inherited from the by-ref coroutine. 
+ body_def.codegen_fn_attrs(tcx.codegen_fn_attrs(coroutine_def_id).clone()); + body_def.constness(tcx.constness(coroutine_def_id).clone()); + body_def.coroutine_kind(tcx.coroutine_kind(coroutine_def_id).clone()); + body_def.def_ident_span(tcx.def_ident_span(coroutine_def_id)); + body_def.def_span(tcx.def_span(coroutine_def_id)); + body_def.explicit_predicates_of(tcx.explicit_predicates_of(coroutine_def_id).clone()); + body_def.generics_of(tcx.generics_of(coroutine_def_id).clone()); + body_def.param_env(tcx.param_env(coroutine_def_id).clone()); + body_def.predicates_of(tcx.predicates_of(coroutine_def_id).clone()); + + // The type of the coroutine is the `by_move_coroutine_ty`. + body_def.type_of(ty::EarlyBinder::bind(by_move_coroutine_ty)); + + body_def.mir_built(tcx.arena.alloc(Steal::new(by_move_body))); + + body_def.def_id().to_def_id() } struct MakeByMoveBody<'tcx> { diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs index a1c1422912e..59b403538a3 100644 --- a/compiler/rustc_mir_transform/src/cost_checker.rs +++ b/compiler/rustc_mir_transform/src/cost_checker.rs @@ -12,7 +12,7 @@ const CONST_SWITCH_BONUS: usize = 10; /// Verify that the callee body is compatible with the caller. #[derive(Clone)] -pub(crate) struct CostChecker<'b, 'tcx> { +pub(super) struct CostChecker<'b, 'tcx> { tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, penalty: usize, @@ -22,7 +22,7 @@ pub(crate) struct CostChecker<'b, 'tcx> { } impl<'b, 'tcx> CostChecker<'b, 'tcx> { - pub fn new( + pub(super) fn new( tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, instance: Option<ty::Instance<'tcx>>, @@ -36,7 +36,7 @@ impl<'b, 'tcx> CostChecker<'b, 'tcx> { /// Needed because the `CostChecker` is used sometimes for just blocks, /// and even the full `Inline` doesn't call `visit_body`, so there's nowhere /// to put this logic in the visitor. - pub fn add_function_level_costs(&mut self) { + pub(super) fn add_function_level_costs(&mut self) { fn is_call_like(bbd: &BasicBlockData<'_>) -> bool { use TerminatorKind::*; match bbd.terminator().kind { @@ -64,7 +64,7 @@ impl<'b, 'tcx> CostChecker<'b, 'tcx> { } } - pub fn cost(&self) -> usize { + pub(super) fn cost(&self) -> usize { usize::saturating_sub(self.penalty, self.bonus) } diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index a8b0f4a8d6d..ef4031c5c03 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -6,6 +6,7 @@ use rustc_data_structures::graph::DirectedGraph; use rustc_index::IndexVec; use rustc_middle::bug; use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId, Op}; +use tracing::{debug, debug_span, instrument}; use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, TraverseCoverageGraphWithLoops}; @@ -73,12 +74,11 @@ pub(super) struct CoverageCounters { } impl CoverageCounters { - /// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or - /// indirectly associated with coverage spans, and accumulates additional `Expression`s - /// representing intermediate values. + /// Ensures that each BCB node needing a counter has one, by creating physical + /// counters or counter expressions for nodes and edges as required. 
pub(super) fn make_bcb_counters( basic_coverage_blocks: &CoverageGraph, - bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool, + bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool, ) -> Self { let num_bcbs = basic_coverage_blocks.num_nodes(); @@ -90,17 +90,38 @@ impl CoverageCounters { expressions_memo: FxHashMap::default(), }; - MakeBcbCounters::new(&mut this, basic_coverage_blocks) - .make_bcb_counters(bcb_has_coverage_spans); + MakeBcbCounters::new(&mut this, basic_coverage_blocks).make_bcb_counters(bcb_needs_counter); this } - fn make_counter(&mut self, site: CounterIncrementSite) -> BcbCounter { + /// Shared helper used by [`Self::make_phys_node_counter`] and + /// [`Self::make_phys_edge_counter`]. Don't call this directly. + fn make_counter_inner(&mut self, site: CounterIncrementSite) -> BcbCounter { let id = self.counter_increment_sites.push(site); BcbCounter::Counter { id } } + /// Creates a new physical counter attached a BCB node. + /// The node must not already have a counter. + fn make_phys_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { + let counter = self.make_counter_inner(CounterIncrementSite::Node { bcb }); + debug!(?bcb, ?counter, "node gets a physical counter"); + self.set_bcb_counter(bcb, counter) + } + + /// Creates a new physical counter attached to a BCB edge. + /// The edge must not already have a counter. + fn make_phys_edge_counter( + &mut self, + from_bcb: BasicCoverageBlock, + to_bcb: BasicCoverageBlock, + ) -> BcbCounter { + let counter = self.make_counter_inner(CounterIncrementSite::Edge { from_bcb, to_bcb }); + debug!(?from_bcb, ?to_bcb, ?counter, "edge gets a physical counter"); + self.set_bcb_edge_counter(from_bcb, to_bcb, counter) + } + fn make_expression(&mut self, lhs: BcbCounter, op: Op, rhs: BcbCounter) -> BcbCounter { let new_expr = BcbExpression { lhs, op, rhs }; *self @@ -156,12 +177,14 @@ impl CoverageCounters { BcbCounter::Expression { id } } - /// Variant of `make_expression` that makes `lhs` optional and assumes [`Op::Add`]. + /// Creates a counter that is the sum of the given counters. /// - /// This is useful when using [`Iterator::fold`] to build an arbitrary-length sum. - fn make_sum_expression(&mut self, lhs: Option<BcbCounter>, rhs: BcbCounter) -> BcbCounter { - let Some(lhs) = lhs else { return rhs }; - self.make_expression(lhs, Op::Add, rhs) + /// Returns `None` if the given list of counters was empty. + fn make_sum(&mut self, counters: &[BcbCounter]) -> Option<BcbCounter> { + counters + .iter() + .copied() + .reduce(|accum, counter| self.make_expression(accum, Op::Add, counter)) } pub(super) fn num_counters(&self) -> usize { @@ -240,10 +263,7 @@ impl CoverageCounters { } } -/// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be -/// injected with coverage spans. `Expressions` have no runtime overhead, so if a viable expression -/// (adding or subtracting two other counters or expressions) can compute the same result as an -/// embedded counter, an `Expression` should be used. +/// Helper struct that allows counter creation to inspect the BCB graph. struct MakeBcbCounters<'a> { coverage_counters: &'a mut CoverageCounters, basic_coverage_blocks: &'a CoverageGraph, @@ -257,36 +277,21 @@ impl<'a> MakeBcbCounters<'a> { Self { coverage_counters, basic_coverage_blocks } } - /// If two `BasicCoverageBlock`s branch from another `BasicCoverageBlock`, one of the branches - /// can be counted by `Expression` by subtracting the other branch from the branching - /// block. 
Otherwise, the `BasicCoverageBlock` executed the least should have the `Counter`. - /// One way to predict which branch executes the least is by considering loops. A loop is exited - /// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost - /// always executed less than the branch that does not exit the loop. - fn make_bcb_counters(&mut self, bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool) { + fn make_bcb_counters(&mut self, bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool) { debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock"); - // Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated - // coverage span, add a counter. If the `BasicCoverageBlock` branches, add a counter or - // expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming - // edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple - // incoming edges). + // Traverse the coverage graph, ensuring that every node that needs a + // coverage counter has one. // - // The `TraverseCoverageGraphWithLoops` traversal ensures that, when a loop is encountered, - // all `BasicCoverageBlock` nodes in the loop are visited before visiting any node outside - // the loop. The `traversal` state includes a `context_stack`, providing a way to know if - // the current BCB is in one or more nested loops or not. + // The traversal tries to ensure that, when a loop is encountered, all + // nodes within the loop are visited before visiting any nodes outside + // the loop. It also keeps track of which loop(s) the traversal is + // currently inside. let mut traversal = TraverseCoverageGraphWithLoops::new(self.basic_coverage_blocks); while let Some(bcb) = traversal.next() { - if bcb_has_coverage_spans(bcb) { - debug!("{:?} has at least one coverage span. Get or make its counter", bcb); - self.make_node_and_branch_counters(&traversal, bcb); - } else { - debug!( - "{:?} does not have any coverage spans. A counter will only be added if \ - and when a covered BCB has an expression dependency.", - bcb, - ); + let _span = debug_span!("traversal", ?bcb).entered(); + if bcb_needs_counter(bcb) { + self.make_node_counter_and_out_edge_counters(&traversal, bcb); } } @@ -297,146 +302,133 @@ impl<'a> MakeBcbCounters<'a> { ); } - fn make_node_and_branch_counters( + /// Make sure the given node has a node counter, and then make sure each of + /// its out-edges has an edge counter (if appropriate). + #[instrument(level = "debug", skip(self, traversal))] + fn make_node_counter_and_out_edge_counters( &mut self, traversal: &TraverseCoverageGraphWithLoops<'_>, from_bcb: BasicCoverageBlock, ) { // First, ensure that this node has a counter of some kind. - // We might also use its term later to compute one of the branch counters. - let from_bcb_operand = self.get_or_make_counter_operand(from_bcb); - - let branch_target_bcbs = self.basic_coverage_blocks.successors[from_bcb].as_slice(); - - // If this node doesn't have multiple out-edges, or all of its out-edges - // already have counters, then we don't need to create edge counters. - let needs_branch_counters = branch_target_bcbs.len() > 1 - && branch_target_bcbs - .iter() - .any(|&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb)); - if !needs_branch_counters { + // We might also use that counter to compute one of the out-edge counters. 
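The scheme in this function rests on simple flow conservation: absent panics, a node's execution count equals the sum of its in-edge counts and, for out-summable terminators, the sum of its out-edge counts, so one edge per branch point can be derived at report time instead of instrumented. A minimal standalone sketch of that arithmetic, using toy types rather than rustc's `BcbCounter`:

    // Toy model (not rustc's types): `Phys` is measured at runtime by an
    // increment; `Derived` is computed from other counts at report time.
    #[derive(Clone, Copy)]
    enum Count {
        Phys(u64),
        Derived(u64),
    }

    fn value(c: Count) -> u64 {
        match c {
            Count::Phys(n) | Count::Derived(n) => n,
        }
    }

    fn main() {
        let node = Count::Phys(10); // the branch node ran 10 times
        let then_edge = Count::Phys(7); // one out-edge gets a physical counter
        // The sibling out-edge needs no instrumentation at all:
        let else_edge = Count::Derived(value(node) - value(then_edge));
        assert_eq!(value(else_edge), 3);
    }

Physical counters cost an increment at runtime; derived counts are free, which is why the pass prefers expressions wherever the graph allows.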
+ let node_counter = self.get_or_make_node_counter(from_bcb); + + let successors = self.basic_coverage_blocks.successors[from_bcb].as_slice(); + + // If this node's out-edges won't sum to the node's counter, + // then there's no reason to create edge counters here. + if !self.basic_coverage_blocks[from_bcb].is_out_summable { return; } - debug!( - "{from_bcb:?} has some branch(es) without counters:\n {}", - branch_target_bcbs - .iter() - .map(|&to_bcb| { - format!("{from_bcb:?}->{to_bcb:?}: {:?}", self.branch_counter(from_bcb, to_bcb)) - }) - .collect::<Vec<_>>() - .join("\n "), - ); - - // Of the branch edges that don't have counters yet, one can be given an expression - // (computed from the other edges) instead of a dedicated counter. - let expression_to_bcb = self.choose_preferred_expression_branch(traversal, from_bcb); + // Determine the set of out-edges that don't yet have edge counters. + let candidate_successors = self.basic_coverage_blocks.successors[from_bcb] + .iter() + .copied() + .filter(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb)) + .collect::<Vec<_>>(); + debug!(?candidate_successors); + + // If there are out-edges without counters, choose one to be given an expression + // (computed from this node and the other out-edges) instead of a physical counter. + let Some(expression_to_bcb) = + self.choose_out_edge_for_expression(traversal, &candidate_successors) + else { + return; + }; - // For each branch arm other than the one that was chosen to get an expression, + // For each out-edge other than the one that was chosen to get an expression, // ensure that it has a counter (existing counter/expression or a new counter), - // and accumulate the corresponding terms into a single sum term. - let sum_of_all_other_branches: BcbCounter = { - let _span = debug_span!("sum_of_all_other_branches", ?expression_to_bcb).entered(); - branch_target_bcbs - .iter() - .copied() - // Skip the chosen branch, since we'll calculate it from the other branches. - .filter(|&to_bcb| to_bcb != expression_to_bcb) - .fold(None, |accum, to_bcb| { - let _span = debug_span!("to_bcb", ?accum, ?to_bcb).entered(); - let branch_counter = self.get_or_make_edge_counter_operand(from_bcb, to_bcb); - Some(self.coverage_counters.make_sum_expression(accum, branch_counter)) - }) - .expect("there must be at least one other branch") + // and accumulate the corresponding counters into a single sum expression. + let other_out_edge_counters = successors + .iter() + .copied() + // Skip the chosen edge, since we'll calculate its count from this sum. + .filter(|&to_bcb| to_bcb != expression_to_bcb) + .map(|to_bcb| self.get_or_make_edge_counter(from_bcb, to_bcb)) + .collect::<Vec<_>>(); + let Some(sum_of_all_other_out_edges) = + self.coverage_counters.make_sum(&other_out_edge_counters) + else { + return; }; - // For the branch that was chosen to get an expression, create that expression - // by taking the count of the node we're branching from, and subtracting the - // sum of all the other branches. - debug!( - "Making an expression for the selected expression_branch: \ - {expression_to_bcb:?} (expression_branch predecessors: {:?})", - self.bcb_predecessors(expression_to_bcb), - ); + // Now create an expression for the chosen edge, by taking the counter + // for its source node and subtracting the sum of its sibling out-edges. 
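The sum being subtracted here comes from `make_sum`, which folds an arbitrary-length counter list into left-nested binary `Add` expressions with `Iterator::reduce`, returning `None` for an empty list. The same pattern in a self-contained toy:

    #[derive(Debug, Clone, PartialEq)]
    enum Expr {
        Counter(u32),
        Add(Box<Expr>, Box<Expr>),
    }

    fn make_sum(counters: &[Expr]) -> Option<Expr> {
        // Builds ((c0 + c1) + c2)...; None if `counters` is empty.
        counters
            .iter()
            .cloned()
            .reduce(|accum, c| Expr::Add(Box::new(accum), Box::new(c)))
    }

    fn main() {
        assert_eq!(make_sum(&[]), None);
        let sum = make_sum(&[Expr::Counter(0), Expr::Counter(1), Expr::Counter(2)]);
        println!("{sum:?}"); // Some(Add(Add(Counter(0), Counter(1)), Counter(2)))
    }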
let expression = self.coverage_counters.make_expression( - from_bcb_operand, + node_counter, Op::Subtract, - sum_of_all_other_branches, + sum_of_all_other_out_edges, ); + debug!("{expression_to_bcb:?} gets an expression: {expression:?}"); - if self.basic_coverage_blocks.bcb_has_multiple_in_edges(expression_to_bcb) { - self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression); - } else { + if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(expression_to_bcb) { + // This edge normally wouldn't get its own counter, so attach the expression + // to its target node instead, so that `edge_has_no_counter` can see it. + assert_eq!(sole_pred, from_bcb); self.coverage_counters.set_bcb_counter(expression_to_bcb, expression); + } else { + self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression); } } #[instrument(level = "debug", skip(self))] - fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { + fn get_or_make_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { // If the BCB already has a counter, return it. if let Some(counter_kind) = self.coverage_counters.bcb_counters[bcb] { debug!("{bcb:?} already has a counter: {counter_kind:?}"); return counter_kind; } - // A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`). - // Also, a BCB that loops back to itself gets a simple `Counter`. This may indicate the - // program results in a tight infinite loop, but it should still compile. - let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb); - if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) { - let counter_kind = - self.coverage_counters.make_counter(CounterIncrementSite::Node { bcb }); - if one_path_to_target { - debug!("{bcb:?} gets a new counter: {counter_kind:?}"); - } else { - debug!( - "{bcb:?} has itself as its own predecessor. It can't be part of its own \ - Expression sum, so it will get its own new counter: {counter_kind:?}. \ - (Note, the compiled code will generate an infinite loop.)", - ); - } - return self.coverage_counters.set_bcb_counter(bcb, counter_kind); + let predecessors = self.basic_coverage_blocks.predecessors[bcb].as_slice(); + + // Handle cases where we can't compute a node's count from its in-edges: + // - START_BCB has no in-edges, so taking the sum would panic (or be wrong). + // - For nodes with one in-edge, or that directly loop to themselves, + // trying to get the in-edge counts would require this node's counter, + // leading to infinite recursion. + if predecessors.len() <= 1 || predecessors.contains(&bcb) { + debug!(?bcb, ?predecessors, "node has <=1 predecessors or is its own predecessor"); + return self.coverage_counters.make_phys_node_counter(bcb); } // A BCB with multiple incoming edges can compute its count by ensuring that counters // exist for each of those edges, and then adding them up to get a total count. - let sum_of_in_edges: BcbCounter = { - let _span = debug_span!("sum_of_in_edges", ?bcb).entered(); - // We avoid calling `self.bcb_predecessors` here so that we can - // call methods on `&mut self` inside the fold. 
- self.basic_coverage_blocks.predecessors[bcb] - .iter() - .copied() - .fold(None, |accum, from_bcb| { - let _span = debug_span!("from_bcb", ?accum, ?from_bcb).entered(); - let edge_counter = self.get_or_make_edge_counter_operand(from_bcb, bcb); - Some(self.coverage_counters.make_sum_expression(accum, edge_counter)) - }) - .expect("there must be at least one in-edge") - }; + let in_edge_counters = predecessors + .iter() + .copied() + .map(|from_bcb| self.get_or_make_edge_counter(from_bcb, bcb)) + .collect::<Vec<_>>(); + let sum_of_in_edges: BcbCounter = self + .coverage_counters + .make_sum(&in_edge_counters) + .expect("there must be at least one in-edge"); debug!("{bcb:?} gets a new counter (sum of predecessor counters): {sum_of_in_edges:?}"); self.coverage_counters.set_bcb_counter(bcb, sum_of_in_edges) } #[instrument(level = "debug", skip(self))] - fn get_or_make_edge_counter_operand( + fn get_or_make_edge_counter( &mut self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, ) -> BcbCounter { - // If the target BCB has only one in-edge (i.e. this one), then create - // a node counter instead, since it will have the same value. - if !self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) { - assert_eq!([from_bcb].as_slice(), self.basic_coverage_blocks.predecessors[to_bcb]); - return self.get_or_make_counter_operand(to_bcb); + // If the target node has exactly one in-edge (i.e. this one), then just + // use the node's counter, since it will have the same value. + if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) { + assert_eq!(sole_pred, from_bcb); + // This call must take care not to invoke `get_or_make_edge` for + // this edge, since that would result in infinite recursion! + return self.get_or_make_node_counter(to_bcb); } - // If the source BCB has only one successor (assumed to be the given target), an edge - // counter is unnecessary. Just get or make a counter for the source BCB. - if self.bcb_successors(from_bcb).len() == 1 { - return self.get_or_make_counter_operand(from_bcb); + // If the source node has exactly one out-edge (i.e. this one) and would have + // the same execution count as that edge, then just use the node's counter. + if let Some(simple_succ) = self.basic_coverage_blocks.simple_successor(from_bcb) { + assert_eq!(simple_succ, to_bcb); + return self.get_or_make_node_counter(from_bcb); } // If the edge already has a counter, return it. @@ -448,121 +440,81 @@ impl<'a> MakeBcbCounters<'a> { } // Make a new counter to count this edge. - let counter_kind = - self.coverage_counters.make_counter(CounterIncrementSite::Edge { from_bcb, to_bcb }); - debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}"); - self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind) + self.coverage_counters.make_phys_edge_counter(from_bcb, to_bcb) } - /// Select a branch for the expression, either the recommended `reloop_branch`, or if none was - /// found, select any branch. - fn choose_preferred_expression_branch( + /// Given a set of candidate out-edges (represented by their successor node), + /// choose one to be given a counter expression instead of a physical counter. 
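The short-circuits in `get_or_make_edge_counter` above mean an edge only gets its own counter when neither endpoint can stand in for it. A toy illustration of the two rules, with plain indices instead of `BasicCoverageBlock` and every node assumed out-summable:

    struct Graph {
        preds: Vec<Vec<usize>>,
        succs: Vec<Vec<usize>>,
    }

    impl Graph {
        // Mirrors `sole_predecessor`: the edge into a single-predecessor
        // node has the same count as the node itself.
        fn sole_predecessor(&self, to: usize) -> Option<usize> {
            match self.preds[to].as_slice() {
                &[from] => Some(from),
                _ => None,
            }
        }

        // Mirrors `simple_successor` under the assumption that every
        // node here is out-summable: a single out-edge carries the
        // node's whole count.
        fn simple_successor(&self, from: usize) -> Option<usize> {
            match self.succs[from].as_slice() {
                &[to] => Some(to),
                _ => None,
            }
        }
    }

    fn main() {
        // Diamond-ish graph: 0 -> 1 -> 2, plus a direct edge 0 -> 2.
        let g = Graph {
            preds: vec![vec![], vec![0], vec![1, 0]],
            succs: vec![vec![1, 2], vec![2], vec![]],
        };
        assert_eq!(g.sole_predecessor(1), Some(0));
        assert_eq!(g.sole_predecessor(2), None);
        assert_eq!(g.simple_successor(1), Some(2));
        assert_eq!(g.simple_successor(0), None);
    }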
+ fn choose_out_edge_for_expression( &self, traversal: &TraverseCoverageGraphWithLoops<'_>, - from_bcb: BasicCoverageBlock, - ) -> BasicCoverageBlock { - let good_reloop_branch = self.find_good_reloop_branch(traversal, from_bcb); - if let Some(reloop_target) = good_reloop_branch { - assert!(self.branch_has_no_counter(from_bcb, reloop_target)); + candidate_successors: &[BasicCoverageBlock], + ) -> Option<BasicCoverageBlock> { + // Try to find a candidate that leads back to the top of a loop, + // because reloop edges tend to be executed more times than loop-exit edges. + if let Some(reloop_target) = self.find_good_reloop_edge(traversal, &candidate_successors) { debug!("Selecting reloop target {reloop_target:?} to get an expression"); - reloop_target - } else { - let &branch_without_counter = self - .bcb_successors(from_bcb) - .iter() - .find(|&&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb)) - .expect( - "needs_branch_counters was `true` so there should be at least one \ - branch", - ); - debug!( - "Selecting any branch={:?} that still needs a counter, to get the \ - `Expression` because there was no `reloop_branch`, or it already had a \ - counter", - branch_without_counter - ); - branch_without_counter + return Some(reloop_target); } + + // We couldn't identify a "good" edge, so just choose an arbitrary one. + let arbitrary_target = candidate_successors.first().copied()?; + debug!(?arbitrary_target, "selecting arbitrary out-edge to get an expression"); + Some(arbitrary_target) } - /// Tries to find a branch that leads back to the top of a loop, and that - /// doesn't already have a counter. Such branches are good candidates to - /// be given an expression (instead of a physical counter), because they - /// will tend to be executed more times than a loop-exit branch. - fn find_good_reloop_branch( + /// Given a set of candidate out-edges (represented by their successor node), + /// tries to find one that leads back to the top of a loop. + /// + /// Reloop edges are good candidates for counter expressions, because they + /// will tend to be executed more times than a loop-exit edge, so it's nice + /// for them to be able to avoid a physical counter increment. + fn find_good_reloop_edge( &self, traversal: &TraverseCoverageGraphWithLoops<'_>, - from_bcb: BasicCoverageBlock, + candidate_successors: &[BasicCoverageBlock], ) -> Option<BasicCoverageBlock> { - let branch_target_bcbs = self.bcb_successors(from_bcb); + // If there are no candidates, avoid iterating over the loop stack. + if candidate_successors.is_empty() { + return None; + } // Consider each loop on the current traversal context stack, top-down. for reloop_bcbs in traversal.reloop_bcbs_per_loop() { - let mut all_branches_exit_this_loop = true; - - // Try to find a branch that doesn't exit this loop and doesn't - // already have a counter. - for &branch_target_bcb in branch_target_bcbs { - // A branch is a reloop branch if it dominates any BCB that has - // an edge back to the loop header. (Other branches are exits.) - let is_reloop_branch = reloop_bcbs.iter().any(|&reloop_bcb| { - self.basic_coverage_blocks.dominates(branch_target_bcb, reloop_bcb) + // Try to find a candidate edge that doesn't exit this loop. + for &target_bcb in candidate_successors { + // An edge is a reloop edge if its target dominates any BCB that has + // an edge back to the loop header. (Otherwise it's an exit edge.) 
+ let is_reloop_edge = reloop_bcbs.iter().any(|&reloop_bcb| { + self.basic_coverage_blocks.dominates(target_bcb, reloop_bcb) }); - - if is_reloop_branch { - all_branches_exit_this_loop = false; - if self.branch_has_no_counter(from_bcb, branch_target_bcb) { - // We found a good branch to be given an expression. - return Some(branch_target_bcb); - } - // Keep looking for another reloop branch without a counter. - } else { - // This branch exits the loop. + if is_reloop_edge { + // We found a good out-edge to be given an expression. + return Some(target_bcb); } } - if !all_branches_exit_this_loop { - // We found one or more reloop branches, but all of them already - // have counters. Let the caller choose one of the exit branches. - debug!("All reloop branches had counters; skip checking the other loops"); - return None; - } - - // All of the branches exit this loop, so keep looking for a good - // reloop branch for one of the outer loops. + // All of the candidate edges exit this loop, so keep looking + // for a good reloop edge for one of the outer loops. } None } #[inline] - fn bcb_predecessors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] { - &self.basic_coverage_blocks.predecessors[bcb] - } - - #[inline] - fn bcb_successors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] { - &self.basic_coverage_blocks.successors[bcb] - } - - #[inline] - fn branch_has_no_counter( + fn edge_has_no_counter( &self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, ) -> bool { - self.branch_counter(from_bcb, to_bcb).is_none() - } + let edge_counter = + if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) { + assert_eq!(sole_pred, from_bcb); + self.coverage_counters.bcb_counters[to_bcb] + } else { + self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb)).copied() + }; - fn branch_counter( - &self, - from_bcb: BasicCoverageBlock, - to_bcb: BasicCoverageBlock, - ) -> Option<&BcbCounter> { - if self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) { - self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb)) - } else { - self.coverage_counters.bcb_counters[to_bcb].as_ref() - } + edge_counter.is_none() } } diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs index 31b20775194..743aa679058 100644 --- a/compiler/rustc_mir_transform/src/coverage/graph.rs +++ b/compiler/rustc_mir_transform/src/coverage/graph.rs @@ -10,6 +10,7 @@ use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::bug; use rustc_middle::mir::{self, BasicBlock, Terminator, TerminatorKind}; +use tracing::debug; /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s. @@ -86,7 +87,11 @@ impl CoverageGraph { for &bb in basic_blocks.iter() { bb_to_bcb[bb] = Some(bcb); } - let bcb_data = BasicCoverageBlockData::from(basic_blocks); + + let is_out_summable = basic_blocks.last().map_or(false, |&bb| { + bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable() + }); + let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable }; debug!("adding bcb{}: {:?}", bcb.index(), bcb_data); bcbs.push(bcb_data); }; @@ -160,23 +165,33 @@ impl CoverageGraph { self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b) } - /// Returns true if the given node has 2 or more in-edges, i.e. 2 or more - /// predecessors. 
- /// - /// This property is interesting to code that assigns counters to nodes and - /// edges, because if a node _doesn't_ have multiple in-edges, then there's - /// no benefit in having a separate counter for its in-edge, because it - /// would have the same value as the node's own counter. - /// - /// FIXME: That assumption might not be true for [`TerminatorKind::Yield`]? - #[inline(always)] - pub(crate) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool { - // Even though bcb0 conceptually has an extra virtual in-edge due to - // being the entry point, we've already asserted that it has no _other_ - // in-edges, so there's no possibility of it having _multiple_ in-edges. - // (And since its virtual in-edge doesn't exist in the graph, that edge - // can't have a separate counter anyway.) - self.predecessors[bcb].len() > 1 + /// Returns the source of this node's sole in-edge, if it has exactly one. + /// That edge can be assumed to have the same execution count as the node + /// itself (in the absence of panics). + pub(crate) fn sole_predecessor( + &self, + to_bcb: BasicCoverageBlock, + ) -> Option<BasicCoverageBlock> { + // Unlike `simple_successor`, there is no need for extra checks here. + if let &[from_bcb] = self.predecessors[to_bcb].as_slice() { Some(from_bcb) } else { None } + } + + /// Returns the target of this node's sole out-edge, if it has exactly + /// one, but only if that edge can be assumed to have the same execution + /// count as the node itself (in the absence of panics). + pub(crate) fn simple_successor( + &self, + from_bcb: BasicCoverageBlock, + ) -> Option<BasicCoverageBlock> { + // If a node's count is the sum of its out-edges, and it has exactly + // one out-edge, then that edge has the same count as the node. + if self.bcbs[from_bcb].is_out_summable + && let &[to_bcb] = self.successors[from_bcb].as_slice() + { + Some(to_bcb) + } else { + None + } } } @@ -265,14 +280,16 @@ rustc_index::newtype_index! { #[derive(Debug, Clone)] pub(crate) struct BasicCoverageBlockData { pub(crate) basic_blocks: Vec<BasicBlock>, + + /// If true, this node's execution count can be assumed to be the sum of the + /// execution counts of all of its **out-edges** (assuming no panics). + /// + /// Notably, this is false for a node ending with [`TerminatorKind::Yield`], + /// because the yielding coroutine might not be resumed. + pub(crate) is_out_summable: bool, } impl BasicCoverageBlockData { - fn from(basic_blocks: Vec<BasicBlock>) -> Self { - assert!(basic_blocks.len() > 0); - Self { basic_blocks } - } - #[inline(always)] pub(crate) fn leader_bb(&self) -> BasicBlock { self.basic_blocks[0] @@ -294,6 +311,9 @@ enum CoverageSuccessors<'a> { Chainable(BasicBlock), /// The block cannot be combined into the same BCB as its successor(s). NotChainable(&'a [BasicBlock]), + /// Yield terminators are not chainable, and their execution count can also + /// differ from the execution count of their out-edge. + Yield(BasicBlock), } impl CoverageSuccessors<'_> { @@ -301,6 +321,17 @@ impl CoverageSuccessors<'_> { match self { Self::Chainable(_) => true, Self::NotChainable(_) => false, + Self::Yield(_) => false, + } + } + + /// Returns true if the terminator itself is assumed to have the same + /// execution count as the sum of its out-edges (assuming no panics). 
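`Yield` is the case that breaks this assumption: a coroutine can be dropped while suspended, so the resume edge may run fewer times than the yield terminator itself. A standalone sketch of the distinction (toy enum, not the real `CoverageSuccessors`):

    // Only `Yield` breaks the assumption that a node's count equals the
    // sum of its out-edge counts.
    enum Term {
        Goto,      // one successor; the count flows straight through
        SwitchInt, // many successors; their counts sum to the node's
        Yield,     // one successor, but it may run fewer times
    }

    fn is_out_summable(t: &Term) -> bool {
        match t {
            Term::Goto | Term::SwitchInt => true,
            Term::Yield => false,
        }
    }

    fn main() {
        assert!(is_out_summable(&Term::Goto));
        assert!(is_out_summable(&Term::SwitchInt));
        assert!(!is_out_summable(&Term::Yield));
        // e.g. a yield executed 5 times whose coroutine was dropped twice
        // while suspended: the resume edge ran 3 times, not 5.
    }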
+ fn is_out_summable(&self) -> bool { + match self { + Self::Chainable(_) => true, + Self::NotChainable(_) => true, + Self::Yield(_) => false, } } } @@ -311,7 +342,9 @@ impl IntoIterator for CoverageSuccessors<'_> { fn into_iter(self) -> Self::IntoIter { match self { - Self::Chainable(bb) => Some(bb).into_iter().chain((&[]).iter().copied()), + Self::Chainable(bb) | Self::Yield(bb) => { + Some(bb).into_iter().chain((&[]).iter().copied()) + } Self::NotChainable(bbs) => None.into_iter().chain(bbs.iter().copied()), } } @@ -330,7 +363,7 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera // A yield terminator has exactly 1 successor, but should not be chained, // because its resume edge has a different execution count. - Yield { ref resume, .. } => CoverageSuccessors::NotChainable(std::slice::from_ref(resume)), + Yield { resume, .. } => CoverageSuccessors::Yield(resume), // These terminators have exactly one coverage-relevant successor, // and can be chained into it. @@ -340,15 +373,15 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera | FalseUnwind { real_target: target, .. } | Goto { target } => CoverageSuccessors::Chainable(target), - // A call terminator can normally be chained, except when they have no - // successor because they are known to diverge. + // A call terminator can normally be chained, except when it has no + // successor because it is known to diverge. Call { target: maybe_target, .. } => match maybe_target { Some(target) => CoverageSuccessors::Chainable(target), None => CoverageSuccessors::NotChainable(&[]), }, - // An inline asm terminator can normally be chained, except when it diverges or uses asm - // goto. + // An inline asm terminator can normally be chained, except when it + // diverges or uses asm goto. InlineAsm { ref targets, .. } => { if let [target] = targets[..] { CoverageSuccessors::Chainable(target) diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 96ca3b43d5c..f78fc7931f9 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -1,4 +1,4 @@ -pub mod query; +pub(super) mod query; mod counters; mod graph; @@ -13,7 +13,7 @@ use rustc_hir::intravisit::{walk_expr, Visitor}; use rustc_middle::hir::map::Map; use rustc_middle::hir::nested_filter; use rustc_middle::mir::coverage::{ - CodeRegion, CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind, + CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind, SourceRegion, }; use rustc_middle::mir::{ self, BasicBlock, BasicBlockData, SourceInfo, Statement, StatementKind, Terminator, @@ -23,18 +23,18 @@ use rustc_middle::ty::TyCtxt; use rustc_span::def_id::LocalDefId; use rustc_span::source_map::SourceMap; use rustc_span::{BytePos, Pos, RelativeBytePos, Span, Symbol}; +use tracing::{debug, debug_span, instrument, trace}; use crate::coverage::counters::{CounterIncrementSite, CoverageCounters}; use crate::coverage::graph::CoverageGraph; use crate::coverage::mappings::ExtractedMappings; -use crate::MirPass; /// Inserts `StatementKind::Coverage` statements that either instrument the binary with injected /// counters, via intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen /// to construct the coverage map. 
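Conceptually, the injected statements amount to one profile-counter increment per instrumented site, with the remaining counts reconstructed from expressions when the report is generated. A hedged source-level sketch (the real pass works on MIR and lowers to `llvm.instrprof.increment`, not Rust source):

    // Hypothetical illustration only: where counters conceptually land
    // for a simple branch. The slice stands in for the profile buffer.
    fn branchy(cond: bool, counters: &mut [u64; 2]) -> u32 {
        counters[0] += 1; // counter 0: function entry
        if cond {
            counters[1] += 1; // counter 1: then-arm
            1
        } else {
            // No counter here: else-count = counters[0] - counters[1].
            2
        }
    }

    fn main() {
        let mut c = [0u64; 2];
        branchy(true, &mut c);
        branchy(false, &mut c);
        assert_eq!(c, [2, 1]);
        assert_eq!(c[0] - c[1], 1); // derived else-arm count
    }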
-pub struct InstrumentCoverage;
+pub(super) struct InstrumentCoverage;
 
-impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
+impl<'tcx> crate::MirPass<'tcx> for InstrumentCoverage {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
         sess.instrument_coverage()
     }
@@ -159,7 +159,7 @@ fn create_mappings<'tcx>(
             .expect("all BCBs with spans were given counters")
             .as_term()
     };
-    let region_for_span = |span: Span| make_code_region(source_map, file_name, span, body_span);
+    let region_for_span = |span: Span| make_source_region(source_map, file_name, span, body_span);
 
     // Fully destructure the mappings struct to make sure we don't miss any kinds.
     let ExtractedMappings {
@@ -175,9 +175,9 @@ fn create_mappings<'tcx>(
     mappings.extend(code_mappings.iter().filter_map(
         // Ordinary code mappings are the simplest kind.
         |&mappings::CodeMapping { span, bcb }| {
-            let code_region = region_for_span(span)?;
+            let source_region = region_for_span(span)?;
             let kind = MappingKind::Code(term_for_bcb(bcb));
-            Some(Mapping { kind, code_region })
+            Some(Mapping { kind, source_region })
         },
    ));
 
@@ -186,29 +186,29 @@ fn create_mappings<'tcx>(
             let true_term = term_for_bcb(true_bcb);
             let false_term = term_for_bcb(false_bcb);
             let kind = MappingKind::Branch { true_term, false_term };
-            let code_region = region_for_span(span)?;
-            Some(Mapping { kind, code_region })
+            let source_region = region_for_span(span)?;
+            Some(Mapping { kind, source_region })
         },
     ));
 
     mappings.extend(mcdc_branches.iter().filter_map(
         |&mappings::MCDCBranch { span, true_bcb, false_bcb, condition_info, decision_depth: _ }| {
-            let code_region = region_for_span(span)?;
+            let source_region = region_for_span(span)?;
             let true_term = term_for_bcb(true_bcb);
             let false_term = term_for_bcb(false_bcb);
             let kind = match condition_info {
                 Some(mcdc_params) => MappingKind::MCDCBranch { true_term, false_term, mcdc_params },
                 None => MappingKind::Branch { true_term, false_term },
             };
-            Some(Mapping { kind, code_region })
+            Some(Mapping { kind, source_region })
         },
     ));
 
     mappings.extend(mcdc_decisions.iter().filter_map(
         |&mappings::MCDCDecision { span, bitmap_idx, num_conditions, .. }| {
-            let code_region = region_for_span(span)?;
+            let source_region = region_for_span(span)?;
             let kind = MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, num_conditions });
-            Some(Mapping { kind, code_region })
+            Some(Mapping { kind, source_region })
         },
     ));
 
@@ -279,7 +279,8 @@ fn inject_mcdc_statements<'tcx>(
     basic_coverage_blocks: &CoverageGraph,
     extracted_mappings: &ExtractedMappings,
 ) {
-    // Inject test vector update first because `inject_statement` always insert new statement at head.
+    // Inject test vector update first because `inject_statement` always inserts new statements
+    // at the head.
     for &mappings::MCDCDecision {
         span: _,
         ref end_bcbs,
@@ -362,19 +363,13 @@ fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb
 /// but it's hard to rule out entirely (especially in the presence of complex macros
 /// or other expansions), and if it does happen then skipping a span or function is
 /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid.
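The renamed `make_source_region` below boils down to mapping byte offsets within a span to 1-based line/column coordinates. A toy version of that lookup (real rustc goes through `SourceMap`, which also handles multibyte characters and doctest line offsets):

    // Toy span-to-region conversion: 1-based line/column for a byte
    // offset into the source text.
    fn line_col(src: &str, offset: usize) -> (u32, u32) {
        let mut line = 1u32;
        let mut col = 1u32;
        for (i, ch) in src.char_indices() {
            if i >= offset {
                break;
            }
            if ch == '\n' {
                line += 1;
                col = 1;
            } else {
                col += 1;
            }
        }
        (line, col)
    }

    fn main() {
        let src = "fn main() {\n    let x = 1;\n}\n";
        assert_eq!(line_col(src, 0), (1, 1)); // start of `fn`
        assert_eq!(line_col(src, 16), (2, 5)); // the `l` of `let`
    }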
-fn make_code_region( +#[instrument(level = "debug", skip(source_map))] +fn make_source_region( source_map: &SourceMap, file_name: Symbol, span: Span, body_span: Span, -) -> Option<CodeRegion> { - debug!( - "Called make_code_region(file_name={}, span={}, body_span={})", - file_name, - source_map.span_to_diagnostic_string(span), - source_map.span_to_diagnostic_string(body_span) - ); - +) -> Option<SourceRegion> { let lo = span.lo(); let hi = span.hi(); @@ -424,7 +419,7 @@ fn make_code_region( start_line = source_map.doctest_offset_line(&file.name, start_line); end_line = source_map.doctest_offset_line(&file.name, end_line); - check_code_region(CodeRegion { + check_source_region(SourceRegion { file_name, start_line: start_line as u32, start_col: start_col as u32, @@ -433,12 +428,12 @@ fn make_code_region( }) } -/// If `llvm-cov` sees a code region that is improperly ordered (end < start), +/// If `llvm-cov` sees a source region that is improperly ordered (end < start), /// it will immediately exit with a fatal error. To prevent that from happening, /// discard regions that are improperly ordered, or might be interpreted in a /// way that makes them improperly ordered. -fn check_code_region(code_region: CodeRegion) -> Option<CodeRegion> { - let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = code_region; +fn check_source_region(source_region: SourceRegion) -> Option<SourceRegion> { + let SourceRegion { file_name: _, start_line, start_col, end_line, end_col } = source_region; // Line/column coordinates are supposed to be 1-based. If we ever emit // coordinates of 0, `llvm-cov` might misinterpret them. @@ -451,17 +446,17 @@ fn check_code_region(code_region: CodeRegion) -> Option<CodeRegion> { let is_ordered = (start_line, start_col) <= (end_line, end_col); if all_nonzero && end_col_has_high_bit_unset && is_ordered { - Some(code_region) + Some(source_region) } else { debug!( - ?code_region, + ?source_region, ?all_nonzero, ?end_col_has_high_bit_unset, ?is_ordered, - "Skipping code region that would be misinterpreted or rejected by LLVM" + "Skipping source region that would be misinterpreted or rejected by LLVM" ); // If this happens in a debug build, ICE to make it easier to notice. - debug_assert!(false, "Improper code region: {code_region:?}"); + debug_assert!(false, "Improper source region: {source_region:?}"); None } } diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index 1fce2abbbbf..e65a5fdd5e7 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -7,6 +7,7 @@ use rustc_middle::ty::{self, TyCtxt}; use rustc_middle::util::Providers; use rustc_span::def_id::LocalDefId; use rustc_span::sym; +use tracing::trace; /// Registers query/hook implementations related to coverage. 
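The checks from `check_source_region` above are easy to state in isolation; a sketch with a toy struct, where the field names mirror the diff and the high-bit rule follows the comments there:

    struct Region {
        start_line: u32,
        start_col: u32,
        end_line: u32,
        end_col: u32,
    }

    // Discard regions llvm-cov would reject or misread: any zero
    // coordinate, an end column with its high bit set, or end < start.
    fn check(r: Region) -> Option<Region> {
        let all_nonzero =
            [r.start_line, r.start_col, r.end_line, r.end_col].iter().all(|&n| n != 0);
        let end_col_high_bit_unset = r.end_col & (1 << 31) == 0;
        let is_ordered = (r.start_line, r.start_col) <= (r.end_line, r.end_col);
        (all_nonzero && end_col_high_bit_unset && is_ordered).then_some(r)
    }

    fn main() {
        assert!(check(Region { start_line: 3, start_col: 5, end_line: 3, end_col: 9 }).is_some());
        assert!(check(Region { start_line: 3, start_col: 9, end_line: 3, end_col: 5 }).is_none());
    }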
pub(crate) fn provide(providers: &mut Providers) { diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index 092ec1e06d2..b904b0d2599 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -3,7 +3,8 @@ use std::collections::VecDeque; use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashSet; use rustc_middle::mir; -use rustc_span::Span; +use rustc_span::{DesugaringKind, ExpnKind, MacroKind, Span}; +use tracing::{debug, debug_span, instrument}; use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph}; use crate::coverage::spans::from_mir::{ @@ -24,7 +25,7 @@ pub(super) fn extract_refined_covspans( // First, perform the passes that need macro information. covspans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb)); - remove_unwanted_macro_spans(&mut covspans); + remove_unwanted_expansion_spans(&mut covspans); split_visible_macro_spans(&mut covspans); // We no longer need the extra information in `SpanFromMir`, so convert to `Covspan`. @@ -75,18 +76,24 @@ pub(super) fn extract_refined_covspans( /// invocation, which is unhelpful. Keeping only the first such span seems to /// give better mappings, so remove the others. /// +/// Similarly, `await` expands to a branch on the discriminant of `Poll`, which +/// leads to incorrect coverage if the `Future` is immediately ready (#98712). +/// /// (The input spans should be sorted in BCB dominator order, so that the /// retained "first" span is likely to dominate the others.) -fn remove_unwanted_macro_spans(covspans: &mut Vec<SpanFromMir>) { - let mut seen_macro_spans = FxHashSet::default(); +fn remove_unwanted_expansion_spans(covspans: &mut Vec<SpanFromMir>) { + let mut deduplicated_spans = FxHashSet::default(); + covspans.retain(|covspan| { - // Ignore (retain) non-macro-expansion spans. - if covspan.visible_macro.is_none() { - return true; + match covspan.expn_kind { + // Retain only the first await-related or macro-expanded covspan with this span. + Some(ExpnKind::Desugaring(kind)) if kind == DesugaringKind::Await => { + deduplicated_spans.insert(covspan.span) + } + Some(ExpnKind::Macro(MacroKind::Bang, _)) => deduplicated_spans.insert(covspan.span), + // Ignore (retain) other spans. + _ => true, } - - // Retain only the first macro-expanded covspan with this span. - seen_macro_spans.insert(covspan.span) }); } @@ -98,7 +105,9 @@ fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) { let mut extra_spans = vec![]; covspans.retain(|covspan| { - let Some(visible_macro) = covspan.visible_macro else { return true }; + let Some(ExpnKind::Macro(MacroKind::Bang, visible_macro)) = covspan.expn_kind else { + return true; + }; let split_len = visible_macro.as_str().len() as u32 + 1; let (before, after) = covspan.span.split_at(split_len); @@ -110,8 +119,8 @@ fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) { return true; } - extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb)); - extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb)); + extra_spans.push(SpanFromMir::new(before, covspan.expn_kind.clone(), covspan.bcb)); + extra_spans.push(SpanFromMir::new(after, covspan.expn_kind.clone(), covspan.bcb)); false // Discard the original covspan that we just split. 
}); diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs index 32bd25bf4b9..7f5765c9462 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs @@ -3,13 +3,13 @@ use rustc_middle::mir::coverage::CoverageKind; use rustc_middle::mir::{ self, FakeReadCause, Statement, StatementKind, Terminator, TerminatorKind, }; -use rustc_span::{Span, Symbol}; +use rustc_span::{ExpnKind, Span}; use crate::coverage::graph::{ BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB, }; use crate::coverage::spans::Covspan; -use crate::coverage::unexpand::unexpand_into_body_span_with_visible_macro; +use crate::coverage::unexpand::unexpand_into_body_span_with_expn_kind; use crate::coverage::ExtractedHirInfo; pub(crate) struct ExtractedCovspans { @@ -60,7 +60,7 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>( let data = &mir_body[bb]; let unexpand = move |expn_span| { - unexpand_into_body_span_with_visible_macro(expn_span, body_span) + unexpand_into_body_span_with_expn_kind(expn_span, body_span) // Discard any spans that fill the entire body, because they tend // to represent compiler-inserted code, e.g. implicitly returning `()`. .filter(|(span, _)| !span.source_equal(body_span)) @@ -68,9 +68,9 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>( let mut extract_statement_span = |statement| { let expn_span = filtered_statement_span(statement)?; - let (span, visible_macro) = unexpand(expn_span)?; + let (span, expn_kind) = unexpand(expn_span)?; - initial_covspans.push(SpanFromMir::new(span, visible_macro, bcb)); + initial_covspans.push(SpanFromMir::new(span, expn_kind, bcb)); Some(()) }; for statement in data.statements.iter() { @@ -79,9 +79,9 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>( let mut extract_terminator_span = |terminator| { let expn_span = filtered_terminator_span(terminator)?; - let (span, visible_macro) = unexpand(expn_span)?; + let (span, expn_kind) = unexpand(expn_span)?; - initial_covspans.push(SpanFromMir::new(span, visible_macro, bcb)); + initial_covspans.push(SpanFromMir::new(span, expn_kind, bcb)); Some(()) }; extract_terminator_span(data.terminator()); @@ -214,7 +214,7 @@ pub(crate) struct SpanFromMir { /// With the exception of `fn_sig_span`, this should always be contained /// within `body_span`. 
pub(crate) span: Span, - pub(crate) visible_macro: Option<Symbol>, + pub(crate) expn_kind: Option<ExpnKind>, pub(crate) bcb: BasicCoverageBlock, } @@ -223,12 +223,12 @@ impl SpanFromMir { Self::new(fn_sig_span, None, START_BCB) } - pub(crate) fn new(span: Span, visible_macro: Option<Symbol>, bcb: BasicCoverageBlock) -> Self { - Self { span, visible_macro, bcb } + pub(crate) fn new(span: Span, expn_kind: Option<ExpnKind>, bcb: BasicCoverageBlock) -> Self { + Self { span, expn_kind, bcb } } pub(crate) fn into_covspan(self) -> Covspan { - let Self { span, visible_macro: _, bcb } = self; + let Self { span, expn_kind: _, bcb } = self; Covspan { span, bcb } } } diff --git a/compiler/rustc_mir_transform/src/coverage/unexpand.rs b/compiler/rustc_mir_transform/src/coverage/unexpand.rs index 8cde291b907..cb861544736 100644 --- a/compiler/rustc_mir_transform/src/coverage/unexpand.rs +++ b/compiler/rustc_mir_transform/src/coverage/unexpand.rs @@ -1,4 +1,4 @@ -use rustc_span::{ExpnKind, MacroKind, Span, Symbol}; +use rustc_span::{ExpnKind, Span}; /// Walks through the expansion ancestors of `original_span` to find a span that /// is contained in `body_span` and has the same [syntax context] as `body_span`. @@ -13,20 +13,15 @@ pub(crate) fn unexpand_into_body_span(original_span: Span, body_span: Span) -> O /// /// If the returned span represents a bang-macro invocation (e.g. `foo!(..)`), /// the returned symbol will be the name of that macro (e.g. `foo`). -pub(crate) fn unexpand_into_body_span_with_visible_macro( +pub(crate) fn unexpand_into_body_span_with_expn_kind( original_span: Span, body_span: Span, -) -> Option<(Span, Option<Symbol>)> { +) -> Option<(Span, Option<ExpnKind>)> { let (span, prev) = unexpand_into_body_span_with_prev(original_span, body_span)?; - let visible_macro = prev - .map(|prev| match prev.ctxt().outer_expn_data().kind { - ExpnKind::Macro(MacroKind::Bang, name) => Some(name), - _ => None, - }) - .flatten(); + let expn_kind = prev.map(|prev| prev.ctxt().outer_expn_data().kind); - Some((span, visible_macro)) + Some((span, expn_kind)) } /// Walks through the expansion ancestors of `original_span` to find a span that diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs index 50aaed090f6..42cbece32d8 100644 --- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs +++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs @@ -10,7 +10,7 @@ use rustc_span::sym; use crate::{inline, pass_manager as pm}; -pub fn provide(providers: &mut Providers) { +pub(super) fn provide(providers: &mut Providers) { providers.cross_crate_inlinable = cross_crate_inlinable; } @@ -24,7 +24,7 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { // This just reproduces the logic from Instance::requires_inline. match tcx.def_kind(def_id) { - DefKind::Ctor(..) | DefKind::Closure => return true, + DefKind::Ctor(..) 
| DefKind::Closure | DefKind::SyntheticCoroutineBody => return true, DefKind::Fn | DefKind::AssocFn => {} _ => return false, } diff --git a/compiler/rustc_mir_transform/src/ctfe_limit.rs b/compiler/rustc_mir_transform/src/ctfe_limit.rs index ff9fc776e54..bd58b1b6689 100644 --- a/compiler/rustc_mir_transform/src/ctfe_limit.rs +++ b/compiler/rustc_mir_transform/src/ctfe_limit.rs @@ -6,12 +6,11 @@ use rustc_middle::mir::{ BasicBlock, BasicBlockData, Body, Statement, StatementKind, TerminatorKind, }; use rustc_middle::ty::TyCtxt; +use tracing::instrument; -use crate::MirPass; +pub(super) struct CtfeLimit; -pub struct CtfeLimit; - -impl<'tcx> MirPass<'tcx> for CtfeLimit { +impl<'tcx> crate::MirPass<'tcx> for CtfeLimit { #[instrument(skip(self, _tcx, body))] fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let doms = body.basic_blocks.dominators(); diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index f207216d6f4..997b8c06a96 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -19,15 +19,16 @@ use rustc_mir_dataflow::value_analysis::{ use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor}; use rustc_span::DUMMY_SP; use rustc_target::abi::{Abi, FieldIdx, Size, VariantIdx, FIRST_VARIANT}; +use tracing::{debug, debug_span, instrument}; // These constants are somewhat random guesses and have not been optimized. // If `tcx.sess.mir_opt_level() >= 4`, we ignore the limits (this can become very expensive). const BLOCK_LIMIT: usize = 100; const PLACE_LIMIT: usize = 100; -pub struct DataflowConstProp; +pub(super) struct DataflowConstProp; -impl<'tcx> MirPass<'tcx> for DataflowConstProp { +impl<'tcx> crate::MirPass<'tcx> for DataflowConstProp { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 3 } @@ -331,7 +332,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { } impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map<'tcx>) -> Self { + fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map<'tcx>) -> Self { let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); Self { map, @@ -382,7 +383,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { place: PlaceIndex, mut operand: OpTy<'tcx>, projection: &[PlaceElem<'tcx>], - ) -> Option<!> { + ) { for &(mut proj_elem) in projection { if let PlaceElem::Index(index) = proj_elem { if let FlatSet::Elem(index) = state.get(index.into(), &self.map) @@ -391,10 +392,14 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { { proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false }; } else { - return None; + return; } } - operand = self.ecx.project(&operand, proj_elem).ok()?; + operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) { + operand + } else { + return; + } } self.map.for_each_projection_value( @@ -426,8 +431,6 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } }, ); - - None } fn binary_op( @@ -551,13 +554,13 @@ impl<'tcx> Patch<'tcx> { } } -struct Collector<'tcx, 'locals> { +struct Collector<'a, 'tcx> { patch: Patch<'tcx>, - local_decls: &'locals LocalDecls<'tcx>, + local_decls: &'a LocalDecls<'tcx>, } -impl<'tcx, 'locals> Collector<'tcx, 'locals> { - pub(crate) fn new(tcx: TyCtxt<'tcx>, local_decls: &'locals LocalDecls<'tcx>) -> Self { +impl<'a, 'tcx> Collector<'a, 'tcx> { + pub(crate) fn new(tcx: TyCtxt<'tcx>, local_decls: &'a LocalDecls<'tcx>) -> 
Self { Self { patch: Patch::new(tcx), local_decls } } @@ -644,7 +647,8 @@ fn try_write_constant<'tcx>( ty::FnDef(..) => {} // Those are scalars, must be handled above. - ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => throw_machine_stop_str!("primitive type with provenance"), + ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => + throw_machine_stop_str!("primitive type with provenance"), ty::Tuple(elem_tys) => { for (i, elem) in elem_tys.iter().enumerate() { @@ -718,15 +722,15 @@ fn try_write_constant<'tcx>( impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx, Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>> - for Collector<'tcx, '_> + for Collector<'_, 'tcx> { - type FlowState = State<FlatSet<Scalar>>; + type Domain = State<FlatSet<Scalar>>; #[instrument(level = "trace", skip(self, results, statement))] fn visit_statement_before_primary_effect( &mut self, results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>, - state: &Self::FlowState, + state: &Self::Domain, statement: &'mir Statement<'tcx>, location: Location, ) { @@ -748,7 +752,7 @@ impl<'mir, 'tcx> fn visit_statement_after_primary_effect( &mut self, results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>, - state: &Self::FlowState, + state: &Self::Domain, statement: &'mir Statement<'tcx>, location: Location, ) { @@ -773,7 +777,7 @@ impl<'mir, 'tcx> fn visit_terminator_before_primary_effect( &mut self, results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>, - state: &Self::FlowState, + state: &Self::Domain, terminator: &'mir Terminator<'tcx>, location: Location, ) { @@ -835,14 +839,14 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> { } } -struct OperandCollector<'tcx, 'map, 'locals, 'a> { +struct OperandCollector<'a, 'b, 'tcx> { state: &'a State<FlatSet<Scalar>>, - visitor: &'a mut Collector<'tcx, 'locals>, - ecx: &'map mut InterpCx<'tcx, DummyMachine>, - map: &'map Map<'tcx>, + visitor: &'a mut Collector<'b, 'tcx>, + ecx: &'a mut InterpCx<'tcx, DummyMachine>, + map: &'a Map<'tcx>, } -impl<'tcx> Visitor<'tcx> for OperandCollector<'tcx, '_, '_, '_> { +impl<'tcx> Visitor<'tcx> for OperandCollector<'_, '_, 'tcx> { fn visit_projection_elem( &mut self, _: PlaceRef<'tcx>, diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs index f473073083a..65c037559c5 100644 --- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs +++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs @@ -28,11 +28,11 @@ use crate::util::is_within_packed; /// /// The `borrowed` set must be a `BitSet` of all the locals that are ever borrowed in this body. It /// can be generated via the [`borrowed_locals`] function. -pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { +fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let borrowed_locals = borrowed_locals(body); // If the user requests complete debuginfo, mark the locals that appear in it as live, so - // we don't remove assignements to them. + // we don't remove assignments to them. 
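In source terms, the stores this pass deletes are writes that are overwritten before any read; the `always_live` set built below is what protects locals that debuginfo (or a borrow) still needs. A hedged sketch of the transformation:

    #[allow(unused_assignments)]
    fn before() -> i32 {
        let mut x = 1; // dead store: overwritten before ever being read
        x = 2;
        x
    }

    // After dead-store elimination, only the live store remains
    // (assuming `x` is neither borrowed nor kept alive for debuginfo):
    fn after() -> i32 {
        let x = 2;
        x
    }

    fn main() {
        assert_eq!(before(), after());
    }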
let mut always_live = debuginfo_locals(body); always_live.union(&borrowed_locals); @@ -127,12 +127,12 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { } } -pub enum DeadStoreElimination { +pub(super) enum DeadStoreElimination { Initial, Final, } -impl<'tcx> MirPass<'tcx> for DeadStoreElimination { +impl<'tcx> crate::MirPass<'tcx> for DeadStoreElimination { fn name(&self) -> &'static str { match self { DeadStoreElimination::Initial => "DeadStoreElimination-initial", diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index 71af099199e..c0cb0e641ac 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -42,9 +42,9 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly { } PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => { // Whether mutating though a `&raw const` is allowed is still undecided, so we - // disable any sketchy `readonly` optimizations for now. - // But we only need to do this if the pointer would point into the argument. - // IOW: for indirect places, like `&raw (*local).field`, this surely cannot mutate `local`. + // disable any sketchy `readonly` optimizations for now. But we only need to do + // this if the pointer would point into the argument. IOW: for indirect places, + // like `&raw (*local).field`, this surely cannot mutate `local`. !place.is_indirect() } PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => { @@ -150,7 +150,7 @@ fn type_will_always_be_passed_directly(ty: Ty<'_>) -> bool { /// body of the function instead of just the signature. These can be useful for optimization /// purposes on a best-effort basis. We compute them here and store them into the crate metadata so /// dependent crates can use them. -pub fn deduced_param_attrs<'tcx>( +pub(super) fn deduced_param_attrs<'tcx>( tcx: TyCtxt<'tcx>, def_id: LocalDefId, ) -> &'tcx [DeducedParamAttrs] { @@ -168,17 +168,16 @@ pub fn deduced_param_attrs<'tcx>( // Codegen won't use this information for anything if all the function parameters are passed // directly. Detect that and bail, for compilation speed. let fn_ty = tcx.type_of(def_id).instantiate_identity(); - if matches!(fn_ty.kind(), ty::FnDef(..)) { - if fn_ty + if matches!(fn_ty.kind(), ty::FnDef(..)) + && fn_ty .fn_sig(tcx) .inputs() .skip_binder() .iter() .cloned() .all(type_will_always_be_passed_directly) - { - return &[]; - } + { + return &[]; } // Don't deduce any attributes for functions that have no MIR. diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs index 4a94c3eca86..26b28c8c487 100644 --- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs +++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs @@ -9,12 +9,13 @@ use rustc_data_structures::fx::FxHashMap; use rustc_middle::mir::visit::MutVisitor; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::debug; use super::simplify::simplify_cfg; -pub struct DeduplicateBlocks; +pub(super) struct DeduplicateBlocks; -impl<'tcx> MirPass<'tcx> for DeduplicateBlocks { +impl<'tcx> crate::MirPass<'tcx> for DeduplicateBlocks { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 4 } @@ -68,8 +69,8 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> { // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes. 
// Then we will see that bb2 is a duplicate of bb3, // and insert bb2 with the replacement bb3 in the duplicates list. - // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list - // with replacement bb3. + // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the + // duplicates list with replacement bb3. // When the duplicates are removed, we will end up with only bb3. for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) { // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks @@ -97,14 +98,15 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> { duplicates } -struct BasicBlockHashable<'tcx, 'a> { +struct BasicBlockHashable<'a, 'tcx> { basic_block_data: &'a BasicBlockData<'tcx>, } impl Hash for BasicBlockHashable<'_, '_> { fn hash<H: Hasher>(&self, state: &mut H) { hash_statements(state, self.basic_block_data.statements.iter()); - // Note that since we only hash the kind, we lose span information if we deduplicate the blocks + // Note that since we only hash the kind, we lose span information if we deduplicate the + // blocks. self.basic_block_data.terminator().kind.hash(state); } } diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs index 0e2fccc85da..ad7ccef4976 100644 --- a/compiler/rustc_mir_transform/src/deref_separator.rs +++ b/compiler/rustc_mir_transform/src/deref_separator.rs @@ -1,16 +1,15 @@ -use rustc_index::IndexVec; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::visit::NonUseContext::VarDebugInfo; use rustc_middle::mir::visit::{MutVisitor, PlaceContext}; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; -pub struct Derefer; +pub(super) struct Derefer; -pub struct DerefChecker<'a, 'tcx> { +struct DerefChecker<'a, 'tcx> { tcx: TyCtxt<'tcx>, patcher: MirPatch<'tcx>, - local_decls: &'a IndexVec<Local, LocalDecl<'tcx>>, + local_decls: &'a LocalDecls<'tcx>, } impl<'a, 'tcx> MutVisitor<'tcx> for DerefChecker<'a, 'tcx> { @@ -67,7 +66,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for DerefChecker<'a, 'tcx> { } } -pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { +pub(super) fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let patch = MirPatch::new(body); let mut checker = DerefChecker { tcx, patcher: patch, local_decls: &body.local_decls }; @@ -78,7 +77,7 @@ pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { checker.patcher.apply(body); } -impl<'tcx> MirPass<'tcx> for Derefer { +impl<'tcx> crate::MirPass<'tcx> for Derefer { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { deref_finder(tcx, body); } diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs index ed924761892..f123658580d 100644 --- a/compiler/rustc_mir_transform/src/dest_prop.rs +++ b/compiler/rustc_mir_transform/src/dest_prop.rs @@ -38,7 +38,7 @@ //! not contain any indirection through a pointer or any indexing projections. //! //! * `p` and `q` must have the **same type**. If we replace a local with a subtype or supertype, -//! we may end up with a differnet vtable for that local. See the `subtyping-impacts-selection` +//! we may end up with a different vtable for that local. See the `subtyping-impacts-selection` //! tests for an example where that causes issues. //! //! 
* We need to make sure that the goal of "merging the memory" is actually structurally possible @@ -144,12 +144,11 @@ use rustc_middle::ty::TyCtxt; use rustc_mir_dataflow::impls::MaybeLiveLocals; use rustc_mir_dataflow::points::{save_as_intervals, DenseLocationMap, PointIndex}; use rustc_mir_dataflow::Analysis; +use tracing::{debug, trace}; -use crate::MirPass; +pub(super) struct DestinationPropagation; -pub struct DestinationPropagation; - -impl<'tcx> MirPass<'tcx> for DestinationPropagation { +impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { // For now, only run at MIR opt level 3. Two things need to be changed before this can be // turned on by default: @@ -164,7 +163,8 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let def_id = body.source.def_id(); - let mut allocations = Allocations::default(); + let mut candidates = Candidates::default(); + let mut write_info = WriteInfo::default(); trace!(func = ?tcx.def_path_str(def_id)); let borrowed = rustc_mir_dataflow::impls::borrowed_locals(body); @@ -192,12 +192,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation { loop { // PERF: Can we do something smarter than recalculating the candidates and liveness // results? - let mut candidates = find_candidates( - body, - &borrowed, - &mut allocations.candidates, - &mut allocations.candidates_reverse, - ); + candidates.reset_and_find(body, &borrowed); trace!(?candidates); dest_prop_mir_dump(tcx, body, &points, &live, round_count); @@ -205,7 +200,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation { &mut candidates, &points, &live, - &mut allocations.write_info, + &mut write_info, body, ); @@ -247,27 +242,15 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation { } round_count += 1; - apply_merges(body, tcx, &merges, &merged_locals); + apply_merges(body, tcx, merges, merged_locals); } trace!(round_count); } } -/// Container for the various allocations that we need. -/// -/// We store these here and hand out `&mut` access to them, instead of dropping and recreating them -/// frequently. Everything with a `&'alloc` lifetime points into here. -#[derive(Default)] -struct Allocations { - candidates: FxIndexMap<Local, Vec<Local>>, - candidates_reverse: FxIndexMap<Local, Vec<Local>>, - write_info: WriteInfo, - // PERF: Do this for `MaybeLiveLocals` allocations too. -} - -#[derive(Debug)] -struct Candidates<'alloc> { +#[derive(Debug, Default)] +struct Candidates { /// The set of candidates we are considering in this optimization. /// /// We will always merge the key into at most one of its values. @@ -282,11 +265,12 @@ struct Candidates<'alloc> { /// /// We will still report that we would like to merge `_1` and `_2` in an attempt to allow us to /// remove that assignment. - c: &'alloc mut FxIndexMap<Local, Vec<Local>>, + c: FxIndexMap<Local, Vec<Local>>, + /// A reverse index of the `c` set; if the `c` set contains `a => Place { local: b, proj }`, /// then this contains `b => a`. // PERF: Possibly these should be `SmallVec`s? 
- reverse: &'alloc mut FxIndexMap<Local, Vec<Local>>, + reverse: FxIndexMap<Local, Vec<Local>>, } ////////////////////////////////////////////////////////// @@ -297,20 +281,20 @@ struct Candidates<'alloc> { fn apply_merges<'tcx>( body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>, - merges: &FxIndexMap<Local, Local>, - merged_locals: &BitSet<Local>, + merges: FxIndexMap<Local, Local>, + merged_locals: BitSet<Local>, ) { let mut merger = Merger { tcx, merges, merged_locals }; merger.visit_body_preserves_cfg(body); } -struct Merger<'a, 'tcx> { +struct Merger<'tcx> { tcx: TyCtxt<'tcx>, - merges: &'a FxIndexMap<Local, Local>, - merged_locals: &'a BitSet<Local>, + merges: FxIndexMap<Local, Local>, + merged_locals: BitSet<Local>, } -impl<'a, 'tcx> MutVisitor<'tcx> for Merger<'a, 'tcx> { +impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } @@ -359,19 +343,40 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Merger<'a, 'tcx> { // // This section enforces bullet point 2 -struct FilterInformation<'a, 'body, 'alloc, 'tcx> { - body: &'body Body<'tcx>, +struct FilterInformation<'a, 'tcx> { + body: &'a Body<'tcx>, points: &'a DenseLocationMap, live: &'a SparseIntervalMatrix<Local, PointIndex>, - candidates: &'a mut Candidates<'alloc>, - write_info: &'alloc mut WriteInfo, + candidates: &'a mut Candidates, + write_info: &'a mut WriteInfo, at: Location, } // We first implement some utility functions which we will expose removing candidates according to // different needs. Throughout the liveness filtering, the `candidates` are only ever accessed // through these methods, and not directly. -impl<'alloc> Candidates<'alloc> { +impl Candidates { + /// Collects the candidates for merging. + /// + /// This is responsible for enforcing the first and third bullet point. + fn reset_and_find<'tcx>(&mut self, body: &Body<'tcx>, borrowed: &BitSet<Local>) { + self.c.clear(); + self.reverse.clear(); + let mut visitor = FindAssignments { body, candidates: &mut self.c, borrowed }; + visitor.visit_body(body); + // Deduplicate candidates. + for (_, cands) in self.c.iter_mut() { + cands.sort(); + cands.dedup(); + } + // Generate the reverse map. + for (src, cands) in self.c.iter() { + for dest in cands.iter().copied() { + self.reverse.entry(dest).or_default().push(*src); + } + } + } + /// Just `Vec::retain`, but the condition is inverted and we add debugging output fn vec_filter_candidates( src: Local, @@ -446,7 +451,7 @@ enum CandidateFilter { Remove, } -impl<'a, 'body, 'alloc, 'tcx> FilterInformation<'a, 'body, 'alloc, 'tcx> { +impl<'a, 'tcx> FilterInformation<'a, 'tcx> { /// Filters the set of candidates to remove those that conflict. /// /// The steps we take are exactly those that are outlined at the top of the file. For each @@ -464,12 +469,12 @@ impl<'a, 'body, 'alloc, 'tcx> FilterInformation<'a, 'body, 'alloc, 'tcx> { /// before the statement/terminator will correctly report locals that are read in the /// statement/terminator to be live. We are additionally conservative by treating all written to /// locals as also being read from. 
- fn filter_liveness<'b>( - candidates: &mut Candidates<'alloc>, + fn filter_liveness( + candidates: &mut Candidates, points: &DenseLocationMap, live: &SparseIntervalMatrix<Local, PointIndex>, - write_info_alloc: &'alloc mut WriteInfo, - body: &'b Body<'tcx>, + write_info: &mut WriteInfo, + body: &Body<'tcx>, ) { let mut this = FilterInformation { body, @@ -478,7 +483,7 @@ impl<'a, 'body, 'alloc, 'tcx> FilterInformation<'a, 'body, 'alloc, 'tcx> { candidates, // We don't actually store anything at this scope, we just keep things here to be able // to reuse the allocation. - write_info: write_info_alloc, + write_info, // Doesn't matter what we put here, will be overwritten before being used at: Location::START, }; @@ -735,40 +740,13 @@ fn places_to_candidate_pair<'tcx>( Some((a, b)) } -/// Collects the candidates for merging -/// -/// This is responsible for enforcing the first and third bullet point. -fn find_candidates<'alloc, 'tcx>( - body: &Body<'tcx>, - borrowed: &BitSet<Local>, - candidates: &'alloc mut FxIndexMap<Local, Vec<Local>>, - candidates_reverse: &'alloc mut FxIndexMap<Local, Vec<Local>>, -) -> Candidates<'alloc> { - candidates.clear(); - candidates_reverse.clear(); - let mut visitor = FindAssignments { body, candidates, borrowed }; - visitor.visit_body(body); - // Deduplicate candidates - for (_, cands) in candidates.iter_mut() { - cands.sort(); - cands.dedup(); - } - // Generate the reverse map - for (src, cands) in candidates.iter() { - for dest in cands.iter().copied() { - candidates_reverse.entry(dest).or_default().push(*src); - } - } - Candidates { c: candidates, reverse: candidates_reverse } -} - -struct FindAssignments<'a, 'alloc, 'tcx> { +struct FindAssignments<'a, 'tcx> { body: &'a Body<'tcx>, - candidates: &'alloc mut FxIndexMap<Local, Vec<Local>>, + candidates: &'a mut FxIndexMap<Local, Vec<Local>>, borrowed: &'a BitSet<Local>, } -impl<'tcx> Visitor<'tcx> for FindAssignments<'_, '_, 'tcx> { +impl<'tcx> Visitor<'tcx> for FindAssignments<'_, 'tcx> { fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) { if let StatementKind::Assign(box ( lhs, @@ -820,9 +798,9 @@ fn is_local_required(local: Local, body: &Body<'_>) -> bool { ///////////////////////////////////////////////////////// // MIR Dump -fn dest_prop_mir_dump<'body, 'tcx>( +fn dest_prop_mir_dump<'tcx>( tcx: TyCtxt<'tcx>, - body: &'body Body<'tcx>, + body: &Body<'tcx>, points: &DenseLocationMap, live: &SparseIntervalMatrix<Local, PointIndex>, round: usize, diff --git a/compiler/rustc_mir_transform/src/dump_mir.rs b/compiler/rustc_mir_transform/src/dump_mir.rs index 29db45f9450..1640d34d6c8 100644 --- a/compiler/rustc_mir_transform/src/dump_mir.rs +++ b/compiler/rustc_mir_transform/src/dump_mir.rs @@ -7,11 +7,9 @@ use rustc_middle::mir::{write_mir_pretty, Body}; use rustc_middle::ty::TyCtxt; use rustc_session::config::{OutFileName, OutputType}; -use crate::MirPass; +pub(super) struct Marker(pub &'static str); -pub struct Marker(pub &'static str); - -impl<'tcx> MirPass<'tcx> for Marker { +impl<'tcx> crate::MirPass<'tcx> for Marker { fn name(&self) -> &'static str { self.0 } diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs index 49e41c35f1f..704ed508b22 100644 --- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs +++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs @@ -3,6 +3,7 @@ use std::fmt::Debug; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; use rustc_middle::ty::{Ty, 
TyCtxt}; +use tracing::trace; use super::simplify::simplify_cfg; @@ -89,9 +90,9 @@ use super::simplify::simplify_cfg; /// | ... | /// ================= /// ``` -pub struct EarlyOtherwiseBranch; +pub(super) struct EarlyOtherwiseBranch; -impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { +impl<'tcx> crate::MirPass<'tcx> for EarlyOtherwiseBranch { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 2 } @@ -260,8 +261,8 @@ fn evaluate_candidate<'tcx>( // }; // ``` // - // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an - // invalid value, which is UB. + // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant + // of an invalid value, which is UB. // In order to fix this, **we would either need to show that the discriminant computation of // `place` is computed in all branches**. // FIXME(#95162) For the moment, we adopt a conservative approach and @@ -309,42 +310,28 @@ fn verify_candidate_branch<'tcx>( ) -> bool { // In order for the optimization to be correct, the branch must... // ...have exactly one statement - let [statement] = branch.statements.as_slice() else { - return false; - }; - // ...assign the discriminant of `place` in that statement - let StatementKind::Assign(boxed) = &statement.kind else { return false }; - let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else { return false }; - if *from_place != place { - return false; - } - // ...make that assignment to a local - if discr_place.projection.len() != 0 { - return false; - } - // ...terminate on a `SwitchInt` that invalidates that local - let TerminatorKind::SwitchInt { discr: switch_op, targets, .. } = &branch.terminator().kind - else { - return false; - }; - if *switch_op != Operand::Move(*discr_place) { - return false; - } - // ...fall through to `destination` if the switch misses - if destination != targets.otherwise() { - return false; - } - // ...have a branch for value `value` - let mut iter = targets.iter(); - let Some((target_value, _)) = iter.next() else { - return false; - }; - if target_value != value { - return false; - } - // ...and have no more branches - if let Some(_) = iter.next() { - return false; + if let [statement] = branch.statements.as_slice() + // ...assign the discriminant of `place` in that statement + && let StatementKind::Assign(boxed) = &statement.kind + && let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed + && *from_place == place + // ...make that assignment to a local + && discr_place.projection.is_empty() + // ...terminate on a `SwitchInt` that invalidates that local + && let TerminatorKind::SwitchInt { discr: switch_op, targets, .. 
} = + &branch.terminator().kind + && *switch_op == Operand::Move(*discr_place) + // ...fall through to `destination` if the switch misses + && destination == targets.otherwise() + // ...have a branch for value `value` + && let mut iter = targets.iter() + && let Some((target_value, _)) = iter.next() + && target_value == value + // ...and have no more branches + && iter.next().is_none() + { + true + } else { + false } - true } diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs index e5778f8a05d..57f7a9ef7f5 100644 --- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs +++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs @@ -11,7 +11,7 @@ use rustc_middle::ty::{Ty, TyCtxt}; use rustc_target::abi::FieldIdx; /// Constructs the types used when accessing a Box's pointer -pub fn build_ptr_tys<'tcx>( +fn build_ptr_tys<'tcx>( tcx: TyCtxt<'tcx>, pointee: Ty<'tcx>, unique_did: DefId, @@ -26,7 +26,7 @@ pub fn build_ptr_tys<'tcx>( } /// Constructs the projection needed to access a Box's pointer -pub fn build_projection<'tcx>( +pub(super) fn build_projection<'tcx>( unique_ty: Ty<'tcx>, nonnull_ty: Ty<'tcx>, ptr_ty: Ty<'tcx>, @@ -38,7 +38,7 @@ pub fn build_projection<'tcx>( ] } -struct ElaborateBoxDerefVisitor<'tcx, 'a> { +struct ElaborateBoxDerefVisitor<'a, 'tcx> { tcx: TyCtxt<'tcx>, unique_did: DefId, nonnull_did: DefId, @@ -46,7 +46,7 @@ struct ElaborateBoxDerefVisitor<'tcx, 'a> { patch: MirPatch<'tcx>, } -impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> { +impl<'a, 'tcx> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'a, 'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } @@ -62,11 +62,13 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> { let base_ty = self.local_decls[place.local].ty; // Derefer ensures that derefs are always the first projection - if place.projection.first() == Some(&PlaceElem::Deref) && base_ty.is_box() { + if let Some(PlaceElem::Deref) = place.projection.first() + && let Some(boxed_ty) = base_ty.boxed_ty() + { let source_info = self.local_decls[place.local].source_info; let (unique_ty, nonnull_ty, ptr_ty) = - build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did); + build_ptr_tys(tcx, boxed_ty, self.unique_did, self.nonnull_did); let ptr_local = self.patch.new_temp(ptr_ty, source_info.span); @@ -86,66 +88,65 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> { } } -pub struct ElaborateBoxDerefs; +pub(super) struct ElaborateBoxDerefs; -impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs { +impl<'tcx> crate::MirPass<'tcx> for ElaborateBoxDerefs { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - if let Some(def_id) = tcx.lang_items().owned_box() { - let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::ZERO].did; + // If box is not present, this pass doesn't need to do anything. 
+ let Some(def_id) = tcx.lang_items().owned_box() else { return }; - let Some(nonnull_def) = tcx.type_of(unique_did).instantiate_identity().ty_adt_def() - else { - span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique") - }; + let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::ZERO].did; - let nonnull_did = nonnull_def.non_enum_variant().fields[FieldIdx::ZERO].did; + let Some(nonnull_def) = tcx.type_of(unique_did).instantiate_identity().ty_adt_def() else { + span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique") + }; - let patch = MirPatch::new(body); + let nonnull_did = nonnull_def.non_enum_variant().fields[FieldIdx::ZERO].did; - let local_decls = &mut body.local_decls; + let patch = MirPatch::new(body); - let mut visitor = - ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch }; + let local_decls = &mut body.local_decls; - for (block, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { - visitor.visit_basic_block_data(block, data); - } + let mut visitor = + ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch }; - visitor.patch.apply(body); + for (block, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { + visitor.visit_basic_block_data(block, data); + } - for debug_info in body.var_debug_info.iter_mut() { - if let VarDebugInfoContents::Place(place) = &mut debug_info.value { - let mut new_projections: Option<Vec<_>> = None; + visitor.patch.apply(body); - for (base, elem) in place.iter_projections() { - let base_ty = base.ty(&body.local_decls, tcx).ty; + for debug_info in body.var_debug_info.iter_mut() { + if let VarDebugInfoContents::Place(place) = &mut debug_info.value { + let mut new_projections: Option<Vec<_>> = None; - if elem == PlaceElem::Deref && base_ty.is_box() { - // Clone the projections before us, since now we need to mutate them. - let new_projections = - new_projections.get_or_insert_with(|| base.projection.to_vec()); + for (base, elem) in place.iter_projections() { + let base_ty = base.ty(&body.local_decls, tcx).ty; - let (unique_ty, nonnull_ty, ptr_ty) = - build_ptr_tys(tcx, base_ty.boxed_ty(), unique_did, nonnull_did); + if let PlaceElem::Deref = elem + && let Some(boxed_ty) = base_ty.boxed_ty() + { + // Clone the projections before us, since now we need to mutate them. + let new_projections = + new_projections.get_or_insert_with(|| base.projection.to_vec()); - new_projections.extend_from_slice(&build_projection( - unique_ty, nonnull_ty, ptr_ty, - )); - new_projections.push(PlaceElem::Deref); - } else if let Some(new_projections) = new_projections.as_mut() { - // Keep building up our projections list once we've started it. - new_projections.push(elem); - } - } + let (unique_ty, nonnull_ty, ptr_ty) = + build_ptr_tys(tcx, boxed_ty, unique_did, nonnull_did); - // Store the mutated projections if we actually changed something. - if let Some(new_projections) = new_projections { - place.projection = tcx.mk_place_elems(&new_projections); + new_projections + .extend_from_slice(&build_projection(unique_ty, nonnull_ty, ptr_ty)); + new_projections.push(PlaceElem::Deref); + } else if let Some(new_projections) = new_projections.as_mut() { + // Keep building up our projections list once we've started it. + new_projections.push(elem); } } + + // Store the mutated projections if we actually changed something. 
+ if let Some(new_projections) = new_projections { + place.projection = tcx.mk_place_elems(&new_projections); + } } - } else { - // box is not present, this pass doesn't need to do anything } } } diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index 5a22ef77903..42d13fa3613 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -15,12 +15,13 @@ use rustc_mir_dataflow::{ }; use rustc_span::Span; use rustc_target::abi::{FieldIdx, VariantIdx}; +use tracing::{debug, instrument}; use crate::deref_separator::deref_finder; /// During MIR building, Drop terminators are inserted in every place where a drop may occur. -/// However, in this phase, the presence of these terminators does not guarantee that a destructor will run, -/// as the target of the drop may be uninitialized. +/// However, in this phase, the presence of these terminators does not guarantee that a destructor +/// will run, as the target of the drop may be uninitialized. /// In general, the compiler cannot determine at compile time whether a destructor will run or not. /// /// At a high level, this pass refines Drop to only run the destructor if the @@ -29,10 +30,10 @@ use crate::deref_separator::deref_finder; /// Once this is complete, Drop terminators in the MIR correspond to a call to the "drop glue" or /// "drop shim" for the type of the dropped place. /// -/// This pass relies on dropped places having an associated move path, which is then used to determine -/// the initialization status of the place and its descendants. -/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill formed, -/// as it would allow running a destructor on a place behind a reference: +/// This pass relies on dropped places having an associated move path, which is then used to +/// determine the initialization status of the place and its descendants. +/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill +/// formed, as it would allow running a destructor on a place behind a reference: /// /// ```text /// fn drop_term<T>(t: &mut T) { @@ -46,9 +47,9 @@ use crate::deref_separator::deref_finder; /// } /// } /// ``` -pub struct ElaborateDrops; +pub(super) struct ElaborateDrops; -impl<'tcx> MirPass<'tcx> for ElaborateDrops { +impl<'tcx> crate::MirPass<'tcx> for ElaborateDrops { #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!("elaborate_drops({:?} @ {:?})", body.source, body.span); @@ -97,9 +98,9 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops { /// Records unwind edges which are known to be unreachable, because they are in `drop` terminators /// that can't drop anything. #[instrument(level = "trace", skip(body, flow_inits), ret)] -fn compute_dead_unwinds<'mir, 'tcx>( - body: &'mir Body<'tcx>, - flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'_, 'mir, 'tcx>>, +fn compute_dead_unwinds<'a, 'tcx>( + body: &'a Body<'tcx>, + flow_inits: &mut ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, ) -> BitSet<BasicBlock> { // We only need to do this pass once, because unwind edges can only // reach cleanup blocks, which can't have unwind edges themselves. 
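To make the ElaborateDrops doc comment above concrete, here is a minimal sketch (illustrative, not taken from this diff; names are hypothetical) of a body where whether the destructor runs is only known at runtime, which is exactly the case drop elaboration handles by introducing a drop flag:

struct Noisy;

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropped");
    }
}

fn maybe_drop(cond: bool) {
    let s = Noisy;
    if cond {
        drop(s); // moves `s` out, so the scope-end drop must not run again
    }
    // Drop elaboration lowers the scope-end `Drop(s)` terminator to roughly
    // `if s_flag { drop_in_place(&mut s) }`, where `s_flag` is a synthetic
    // local that the `drop(s)` call above clears.
}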
@@ -120,12 +121,12 @@ fn compute_dead_unwinds<'mir, 'tcx>( dead_unwinds } -struct InitializationData<'a, 'mir, 'tcx> { - inits: ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'a, 'mir, 'tcx>>, - uninits: ResultsCursor<'mir, 'tcx, MaybeUninitializedPlaces<'a, 'mir, 'tcx>>, +struct InitializationData<'a, 'tcx> { + inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, + uninits: ResultsCursor<'a, 'tcx, MaybeUninitializedPlaces<'a, 'tcx>>, } -impl InitializationData<'_, '_, '_> { +impl InitializationData<'_, '_> { fn seek_before(&mut self, loc: Location) { self.inits.seek_before_primary_effect(loc); self.uninits.seek_before_primary_effect(loc); @@ -136,45 +137,35 @@ impl InitializationData<'_, '_, '_> { } } -struct Elaborator<'a, 'b, 'mir, 'tcx> { - ctxt: &'a mut ElaborateDropsCtxt<'b, 'mir, 'tcx>, -} - -impl fmt::Debug for Elaborator<'_, '_, '_, '_> { - fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { - Ok(()) - } -} - -impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, '_, 'tcx> { +impl<'a, 'tcx> DropElaborator<'a, 'tcx> for ElaborateDropsCtxt<'a, 'tcx> { type Path = MovePathIndex; fn patch(&mut self) -> &mut MirPatch<'tcx> { - &mut self.ctxt.patch + &mut self.patch } fn body(&self) -> &'a Body<'tcx> { - self.ctxt.body + self.body } fn tcx(&self) -> TyCtxt<'tcx> { - self.ctxt.tcx + self.tcx } fn param_env(&self) -> ty::ParamEnv<'tcx> { - self.ctxt.param_env() + self.param_env() } #[instrument(level = "debug", skip(self), ret)] fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle { let ((maybe_live, maybe_dead), multipart) = match mode { - DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false), + DropFlagMode::Shallow => (self.init_data.maybe_live_dead(path), false), DropFlagMode::Deep => { let mut some_live = false; let mut some_dead = false; let mut children_count = 0; - on_all_children_bits(self.ctxt.move_data(), path, |child| { - let (live, dead) = self.ctxt.init_data.maybe_live_dead(child); + on_all_children_bits(self.move_data(), path, |child| { + let (live, dead) = self.init_data.maybe_live_dead(child); debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead)); some_live |= live; some_dead |= dead; @@ -194,25 +185,25 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, '_, 'tcx> { fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) { match mode { DropFlagMode::Shallow => { - self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent); + self.set_drop_flag(loc, path, DropFlagState::Absent); } DropFlagMode::Deep => { - on_all_children_bits(self.ctxt.move_data(), path, |child| { - self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent) + on_all_children_bits(self.move_data(), path, |child| { + self.set_drop_flag(loc, child, DropFlagState::Absent) }); } } } fn field_subpath(&self, path: Self::Path, field: FieldIdx) -> Option<Self::Path> { - rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e { + rustc_mir_dataflow::move_path_children_matching(self.move_data(), path, |e| match e { ProjectionElem::Field(idx, _) => idx == field, _ => false, }) } fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path> { - rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e { + rustc_mir_dataflow::move_path_children_matching(self.move_data(), path, |e| match e { ProjectionElem::ConstantIndex { offset, min_length, from_end } => { debug_assert!(size == min_length, "min_length should 
be exact for arrays"); assert!(!from_end, "from_end should not be used for array element ConstantIndex"); @@ -223,34 +214,40 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, '_, 'tcx> { } fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> { - rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| { + rustc_mir_dataflow::move_path_children_matching(self.move_data(), path, |e| { e == ProjectionElem::Deref }) } fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path> { - rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e { + rustc_mir_dataflow::move_path_children_matching(self.move_data(), path, |e| match e { ProjectionElem::Downcast(_, idx) => idx == variant, _ => false, }) } fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> { - self.ctxt.drop_flag(path).map(Operand::Copy) + self.drop_flag(path).map(Operand::Copy) } } -struct ElaborateDropsCtxt<'a, 'mir, 'tcx> { +struct ElaborateDropsCtxt<'a, 'tcx> { tcx: TyCtxt<'tcx>, - body: &'mir Body<'tcx>, + body: &'a Body<'tcx>, env: &'a MoveDataParamEnv<'tcx>, - init_data: InitializationData<'a, 'mir, 'tcx>, + init_data: InitializationData<'a, 'tcx>, drop_flags: IndexVec<MovePathIndex, Option<Local>>, patch: MirPatch<'tcx>, } -impl<'b, 'mir, 'tcx> ElaborateDropsCtxt<'b, 'mir, 'tcx> { - fn move_data(&self) -> &'b MoveData<'tcx> { +impl fmt::Debug for ElaborateDropsCtxt<'_, '_> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> { + fn move_data(&self) -> &'a MoveData<'tcx> { &self.env.move_data } @@ -369,15 +366,7 @@ impl<'b, 'mir, 'tcx> ElaborateDropsCtxt<'b, 'mir, 'tcx> { } }; self.init_data.seek_before(self.body.terminator_loc(bb)); - elaborate_drop( - &mut Elaborator { ctxt: self }, - terminator.source_info, - place, - path, - target, - unwind, - bb, - ) + elaborate_drop(self, terminator.source_info, place, path, target, unwind, bb) } LookupResult::Parent(None) => {} LookupResult::Parent(Some(_)) => { @@ -388,8 +377,8 @@ impl<'b, 'mir, 'tcx> ElaborateDropsCtxt<'b, 'mir, 'tcx> { ); } // A drop and replace behind a pointer/array/whatever. - // The borrow checker requires that these locations are initialized before the assignment, - // so we just leave an unconditional drop. + // The borrow checker requires that these locations are initialized before the + // assignment, so we just leave an unconditional drop. 
assert!(!data.is_cleanup); } } diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs index 2703dc57cda..4550ef06315 100644 --- a/compiler/rustc_mir_transform/src/errors.rs +++ b/compiler/rustc_mir_transform/src/errors.rs @@ -64,7 +64,7 @@ impl<'a, P: std::fmt::Debug> LintDiagnostic<'a, ()> for AssertLint<P> { } impl AssertLintKind { - pub fn lint(&self) -> &'static Lint { + pub(crate) fn lint(&self) -> &'static Lint { match self { AssertLintKind::ArithmeticOverflow => lint::builtin::ARITHMETIC_OVERFLOW, AssertLintKind::UnconditionalPanic => lint::builtin::UNCONDITIONAL_PANIC, @@ -89,7 +89,7 @@ pub(crate) struct FnItemRef { pub ident: String, } -pub(crate) struct MustNotSupend<'tcx, 'a> { +pub(crate) struct MustNotSupend<'a, 'tcx> { pub tcx: TyCtxt<'tcx>, pub yield_sp: Span, pub reason: Option<MustNotSuspendReason>, diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs index 9a2cc057232..8490b7e2358 100644 --- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs +++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs @@ -6,6 +6,7 @@ use rustc_middle::{bug, span_bug}; use rustc_session::lint::builtin::FFI_UNWIND_CALLS; use rustc_target::spec::abi::Abi; use rustc_target::spec::PanicStrategy; +use tracing::debug; use crate::errors; @@ -59,8 +60,9 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool { let fn_def_id = match ty.kind() { ty::FnPtr(..) => None, &ty::FnDef(def_id, _) => { - // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust ABI). - // So the leak of the foreign unwind into Rust can only be elsewhere, not here. + // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust + // ABI). So the leak of the foreign unwind into Rust can only be elsewhere, not + // here. if !tcx.is_foreign_item(def_id) { continue; } diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs index b7873e73c18..eb09a7d3b21 100644 --- a/compiler/rustc_mir_transform/src/function_item_references.rs +++ b/compiler/rustc_mir_transform/src/function_item_references.rs @@ -9,11 +9,11 @@ use rustc_span::symbol::sym; use rustc_span::Span; use rustc_target::spec::abi::Abi; -use crate::{errors, MirLint}; +use crate::errors; -pub struct FunctionItemReferences; +pub(super) struct FunctionItemReferences; -impl<'tcx> MirLint<'tcx> for FunctionItemReferences { +impl<'tcx> crate::MirLint<'tcx> for FunctionItemReferences { fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { let mut checker = FunctionItemRefChecker { tcx, body }; checker.visit_body(body); @@ -92,8 +92,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { { let mut span = self.nth_arg_span(args, arg_num); if span.from_expansion() { - // The operand's ctxt wouldn't display the lint since it's inside a macro so - // we have to use the callsite's ctxt. + // The operand's ctxt wouldn't display the lint since it's + // inside a macro so we have to use the callsite's ctxt. 
let callsite_ctxt = span.source_callsite().ctxt(); span = span.with_ctxt(callsite_ctxt); } diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 90e3ba26a43..0f08d920c5e 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -105,12 +105,13 @@ use rustc_span::def_id::DefId; use rustc_span::DUMMY_SP; use rustc_target::abi::{self, Abi, FieldIdx, Size, VariantIdx, FIRST_VARIANT}; use smallvec::SmallVec; +use tracing::{debug, instrument, trace}; use crate::ssa::{AssignedValue, SsaLocals}; -pub struct GVN; +pub(super) struct GVN; -impl<'tcx> MirPass<'tcx> for GVN { +impl<'tcx> crate::MirPass<'tcx> for GVN { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 2 } @@ -118,53 +119,50 @@ impl<'tcx> MirPass<'tcx> for GVN { #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!(def_id = ?body.source.def_id()); - propagate_ssa(tcx, body); - } -} -fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); - let ssa = SsaLocals::new(tcx, body, param_env); - // Clone dominators as we need them while mutating the body. - let dominators = body.basic_blocks.dominators().clone(); - - let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls); - ssa.for_each_assignment_mut( - body.basic_blocks.as_mut_preserves_cfg(), - |local, value, location| { - let value = match value { - // We do not know anything of this assigned value. - AssignedValue::Arg | AssignedValue::Terminator => None, - // Try to get some insight. - AssignedValue::Rvalue(rvalue) => { - let value = state.simplify_rvalue(rvalue, location); - // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as - // reusable if we have an exact type match. - if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) { - return; + let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); + let ssa = SsaLocals::new(tcx, body, param_env); + // Clone dominators because we need them while mutating the body. + let dominators = body.basic_blocks.dominators().clone(); + + let mut state = VnState::new(tcx, body, param_env, &ssa, dominators, &body.local_decls); + ssa.for_each_assignment_mut( + body.basic_blocks.as_mut_preserves_cfg(), + |local, value, location| { + let value = match value { + // We do not know anything of this assigned value. + AssignedValue::Arg | AssignedValue::Terminator => None, + // Try to get some insight. + AssignedValue::Rvalue(rvalue) => { + let value = state.simplify_rvalue(rvalue, location); + // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark + // `local` as reusable if we have an exact type match. + if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) { + return; + } + value } - value - } - }; - // `next_opaque` is `Some`, so `new_opaque` must return `Some`. - let value = value.or_else(|| state.new_opaque()).unwrap(); - state.assign(local, value); - }, - ); - - // Stop creating opaques during replacement as it is useless. - state.next_opaque = None; - - let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec(); - for bb in reverse_postorder { - let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb]; - state.visit_basic_block_data(bb, data); - } + }; + // `next_opaque` is `Some`, so `new_opaque` must return `Some`. 
+ let value = value.or_else(|| state.new_opaque()).unwrap(); + state.assign(local, value); + }, + ); - // For each local that is reused (`y` above), we remove its storage statements do avoid any - // difficulty. Those locals are SSA, so should be easy to optimize by LLVM without storage - // statements. - StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body); + // Stop creating opaques during replacement as it is useless. + state.next_opaque = None; + + let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec(); + for bb in reverse_postorder { + let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb]; + state.visit_basic_block_data(bb, data); + } + + // For each local that is reused (`y` above), we remove its storage statements to avoid any + // difficulty. Those locals are SSA, so should be easy to optimize by LLVM without storage + // statements. + StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body); + } } newtype_index! { @@ -260,7 +258,7 @@ struct VnState<'body, 'tcx> { /// Cache the value of the `unsized_locals` feature, to avoid fetching it repeatedly in a loop. feature_unsized_locals: bool, ssa: &'body SsaLocals, - dominators: &'body Dominators<BasicBlock>, + dominators: Dominators<BasicBlock>, reused_locals: BitSet<Local>, } @@ -270,7 +268,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { body: &Body<'tcx>, param_env: ty::ParamEnv<'tcx>, ssa: &'body SsaLocals, - dominators: &'body Dominators<BasicBlock>, + dominators: Dominators<BasicBlock>, local_decls: &'body LocalDecls<'tcx>, ) -> Self { // Compute a rough estimate of the number of values in the body from the number of @@ -479,7 +477,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let pointer = self.evaluated[local].as_ref()?; let mut mplace = self.ecx.deref_pointer(pointer).ok()?; for proj in place.projection.iter().skip(1) { - // We have no call stack to associate a local with a value, so we cannot interpret indexing. + // We have no call stack to associate a local with a value, so we cannot + // interpret indexing. 
if matches!(proj, ProjectionElem::Index(_)) { return None; } @@ -876,6 +875,95 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { None } + fn try_as_place_elem( + &mut self, + proj: ProjectionElem<VnIndex, Ty<'tcx>>, + loc: Location, + ) -> Option<PlaceElem<'tcx>> { + Some(match proj { + ProjectionElem::Deref => ProjectionElem::Deref, + ProjectionElem::Field(idx, ty) => ProjectionElem::Field(idx, ty), + ProjectionElem::Index(idx) => { + let Some(local) = self.try_as_local(idx, loc) else { + return None; + }; + self.reused_locals.insert(local); + ProjectionElem::Index(local) + } + ProjectionElem::ConstantIndex { offset, min_length, from_end } => { + ProjectionElem::ConstantIndex { offset, min_length, from_end } + } + ProjectionElem::Subslice { from, to, from_end } => { + ProjectionElem::Subslice { from, to, from_end } + } + ProjectionElem::Downcast(symbol, idx) => ProjectionElem::Downcast(symbol, idx), + ProjectionElem::OpaqueCast(idx) => ProjectionElem::OpaqueCast(idx), + ProjectionElem::Subtype(idx) => ProjectionElem::Subtype(idx), + }) + } + + fn simplify_aggregate_to_copy( + &mut self, + rvalue: &mut Rvalue<'tcx>, + location: Location, + fields: &[VnIndex], + variant_index: VariantIdx, + ) -> Option<VnIndex> { + let Some(&first_field) = fields.first() else { + return None; + }; + let Value::Projection(copy_from_value, _) = *self.get(first_field) else { + return None; + }; + // All fields must correspond one-to-one and come from the same aggregate value. + if fields.iter().enumerate().any(|(index, &v)| { + if let Value::Projection(pointer, ProjectionElem::Field(from_index, _)) = *self.get(v) + && copy_from_value == pointer + && from_index.index() == index + { + return false; + } + true + }) { + return None; + } + + let mut copy_from_local_value = copy_from_value; + if let Value::Projection(pointer, proj) = *self.get(copy_from_value) + && let ProjectionElem::Downcast(_, read_variant) = proj + { + if variant_index == read_variant { + // When copying a variant, there is no need to downcast. + copy_from_local_value = pointer; + } else { + // The copied variant must be identical. + return None; + } + } + + let tcx = self.tcx; + let mut projection = SmallVec::<[PlaceElem<'tcx>; 1]>::new(); + loop { + if let Some(local) = self.try_as_local(copy_from_local_value, location) { + projection.reverse(); + let place = Place { local, projection: tcx.mk_place_elems(projection.as_slice()) }; + if rvalue.ty(self.local_decls, tcx) == place.ty(self.local_decls, tcx).ty { + self.reused_locals.insert(local); + *rvalue = Rvalue::Use(Operand::Copy(place)); + return Some(copy_from_value); + } + return None; + } else if let Value::Projection(pointer, proj) = *self.get(copy_from_local_value) + && let Some(proj) = self.try_as_place_elem(proj, location) + { + projection.push(proj); + copy_from_local_value = pointer; + } else { + return None; + } + } + } + fn simplify_aggregate( &mut self, rvalue: &mut Rvalue<'tcx>, @@ -972,6 +1060,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } + if let AggregateTy::Def(_, _) = ty + && let Some(value) = + self.simplify_aggregate_to_copy(rvalue, location, &fields, variant_index) + { + return Some(value); + } + Some(self.insert(Value::Aggregate(ty, variant_index, fields))) } @@ -1381,7 +1476,8 @@ fn op_to_prop_const<'tcx>( return Some(ConstValue::ZeroSized); } - // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to avoid. + // Do not synthesize too large constants. Codegen will just memcpy them, which we'd like to + // avoid. 
if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { return None; } @@ -1485,12 +1581,12 @@ impl<'tcx> VnState<'_, 'tcx> { } /// If there is a local which is assigned `index`, and its assignment strictly dominates `loc`, - /// return it. + /// return it. If you used this local, add it to `reused_locals` to remove storage statements. fn try_as_local(&mut self, index: VnIndex, loc: Location) -> Option<Local> { let other = self.rev_locals.get(index)?; other .iter() - .find(|&&other| self.ssa.assignment_dominates(self.dominators, other, loc)) + .find(|&&other| self.ssa.assignment_dominates(&self.dominators, other, loc)) .copied() } } diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 61fc5fc8816..2de75e2ef50 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -20,6 +20,7 @@ use rustc_span::source_map::Spanned; use rustc_span::sym; use rustc_target::abi::FieldIdx; use rustc_target::spec::abi::Abi; +use tracing::{debug, instrument, trace, trace_span}; use crate::cost_checker::CostChecker; use crate::deref_separator::deref_finder; @@ -31,9 +32,11 @@ pub(crate) mod cycle; const TOP_DOWN_DEPTH_LIMIT: usize = 5; +// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden +// by custom rustc drivers, running all the steps by themselves. See #114628. pub struct Inline; -#[derive(Copy, Clone, Debug)] +#[derive(Clone, Debug)] struct CallSite<'tcx> { callee: Instance<'tcx>, fn_sig: ty::PolyFnSig<'tcx>, @@ -41,7 +44,7 @@ struct CallSite<'tcx> { source_info: SourceInfo, } -impl<'tcx> MirPass<'tcx> for Inline { +impl<'tcx> crate::MirPass<'tcx> for Inline { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { // FIXME(#127234): Coverage instrumentation currently doesn't handle inlined // MIR correctly when Modified Condition/Decision Coverage is enabled. @@ -155,7 +158,6 @@ impl<'tcx> Inliner<'tcx> { match self.try_inlining(caller_body, &callsite) { Err(reason) => { debug!("not-inlined {} [{}]", callsite.callee, reason); - continue; } Ok(new_blocks) => { debug!("inlined {}", callsite.callee); @@ -341,7 +343,6 @@ impl<'tcx> Inliner<'tcx> { | InstanceKind::FnPtrShim(..) | InstanceKind::ClosureOnceShim { .. } | InstanceKind::ConstructCoroutineInClosureShim { .. } - | InstanceKind::CoroutineKindShim { .. } | InstanceKind::DropGlue(..) | InstanceKind::CloneShim(..) | InstanceKind::ThreadLocalShim(..) @@ -356,16 +357,6 @@ impl<'tcx> Inliner<'tcx> { } if callee_def_id.is_local() { - // Avoid a cycle here by only using `instance_mir` only if we have - // a lower `DefPathHash` than the callee. This ensures that the callee will - // not inline us. This trick even works with incremental compilation, - // since `DefPathHash` is stable. - if self.tcx.def_path_hash(caller_def_id).local_hash() - < self.tcx.def_path_hash(callee_def_id).local_hash() - { - return Ok(()); - } - // If we know for sure that the function we're calling will itself try to // call us, then we avoid inlining that function. if self.tcx.mir_callgraph_reachable((callee, caller_def_id.expect_local())) { @@ -567,7 +558,8 @@ impl<'tcx> Inliner<'tcx> { // if the no-attribute function ends up with the same instruction set anyway. return Err("Cannot move inline-asm across instruction sets"); } else if let TerminatorKind::TailCall { .. 
} = term.kind { - // FIXME(explicit_tail_calls): figure out how exactly functions containing tail calls can be inlined (and if they even should) + // FIXME(explicit_tail_calls): figure out how exactly functions containing tail + // calls can be inlined (and if they even should) return Err("can't inline functions with tail calls"); } else { work_list.extend(term.successors()) @@ -638,7 +630,7 @@ impl<'tcx> Inliner<'tcx> { ); let dest_ty = dest.ty(caller_body, self.tcx); let temp = - Place::from(self.new_call_temp(caller_body, &callsite, dest_ty, return_block)); + Place::from(self.new_call_temp(caller_body, callsite, dest_ty, return_block)); caller_body[callsite.block].statements.push(Statement { source_info: callsite.source_info, kind: StatementKind::Assign(Box::new((temp, dest))), @@ -657,7 +649,7 @@ impl<'tcx> Inliner<'tcx> { true, self.new_call_temp( caller_body, - &callsite, + callsite, destination.ty(caller_body, self.tcx).ty, return_block, ), @@ -665,7 +657,7 @@ impl<'tcx> Inliner<'tcx> { }; // Copy the arguments if needed. - let args = self.make_call_args(args, &callsite, caller_body, &callee_body, return_block); + let args = self.make_call_args(args, callsite, caller_body, &callee_body, return_block); let mut integrator = Integrator { args: &args, diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs index f5274c664cf..9828e90de88 100644 --- a/compiler/rustc_mir_transform/src/inline/cycle.rs +++ b/compiler/rustc_mir_transform/src/inline/cycle.rs @@ -5,6 +5,7 @@ use rustc_middle::mir::TerminatorKind; use rustc_middle::ty::{self, GenericArgsRef, InstanceKind, TyCtxt, TypeVisitableExt}; use rustc_session::Limit; use rustc_span::sym; +use tracing::{instrument, trace}; // FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking // this query ridiculously often. @@ -88,7 +89,6 @@ pub(crate) fn mir_callgraph_reachable<'tcx>( | InstanceKind::FnPtrShim(..) | InstanceKind::ClosureOnceShim { .. } | InstanceKind::ConstructCoroutineInClosureShim { .. } - | InstanceKind::CoroutineKindShim { .. } | InstanceKind::ThreadLocalShim { .. } | InstanceKind::CloneShim(..) => {} @@ -136,6 +136,14 @@ pub(crate) fn mir_callgraph_reachable<'tcx>( } false } + // FIXME(-Znext-solver): Remove this hack when trait solver overflow can return an error. + // In code like that pointed out in #128887, the type complexity we ask the solver to deal with + // grows as we recurse into the call graph. If we use the same recursion limit here and in the + // solver, the solver hits the limit first and emits a fatal error. But if we use a reduced + // limit, we will hit the limit first and give up on looking for inlining. And in any case, + // the default recursion limits are quite generous for us. If we need to recurse 64 times + // into the call graph, we're probably not going to find any useful MIR inlining. 
+ let recursion_limit = tcx.recursion_limit() / 2; process( tcx, param_env, @@ -144,7 +152,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>( &mut Vec::new(), &mut FxHashSet::default(), &mut FxHashMap::default(), - tcx.recursion_limit(), + recursion_limit, ) } diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs index 3ec553d0ba0..5c8958924fb 100644 --- a/compiler/rustc_mir_transform/src/instsimplify.rs +++ b/compiler/rustc_mir_transform/src/instsimplify.rs @@ -13,24 +13,18 @@ use rustc_target::spec::abi::Abi; use crate::simplify::simplify_duplicate_switch_targets; use crate::take_array; -pub enum InstSimplify { +pub(super) enum InstSimplify { BeforeInline, AfterSimplifyCfg, } -impl InstSimplify { - pub fn name(&self) -> &'static str { +impl<'tcx> crate::MirPass<'tcx> for InstSimplify { + fn name(&self) -> &'static str { match self { InstSimplify::BeforeInline => "InstSimplify-before-inline", InstSimplify::AfterSimplifyCfg => "InstSimplify-after-simplifycfg", } } -} - -impl<'tcx> MirPass<'tcx> for InstSimplify { - fn name(&self) -> &'static str { - self.name() - } fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 @@ -69,13 +63,13 @@ impl<'tcx> MirPass<'tcx> for InstSimplify { } } -struct InstSimplifyContext<'tcx, 'a> { +struct InstSimplifyContext<'a, 'tcx> { tcx: TyCtxt<'tcx>, local_decls: &'a LocalDecls<'tcx>, param_env: ParamEnv<'tcx>, } -impl<'tcx> InstSimplifyContext<'tcx, '_> { +impl<'tcx> InstSimplifyContext<'_, 'tcx> { fn should_simplify(&self, source_info: &SourceInfo, rvalue: &Rvalue<'tcx>) -> bool { self.should_simplify_custom(source_info, "Rvalue", rvalue) } diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs index 96c52845a4a..1810569bc8e 100644 --- a/compiler/rustc_mir_transform/src/jump_threading.rs +++ b/compiler/rustc_mir_transform/src/jump_threading.rs @@ -51,16 +51,17 @@ use rustc_mir_dataflow::lattice::HasBottom; use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem}; use rustc_span::DUMMY_SP; use rustc_target::abi::{TagEncoding, Variants}; +use tracing::{debug, instrument, trace}; use crate::cost_checker::CostChecker; -pub struct JumpThreading; +pub(super) struct JumpThreading; const MAX_BACKTRACK: usize = 5; const MAX_COST: usize = 100; const MAX_PLACES: usize = 100; -impl<'tcx> MirPass<'tcx> for JumpThreading { +impl<'tcx> crate::MirPass<'tcx> for JumpThreading { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 2 } @@ -77,18 +78,16 @@ impl<'tcx> MirPass<'tcx> for JumpThreading { } let param_env = tcx.param_env_reveal_all_normalized(def_id); - let map = Map::new(tcx, body, Some(MAX_PLACES)); - let loop_headers = loop_headers(body); - let arena = DroplessArena::default(); + let arena = &DroplessArena::default(); let mut finder = TOFinder { tcx, param_env, ecx: InterpCx::new(tcx, DUMMY_SP, param_env, DummyMachine), body, - arena: &arena, - map: &map, - loop_headers: &loop_headers, + arena, + map: Map::new(tcx, body, Some(MAX_PLACES)), + loop_headers: loop_headers(body), opportunities: Vec::new(), }; @@ -104,7 +103,7 @@ impl<'tcx> MirPass<'tcx> for JumpThreading { // Verify that we do not thread through a loop header. 
for to in opportunities.iter() { - assert!(to.chain.iter().all(|&block| !loop_headers.contains(block))); + assert!(to.chain.iter().all(|&block| !finder.loop_headers.contains(block))); } OpportunitySet::new(body, opportunities).apply(body); } @@ -118,13 +117,13 @@ struct ThreadingOpportunity { target: BasicBlock, } -struct TOFinder<'tcx, 'a> { +struct TOFinder<'a, 'tcx> { tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ecx: InterpCx<'tcx, DummyMachine>, body: &'a Body<'tcx>, - map: &'a Map<'tcx>, - loop_headers: &'a BitSet<BasicBlock>, + map: Map<'tcx>, + loop_headers: BitSet<BasicBlock>, /// We use an arena to avoid cloning the slices when cloning `state`. arena: &'a DroplessArena, opportunities: Vec<ThreadingOpportunity>, @@ -184,33 +183,33 @@ impl<'a> ConditionSet<'a> { } } -impl<'tcx, 'a> TOFinder<'tcx, 'a> { +impl<'a, 'tcx> TOFinder<'a, 'tcx> { fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool { state.all_bottom() } /// Recursion entry point to find threading opportunities. #[instrument(level = "trace", skip(self))] - fn start_from_switch(&mut self, bb: BasicBlock) -> Option<!> { + fn start_from_switch(&mut self, bb: BasicBlock) { let bbdata = &self.body[bb]; if bbdata.is_cleanup || self.loop_headers.contains(bb) { - return None; + return; } - let (discr, targets) = bbdata.terminator().kind.as_switch()?; - let discr = discr.place()?; + let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { return }; + let Some(discr) = discr.place() else { return }; debug!(?discr, ?bb); let discr_ty = discr.ty(self.body, self.tcx).ty; - let discr_layout = self.ecx.layout_of(discr_ty).ok()?; + let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return }; - let discr = self.map.find(discr.as_ref())?; + let Some(discr) = self.map.find(discr.as_ref()) else { return }; debug!(?discr); let cost = CostChecker::new(self.tcx, self.param_env, None, self.body); let mut state = State::new_reachable(); let conds = if let Some((value, then, else_)) = targets.as_static_if() { - let value = ScalarInt::try_from_uint(value, discr_layout.size)?; + let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return }; self.arena.alloc_from_iter([ Condition { value, polarity: Polarity::Eq, target: then }, Condition { value, polarity: Polarity::Ne, target: else_ }, @@ -222,10 +221,9 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { })) }; let conds = ConditionSet(conds); - state.insert_value_idx(discr, conds, self.map); + state.insert_value_idx(discr, conds, &self.map); self.find_opportunity(bb, state, cost, 0); - None } /// Recursively walk statements backwards from this bb's terminator to find threading @@ -264,7 +262,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`. 
// _1 = 6 if let Some((lhs, tail)) = self.mutated_statement(stmt) { - state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::BOTTOM); + state.flood_with_tail_elem(lhs.as_ref(), tail, &self.map, ConditionSet::BOTTOM); } } @@ -364,18 +362,17 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { lhs: PlaceIndex, rhs: ImmTy<'tcx>, state: &mut State<ConditionSet<'a>>, - ) -> Option<!> { + ) { let register_opportunity = |c: Condition| { debug!(?bb, ?c.target, "register"); self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target }) }; - let conditions = state.try_get_idx(lhs, self.map)?; - if let Immediate::Scalar(Scalar::Int(int)) = *rhs { + if let Some(conditions) = state.try_get_idx(lhs, &self.map) + && let Immediate::Scalar(Scalar::Int(int)) = *rhs + { conditions.iter_matches(int).for_each(register_opportunity); } - - None } /// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`. @@ -407,7 +404,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } }, &mut |place, op| { - if let Some(conditions) = state.try_get_idx(place, self.map) + if let Some(conditions) = state.try_get_idx(place, &self.map) && let Ok(imm) = self.ecx.read_immediate_raw(op) && let Some(imm) = imm.right() && let Immediate::Scalar(Scalar::Int(int)) = *imm @@ -428,22 +425,23 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { lhs: PlaceIndex, rhs: &Operand<'tcx>, state: &mut State<ConditionSet<'a>>, - ) -> Option<!> { + ) { match rhs { // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`. Operand::Constant(constant) => { - let constant = - self.ecx.eval_mir_constant(&constant.const_, constant.span, None).ok()?; + let Ok(constant) = + self.ecx.eval_mir_constant(&constant.const_, constant.span, None) + else { + return; + }; self.process_constant(bb, lhs, constant, state); } // Transfer the conditions on the copied rhs. Operand::Move(rhs) | Operand::Copy(rhs) => { - let rhs = self.map.find(rhs.as_ref())?; - state.insert_place_idx(rhs, lhs, self.map); + let Some(rhs) = self.map.find(rhs.as_ref()) else { return }; + state.insert_place_idx(rhs, lhs, &self.map); } } - - None } #[instrument(level = "trace", skip(self))] @@ -453,24 +451,22 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { lhs_place: &Place<'tcx>, rhs: &Rvalue<'tcx>, state: &mut State<ConditionSet<'a>>, - ) -> Option<!> { - let lhs = self.map.find(lhs_place.as_ref())?; + ) { + let Some(lhs) = self.map.find(lhs_place.as_ref()) else { return }; match rhs { - Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?, + Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state), // Transfer the conditions on the copy rhs. - Rvalue::CopyForDeref(rhs) => { - self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)? - } + Rvalue::CopyForDeref(rhs) => self.process_operand(bb, lhs, &Operand::Copy(*rhs), state), Rvalue::Discriminant(rhs) => { - let rhs = self.map.find_discr(rhs.as_ref())?; - state.insert_place_idx(rhs, lhs, self.map); + let Some(rhs) = self.map.find_discr(rhs.as_ref()) else { return }; + state.insert_place_idx(rhs, lhs, &self.map); } // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`. Rvalue::Aggregate(box ref kind, ref operands) => { let agg_ty = lhs_place.ty(self.body, self.tcx).ty; let lhs = match kind { // Do not support unions. - AggregateKind::Adt(.., Some(_)) => return None, + AggregateKind::Adt(.., Some(_)) => return, AggregateKind::Adt(_, variant_index, ..) 
if agg_ty.is_enum() => { if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant) && let Ok(discr_value) = @@ -478,7 +474,11 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { { self.process_immediate(bb, discr_target, discr_value, state); } - self.map.apply(lhs, TrackElem::Variant(*variant_index))? + if let Some(idx) = self.map.apply(lhs, TrackElem::Variant(*variant_index)) { + idx + } else { + return; + } } _ => lhs, }; @@ -490,10 +490,10 @@ } // Transfer the conditions on the copy rhs, after inverting polarity. Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => { - let conditions = state.try_get_idx(lhs, self.map)?; - let place = self.map.find(place.as_ref())?; + let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return }; + let Some(place) = self.map.find(place.as_ref()) else { return }; let conds = conditions.map(self.arena, Condition::inv); - state.insert_value_idx(place, conds, self.map); + state.insert_value_idx(place, conds, &self.map); } // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`. // Create a condition on `rhs ?= B`. @@ -502,33 +502,35 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { box (Operand::Move(place) | Operand::Copy(place), Operand::Constant(value)) | box (Operand::Constant(value), Operand::Move(place) | Operand::Copy(place)), ) => { - let conditions = state.try_get_idx(lhs, self.map)?; - let place = self.map.find(place.as_ref())?; + let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return }; + let Some(place) = self.map.find(place.as_ref()) else { return }; let equals = match op { BinOp::Eq => ScalarInt::TRUE, BinOp::Ne => ScalarInt::FALSE, - _ => return None, + _ => return, }; if value.const_.ty().is_floating_point() { // Floating point equality does not follow bit-patterns. // -0.0 and NaN both have special rules for equality, // and therefore we cannot use integer comparisons for them. // Avoid handling them, though this could be extended in the future. - return None; + return; } - let value = value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?; + let Some(value) = + value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int() + else { + return; + }; let conds = conditions.map(self.arena, |c| Condition { value, polarity: if c.matches(equals) { Polarity::Eq } else { Polarity::Ne }, ..c }); - state.insert_value_idx(place, conds, self.map); + state.insert_value_idx(place, conds, &self.map); } _ => {} } - - None } #[instrument(level = "trace", skip(self))] @@ -537,7 +539,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { bb: BasicBlock, stmt: &Statement<'tcx>, state: &mut State<ConditionSet<'a>>, - ) -> Option<!> { + ) { let register_opportunity = |c: Condition| { debug!(?bb, ?c.target, "register"); self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target }) }; @@ -550,12 +552,12 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // If we expect `discriminant(place) ?= A`, // we have an opportunity if `variant_index ?= A`. StatementKind::SetDiscriminant { box place, variant_index } => { - let discr_target = self.map.find_discr(place.as_ref())?; + let Some(discr_target) = self.map.find_discr(place.as_ref()) else { return }; let enum_ty = place.ty(self.body, self.tcx).ty; // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant // of a niche encoding. If we cannot ensure that we write to the discriminant, do // nothing. 
- let enum_layout = self.ecx.layout_of(enum_ty).ok()?; + let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return }; let writes_discriminant = match enum_layout.variants { Variants::Single { index } => { assert_eq!(index, *variant_index); @@ -568,24 +570,25 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } => *variant_index != untagged_variant, }; if writes_discriminant { - let discr = self.ecx.discriminant_for_variant(enum_ty, *variant_index).ok()?; - self.process_immediate(bb, discr_target, discr, state)?; + let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index) + else { + return; + }; + self.process_immediate(bb, discr_target, discr, state); } } // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`. StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume( Operand::Copy(place) | Operand::Move(place), )) => { - let conditions = state.try_get(place.as_ref(), self.map)?; + let Some(conditions) = state.try_get(place.as_ref(), &self.map) else { return }; conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity); } StatementKind::Assign(box (lhs_place, rhs)) => { - self.process_assign(bb, lhs_place, rhs, state)?; + self.process_assign(bb, lhs_place, rhs, state); } _ => {} } - - None } #[instrument(level = "trace", skip(self, state, cost))] @@ -626,7 +629,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // We can recurse through this terminator. let mut state = state(); if let Some(place_to_flood) = place_to_flood { - state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::BOTTOM); + state.flood_with(place_to_flood.as_ref(), &self.map, ConditionSet::BOTTOM); } self.find_opportunity(bb, state, cost.clone(), depth + 1); } @@ -638,17 +641,17 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { targets: &SwitchTargets, target_bb: BasicBlock, state: &mut State<ConditionSet<'a>>, - ) -> Option<!> { + ) { debug_assert_ne!(target_bb, START_BLOCK); debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1); - let discr = discr.place()?; + let Some(discr) = discr.place() else { return }; let discr_ty = discr.ty(self.body, self.tcx).ty; - let discr_layout = self.ecx.layout_of(discr_ty).ok()?; - let conditions = state.try_get(discr.as_ref(), self.map)?; + let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return }; + let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return }; if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) { - let value = ScalarInt::try_from_uint(value, discr_layout.size)?; + let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return }; debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1); // We are inside `target_bb`. Since we have a single predecessor, we know we passed @@ -662,7 +665,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } else if let Some((value, _, else_bb)) = targets.as_static_if() && target_bb == else_bb { - let value = ScalarInt::try_from_uint(value, discr_layout.size)?; + let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return }; // We only know that `discr != value`. That's much weaker information than // the equality we had in the previous arm. 
All we can conclude is that @@ -675,8 +678,6 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } } } - - None } } diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index 7eed47cf239..f123f39bf42 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -1,8 +1,6 @@ -//! A lint that checks for known panics like -//! overflows, division by zero, -//! out-of-bound access etc. -//! Uses const propagation to determine the -//! values of operands during checks. +//! A lint that checks for known panics like overflows, division by zero, +//! out-of-bound access etc. Uses const propagation to determine the values of +//! operands during checks. use std::fmt::Debug; @@ -22,13 +20,13 @@ use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayo use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt}; use rustc_span::Span; use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx}; +use tracing::{debug, instrument, trace}; use crate::errors::{AssertLint, AssertLintKind}; -use crate::MirLint; -pub struct KnownPanicsLint; +pub(super) struct KnownPanicsLint; -impl<'tcx> MirLint<'tcx> for KnownPanicsLint { +impl<'tcx> crate::MirLint<'tcx> for KnownPanicsLint { fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { if body.tainted_by_errors.is_some() { return; @@ -380,19 +378,19 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { if let (Some(l), Some(r)) = (l, r) && l.layout.ty.is_integral() && op.is_overflowing() - { - if self.use_ecx(|this| { + && self.use_ecx(|this| { let (_res, overflow) = this.ecx.binary_op(op, &l, &r)?.to_scalar_pair(); overflow.to_bool() - })? { - self.report_assert_as_lint( - location, - AssertLintKind::ArithmeticOverflow, - AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()), - ); - return None; - } + })? + { + self.report_assert_as_lint( + location, + AssertLintKind::ArithmeticOverflow, + AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()), + ); + return None; } + Some(()) } @@ -469,12 +467,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { msg: &AssertKind<Operand<'tcx>>, cond: &Operand<'tcx>, location: Location, - ) -> Option<!> { - let value = &self.eval_operand(cond)?; + ) { + let Some(value) = &self.eval_operand(cond) else { return }; trace!("assertion on {:?} should be {:?}", value, expected); let expected = Scalar::from_bool(expected); - let value_const = self.use_ecx(|this| this.ecx.read_scalar(value))?; + let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) else { return }; if expected != value_const { // Poison all places this operand references so that further code @@ -516,14 +514,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { AssertKind::BoundsCheck { len, index } } // Remaining overflow errors are already covered by checks on the binary operators. - AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return None, + AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return, // Need proper const propagator for these. 
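A pattern worth calling out across these hunks: helpers that returned `Option<!>` solely so `?` could early-return have been changed to return `()` and use `let ... else`. A minimal sketch of the two styles (hypothetical function names; stable `Option<()>` stands in for the unstable `!` type):

```rust
// Before: `?` propagates `None` as the early exit, and the `Option`
// return type exists only to enable that.
fn process_old(x: Option<u32>) -> Option<()> {
    let v = x?;
    println!("{v}");
    None // callers ignore this
}

// After: explicit early returns, no dummy return type to misread.
fn process_new(x: Option<u32>) {
    let Some(v) = x else { return };
    println!("{v}");
}
```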
- _ => return None, + _ => return, }; self.report_assert_as_lint(location, AssertLintKind::UnconditionalPanic, msg); } - - None } fn ensure_not_propagated(&self, local: Local) { @@ -564,7 +560,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?; if matches!(val.layout.abi, Abi::ScalarPair(..)) { - // FIXME `Value` should properly support pairs in `Immediate`... but currently it does not. + // FIXME `Value` should properly support pairs in `Immediate`... but currently + // it does not. let (val, overflow) = val.to_pair(&self.ecx); Value::Aggregate { variant: VariantIdx::ZERO, @@ -854,7 +851,7 @@ const MAX_ALLOC_LIMIT: u64 = 1024; /// The mode that `ConstProp` is allowed to run in for a given `Local`. #[derive(Clone, Copy, Debug, PartialEq)] -pub enum ConstPropMode { +enum ConstPropMode { /// The `Local` can be propagated into and reads of this `Local` can also be propagated. FullConstProp, /// The `Local` can only be propagated into and from its own block. @@ -866,7 +863,7 @@ pub enum ConstPropMode { /// A visitor that determines locals in a MIR body /// that can be const propagated -pub struct CanConstProp { +struct CanConstProp { can_const_prop: IndexVec<Local, ConstPropMode>, // False at the beginning. Once set, no more assignments are allowed to that local. found_assignment: BitSet<Local>, @@ -874,7 +871,7 @@ pub struct CanConstProp { impl CanConstProp { /// Returns true if `local` can be propagated - pub fn check<'tcx>( + fn check<'tcx>( tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, body: &Body<'tcx>, @@ -915,7 +912,7 @@ impl<'tcx> Visitor<'tcx> for CanConstProp { fn visit_place(&mut self, place: &Place<'tcx>, mut context: PlaceContext, loc: Location) { use rustc_middle::mir::visit::PlaceContext::*; - // Dereferencing just read the addess of `place.local`. + // Dereferencing just read the address of `place.local`. if place.projection.first() == Some(&PlaceElem::Deref) { context = NonMutatingUse(NonMutatingUseContext::Copy); } diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs index cbc3169f2f1..3e263aa4067 100644 --- a/compiler/rustc_mir_transform/src/large_enums.rs +++ b/compiler/rustc_mir_transform/src/large_enums.rs @@ -16,28 +16,190 @@ use rustc_target::abi::{HasDataLayout, Size, TagEncoding, Variants}; /// Large([u32; 1024]), /// } /// ``` -/// Instead of emitting moves of the large variant, -/// Perform a memcpy instead. +/// Instead of emitting moves of the large variant, perform a memcpy instead. /// Based off of [this HackMD](https://hackmd.io/@ft4bxUsFT5CEUBmRKYHr7w/rJM8BBPzD). /// /// In summary, what this does is at runtime determine which enum variant is active, /// and instead of copying all the bytes of the largest possible variant, /// copy only the bytes for the currently active variant. 
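To make the `EnumSizeOpt` doc comment concrete, here is a hand-written analogue of the copy it emits. This is only a sketch: the real pass materializes the per-variant byte counts as a const allocation (built in `candidate`, further down) and indexes it with the cast discriminant, rather than taking a slice parameter.

```rust
use std::ptr;

enum Data {
    Small(u8),
    Large([u32; 1024]), // 4 KiB payload
}

/// Sketch of the emitted sequence for `dst = src`: read the discriminant,
/// look up the active variant's byte count, and copy only those bytes
/// instead of all of `size_of::<Data>()`.
unsafe fn copy_active_bytes(dst: *mut Data, src: *const Data, sizes: &[usize; 2]) {
    let discr = match &*src {
        Data::Small(_) => 0,
        Data::Large(_) => 1,
    };
    ptr::copy_nonoverlapping(src.cast::<u8>(), dst.cast::<u8>(), sizes[discr]);
}
```

The twelve statements in the `run_pass` hunk below (storage markers, discriminant read and cast, size lookup, pointer casts, `Deinit`, `CopyNonOverlapping`) are the MIR spelling of this sequence.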
-pub struct EnumSizeOpt { +pub(super) struct EnumSizeOpt { pub(crate) discrepancy: u64, } -impl<'tcx> MirPass<'tcx> for EnumSizeOpt { +impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt { fn is_enabled(&self, sess: &Session) -> bool { // There are some differences in behavior on wasm and ARM that are not properly // understood, so we conservatively treat this optimization as unsound: // https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457 sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3 } + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // NOTE: This pass may produce different MIR based on the alignment of the target // platform, but it will still be valid. - self.optim(tcx, body); + + let mut alloc_cache = FxHashMap::default(); + let body_did = body.source.def_id(); + let param_env = tcx.param_env_reveal_all_normalized(body_did); + + let blocks = body.basic_blocks.as_mut(); + let local_decls = &mut body.local_decls; + + for bb in blocks { + bb.expand_statements(|st| { + let StatementKind::Assign(box ( + lhs, + Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)), + )) = &st.kind + else { + return None; + }; + + let ty = lhs.ty(local_decls, tcx).ty; + + let (adt_def, num_variants, alloc_id) = + self.candidate(tcx, param_env, ty, &mut alloc_cache)?; + + let source_info = st.source_info; + let span = source_info.span; + + let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64); + let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span)); + let store_live = + Statement { source_info, kind: StatementKind::StorageLive(size_array_local) }; + + let place = Place::from(size_array_local); + let constant_vals = ConstOperand { + span, + user_ty: None, + const_: Const::Val( + ConstValue::Indirect { alloc_id, offset: Size::ZERO }, + tmp_ty, + ), + }; + let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals))); + let const_assign = + Statement { source_info, kind: StatementKind::Assign(Box::new((place, rval))) }; + + let discr_place = Place::from( + local_decls.push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)), + ); + let store_discr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + discr_place, + Rvalue::Discriminant(*rhs), + ))), + }; + + let discr_cast_place = + Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); + let cast_discr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + discr_cast_place, + Rvalue::Cast( + CastKind::IntToInt, + Operand::Copy(discr_place), + tcx.types.usize, + ), + ))), + }; + + let size_place = + Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); + let store_size = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + size_place, + Rvalue::Use(Operand::Copy(Place { + local: size_array_local, + projection: tcx + .mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]), + })), + ))), + }; + + let dst = + Place::from(local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span))); + let dst_ptr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + dst, + Rvalue::RawPtr(Mutability::Mut, *lhs), + ))), + }; + + let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8); + let dst_cast_place = + Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span))); + let dst_cast = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + dst_cast_place, + Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty), + ))), + }; + + let src = + 
Place::from(local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span))); + let src_ptr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + src, + Rvalue::RawPtr(Mutability::Not, *rhs), + ))), + }; + + let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8); + let src_cast_place = + Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span))); + let src_cast = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + src_cast_place, + Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty), + ))), + }; + + let deinit_old = + Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) }; + + let copy_bytes = Statement { + source_info, + kind: StatementKind::Intrinsic(Box::new( + NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { + src: Operand::Copy(src_cast_place), + dst: Operand::Copy(dst_cast_place), + count: Operand::Copy(size_place), + }), + )), + }; + + let store_dead = + Statement { source_info, kind: StatementKind::StorageDead(size_array_local) }; + + let iter = [ + store_live, + const_assign, + store_discr, + cast_discr, + store_size, + dst_ptr, + dst_cast, + src_ptr, + src_cast, + deinit_old, + copy_bytes, + store_dead, + ] + .into_iter(); + + st.make_nop(); + + Some(iter) + }); + } } } @@ -82,6 +244,8 @@ impl EnumSizeOpt { let ptr_sized_int = data_layout.ptr_sized_integer(); let target_bytes = ptr_sized_int.size().bytes() as usize; let mut data = vec![0; target_bytes * num_discrs]; + + // We use a macro because `$bytes` can be u32 or u64. macro_rules! encode_store { ($curr_idx: expr, $endian: expr, $bytes: expr) => { let bytes = match $endian { @@ -116,184 +280,4 @@ impl EnumSizeOpt { let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc)); Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc))) } - fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let mut alloc_cache = FxHashMap::default(); - let body_did = body.source.def_id(); - let param_env = tcx.param_env_reveal_all_normalized(body_did); - - let blocks = body.basic_blocks.as_mut(); - let local_decls = &mut body.local_decls; - - for bb in blocks { - bb.expand_statements(|st| { - if let StatementKind::Assign(box ( - lhs, - Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)), - )) = &st.kind - { - let ty = lhs.ty(local_decls, tcx).ty; - - let source_info = st.source_info; - let span = source_info.span; - - let (adt_def, num_variants, alloc_id) = - self.candidate(tcx, param_env, ty, &mut alloc_cache)?; - - let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64); - - let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span)); - let store_live = Statement { - source_info, - kind: StatementKind::StorageLive(size_array_local), - }; - - let place = Place::from(size_array_local); - let constant_vals = ConstOperand { - span, - user_ty: None, - const_: Const::Val( - ConstValue::Indirect { alloc_id, offset: Size::ZERO }, - tmp_ty, - ), - }; - let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals))); - - let const_assign = Statement { - source_info, - kind: StatementKind::Assign(Box::new((place, rval))), - }; - - let discr_place = Place::from( - local_decls - .push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)), - ); - - let store_discr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - discr_place, - Rvalue::Discriminant(*rhs), - ))), - }; - - let discr_cast_place = - Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); - - let 
cast_discr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - discr_cast_place, - Rvalue::Cast( - CastKind::IntToInt, - Operand::Copy(discr_place), - tcx.types.usize, - ), - ))), - }; - - let size_place = - Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); - - let store_size = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - size_place, - Rvalue::Use(Operand::Copy(Place { - local: size_array_local, - projection: tcx - .mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]), - })), - ))), - }; - - let dst = Place::from( - local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span)), - ); - - let dst_ptr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - dst, - Rvalue::RawPtr(Mutability::Mut, *lhs), - ))), - }; - - let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8); - let dst_cast_place = - Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span))); - - let dst_cast = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - dst_cast_place, - Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty), - ))), - }; - - let src = Place::from( - local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span)), - ); - - let src_ptr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - src, - Rvalue::RawPtr(Mutability::Not, *rhs), - ))), - }; - - let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8); - let src_cast_place = - Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span))); - - let src_cast = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - src_cast_place, - Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty), - ))), - }; - - let deinit_old = - Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) }; - - let copy_bytes = Statement { - source_info, - kind: StatementKind::Intrinsic(Box::new( - NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { - src: Operand::Copy(src_cast_place), - dst: Operand::Copy(dst_cast_place), - count: Operand::Copy(size_place), - }), - )), - }; - - let store_dead = Statement { - source_info, - kind: StatementKind::StorageDead(size_array_local), - }; - let iter = [ - store_live, - const_assign, - store_discr, - cast_discr, - store_size, - dst_ptr, - dst_cast, - src_ptr, - src_cast, - deinit_old, - copy_bytes, - store_dead, - ] - .into_iter(); - - st.make_nop(); - Some(iter) - } else { - None - } - }); - } - } } diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 1f214bc42cb..0868f4b3d88 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -3,34 +3,30 @@ #![feature(box_patterns)] #![feature(const_type_name)] #![feature(cow_is_borrowed)] -#![feature(decl_macro)] #![feature(if_let_guard)] #![feature(impl_trait_in_assoc_type)] #![feature(let_chains)] #![feature(map_try_insert)] #![feature(never_type)] -#![feature(option_get_or_insert_default)] #![feature(round_char_boundary)] #![feature(try_blocks)] #![feature(yeet_expr)] +#![warn(unreachable_pub)] // tidy-alphabetical-end -#[macro_use] -extern crate tracing; - use hir::ConstContext; use required_consts::RequiredConstsVisitor; +use rustc_const_eval::check_consts::{self, ConstCx}; use rustc_const_eval::util; use rustc_data_structures::fx::FxIndexSet; use rustc_data_structures::steal::Steal; use rustc_hir as hir; -use rustc_hir::def::DefKind; +use rustc_hir::def::{CtorKind, DefKind}; use rustc_hir::def_id::LocalDefId; -use 
rustc_hir::intravisit::{self, Visitor}; use rustc_index::IndexVec; use rustc_middle::mir::{ AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs, LocalDecl, - MirPass, MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, SourceInfo, + MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, SourceInfo, Statement, StatementKind, TerminatorKind, START_BLOCK, }; use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt}; @@ -39,11 +35,12 @@ use rustc_middle::{bug, query, span_bug}; use rustc_span::source_map::Spanned; use rustc_span::{sym, DUMMY_SP}; use rustc_trait_selection::traits; +use tracing::{debug, trace}; #[macro_use] mod pass_manager; -use pass_manager::{self as pm, Lint, MirLint, WithMinOptLevel}; +use pass_manager::{self as pm, Lint, MirLint, MirPass, WithMinOptLevel}; mod abort_unwinding_calls; mod add_call_guards; @@ -75,6 +72,8 @@ mod errors; mod ffi_unwind_calls; mod function_item_references; mod gvn; +// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden +// by custom rustc drivers, running all the steps by themselves. See #114628. pub mod inline; mod instsimplify; mod jump_threading; @@ -87,6 +86,7 @@ mod match_branches; mod mentioned_items; mod multiple_return_terminators; mod nrvo; +mod post_drop_elaboration; mod prettify; mod promote_consts; mod ref_prop; @@ -98,6 +98,7 @@ mod remove_unneeded_drops; mod remove_zsts; mod required_consts; mod reveal_all; +mod sanity_check; mod shim; mod ssa; // This pass is public to allow external drivers to perform MIR cleanup @@ -110,9 +111,6 @@ mod unreachable_enum_branching; mod unreachable_prop; mod validate; -use rustc_const_eval::check_consts::{self, ConstCx}; -use rustc_mir_dataflow::rustc_peek; - rustc_fluent_macro::fluent_messages! { "../messages.ftl" } pub fn provide(providers: &mut Providers) { @@ -130,11 +128,12 @@ pub fn provide(providers: &mut Providers) { mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses, optimized_mir, is_mir_available, - is_ctfe_mir_available: |tcx, did| is_mir_available(tcx, did), + is_ctfe_mir_available: is_mir_available, mir_callgraph_reachable: inline::cycle::mir_callgraph_reachable, mir_inliner_callees: inline::cycle::mir_inliner_callees, promoted_mir, deduced_param_attrs: deduce_param_attrs::deduced_param_attrs, + coroutine_by_move_body_def_id: coroutine::coroutine_by_move_body_def_id, ..providers.queries }; } @@ -169,8 +168,9 @@ fn remap_mir_for_const_eval_select<'tcx>( let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) = match tupled_args.node { Operand::Constant(_) => { - // there is no good way of extracting a tuple arg from a constant (const generic stuff) - // so we just create a temporary and deconstruct that. + // There is no good way of extracting a tuple arg from a constant + // (const generic stuff) so we just create a temporary and deconstruct + // that. let local = body.local_decls.push(LocalDecl::new(ty, fn_span)); bb.statements.push(Statement { source_info: SourceInfo::outermost(fn_span), @@ -222,25 +222,30 @@ fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { /// Finds the full set of `DefId`s within the current crate that have /// MIR associated with them. fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> { - let mut set = FxIndexSet::default(); - // All body-owners have MIR associated with them. 
- set.extend(tcx.hir().body_owners()); + let mut set: FxIndexSet<_> = tcx.hir().body_owners().collect(); - // Additionally, tuple struct/variant constructors have MIR, but - // they don't have a BodyId, so we need to build them separately. - struct GatherCtors<'a> { - set: &'a mut FxIndexSet<LocalDefId>, + // Coroutine-closures (e.g. async closures) have an additional by-move MIR + // body that isn't in the HIR. + for body_owner in tcx.hir().body_owners() { + if let DefKind::Closure = tcx.def_kind(body_owner) + && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id()) + { + set.insert(tcx.coroutine_by_move_body_def_id(body_owner).expect_local()); + } } - impl<'tcx> Visitor<'tcx> for GatherCtors<'_> { - fn visit_variant_data(&mut self, v: &'tcx hir::VariantData<'tcx>) { - if let hir::VariantData::Tuple(_, _, def_id) = *v { - self.set.insert(def_id); + + // tuple struct/variant constructors have MIR, but they don't have a BodyId, + // so we need to build them separately. + for item in tcx.hir_crate_items(()).free_items() { + if let DefKind::Struct | DefKind::Enum = tcx.def_kind(item.owner_id) { + for variant in tcx.adt_def(item.owner_id).variants() { + if let Some((CtorKind::Fn, ctor_def_id)) = variant.ctor { + set.insert(ctor_def_id.expect_local()); + } } - intravisit::walk_struct_def(self, v) } } - tcx.hir().visit_all_item_likes_in_crate(&mut GatherCtors { set: &mut set }); set } @@ -250,8 +255,7 @@ fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs { // No need to const-check a non-const `fn`. match const_kind { - Some(ConstContext::Const { .. } | ConstContext::Static(_)) - | Some(ConstContext::ConstFn) => {} + Some(ConstContext::Const { .. } | ConstContext::Static(_) | ConstContext::ConstFn) => {} None => span_bug!( tcx.def_span(def), "`mir_const_qualif` should only be called on const fns and const items" @@ -293,13 +297,9 @@ fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> { &Lint(check_packed_ref::CheckPackedRef), &Lint(check_const_item_mutation::CheckConstItemMutation), &Lint(function_item_references::FunctionItemReferences), - // If this is an async closure's output coroutine, generate - // by-move and by-mut bodies if needed. We do this first so - // they can be optimized in lockstep with their parent bodies. - &coroutine::ByMoveBody, // What we need to do constant evaluation. &simplify::SimplifyCfg::Initial, - &rustc_peek::SanityCheck, // Just a lint + &Lint(sanity_check::SanityCheck), ], None, ); @@ -329,8 +329,15 @@ fn mir_promoted( | DefKind::AnonConst => tcx.mir_const_qualif(def), _ => ConstQualifs::default(), }; - // has_ffi_unwind_calls query uses the raw mir, so make sure it is run. + + // the `has_ffi_unwind_calls` query uses the raw mir, so make sure it is run. tcx.ensure_with_value().has_ffi_unwind_calls(def); + + // the `by_move_body` query uses the raw mir, so make sure it is run. + if tcx.needs_coroutine_by_move_body_def_id(def.to_def_id()) { + tcx.ensure_with_value().coroutine_by_move_body_def_id(def); + } + let mut body = tcx.mir_built(def).steal(); if let Some(error_reported) = const_qualifs.tainted_by_errors { body.tainted_by_errors = Some(error_reported); @@ -339,14 +346,6 @@ fn mir_promoted( // Collect `required_consts` *before* promotion, so if there are any consts being promoted // we still add them to the list in the outer MIR body. 
RequiredConstsVisitor::compute_required_consts(&mut body); - // If this has an associated by-move async closure body, that doesn't get run through these - // passes itself, it gets "tagged along" by the pass manager. `RequiredConstsVisitor` is not - // a regular pass so we have to also apply it manually to the other body. - if let Some(coroutine) = body.coroutine.as_mut() { - if let Some(by_move_body) = coroutine.by_move_body.as_mut() { - RequiredConstsVisitor::compute_required_consts(by_move_body); - } - } // What we need to run borrowck etc. let promote_pass = promote_consts::PromoteTemps::default(); @@ -398,7 +397,10 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & if tcx.is_coroutine(def.to_def_id()) { tcx.ensure_with_value().mir_coroutine_witnesses(def); } - let mir_borrowck = tcx.mir_borrowck(def); + + // We only need to borrowck non-synthetic MIR. + let tainted_by_errors = + if !tcx.is_synthetic_mir(def) { tcx.mir_borrowck(def).tainted_by_errors } else { None }; let is_fn_like = tcx.def_kind(def).is_fn_like(); if is_fn_like { @@ -410,7 +412,8 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & let (body, _) = tcx.mir_promoted(def); let mut body = body.steal(); - if let Some(error_reported) = mir_borrowck.tainted_by_errors { + + if let Some(error_reported) = tainted_by_errors { body.tainted_by_errors = Some(error_reported); } @@ -467,8 +470,8 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & tcx.alloc_steal_mir(body) } -// Made public such that `mir_drops_elaborated_and_const_checked` can be overridden -// by custom rustc drivers, running all the steps by themselves. +// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden +// by custom rustc drivers, running all the steps by themselves. See #114628. pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial)); let did = body.source.def_id(); @@ -482,10 +485,13 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<' pm::run_passes( tcx, body, - &[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges], + &[ + &remove_uninit_drops::RemoveUninitDrops, + &simplify::SimplifyCfg::RemoveFalseEdges, + &Lint(post_drop_elaboration::CheckLiveDrops), + ], None, ); - check_consts::post_drop_elaboration::check_live_drops(tcx, body); // FIXME: make this a MIR lint } debug!("runtime_mir_lowering({:?})", did); @@ -514,10 +520,12 @@ fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { /// Returns the sequence of passes that lowers analysis to runtime MIR. fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let passes: &[&dyn MirPass<'tcx>] = &[ - // These next passes must be executed together + // These next passes must be executed together. &add_call_guards::CriticalCallEdges, - &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too. - &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types + // Must be done before drop elaboration because we need to drop opaque types, too. + &reveal_all::RevealAll, + // Calling this after reveal_all ensures that we don't deal with opaque types. 
+ &add_subtyping_projections::Subtyper, &elaborate_drops::ElaborateDrops, // This will remove extraneous landing pads which are no longer // necessary as well as forcing any call in a non-unwinding @@ -526,8 +534,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // AddMovesForPackedDrops needs to run after drop // elaboration. &add_moves_for_packed_drops::AddMovesForPackedDrops, - // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. Otherwise it should run fairly late, - // but before optimizations begin. + // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. + // Otherwise it should run fairly late, but before optimizations begin. &add_retag::AddRetag, &elaborate_box_derefs::ElaborateBoxDerefs, &coroutine::StateTransform, @@ -568,13 +576,15 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // Before inlining: trim down MIR with passes to reduce inlining work. // Has to be done before inlining, otherwise actual call will be almost always inlined. - // Also simple, so can just do first + // Also simple, so can just do first. &lower_slice_len::LowerSliceLenCalls, - // Perform instsimplify before inline to eliminate some trivial calls (like clone shims). + // Perform instsimplify before inline to eliminate some trivial calls (like clone + // shims). &instsimplify::InstSimplify::BeforeInline, // Perform inlining, which may add a lot of code. &inline::Inline, - // Code from other crates may have storage markers, so this needs to happen after inlining. + // Code from other crates may have storage markers, so this needs to happen after + // inlining. &remove_storage_markers::RemoveStorageMarkers, // Inlining and instantiation may introduce ZST and useless drops. &remove_zsts::RemoveZsts, @@ -591,7 +601,8 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &match_branches::MatchBranchSimplification, // inst combine is after MatchBranchSimplification to clean up Ne(_1, false) &multiple_return_terminators::MultipleReturnTerminators, - // After simplifycfg, it allows us to discover new opportunities for peephole optimizations. + // After simplifycfg, it allows us to discover new opportunities for peephole + // optimizations. &instsimplify::InstSimplify::AfterSimplifyCfg, &simplify::SimplifyLocals::BeforeConstProp, &dead_store_elimination::DeadStoreElimination::Initial, @@ -660,14 +671,6 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> { // visited does not depend on the optimization level. // We do not use `run_passes` for this as that might skip the pass if `injection_phase` is set. mentioned_items::MentionedItems.run_pass(tcx, &mut body); - // If this has an associated by-move async closure body, that doesn't get run through these - // passes itself, it gets "tagged along" by the pass manager. Since we're not using the pass - // manager we have to do this by hand. - if let Some(coroutine) = body.coroutine.as_mut() { - if let Some(by_move_body) = coroutine.by_move_body.as_mut() { - mentioned_items::MentionedItems.run_pass(tcx, by_move_body); - } - } // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable // predicates, it will shrink the MIR to a single `unreachable` terminator. 
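On the "unsatisfiable predicates" note above: a body whose where-clauses can never hold is uncallable, so replacing its optimized MIR with a single `unreachable` terminator is sound. A contrived illustration (hypothetical example requiring the unstable `trivial_bounds` feature; not part of this diff):

```rust
#![feature(trivial_bounds)]

// `String: Copy` never holds, so `uncallable` cannot be called; its
// optimized MIR is shrunk to a lone `unreachable` terminator.
fn uncallable()
where
    String: Copy,
{
    println!("never runs");
}

fn main() {}
```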
@@ -690,7 +693,9 @@ fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_ return tcx.arena.alloc(IndexVec::new()); } - tcx.ensure_with_value().mir_borrowck(def); + if !tcx.is_synthetic_mir(def) { + tcx.ensure_with_value().mir_borrowck(def); + } let mut promoted = tcx.mir_promoted(def).1.steal(); for body in &mut promoted { diff --git a/compiler/rustc_mir_transform/src/lint.rs b/compiler/rustc_mir_transform/src/lint.rs index 746068064b8..23733994a8b 100644 --- a/compiler/rustc_mir_transform/src/lint.rs +++ b/compiler/rustc_mir_transform/src/lint.rs @@ -13,7 +13,7 @@ use rustc_mir_dataflow::impls::{MaybeStorageDead, MaybeStorageLive}; use rustc_mir_dataflow::storage::always_storage_live_locals; use rustc_mir_dataflow::{Analysis, ResultsCursor}; -pub fn lint_body<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, when: String) { +pub(super) fn lint_body<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, when: String) { let always_live_locals = &always_storage_live_locals(body); let maybe_storage_live = MaybeStorageLive::new(Cow::Borrowed(always_live_locals)) diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs index a9bdff95fe5..6d635606687 100644 --- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs +++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs @@ -7,9 +7,9 @@ use rustc_span::symbol::sym; use crate::take_array; -pub struct LowerIntrinsics; +pub(super) struct LowerIntrinsics; -impl<'tcx> MirPass<'tcx> for LowerIntrinsics { +impl<'tcx> crate::MirPass<'tcx> for LowerIntrinsics { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let local_decls = &body.local_decls; for block in body.basic_blocks.as_mut() { @@ -35,20 +35,19 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { terminator.kind = TerminatorKind::Goto { target }; } sym::forget => { - if let Some(target) = *target { - block.statements.push(Statement { - source_info: terminator.source_info, - kind: StatementKind::Assign(Box::new(( - *destination, - Rvalue::Use(Operand::Constant(Box::new(ConstOperand { - span: terminator.source_info.span, - user_ty: None, - const_: Const::zero_sized(tcx.types.unit), - }))), - ))), - }); - terminator.kind = TerminatorKind::Goto { target }; - } + let target = target.unwrap(); + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new(( + *destination, + Rvalue::Use(Operand::Constant(Box::new(ConstOperand { + span: terminator.source_info.span, + user_ty: None, + const_: Const::zero_sized(tcx.types.unit), + }))), + ))), + }); + terminator.kind = TerminatorKind::Goto { target }; } sym::copy_nonoverlapping => { let target = target.unwrap(); @@ -121,43 +120,41 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { terminator.kind = TerminatorKind::Goto { target }; } sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { - if let Some(target) = *target { - let Ok([lhs, rhs]) = take_array(args) else { - bug!("Wrong arguments for {} intrinsic", intrinsic.name); - }; - let bin_op = match intrinsic.name { - sym::add_with_overflow => BinOp::AddWithOverflow, - sym::sub_with_overflow => BinOp::SubWithOverflow, - sym::mul_with_overflow => BinOp::MulWithOverflow, - _ => bug!("unexpected intrinsic"), - }; - block.statements.push(Statement { - source_info: terminator.source_info, - kind: StatementKind::Assign(Box::new(( - *destination, - Rvalue::BinaryOp(bin_op, Box::new((lhs.node, rhs.node))), - ))), - }); - terminator.kind = 
TerminatorKind::Goto { target }; - } + let target = target.unwrap(); + let Ok([lhs, rhs]) = take_array(args) else { + bug!("Wrong arguments for {} intrinsic", intrinsic.name); + }; + let bin_op = match intrinsic.name { + sym::add_with_overflow => BinOp::AddWithOverflow, + sym::sub_with_overflow => BinOp::SubWithOverflow, + sym::mul_with_overflow => BinOp::MulWithOverflow, + _ => bug!("unexpected intrinsic"), + }; + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new(( + *destination, + Rvalue::BinaryOp(bin_op, Box::new((lhs.node, rhs.node))), + ))), + }); + terminator.kind = TerminatorKind::Goto { target }; } sym::size_of | sym::min_align_of => { - if let Some(target) = *target { - let tp_ty = generic_args.type_at(0); - let null_op = match intrinsic.name { - sym::size_of => NullOp::SizeOf, - sym::min_align_of => NullOp::AlignOf, - _ => bug!("unexpected intrinsic"), - }; - block.statements.push(Statement { - source_info: terminator.source_info, - kind: StatementKind::Assign(Box::new(( - *destination, - Rvalue::NullaryOp(null_op, tp_ty), - ))), - }); - terminator.kind = TerminatorKind::Goto { target }; - } + let target = target.unwrap(); + let tp_ty = generic_args.type_at(0); + let null_op = match intrinsic.name { + sym::size_of => NullOp::SizeOf, + sym::min_align_of => NullOp::AlignOf, + _ => bug!("unexpected intrinsic"), + }; + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new(( + *destination, + Rvalue::NullaryOp(null_op, tp_ty), + ))), + }); + terminator.kind = TerminatorKind::Goto { target }; } sym::read_via_copy => { let Ok([arg]) = take_array(args) else { @@ -219,17 +216,23 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { terminator.kind = TerminatorKind::Goto { target }; } sym::discriminant_value => { - if let (Some(target), Some(arg)) = (*target, args[0].node.place()) { - let arg = tcx.mk_place_deref(arg); - block.statements.push(Statement { - source_info: terminator.source_info, - kind: StatementKind::Assign(Box::new(( - *destination, - Rvalue::Discriminant(arg), - ))), - }); - terminator.kind = TerminatorKind::Goto { target }; - } + let target = target.unwrap(); + let Ok([arg]) = take_array(args) else { + span_bug!( + terminator.source_info.span, + "Wrong arguments for discriminant_value intrinsic" + ); + }; + let arg = arg.node.place().unwrap(); + let arg = tcx.mk_place_deref(arg); + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new(( + *destination, + Rvalue::Discriminant(arg), + ))), + }); + terminator.kind = TerminatorKind::Goto { target }; } sym::offset => { let target = target.unwrap(); @@ -267,7 +270,6 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { Rvalue::Cast(CastKind::Transmute, arg.node, dst_ty), ))), }); - if let Some(target) = *target { terminator.kind = TerminatorKind::Goto { target }; } else { @@ -299,7 +301,6 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics { Rvalue::Aggregate(Box::new(kind), fields.into()), ))), }); - terminator.kind = TerminatorKind::Goto { target }; } sym::ptr_metadata => { diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs index 77a7f4f47dd..420661f29c8 100644 --- a/compiler/rustc_mir_transform/src/lower_slice_len.rs +++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs @@ -5,30 +5,26 @@ use rustc_hir::def_id::DefId; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; -pub struct 
LowerSliceLenCalls; +pub(super) struct LowerSliceLenCalls; -impl<'tcx> MirPass<'tcx> for LowerSliceLenCalls { +impl<'tcx> crate::MirPass<'tcx> for LowerSliceLenCalls { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - lower_slice_len_calls(tcx, body) - } -} - -pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let language_items = tcx.lang_items(); - let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else { - // there is no lang item to compare to :) - return; - }; + let language_items = tcx.lang_items(); + let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else { + // there is no lang item to compare to :) + return; + }; - // The one successor remains unchanged, so no need to invalidate - let basic_blocks = body.basic_blocks.as_mut_preserves_cfg(); - for block in basic_blocks { - // lower `<[_]>::len` calls - lower_slice_len_call(block, slice_len_fn_item_def_id); + // The one successor remains unchanged, so no need to invalidate + let basic_blocks = body.basic_blocks.as_mut_preserves_cfg(); + for block in basic_blocks { + // lower `<[_]>::len` calls + lower_slice_len_call(block, slice_len_fn_item_def_id); + } } } diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 47758b56f8c..ad3126f66a6 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -10,9 +10,9 @@ use rustc_type_ir::TyKind::*; use super::simplify::simplify_cfg; -pub struct MatchBranchSimplification; +pub(super) struct MatchBranchSimplification; -impl<'tcx> MirPass<'tcx> for MatchBranchSimplification { +impl<'tcx> crate::MirPass<'tcx> for MatchBranchSimplification { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 1 } @@ -57,8 +57,9 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification { } trait SimplifyMatch<'tcx> { - /// Simplifies a match statement, returning true if the simplification succeeds, false otherwise. - /// Generic code is written here, and we generally don't need a custom implementation. + /// Simplifies a match statement, returning true if the simplification succeeds, false + /// otherwise. Generic code is written here, and we generally don't need a custom + /// implementation. fn simplify( &mut self, tcx: TyCtxt<'tcx>, @@ -240,7 +241,8 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToIf { // Same value in both blocks. Use statement as is. patch.add_statement(parent_end, f.kind.clone()); } else { - // Different value between blocks. Make value conditional on switch condition. + // Different value between blocks. Make value conditional on switch + // condition. let size = tcx.layout_of(param_env.and(discr_ty)).unwrap().size; let const_cmp = Operand::const_from_scalar( tcx, @@ -289,11 +291,11 @@ fn can_cast( #[derive(Default)] struct SimplifyToExp { - transfrom_kinds: Vec<TransfromKind>, + transform_kinds: Vec<TransformKind>, } #[derive(Clone, Copy)] -enum ExpectedTransformKind<'tcx, 'a> { +enum ExpectedTransformKind<'a, 'tcx> { /// Identical statements. Same(&'a StatementKind<'tcx>), /// Assignment statements have the same value. 
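For orientation, the shape of match that `SimplifyToExp` collapses: every arm assigns a value derivable from the scrutinee's discriminant, so the whole `SwitchInt` reduces to a discriminant read plus a cast. A source-level sketch (hypothetical functions, not from this diff):

```rust
enum E {
    A = 0,
    B = 1,
    C = 2,
}

// Every arm stores exactly its own discriminant value...
fn before(e: E) -> u8 {
    match e {
        E::A => 0,
        E::B => 1,
        E::C => 2,
    }
}

// ...so the match is equivalent to a cast of the discriminant.
fn after(e: E) -> u8 {
    e as u8
}
```

`TransformKind::Same` covers arms whose statements are identical across targets; `TransformKind::Cast` covers arms like the sketch above, where the stored constant tracks the discriminant.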
@@ -302,17 +304,17 @@ enum ExpectedTransformKind<'tcx, 'a> { Cast { place: &'a Place<'tcx>, ty: Ty<'tcx> }, } -enum TransfromKind { +enum TransformKind { Same, Cast, } -impl From<ExpectedTransformKind<'_, '_>> for TransfromKind { +impl From<ExpectedTransformKind<'_, '_>> for TransformKind { fn from(compare_type: ExpectedTransformKind<'_, '_>) -> Self { match compare_type { - ExpectedTransformKind::Same(_) => TransfromKind::Same, - ExpectedTransformKind::SameByEq { .. } => TransfromKind::Same, - ExpectedTransformKind::Cast { .. } => TransfromKind::Cast, + ExpectedTransformKind::Same(_) => TransformKind::Same, + ExpectedTransformKind::SameByEq { .. } => TransformKind::Same, + ExpectedTransformKind::Cast { .. } => TransformKind::Cast, } } } @@ -394,14 +396,16 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { return None; } - // We first compare the two branches, and then the other branches need to fulfill the same conditions. + // We first compare the two branches, and then the other branches need to fulfill the same + // conditions. let mut expected_transform_kinds = Vec::new(); for (f, s) in iter::zip(first_stmts, second_stmts) { let compare_type = match (&f.kind, &s.kind) { // If two statements are exactly the same, we can optimize. (f_s, s_s) if f_s == s_s => ExpectedTransformKind::Same(f_s), - // If two statements are assignments with the match values to the same place, we can optimize. + // If two statements are assignments with the match values to the same place, we + // can optimize. ( StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))), StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))), @@ -475,7 +479,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { } } } - self.transfrom_kinds = expected_transform_kinds.into_iter().map(|c| c.into()).collect(); + self.transform_kinds = expected_transform_kinds.into_iter().map(|c| c.into()).collect(); Some(()) } @@ -493,13 +497,13 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { let (_, first) = targets.iter().next().unwrap(); let first = &bbs[first]; - for (t, s) in iter::zip(&self.transfrom_kinds, &first.statements) { + for (t, s) in iter::zip(&self.transform_kinds, &first.statements) { match (t, &s.kind) { - (TransfromKind::Same, _) => { + (TransformKind::Same, _) => { patch.add_statement(parent_end, s.kind.clone()); } ( - TransfromKind::Cast, + TransformKind::Cast, StatementKind::Assign(box (lhs, Rvalue::Use(Operand::Constant(f_c)))), ) => { let operand = Operand::Copy(Place::from(discr_local)); diff --git a/compiler/rustc_mir_transform/src/mentioned_items.rs b/compiler/rustc_mir_transform/src/mentioned_items.rs index 32c8064ebca..f24de609e6b 100644 --- a/compiler/rustc_mir_transform/src/mentioned_items.rs +++ b/compiler/rustc_mir_transform/src/mentioned_items.rs @@ -1,19 +1,19 @@ use rustc_middle::mir::visit::Visitor; -use rustc_middle::mir::{self, Location, MentionedItem, MirPass}; +use rustc_middle::mir::{self, Location, MentionedItem}; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::{self, TyCtxt}; use rustc_session::Session; use rustc_span::source_map::Spanned; -pub struct MentionedItems; +pub(super) struct MentionedItems; struct MentionedItemsVisitor<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, - mentioned_items: &'a mut Vec<Spanned<MentionedItem<'tcx>>>, + mentioned_items: Vec<Spanned<MentionedItem<'tcx>>>, } -impl<'tcx> MirPass<'tcx> for MentionedItems { +impl<'tcx> crate::MirPass<'tcx> for MentionedItems { fn is_enabled(&self, _sess: &Session) 
-> bool { // If this pass is skipped the collector assume that nothing got mentioned! We could // potentially skip it in opt-level 0 if we are sure that opt-level will never *remove* uses @@ -23,9 +23,9 @@ impl<'tcx> MirPass<'tcx> for MentionedItems { } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) { - let mut mentioned_items = Vec::new(); - MentionedItemsVisitor { tcx, body, mentioned_items: &mut mentioned_items }.visit_body(body); - body.set_mentioned_items(mentioned_items); + let mut visitor = MentionedItemsVisitor { tcx, body, mentioned_items: Vec::new() }; + visitor.visit_body(body); + body.set_mentioned_items(visitor.mentioned_items); } } @@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for MentionedItemsVisitor<'_, 'tcx> { source_ty.builtin_deref(true).map(|t| t.kind()), target_ty.builtin_deref(true).map(|t| t.kind()), ) { - (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, // &str/&[T] unsizing + // &str/&[T] unsizing + (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, + _ => true, }; if may_involve_vtable { diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs index 1e87a0e01d9..b6d6ef5de1d 100644 --- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs +++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs @@ -7,9 +7,9 @@ use rustc_middle::ty::TyCtxt; use crate::simplify; -pub struct MultipleReturnTerminators; +pub(super) struct MultipleReturnTerminators; -impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators { +impl<'tcx> crate::MirPass<'tcx> for MultipleReturnTerminators { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 4 } diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs index 885dbd5f339..98fa149e2bc 100644 --- a/compiler/rustc_mir_transform/src/nrvo.rs +++ b/compiler/rustc_mir_transform/src/nrvo.rs @@ -6,8 +6,7 @@ use rustc_middle::bug; use rustc_middle::mir::visit::{MutVisitor, NonUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{self, BasicBlock, Local, Location}; use rustc_middle::ty::TyCtxt; - -use crate::MirPass; +use tracing::{debug, trace}; /// This pass looks for MIR that always copies the same local into the return place and eliminates /// the copy by renaming all uses of that local to `_0`. @@ -31,9 +30,9 @@ use crate::MirPass; /// /// [#47954]: https://github.com/rust-lang/rust/pull/47954 /// [#71003]: https://github.com/rust-lang/rust/pull/71003 -pub struct RenameReturnPlace; +pub(super) struct RenameReturnPlace; -impl<'tcx> MirPass<'tcx> for RenameReturnPlace { +impl<'tcx> crate::MirPass<'tcx> for RenameReturnPlace { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { // unsound: #111005 sess.mir_opt_level() > 0 && sess.opts.unstable_opts.unsound_mir_opts diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs index 824a4b2f2df..29f8b4f6e4d 100644 --- a/compiler/rustc_mir_transform/src/pass_manager.rs +++ b/compiler/rustc_mir_transform/src/pass_manager.rs @@ -1,18 +1,99 @@ +use std::cell::RefCell; +use std::collections::hash_map::Entry; + +use rustc_data_structures::fx::FxHashMap; use rustc_middle::mir::{self, Body, MirPhase, RuntimePhase}; use rustc_middle::ty::TyCtxt; use rustc_session::Session; +use tracing::trace; use crate::lint::lint_body; -use crate::{validate, MirPass}; +use crate::validate; + +thread_local! 
{ + static PASS_NAMES: RefCell<FxHashMap<&'static str, &'static str>> = { + RefCell::new(FxHashMap::default()) + }; +} -/// Just like `MirPass`, except it cannot mutate `Body`. -pub trait MirLint<'tcx> { +/// Converts a MIR pass name into a snake case form to match the profiling naming style. +fn to_profiler_name(type_name: &'static str) -> &'static str { + PASS_NAMES.with(|names| match names.borrow_mut().entry(type_name) { + Entry::Occupied(e) => *e.get(), + Entry::Vacant(e) => { + let snake_case: String = type_name + .chars() + .flat_map(|c| { + if c.is_ascii_uppercase() { + vec!['_', c.to_ascii_lowercase()] + } else if c == '-' { + vec!['_'] + } else { + vec![c] + } + }) + .collect(); + let result = &*String::leak(format!("mir_pass{}", snake_case)); + e.insert(result); + result + } + }) +} + +// const wrapper for `if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }` +const fn c_name(name: &'static str) -> &'static str { + // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable. + // and inline into call site + let bytes = name.as_bytes(); + let mut i = bytes.len(); + while i > 0 && bytes[i - 1] != b':' { + i = i - 1; + } + let (_, bytes) = bytes.split_at(i); + match std::str::from_utf8(bytes) { + Ok(name) => name, + Err(_) => name, + } +} + +/// A streamlined trait that you can implement to create a pass; the +/// pass will be named after the type, and it will consist of a main +/// loop that goes over each available MIR and applies `run_pass`. +pub(super) trait MirPass<'tcx> { fn name(&self) -> &'static str { - // FIXME Simplify the implementation once more `str` methods get const-stable. + // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable. + // See copypaste in `MirLint` + const { + let name = std::any::type_name::<Self>(); + c_name(name) + } + } + + fn profiler_name(&self) -> &'static str { + to_profiler_name(self.name()) + } + + /// Returns `true` if this pass is enabled with the current combination of compiler flags. + fn is_enabled(&self, _sess: &Session) -> bool { + true + } + + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>); + + fn is_mir_dump_enabled(&self) -> bool { + true + } +} + +/// Just like `MirPass`, except it cannot mutate `Body`, and MIR dumping is +/// disabled (via the `Lint` adapter). +pub(super) trait MirLint<'tcx> { + fn name(&self) -> &'static str { + // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable. // See copypaste in `MirPass` const { let name = std::any::type_name::<Self>(); - rustc_middle::util::common::c_name(name) + c_name(name) } } @@ -25,7 +106,7 @@ pub trait MirLint<'tcx> { /// An adapter for `MirLint`s that implements `MirPass`. #[derive(Debug, Clone)] -pub struct Lint<T>(pub T); +pub(super) struct Lint<T>(pub T); impl<'tcx, T> MirPass<'tcx> for Lint<T> where @@ -48,7 +129,7 @@ where } } -pub struct WithMinOptLevel<T>(pub u32, pub T); +pub(super) struct WithMinOptLevel<T>(pub u32, pub T); impl<'tcx, T> MirPass<'tcx> for WithMinOptLevel<T> where @@ -69,7 +150,7 @@ where /// Run the sequence of passes without validating the MIR after each pass. The MIR is still /// validated at the end. 
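A worked example of the profiler-name derivation above (a standalone reimplementation with the same logic, for illustration only):

```rust
fn to_profiler_name(type_name: &str) -> String {
    // Prefix each uppercase letter with `_` and lowercase it; map `-` to `_`.
    let snake: String = type_name
        .chars()
        .flat_map(|c| {
            if c.is_ascii_uppercase() {
                vec!['_', c.to_ascii_lowercase()]
            } else if c == '-' {
                vec!['_']
            } else {
                vec![c]
            }
        })
        .collect();
    format!("mir_pass{snake}")
}

fn main() {
    // `c_name` first strips the module path at the last `:`, so a pass type
    // like `rustc_mir_transform::add_retag::AddRetag` is profiled as:
    assert_eq!(to_profiler_name("AddRetag"), "mir_pass_add_retag");
}
```

The real implementation leaks each computed name into a thread-local map so that repeated lookups hand back the same `&'static str`.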
-pub fn run_passes_no_validate<'tcx>( +pub(super) fn run_passes_no_validate<'tcx>( tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn MirPass<'tcx>], @@ -79,7 +160,7 @@ pub fn run_passes_no_validate<'tcx>( } /// The optional `phase_change` is applied after executing all the passes, if present -pub fn run_passes<'tcx>( +pub(super) fn run_passes<'tcx>( tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn MirPass<'tcx>], @@ -88,7 +169,7 @@ pub fn run_passes<'tcx>( run_passes_inner(tcx, body, passes, phase_change, true); } -pub fn should_run_pass<'tcx, P>(tcx: TyCtxt<'tcx>, pass: &P) -> bool +pub(super) fn should_run_pass<'tcx, P>(tcx: TyCtxt<'tcx>, pass: &P) -> bool where P: MirPass<'tcx> + ?Sized, { @@ -182,24 +263,13 @@ fn run_passes_inner<'tcx>( body.pass_count = 1; } - - if let Some(coroutine) = body.coroutine.as_mut() { - if let Some(by_move_body) = coroutine.by_move_body.as_mut() { - run_passes_inner(tcx, by_move_body, passes, phase_change, validate_each); - } - } } -pub fn validate_body<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, when: String) { +pub(super) fn validate_body<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, when: String) { validate::Validator { when, mir_phase: body.phase }.run_pass(tcx, body); } -pub fn dump_mir_for_pass<'tcx>( - tcx: TyCtxt<'tcx>, - body: &Body<'tcx>, - pass_name: &str, - is_after: bool, -) { +fn dump_mir_for_pass<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, pass_name: &str, is_after: bool) { mir::dump_mir( tcx, true, @@ -210,7 +280,7 @@ pub fn dump_mir_for_pass<'tcx>( ); } -pub fn dump_mir_for_phase_change<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { +pub(super) fn dump_mir_for_phase_change<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { assert_eq!(body.pass_count, 0); mir::dump_mir(tcx, true, body.phase.name(), &"after", body, |_, _| Ok(())) } diff --git a/compiler/rustc_mir_transform/src/post_drop_elaboration.rs b/compiler/rustc_mir_transform/src/post_drop_elaboration.rs new file mode 100644 index 00000000000..75721d46076 --- /dev/null +++ b/compiler/rustc_mir_transform/src/post_drop_elaboration.rs @@ -0,0 +1,13 @@ +use rustc_const_eval::check_consts; +use rustc_middle::mir::*; +use rustc_middle::ty::TyCtxt; + +use crate::MirLint; + +pub(super) struct CheckLiveDrops; + +impl<'tcx> MirLint<'tcx> for CheckLiveDrops { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + check_consts::post_drop_elaboration::check_live_drops(tcx, body); + } +} diff --git a/compiler/rustc_mir_transform/src/prettify.rs b/compiler/rustc_mir_transform/src/prettify.rs index 14dd0c6f61e..937c207776b 100644 --- a/compiler/rustc_mir_transform/src/prettify.rs +++ b/compiler/rustc_mir_transform/src/prettify.rs @@ -15,9 +15,9 @@ use rustc_session::Session; /// /// Thus after this pass, all the successors of a block are later than it in the /// `IndexVec`, unless that successor is a back-edge (such as from a loop). -pub struct ReorderBasicBlocks; +pub(super) struct ReorderBasicBlocks; -impl<'tcx> MirPass<'tcx> for ReorderBasicBlocks { +impl<'tcx> crate::MirPass<'tcx> for ReorderBasicBlocks { fn is_enabled(&self, _session: &Session) -> bool { false } @@ -43,9 +43,9 @@ impl<'tcx> MirPass<'tcx> for ReorderBasicBlocks { /// assigned or referenced will have a smaller number. /// /// (Does not reorder arguments nor the [`RETURN_PLACE`].) 
-pub struct ReorderLocals; +pub(super) struct ReorderLocals; -impl<'tcx> MirPass<'tcx> for ReorderLocals { +impl<'tcx> crate::MirPass<'tcx> for ReorderLocals { fn is_enabled(&self, _session: &Session) -> bool { false } @@ -63,7 +63,7 @@ impl<'tcx> MirPass<'tcx> for ReorderLocals { finder.visit_basic_block_data(bb, bbd); } - // track everything in case there are some locals that we never saw, + // Track everything in case there are some locals that we never saw, // such as in non-block things like debug info or in non-uses. for local in body.local_decls.indices() { finder.track(local); @@ -87,7 +87,7 @@ impl<'tcx> MirPass<'tcx> for ReorderLocals { fn permute<I: rustc_index::Idx + Ord, T>(data: &mut IndexVec<I, T>, map: &IndexSlice<I, I>) { // FIXME: It would be nice to have a less-awkward way to apply permutations, - // but I don't know one that exists. `sort_by_cached_key` has logic for it + // but I don't know one that exists. `sort_by_cached_key` has logic for it // internally, but not in a way that we're allowed to use here. let mut enumerated: Vec<_> = std::mem::take(data).into_iter_enumerated().collect(); enumerated.sort_by_key(|p| map[p.0]); @@ -135,8 +135,8 @@ impl<'tcx> Visitor<'tcx> for LocalFinder { } struct LocalUpdater<'tcx> { - pub map: IndexVec<Local, Local>, - pub tcx: TyCtxt<'tcx>, + map: IndexVec<Local, Local>, + tcx: TyCtxt<'tcx>, } impl<'tcx> MutVisitor<'tcx> for LocalUpdater<'tcx> { diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 6e84914ef97..59df99f858d 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -1,16 +1,14 @@ //! A pass that promotes borrows of constant rvalues. //! -//! The rvalues considered constant are trees of temps, -//! each with exactly one initialization, and holding -//! a constant value with no interior mutability. -//! They are placed into a new MIR constant body in -//! `promoted` and the borrow rvalue is replaced with -//! a `Literal::Promoted` using the index into `promoted` -//! of that constant MIR. +//! The rvalues considered constant are trees of temps, each with exactly one +//! initialization, and holding a constant value with no interior mutability. +//! They are placed into a new MIR constant body in `promoted` and the borrow +//! rvalue is replaced with a `Literal::Promoted` using the index into +//! `promoted` of that constant MIR. //! -//! This pass assumes that every use is dominated by an -//! initialization and can otherwise silence errors, if -//! move analysis runs after promotion on broken MIR. +//! This pass assumes that every use is dominated by an initialization and can +//! otherwise silence errors, if move analysis runs after promotion on broken +//! MIR. use std::assert_matches::assert_matches; use std::cell::Cell; @@ -27,6 +25,7 @@ use rustc_middle::ty::{self, GenericArgs, List, Ty, TyCtxt, TypeVisitableExt}; use rustc_middle::{bug, mir, span_bug}; use rustc_span::source_map::Spanned; use rustc_span::Span; +use tracing::{debug, instrument}; /// A `MirPass` for promotion. /// @@ -36,11 +35,12 @@ use rustc_span::Span; /// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each /// newly created `Constant`. #[derive(Default)] -pub struct PromoteTemps<'tcx> { +pub(super) struct PromoteTemps<'tcx> { + // Must use `Cell` because `run_pass` takes `&self`, not `&mut self`. 
pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>, } -impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> { +impl<'tcx> crate::MirPass<'tcx> for PromoteTemps<'tcx> { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // There's not really any point in promoting errorful MIR. // @@ -385,7 +385,8 @@ impl<'tcx> Validator<'_, 'tcx> { fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> { match kind { // Reject these borrow types just to be safe. - // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase. + // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a + // usecase. BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => { return Err(Unpromotable); } @@ -467,7 +468,8 @@ impl<'tcx> Validator<'_, 'tcx> { let lhs_ty = lhs.ty(self.body, self.tcx); if let ty::RawPtr(_, _) | ty::FnPtr(..) = lhs_ty.kind() { - // Raw and fn pointer operations are not allowed inside consts and thus not promotable. + // Raw and fn pointer operations are not allowed inside consts and thus not + // promotable. assert_matches!( op, BinOp::Eq @@ -497,7 +499,8 @@ impl<'tcx> Validator<'_, 'tcx> { Some(x) if x != 0 => {} // okay _ => return Err(Unpromotable), // value not known or 0 -- not okay } - // Furthermore, for signed divison, we also have to exclude `int::MIN / -1`. + // Furthermore, for signed division, we also have to exclude `int::MIN / + // -1`. if lhs_ty.is_signed() { match rhs_val.map(|x| x.to_int(sz)) { Some(-1) | None => { @@ -511,8 +514,11 @@ impl<'tcx> Validator<'_, 'tcx> { }; let lhs_min = sz.signed_int_min(); match lhs_val.map(|x| x.to_int(sz)) { - Some(x) if x != lhs_min => {} // okay - _ => return Err(Unpromotable), // value not known or int::MIN -- not okay + // okay + Some(x) if x != lhs_min => {} + + // value not known or int::MIN -- not okay + _ => return Err(Unpromotable), } } _ => {} @@ -814,8 +820,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { TerminatorKind::Call { mut func, mut args, call_source: desugar, fn_span, .. } => { - // This promoted involves a function call, so it may fail to evaluate. - // Let's make sure it is added to `required_consts` so that failure cannot get lost. + // This promoted involves a function call, so it may fail to evaluate. Let's + // make sure it is added to `required_consts` so that failure cannot get lost. self.add_to_required = true; self.visit_operand(&mut func, loc); diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs index 973a191d786..4c3a99b79d4 100644 --- a/compiler/rustc_mir_transform/src/ref_prop.rs +++ b/compiler/rustc_mir_transform/src/ref_prop.rs @@ -10,6 +10,7 @@ use rustc_middle::ty::TyCtxt; use rustc_mir_dataflow::impls::MaybeStorageDead; use rustc_mir_dataflow::storage::always_storage_live_locals; use rustc_mir_dataflow::Analysis; +use tracing::{debug, instrument}; use crate::ssa::{SsaLocals, StorageLiveLocals}; @@ -69,9 +70,9 @@ use crate::ssa::{SsaLocals, StorageLiveLocals}; /// /// For immutable borrows, we do not need to preserve such uniqueness property, /// so we perform all the possible instantiations without removing the `_1 = &_2` statement. 
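
To make the comment above concrete, here is a source-level analogue of the substitution `ReferencePropagation` performs (illustrative Rust, not MIR or compiler code; the pass itself rewrites places such as `(*_1)` inside a MIR body):

```rust
// Before: `r = &x` corresponds to `_1 = &_2`, and `*r` is the use being replaced.
fn before(x: u8) -> u8 {
    let r = &x;
    *r + 1
}

// After: `*r` has been replaced by `x` itself. As the comment above notes, for
// immutable borrows the borrow statement is left in place; later cleanup passes
// can remove it once it is unused.
fn after(x: u8) -> u8 {
    let _r = &x;
    x + 1
}
```
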
-pub struct ReferencePropagation; +pub(super) struct ReferencePropagation; -impl<'tcx> MirPass<'tcx> for ReferencePropagation { +impl<'tcx> crate::MirPass<'tcx> for ReferencePropagation { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 2 } @@ -252,11 +253,8 @@ fn compute_replacement<'tcx>( debug!(?targets); - let mut finder = ReplacementFinder { - targets: &mut targets, - can_perform_opt, - allowed_replacements: FxHashSet::default(), - }; + let mut finder = + ReplacementFinder { targets, can_perform_opt, allowed_replacements: FxHashSet::default() }; let reachable_blocks = traversal::reachable_as_bitset(body); for (bb, bbdata) in body.basic_blocks.iter_enumerated() { // Only visit reachable blocks as we rely on dataflow. @@ -268,19 +266,19 @@ fn compute_replacement<'tcx>( let allowed_replacements = finder.allowed_replacements; return Replacer { tcx, - targets, + targets: finder.targets, storage_to_remove, allowed_replacements, any_replacement: false, }; - struct ReplacementFinder<'a, 'tcx, F> { - targets: &'a mut IndexVec<Local, Value<'tcx>>, + struct ReplacementFinder<'tcx, F> { + targets: IndexVec<Local, Value<'tcx>>, can_perform_opt: F, allowed_replacements: FxHashSet<(Local, Location)>, } - impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'_, 'tcx, F> + impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'tcx, F> where F: FnMut(Place<'tcx>, Location) -> bool, { @@ -344,7 +342,7 @@ fn fully_replacable_locals(ssa: &SsaLocals) -> BitSet<Local> { replacable } -/// Utility to help performing subtitution of `*pattern` by `target`. +/// Utility to help performing substitution of `*pattern` by `target`. struct Replacer<'tcx> { tcx: TyCtxt<'tcx>, targets: IndexVec<Local, Value<'tcx>>, diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs index 1df5737e859..55394e93a5c 100644 --- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs +++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs @@ -3,13 +3,14 @@ use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; use rustc_target::spec::PanicStrategy; +use tracing::debug; /// A pass that removes noop landing pads and replaces jumps to them with /// `UnwindAction::Continue`. This is important because otherwise LLVM generates /// terrible code for these. -pub struct RemoveNoopLandingPads; +pub(super) struct RemoveNoopLandingPads; -impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads { +impl<'tcx> crate::MirPass<'tcx> for RemoveNoopLandingPads { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.panic_strategy() != PanicStrategy::Abort } @@ -17,7 +18,61 @@ impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let def_id = body.source.def_id(); debug!(?def_id); - self.remove_nop_landing_pads(body) + + // Skip the pass if there are no blocks with a resume terminator. 
+ let has_resume = body + .basic_blocks + .iter_enumerated() + .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume)); + if !has_resume { + debug!("remove_noop_landing_pads: no resume block in MIR"); + return; + } + + // make sure there's a resume block without any statements + let resume_block = { + let mut patch = MirPatch::new(body); + let resume_block = patch.resume_block(); + patch.apply(body); + resume_block + }; + debug!("remove_noop_landing_pads: resume block is {:?}", resume_block); + + let mut jumps_folded = 0; + let mut landing_pads_removed = 0; + let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); + + // This is a post-order traversal, so that if A post-dominates B + // then A will be visited before B. + let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect(); + for bb in postorder { + debug!(" processing {:?}", bb); + if let Some(unwind) = body[bb].terminator_mut().unwind_mut() { + if let UnwindAction::Cleanup(unwind_bb) = *unwind { + if nop_landing_pads.contains(unwind_bb) { + debug!(" removing noop landing pad"); + landing_pads_removed += 1; + *unwind = UnwindAction::Continue; + } + } + } + + for target in body[bb].terminator_mut().successors_mut() { + if *target != resume_block && nop_landing_pads.contains(*target) { + debug!(" folding noop jump to {:?} to resume block", target); + *target = resume_block; + jumps_folded += 1; + } + } + + let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads); + if is_nop_landing_pad { + nop_landing_pads.insert(bb); + } + debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad); + } + + debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed); } } @@ -81,61 +136,4 @@ impl RemoveNoopLandingPads { | TerminatorKind::InlineAsm { .. } => false, } } - - fn remove_nop_landing_pads(&self, body: &mut Body<'_>) { - // Skip the pass if there are no blocks with a resume terminator. - let has_resume = body - .basic_blocks - .iter_enumerated() - .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume)); - if !has_resume { - debug!("remove_noop_landing_pads: no resume block in MIR"); - return; - } - - // make sure there's a resume block without any statements - let resume_block = { - let mut patch = MirPatch::new(body); - let resume_block = patch.resume_block(); - patch.apply(body); - resume_block - }; - debug!("remove_noop_landing_pads: resume block is {:?}", resume_block); - - let mut jumps_folded = 0; - let mut landing_pads_removed = 0; - let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); - - // This is a post-order traversal, so that if A post-dominates B - // then A will be visited before B. 
- let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect(); - for bb in postorder { - debug!(" processing {:?}", bb); - if let Some(unwind) = body[bb].terminator_mut().unwind_mut() { - if let UnwindAction::Cleanup(unwind_bb) = *unwind { - if nop_landing_pads.contains(unwind_bb) { - debug!(" removing noop landing pad"); - landing_pads_removed += 1; - *unwind = UnwindAction::Continue; - } - } - } - - for target in body[bb].terminator_mut().successors_mut() { - if *target != resume_block && nop_landing_pads.contains(*target) { - debug!(" folding noop jump to {:?} to resume block", target); - *target = resume_block; - jumps_folded += 1; - } - } - - let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads); - if is_nop_landing_pad { - nop_landing_pads.insert(bb); - } - debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad); - } - - debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed); - } } diff --git a/compiler/rustc_mir_transform/src/remove_place_mention.rs b/compiler/rustc_mir_transform/src/remove_place_mention.rs index 78335b3b5e0..71399eb72f0 100644 --- a/compiler/rustc_mir_transform/src/remove_place_mention.rs +++ b/compiler/rustc_mir_transform/src/remove_place_mention.rs @@ -2,10 +2,11 @@ use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::trace; -pub struct RemovePlaceMention; +pub(super) struct RemovePlaceMention; -impl<'tcx> MirPass<'tcx> for RemovePlaceMention { +impl<'tcx> crate::MirPass<'tcx> for RemovePlaceMention { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { !sess.opts.unstable_opts.mir_keep_place_mention } diff --git a/compiler/rustc_mir_transform/src/remove_storage_markers.rs b/compiler/rustc_mir_transform/src/remove_storage_markers.rs index f68e592db15..3ecb4a8994f 100644 --- a/compiler/rustc_mir_transform/src/remove_storage_markers.rs +++ b/compiler/rustc_mir_transform/src/remove_storage_markers.rs @@ -2,10 +2,11 @@ use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::trace; -pub struct RemoveStorageMarkers; +pub(super) struct RemoveStorageMarkers; -impl<'tcx> MirPass<'tcx> for RemoveStorageMarkers { +impl<'tcx> crate::MirPass<'tcx> for RemoveStorageMarkers { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 && !sess.emit_lifetime_markers() } diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs index fae1cb5f7d8..d80a4edecdf 100644 --- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs +++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs @@ -6,8 +6,6 @@ use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex}; use rustc_mir_dataflow::{move_path_children_matching, Analysis, MaybeReachable}; use rustc_target::abi::FieldIdx; -use crate::MirPass; - /// Removes `Drop` terminators whose target is known to be uninitialized at /// that point. /// @@ -16,9 +14,9 @@ use crate::MirPass; /// like [#90770]. 
/// /// [#90770]: https://github.com/rust-lang/rust/issues/90770 -pub struct RemoveUninitDrops; +pub(super) struct RemoveUninitDrops; -impl<'tcx> MirPass<'tcx> for RemoveUninitDrops { +impl<'tcx> crate::MirPass<'tcx> for RemoveUninitDrops { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let param_env = tcx.param_env(body.source.def_id()); let move_data = @@ -108,8 +106,9 @@ fn is_needs_drop_and_init<'tcx>( // If its projection *is* present in `MoveData`, then the field may have been moved // from separate from its parent. Recurse. adt.variants().iter_enumerated().any(|(vid, variant)| { - // Enums have multiple variants, which are discriminated with a `Downcast` projection. - // Structs have a single variant, and don't use a `Downcast` projection. + // Enums have multiple variants, which are discriminated with a `Downcast` + // projection. Structs have a single variant, and don't use a `Downcast` + // projection. let mpi = if adt.is_enum() { let downcast = move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid)); diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs index 2778d91e17b..28925ba1beb 100644 --- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs +++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs @@ -6,12 +6,13 @@ use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; +use tracing::{debug, trace}; use super::simplify::simplify_cfg; -pub struct RemoveUnneededDrops; +pub(super) struct RemoveUnneededDrops; -impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops { +impl<'tcx> crate::MirPass<'tcx> for RemoveUnneededDrops { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { trace!("Running RemoveUnneededDrops on {:?}", body.source); diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs index 9a94cae3382..f13bb1c5993 100644 --- a/compiler/rustc_mir_transform/src/remove_zsts.rs +++ b/compiler/rustc_mir_transform/src/remove_zsts.rs @@ -4,9 +4,9 @@ use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::{self, Ty, TyCtxt}; -pub struct RemoveZsts; +pub(super) struct RemoveZsts; -impl<'tcx> MirPass<'tcx> for RemoveZsts { +impl<'tcx> crate::MirPass<'tcx> for RemoveZsts { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 } diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs index 50637e2ac03..99d1cd6f63e 100644 --- a/compiler/rustc_mir_transform/src/required_consts.rs +++ b/compiler/rustc_mir_transform/src/required_consts.rs @@ -1,26 +1,21 @@ use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::{traversal, Body, ConstOperand, Location}; -pub struct RequiredConstsVisitor<'a, 'tcx> { - required_consts: &'a mut Vec<ConstOperand<'tcx>>, +pub(super) struct RequiredConstsVisitor<'tcx> { + required_consts: Vec<ConstOperand<'tcx>>, } -impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> { - fn new(required_consts: &'a mut Vec<ConstOperand<'tcx>>) -> Self { - RequiredConstsVisitor { required_consts } - } - - pub fn compute_required_consts(body: &mut Body<'tcx>) { - let mut required_consts = Vec::new(); - let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts); +impl<'tcx> RequiredConstsVisitor<'tcx> { + pub(super) fn compute_required_consts(body: &mut Body<'tcx>) { + let mut visitor = RequiredConstsVisitor { required_consts: Vec::new() }; for (bb, bb_data) 
in traversal::reverse_postorder(&body) { - required_consts_visitor.visit_basic_block_data(bb, bb_data); + visitor.visit_basic_block_data(bb, bb_data); } - body.set_required_consts(required_consts); + body.set_required_consts(visitor.required_consts); } } -impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> { +impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'tcx> { fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, _: Location) { if constant.const_.is_required_const() { self.required_consts.push(*constant); diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs index 5eaa024f846..f3b2f78b31c 100644 --- a/compiler/rustc_mir_transform/src/reveal_all.rs +++ b/compiler/rustc_mir_transform/src/reveal_all.rs @@ -4,9 +4,9 @@ use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::{self, Ty, TyCtxt}; -pub struct RevealAll; +pub(super) struct RevealAll; -impl<'tcx> MirPass<'tcx> for RevealAll { +impl<'tcx> crate::MirPass<'tcx> for RevealAll { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); RevealAllVisitor { tcx, param_env }.visit_body_preserves_cfg(body); @@ -35,9 +35,9 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> { if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) { return; } - // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed. - // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these - // projections anymore. + // `OpaqueCast` projections are only needed if there are opaque types on which projections + // are performed. After the `RevealAll` pass, all opaque types are replaced with their + // hidden types, so we don't need these projections anymore. 
place.projection = self.tcx.mk_place_elems( &place .projection diff --git a/compiler/rustc_mir_transform/src/sanity_check.rs b/compiler/rustc_mir_transform/src/sanity_check.rs new file mode 100644 index 00000000000..c9445d18162 --- /dev/null +++ b/compiler/rustc_mir_transform/src/sanity_check.rs @@ -0,0 +1,11 @@ +use rustc_middle::mir::Body; +use rustc_middle::ty::TyCtxt; +use rustc_mir_dataflow::rustc_peek::sanity_check; + +pub(super) struct SanityCheck; + +impl<'tcx> crate::MirLint<'tcx> for SanityCheck { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + sanity_check(tcx, body); + } +} diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 09b4e5e0711..47d04d8a00b 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -17,6 +17,7 @@ use rustc_span::source_map::Spanned; use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT}; use rustc_target::spec::abi::Abi; +use tracing::{debug, instrument}; use crate::{ abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, @@ -25,7 +26,7 @@ use crate::{ mod async_destructor_ctor; -pub fn provide(providers: &mut Providers) { +pub(super) fn provide(providers: &mut Providers) { providers.mir_shims = make_shim; } @@ -78,15 +79,11 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body< receiver_by_ref, } => build_construct_coroutine_by_move_shim(tcx, coroutine_closure_def_id, receiver_by_ref), - ty::InstanceKind::CoroutineKindShim { coroutine_def_id } => { - return tcx.optimized_mir(coroutine_def_id).coroutine_by_move_body().unwrap().clone(); - } - ty::InstanceKind::DropGlue(def_id, ty) => { // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end // of this function. Is this intentional? 
- if let Some(ty::Coroutine(coroutine_def_id, args)) = ty.map(Ty::kind) { - let coroutine_body = tcx.optimized_mir(*coroutine_def_id); + if let Some(&ty::Coroutine(coroutine_def_id, args)) = ty.map(Ty::kind) { + let coroutine_body = tcx.optimized_mir(coroutine_def_id); let ty::Coroutine(_, id_args) = *tcx.type_of(coroutine_def_id).skip_binder().kind() else { @@ -105,7 +102,9 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body< args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap(), ty::ClosureKind::FnOnce ); - coroutine_body.coroutine_by_move_body().unwrap().coroutine_drop().unwrap() + tcx.optimized_mir(tcx.coroutine_by_move_body_def_id(coroutine_def_id)) + .coroutine_drop() + .unwrap() }; let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args); @@ -332,7 +331,7 @@ fn new_body<'tcx>( body } -pub struct DropShimElaborator<'a, 'tcx> { +pub(super) struct DropShimElaborator<'a, 'tcx> { pub body: &'a Body<'tcx>, pub patch: MirPatch<'tcx>, pub tcx: TyCtxt<'tcx>, @@ -405,8 +404,7 @@ fn build_thread_local_shim<'tcx>( let span = tcx.def_span(def_id); let source_info = SourceInfo::outermost(span); - let mut blocks = IndexVec::with_capacity(1); - blocks.push(BasicBlockData { + let blocks = IndexVec::from_raw(vec![BasicBlockData { statements: vec![Statement { source_info, kind: StatementKind::Assign(Box::new(( @@ -416,7 +414,7 @@ fn build_thread_local_shim<'tcx>( }], terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }), is_cleanup: false, - }); + }]); new_body( MirSource::from_instance(instance), @@ -914,7 +912,7 @@ fn build_call_shim<'tcx>( body } -pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> { +pub(super) fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> { debug_assert!(tcx.is_constructor(ctor_id)); let param_env = tcx.param_env_reveal_all_normalized(ctor_id); @@ -1004,7 +1002,8 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t let locals = local_decls_for_sig(&sig, span); let source_info = SourceInfo::outermost(span); - // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful provenance. + // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful + // provenance. let rvalue = Rvalue::Cast( CastKind::FnPtrToPtr, Operand::Move(Place::from(Local::new(1))), @@ -1071,19 +1070,26 @@ fn build_construct_coroutine_by_move_shim<'tcx>( let locals = local_decls_for_sig(&sig, span); let mut fields = vec![]; + + // Move all of the closure args. for idx in 1..sig.inputs().len() { fields.push(Operand::Move(Local::from_usize(idx + 1).into())); } + for (idx, ty) in args.as_coroutine_closure().upvar_tys().iter().enumerate() { if receiver_by_ref { // The only situation where it's possible is when we capture immuatable references, // since those don't need to be reborrowed with the closure's env lifetime. Since // references are always `Copy`, just emit a copy. - assert_matches!( - ty.kind(), - ty::Ref(_, _, hir::Mutability::Not), - "field should be captured by immutable ref if we have an `Fn` instance" - ); + if !matches!(ty.kind(), ty::Ref(_, _, hir::Mutability::Not)) { + // This copy is only sound if it's a `&T`. This may be + // reachable e.g. when eagerly computing the `Fn` instance + // of an async closure that doesn't borrowck. 
+ tcx.dcx().delayed_bug(format!( + "field should be captured by immutable ref if we have \ + an `Fn` instance, but it was: {ty}" + )); + } fields.push(Operand::Copy(tcx.mk_place_field( self_local, FieldIdx::from_usize(idx), diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs index 9c3f903e0ea..c88953a1d1a 100644 --- a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs +++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs @@ -19,10 +19,11 @@ use rustc_span::source_map::respan; use rustc_span::{Span, Symbol}; use rustc_target::abi::{FieldIdx, VariantIdx}; use rustc_target::spec::PanicStrategy; +use tracing::debug; use super::{local_decls_for_sig, new_body}; -pub fn build_async_destructor_ctor_shim<'tcx>( +pub(super) fn build_async_destructor_ctor_shim<'tcx>( tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>, diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs index 4fe8cf6213f..7ed43547e11 100644 --- a/compiler/rustc_mir_transform/src/simplify.rs +++ b/compiler/rustc_mir_transform/src/simplify.rs @@ -33,8 +33,9 @@ use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; use rustc_span::DUMMY_SP; use smallvec::SmallVec; +use tracing::{debug, trace}; -pub enum SimplifyCfg { +pub(super) enum SimplifyCfg { Initial, PromoteConsts, RemoveFalseEdges, @@ -49,7 +50,7 @@ pub enum SimplifyCfg { } impl SimplifyCfg { - pub fn name(&self) -> &'static str { + fn name(&self) -> &'static str { match self { SimplifyCfg::Initial => "SimplifyCfg-initial", SimplifyCfg::PromoteConsts => "SimplifyCfg-promote-consts", @@ -65,7 +66,7 @@ impl SimplifyCfg { } } -pub(crate) fn simplify_cfg(body: &mut Body<'_>) { +pub(super) fn simplify_cfg(body: &mut Body<'_>) { CfgSimplifier::new(body).simplify(); remove_dead_blocks(body); @@ -73,7 +74,7 @@ pub(crate) fn simplify_cfg(body: &mut Body<'_>) { body.basic_blocks_mut().raw.shrink_to_fit(); } -impl<'tcx> MirPass<'tcx> for SimplifyCfg { +impl<'tcx> crate::MirPass<'tcx> for SimplifyCfg { fn name(&self) -> &'static str { self.name() } @@ -84,13 +85,13 @@ impl<'tcx> MirPass<'tcx> for SimplifyCfg { } } -pub struct CfgSimplifier<'a, 'tcx> { +struct CfgSimplifier<'a, 'tcx> { basic_blocks: &'a mut IndexSlice<BasicBlock, BasicBlockData<'tcx>>, pred_count: IndexVec<BasicBlock, u32>, } impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { - pub fn new(body: &'a mut Body<'tcx>) -> Self { + fn new(body: &'a mut Body<'tcx>) -> Self { let mut pred_count = IndexVec::from_elem(0u32, &body.basic_blocks); // we can't use mir.predecessors() here because that counts @@ -110,7 +111,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { CfgSimplifier { basic_blocks, pred_count } } - pub fn simplify(mut self) { + fn simplify(mut self) { self.strip_nops(); // Vec of the blocks that should be merged. We store the indices here, instead of the @@ -279,7 +280,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> { } } -pub fn simplify_duplicate_switch_targets(terminator: &mut Terminator<'_>) { +pub(super) fn simplify_duplicate_switch_targets(terminator: &mut Terminator<'_>) { if let TerminatorKind::SwitchInt { targets, .. 
} = &mut terminator.kind { let otherwise = targets.otherwise(); if targets.iter().any(|t| t.1 == otherwise) { @@ -291,7 +292,7 @@ pub fn simplify_duplicate_switch_targets(terminator: &mut Terminator<'_>) { } } -pub(crate) fn remove_dead_blocks(body: &mut Body<'_>) { +pub(super) fn remove_dead_blocks(body: &mut Body<'_>) { let should_deduplicate_unreachable = |bbdata: &BasicBlockData<'_>| { // CfgSimplifier::simplify leaves behind some unreachable basic blocks without a // terminator. Those blocks will be deleted by remove_dead_blocks, but we run just @@ -359,13 +360,13 @@ pub(crate) fn remove_dead_blocks(body: &mut Body<'_>) { } } -pub enum SimplifyLocals { +pub(super) enum SimplifyLocals { BeforeConstProp, AfterGVN, Final, } -impl<'tcx> MirPass<'tcx> for SimplifyLocals { +impl<'tcx> crate::MirPass<'tcx> for SimplifyLocals { fn name(&self) -> &'static str { match &self { SimplifyLocals::BeforeConstProp => "SimplifyLocals-before-const-prop", @@ -380,23 +381,33 @@ impl<'tcx> MirPass<'tcx> for SimplifyLocals { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { trace!("running SimplifyLocals on {:?}", body.source); - simplify_locals(body, tcx); - } -} -pub fn remove_unused_definitions<'tcx>(body: &mut Body<'tcx>) { - // First, we're going to get a count of *actual* uses for every `Local`. - let mut used_locals = UsedLocals::new(body); + // First, we're going to get a count of *actual* uses for every `Local`. + let mut used_locals = UsedLocals::new(body); - // Next, we're going to remove any `Local` with zero actual uses. When we remove those - // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals` - // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from - // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a - // fixedpoint where there are no more unused locals. - remove_unused_definitions_helper(&mut used_locals, body); + // Next, we're going to remove any `Local` with zero actual uses. When we remove those + // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals` + // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from + // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a + // fixedpoint where there are no more unused locals. + remove_unused_definitions_helper(&mut used_locals, body); + + // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the + // `Local`s. + let map = make_local_map(&mut body.local_decls, &used_locals); + + // Only bother running the `LocalUpdater` if we actually found locals to remove. + if map.iter().any(Option::is_none) { + // Update references to all vars and tmps now + let mut updater = LocalUpdater { map, tcx }; + updater.visit_body_preserves_cfg(body); + + body.local_decls.shrink_to_fit(); + } + } } -pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) { +pub(super) fn remove_unused_definitions<'tcx>(body: &mut Body<'tcx>) { // First, we're going to get a count of *actual* uses for every `Local`. let mut used_locals = UsedLocals::new(body); @@ -406,18 +417,6 @@ pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) { // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a // fixedpoint where there are no more unused locals. 
remove_unused_definitions_helper(&mut used_locals, body);
-
-    // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
-    let map = make_local_map(&mut body.local_decls, &used_locals);
-
-    // Only bother running the `LocalUpdater` if we actually found locals to remove.
-    if map.iter().any(Option::is_none) {
-        // Update references to all vars and tmps now
-        let mut updater = LocalUpdater { map, tcx };
-        updater.visit_body_preserves_cfg(body);
-
-        body.local_decls.shrink_to_fit();
-    }
 }
 
 /// Construct the mapping while swapping out unused stuff out from the `vec`.
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
index c746041ebd8..e83b4727c48 100644
--- a/compiler/rustc_mir_transform/src/simplify_branches.rs
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -1,12 +1,14 @@
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
+use tracing::trace;
 
-pub enum SimplifyConstCondition {
+pub(super) enum SimplifyConstCondition {
     AfterConstProp,
     Final,
 }
+
 /// A pass that replaces a branch with a goto when its condition is known.
-impl<'tcx> MirPass<'tcx> for SimplifyConstCondition {
+impl<'tcx> crate::MirPass<'tcx> for SimplifyConstCondition {
     fn name(&self) -> &'static str {
         match self {
             SimplifyConstCondition::AfterConstProp => "SimplifyConstCondition-after-const-prop",
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index 59f67d8e73f..e8d8335b136 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -7,8 +7,7 @@ use rustc_middle::mir::{
     TerminatorKind,
 };
 use rustc_middle::ty::{Ty, TyCtxt};
-
-use super::MirPass;
+use tracing::trace;
 
 /// Pass to convert `if` conditions on integrals into switches on the integral.
 /// For an example, it turns something like
@@ -24,9 +23,9 @@ use super::MirPass;
 /// ```ignore (MIR)
 /// switchInt(_4) -> [43i32: bb3, otherwise: bb2];
 /// ```
-pub struct SimplifyComparisonIntegral;
+pub(super) struct SimplifyComparisonIntegral;
 
-impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
+impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
         sess.mir_opt_level() > 0
     }
@@ -74,12 +73,13 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
             _ => unreachable!(),
         }
 
-        // delete comparison statement if it the value being switched on was moved, which means it can not be user later on
+        // Delete the comparison statement if the value being switched on was moved, which
+        // means it cannot be used later on.
         if opt.can_remove_bin_op_stmt {
             bb.statements[opt.bin_op_stmt_idx].make_nop();
         } else {
-            // if the integer being compared to a const integral is being moved into the comparison,
-            // e.g `_2 = Eq(move _3, const 'x');`
+            // if the integer being compared to a const integral is being moved into the
+            // comparison, e.g. `_2 = Eq(move _3, const 'x');`
             // we want to avoid making a double move later on in the switchInt on _3.
             // So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`,
             // we convert the move in the comparison statement to a copy.
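
The `SimplifyLocals` hunk above describes a fixed-point loop: removing a dead definition decrements the use counts of its operands, which may in turn create new dead locals. A self-contained sketch of that idea over a toy IR (the `Def` type and `remove_unused` helper are invented for illustration, not rustc code):

```rust
use std::collections::HashMap;

// A toy definition `local = f(operands...)`.
struct Def {
    local: u32,
    operands: Vec<u32>,
}

// Repeatedly drop zero-use definitions, decrementing the use counts of their
// operands, until a pass removes nothing: the fixed point.
fn remove_unused(defs: &mut Vec<Def>, use_counts: &mut HashMap<u32, usize>) {
    loop {
        let before = defs.len();
        defs.retain(|def| {
            if use_counts.get(&def.local).copied().unwrap_or(0) == 0 {
                // Removing `local = ...` also removes one use of each operand,
                // possibly making *them* dead on a later iteration.
                for op in &def.operands {
                    if let Some(n) = use_counts.get_mut(op) {
                        *n = n.saturating_sub(1);
                    }
                }
                false // drop this definition
            } else {
                true // keep it
            }
        });
        if defs.len() == before {
            break;
        }
    }
}
```
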
@@ -103,12 +103,15 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral { // remove StorageDead (if it exists) being used in the assign of the comparison for (stmt_idx, stmt) in bb.statements.iter().enumerate() { - if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local) - { + if !matches!( + stmt.kind, + StatementKind::StorageDead(local) if local == opt.to_switch_on.local + ) { continue; } storage_deads_to_remove.push((stmt_idx, opt.bb_idx)); - // if we have StorageDeads to remove then make sure to insert them at the top of each target + // if we have StorageDeads to remove then make sure to insert them at the top of + // each target for bb_idx in new_targets.all_targets() { storage_deads_to_insert.push(( *bb_idx, @@ -208,7 +211,8 @@ fn find_branch_value_info<'tcx>( (Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on)) | (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => { let branch_value_ty = branch_value.const_.ty(); - // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats + // we only want to apply this optimization if we are matching on integrals (and chars), + // as it is not possible to switch on floats if !branch_value_ty.is_integral() && !branch_value_ty.is_char() { return None; }; @@ -223,7 +227,8 @@ fn find_branch_value_info<'tcx>( struct OptimizationInfo<'tcx> { /// Basic block to apply the optimization bb_idx: BasicBlock, - /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be removed - i.e the statement is used later on + /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be + /// removed - i.e the statement is used later on bin_op_stmt_idx: usize, /// Can remove Eq/Ne assignment can_remove_bin_op_stmt: bool, diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs index 35cb6872fe9..26059268c37 100644 --- a/compiler/rustc_mir_transform/src/single_use_consts.rs +++ b/compiler/rustc_mir_transform/src/single_use_consts.rs @@ -19,9 +19,9 @@ use rustc_middle::ty::TyCtxt; /// /// It also removes *never*-used constants, since it had all the information /// needed to do that too, including updating the debug info. 
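
A source-level picture of what the `SingleUseConsts` doc comment above describes (illustrative only; the real pass operates on MIR locals and also updates the debug info that references them):

```rust
// Before: `x` is assigned a constant exactly once and used exactly once.
fn before() -> u32 {
    let x = 42u32;
    x + 1
}

// After: the constant is inlined at its single use and the local is gone.
// A constant local with *zero* uses would simply be deleted.
fn after() -> u32 {
    42u32 + 1
}
```
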
-pub struct SingleUseConsts; +pub(super) struct SingleUseConsts; -impl<'tcx> MirPass<'tcx> for SingleUseConsts { +impl<'tcx> crate::MirPass<'tcx> for SingleUseConsts { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 } diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs index c2108795372..e6dd2cfd862 100644 --- a/compiler/rustc_mir_transform/src/sroa.rs +++ b/compiler/rustc_mir_transform/src/sroa.rs @@ -9,10 +9,11 @@ use rustc_middle::mir::*; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_mir_dataflow::value_analysis::{excluded_locals, iter_fields}; use rustc_target::abi::{FieldIdx, FIRST_VARIANT}; +use tracing::{debug, instrument}; -pub struct ScalarReplacementOfAggregates; +pub(super) struct ScalarReplacementOfAggregates; -impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates { +impl<'tcx> crate::MirPass<'tcx> for ScalarReplacementOfAggregates { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() >= 2 } diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs index 76591f52625..ba64216f9e1 100644 --- a/compiler/rustc_mir_transform/src/ssa.rs +++ b/compiler/rustc_mir_transform/src/ssa.rs @@ -14,8 +14,9 @@ use rustc_middle::middle::resolve_bound_vars::Set1; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::{ParamEnv, TyCtxt}; +use tracing::{debug, instrument, trace}; -pub struct SsaLocals { +pub(super) struct SsaLocals { /// Assignments to each local. This defines whether the local is SSA. assignments: IndexVec<Local, Set1<DefLocation>>, /// We visit the body in reverse postorder, to ensure each local is assigned before it is used. @@ -31,14 +32,18 @@ pub struct SsaLocals { borrowed_locals: BitSet<Local>, } -pub enum AssignedValue<'a, 'tcx> { +pub(super) enum AssignedValue<'a, 'tcx> { Arg, Rvalue(&'a mut Rvalue<'tcx>), Terminator, } impl SsaLocals { - pub fn new<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ParamEnv<'tcx>) -> SsaLocals { + pub(super) fn new<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + param_env: ParamEnv<'tcx>, + ) -> SsaLocals { let assignment_order = Vec::with_capacity(body.local_decls.len()); let assignments = IndexVec::from_elem(Set1::Empty, &body.local_decls); @@ -100,25 +105,25 @@ impl SsaLocals { ssa } - pub fn num_locals(&self) -> usize { + pub(super) fn num_locals(&self) -> usize { self.assignments.len() } - pub fn locals(&self) -> impl Iterator<Item = Local> { + pub(super) fn locals(&self) -> impl Iterator<Item = Local> { self.assignments.indices() } - pub fn is_ssa(&self, local: Local) -> bool { + pub(super) fn is_ssa(&self, local: Local) -> bool { matches!(self.assignments[local], Set1::One(_)) } /// Return the number of uses if a local that are not "Deref". 
- pub fn num_direct_uses(&self, local: Local) -> u32 { + pub(super) fn num_direct_uses(&self, local: Local) -> u32 { self.direct_uses[local] } #[inline] - pub fn assignment_dominates( + pub(super) fn assignment_dominates( &self, dominators: &Dominators<BasicBlock>, local: Local, @@ -130,7 +135,7 @@ impl SsaLocals { } } - pub fn assignments<'a, 'tcx>( + pub(super) fn assignments<'a, 'tcx>( &'a self, body: &'a Body<'tcx>, ) -> impl Iterator<Item = (Local, &'a Rvalue<'tcx>, Location)> + 'a { @@ -147,7 +152,7 @@ impl SsaLocals { }) } - pub fn for_each_assignment_mut<'tcx>( + pub(super) fn for_each_assignment_mut<'tcx>( &self, basic_blocks: &mut IndexSlice<BasicBlock, BasicBlockData<'tcx>>, mut f: impl FnMut(Local, AssignedValue<'_, 'tcx>, Location), @@ -193,17 +198,17 @@ impl SsaLocals { /// _d => _a // transitively through _c /// /// Exception: we do not see through the return place, as it cannot be instantiated. - pub fn copy_classes(&self) -> &IndexSlice<Local, Local> { + pub(super) fn copy_classes(&self) -> &IndexSlice<Local, Local> { &self.copy_classes } /// Set of SSA locals that are immutably borrowed. - pub fn borrowed_locals(&self) -> &BitSet<Local> { + pub(super) fn borrowed_locals(&self) -> &BitSet<Local> { &self.borrowed_locals } /// Make a property uniform on a copy equivalence class by removing elements. - pub fn meet_copy_equivalence(&self, property: &mut BitSet<Local>) { + pub(super) fn meet_copy_equivalence(&self, property: &mut BitSet<Local>) { // Consolidate to have a local iff all its copies are. // // `copy_classes` defines equivalence classes between locals. The `local`s that recursively @@ -230,7 +235,7 @@ impl SsaLocals { } } -struct SsaVisitor<'tcx, 'a> { +struct SsaVisitor<'a, 'tcx> { body: &'a Body<'tcx>, dominators: &'a Dominators<BasicBlock>, assignments: IndexVec<Local, Set1<DefLocation>>, @@ -256,7 +261,7 @@ impl SsaVisitor<'_, '_> { } } -impl<'tcx> Visitor<'tcx> for SsaVisitor<'tcx, '_> { +impl<'tcx> Visitor<'tcx> for SsaVisitor<'_, 'tcx> { fn visit_local(&mut self, local: Local, ctxt: PlaceContext, loc: Location) { match ctxt { PlaceContext::MutatingUse(MutatingUseContext::Projection) diff --git a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs index 81baf58a5e0..5612e779d6b 100644 --- a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs +++ b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs @@ -10,10 +10,9 @@ use rustc_middle::mir::{ use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{Ty, TyCtxt}; use rustc_target::abi::{Abi, Variants}; +use tracing::trace; -use crate::MirPass; - -pub struct UnreachableEnumBranching; +pub(super) struct UnreachableEnumBranching; fn get_discriminant_local(terminator: &TerminatorKind<'_>) -> Option<Local> { if let TerminatorKind::SwitchInt { discr: Operand::Move(p), .. } = terminator { @@ -73,7 +72,7 @@ fn variant_discriminants<'tcx>( } } -impl<'tcx> MirPass<'tcx> for UnreachableEnumBranching { +impl<'tcx> crate::MirPass<'tcx> for UnreachableEnumBranching { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 } @@ -157,9 +156,9 @@ impl<'tcx> MirPass<'tcx> for UnreachableEnumBranching { }; true } - // If and only if there is a variant that does not have a branch set, - // change the current of otherwise as the variant branch and set otherwise to unreachable. 
-    // It transforms following code
+    // If and only if there is a variant that does not have a branch set, change the
+    // current `otherwise` branch into that variant's branch, and set `otherwise` to
+    // unreachable. It transforms the following code
     // ```rust
     // match c {
     //     Ordering::Less => 1,
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index a6c3c3b189e..f3dafd13824 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -10,9 +10,9 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_target::abi::Size;
 
-pub struct UnreachablePropagation;
+pub(super) struct UnreachablePropagation;
 
-impl MirPass<'_> for UnreachablePropagation {
+impl crate::MirPass<'_> for UnreachablePropagation {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
         // Enable only under -Zmir-opt-level=2 as this can make programs less debuggable.
         sess.mir_opt_level() >= 2
@@ -26,7 +26,8 @@ impl MirPass<'_> for UnreachablePropagation {
             let terminator = bb_data.terminator();
             let is_unreachable = match &terminator.kind {
                 TerminatorKind::Unreachable => true,
-                // This will unconditionally run into an unreachable and is therefore unreachable as well.
+                // This will unconditionally run into an unreachable and is therefore unreachable
+                // as well.
                 TerminatorKind::Goto { target } if unreachable_blocks.contains(target) => {
                     patch.patch_terminator(bb, TerminatorKind::Unreachable);
                     true
@@ -85,8 +86,9 @@ fn remove_successors_from_switch<'tcx>(
     //     }
     // }
     //
-    // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to
-    // turn it into just `x` later. Without the unreachable, such a transformation would be illegal.
+    // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or
+    // LLVM to turn it into just `x` later. Without the unreachable, such a transformation would be
+    // illegal.
     //
     // In order to preserve this information, we record reachable and unreachable targets as
     // `Assume` statements in MIR.
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 36908036796..e5837b1b475 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -25,7 +25,7 @@ enum EdgeKind {
     Normal,
 }
 
-pub struct Validator {
+pub(super) struct Validator {
     /// Describes at which point in the pipeline this validation is happening.
     pub when: String,
     /// The phase for which we are upholding the dialect. If the given phase forbids a specific
@@ -36,7 +36,7 @@
     pub mir_phase: MirPhase,
 }
 
-impl<'tcx> MirPass<'tcx> for Validator {
+impl<'tcx> crate::MirPass<'tcx> for Validator {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
         // terribly important that they pass the validator. However, I think other passes might
@@ -102,25 +102,6 @@ impl<'tcx> MirPass<'tcx> for Validator {
                 }
             }
         }
-
-        // Enforce that coroutine-closure layouts are identical.
-        if let Some(layout) = body.coroutine_layout_raw()
-            && let Some(by_move_body) = body.coroutine_by_move_body()
-            && let Some(by_move_layout) = by_move_body.coroutine_layout_raw()
-        {
-            // FIXME(async_closures): We could do other validation here?
- if layout.variant_fields.len() != by_move_layout.variant_fields.len() { - cfg_checker.fail( - Location::START, - format!( - "Coroutine layout has different number of variant fields from \ - by-move coroutine layout:\n\ - layout: {layout:#?}\n\ - by_move_layout: {by_move_layout:#?}", - ), - ); - } - } } } @@ -406,10 +387,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { } self.check_unwind_edge(location, unwind); - // The code generation assumes that there are no critical call edges. The assumption - // is used to simplify inserting code that should be executed along the return edge - // from the call. FIXME(tmiasko): Since this is a strictly code generation concern, - // the code generation should be responsible for handling it. + // The code generation assumes that there are no critical call edges. The + // assumption is used to simplify inserting code that should be executed along + // the return edge from the call. FIXME(tmiasko): Since this is a strictly code + // generation concern, the code generation should be responsible for handling + // it. if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized) && self.is_critical_call_edge(target, unwind) { @@ -422,8 +404,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { ); } - // The call destination place and Operand::Move place used as an argument might be - // passed by a reference to the callee. Consequently they cannot be packed. + // The call destination place and Operand::Move place used as an argument might + // be passed by a reference to the callee. Consequently they cannot be packed. if is_within_packed(self.tcx, &self.body.local_decls, destination).is_some() { // This is bad! The callee will expect the memory to be aligned. self.fail( @@ -549,7 +531,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { /// /// `caller_body` is used to detect cycles in MIR inlining and MIR validation before /// `optimized_mir` is available. -pub fn validate_types<'tcx>( +pub(super) fn validate_types<'tcx>( tcx: TyCtxt<'tcx>, mir_phase: MirPhase, param_env: ty::ParamEnv<'tcx>, @@ -733,7 +715,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { // since we may be in the process of computing this MIR in the // first place. let layout = if def_id == self.caller_body.source.def_id() { - // FIXME: This is not right for async closures. + self.caller_body.coroutine_layout_raw() + } else if self.tcx.needs_coroutine_by_move_body_def_id(def_id) + && let ty::ClosureKind::FnOnce = + args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap() + && self.caller_body.source.def_id() + == self.tcx.coroutine_by_move_body_def_id(def_id) + { + // Same if this is the by-move body of a coroutine-closure. self.caller_body.coroutine_layout_raw() } else { self.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) @@ -961,9 +950,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } AggregateKind::RawPtr(pointee_ty, mutability) => { if !matches!(self.mir_phase, MirPhase::Runtime(_)) { - // It would probably be fine to support this in earlier phases, - // but at the time of writing it's only ever introduced from intrinsic lowering, - // so earlier things just `bug!` on it. + // It would probably be fine to support this in earlier phases, but at the + // time of writing it's only ever introduced from intrinsic lowering, so + // earlier things just `bug!` on it. 
self.fail(location, "RawPtr should be in runtime MIR only");
                 }
@@ -1117,10 +1106,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
                 UnOp::PtrMetadata => {
                     if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
-                        // It would probably be fine to support this in earlier phases,
-                        // but at the time of writing it's only ever introduced from intrinsic lowering
-                        // or other runtime-phase optimization passes,
-                        // so earlier things can just `bug!` on it.
+                        // It would probably be fine to support this in earlier phases, but at
+                        // the time of writing it's only ever introduced from intrinsic
+                        // lowering or other runtime-phase optimization passes, so earlier
+                        // things can just `bug!` on it.
                         self.fail(location, "PtrMetadata should be in runtime MIR only");
                     }
@@ -1514,7 +1503,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
                 if let TerminatorKind::TailCall { .. } = terminator.kind {
-                    // FIXME(explicit_tail_calls): implement tail-call specific checks here (such as signature matching, forbidding closures, etc)
+                    // FIXME(explicit_tail_calls): implement tail-call specific checks here (such
+                    // as signature matching, forbidding closures, etc)
                 }
             }
             TerminatorKind::Assert { cond, .. } => {
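
The validator hunks above rely on the notion of a critical call edge (see `is_critical_call_edge`). A small sketch of the underlying CFG property, using a toy representation rather than rustc's basic blocks:

```rust
// An edge A -> B is critical when A has more than one successor and B has
// more than one predecessor: no code can be inserted "on" the edge without
// also running on some other path through the CFG.
fn is_critical_edge(succs_of_a: usize, preds_of_b: usize) -> bool {
    succs_of_a > 1 && preds_of_b > 1
}

fn main() {
    // A call terminator with both a return target and an unwind target has two
    // successors; if the return target is also reachable from elsewhere, the
    // return edge is critical and must be split by inserting a guard block.
    assert!(is_critical_edge(2, 2));
    assert!(!is_critical_edge(1, 2)); // a single successor is never critical
}
```
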
