Diffstat (limited to 'compiler/rustc_mir_transform/src')
20 files changed, 373 insertions, 462 deletions
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index f880476cec2..16977a63c59 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -4,6 +4,7 @@
 //! of MIR building, and only after this pass we think of the program has having the
 //! normal MIR semantics.

+use rustc_hir::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};

@@ -27,7 +28,7 @@ fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> b
         // References and Boxes (`noalias` sources)
         ty::Ref(..) => true,
         ty::Adt(..) if ty.is_box() => true,
-        ty::Adt(adt, _) if Some(adt.did()) == tcx.lang_items().ptr_unique() => true,
+        ty::Adt(adt, _) if tcx.is_lang_item(adt.did(), LangItem::PtrUnique) => true,
         // Compound types: recurse
         ty::Array(ty, _) | ty::Slice(ty) => {
             // This does not branch so we keep the depth the same.
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index 84512e81637..bf79b4e133a 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -68,7 +68,7 @@ use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::CoroutineArgs;
-use rustc_middle::ty::InstanceDef;
+use rustc_middle::ty::InstanceKind;
 use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_mir_dataflow::impls::{
@@ -1276,7 +1276,7 @@ fn create_coroutine_drop_shim<'tcx>(
     // Update the body's def to become the drop glue.
     let coroutine_instance = body.source.instance;
     let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None);
-    let drop_instance = InstanceDef::DropGlue(drop_in_place, Some(coroutine_ty));
+    let drop_instance = InstanceKind::DropGlue(drop_in_place, Some(coroutine_ty));

     // Temporary change MirSource to coroutine's instance so that dump_mir produces more sensible
     // filename.
diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
index 10c0567eb4b..69d21a63f55 100644
--- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
+++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
@@ -75,7 +75,7 @@ use rustc_middle::bug;
 use rustc_middle::hir::place::{Projection, ProjectionKind};
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::{self, dump_mir, MirPass};
-use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt, TypeVisitableExt};
 use rustc_target::abi::{FieldIdx, VariantIdx};

 pub struct ByMoveBody;
@@ -102,7 +102,7 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
         // We don't need to generate a by-move coroutine if the coroutine body was
         // produced by the `CoroutineKindShim`, since it's already by-move.
-        if matches!(body.source.instance, ty::InstanceDef::CoroutineKindShim { .. }) {
+        if matches!(body.source.instance, ty::InstanceKind::CoroutineKindShim { .. }) {
             return;
         }
@@ -193,7 +193,7 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
         MakeByMoveBody { tcx, field_remapping, by_move_coroutine_ty }.visit_body(&mut by_move_body);
         dump_mir(tcx, false, "coroutine_by_move", &0, &by_move_body, |_, _| Ok(()));

         // FIXME: use query feeding to generate the body right here and then only store the `DefId` of the new body.
-        by_move_body.source = mir::MirSource::from_instance(InstanceDef::CoroutineKindShim {
+        by_move_body.source = mir::MirSource::from_instance(InstanceKind::CoroutineKindShim {
             coroutine_def_id: coroutine_def_id.to_def_id(),
         });
         body.coroutine.as_mut().unwrap().by_move_body = Some(by_move_body);
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index b5968517d77..a8b0f4a8d6d 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -168,11 +168,6 @@ impl CoverageCounters {
         self.counter_increment_sites.len()
     }

-    #[cfg(test)]
-    pub(super) fn num_expressions(&self) -> usize {
-        self.expressions.len()
-    }
-
     fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> BcbCounter {
         if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
             bug!(
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index fd74a2a97e2..360dccb240d 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -14,16 +14,16 @@ use std::ops::{Index, IndexMut};

 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
 /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
 #[derive(Debug)]
-pub(super) struct CoverageGraph {
+pub(crate) struct CoverageGraph {
     bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
     bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
-    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
-    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub(crate) successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub(crate) predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
     dominators: Option<Dominators<BasicCoverageBlock>>,
 }

 impl CoverageGraph {
-    pub fn from_mir(mir_body: &mir::Body<'_>) -> Self {
+    pub(crate) fn from_mir(mir_body: &mir::Body<'_>) -> Self {
         let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);

         // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
@@ -135,24 +135,28 @@ impl CoverageGraph {
     }

     #[inline(always)]
-    pub fn iter_enumerated(
+    pub(crate) fn iter_enumerated(
         &self,
     ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
         self.bcbs.iter_enumerated()
     }

     #[inline(always)]
-    pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
+    pub(crate) fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
         if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
     }

     #[inline(always)]
-    pub fn dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
+    pub(crate) fn dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
         self.dominators.as_ref().unwrap().dominates(dom, node)
     }

     #[inline(always)]
-    pub fn cmp_in_dominator_order(&self, a: BasicCoverageBlock, b: BasicCoverageBlock) -> Ordering {
+    pub(crate) fn cmp_in_dominator_order(
+        &self,
+        a: BasicCoverageBlock,
+        b: BasicCoverageBlock,
+    ) -> Ordering {
         self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b)
     }

@@ -166,7 +170,7 @@ impl CoverageGraph {
     ///
     /// FIXME: That assumption might not be true for [`TerminatorKind::Yield`]?
     #[inline(always)]
-    pub(super) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
+    pub(crate) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
         // Even though bcb0 conceptually has an extra virtual in-edge due to
         // being the entry point, we've already asserted that it has no _other_
         // in-edges, so there's no possibility of it having _multiple_ in-edges.
@@ -212,7 +216,7 @@ impl graph::StartNode for CoverageGraph {
 impl graph::Successors for CoverageGraph {
     #[inline]
     fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> {
-        self.successors[node].iter().cloned()
+        self.successors[node].iter().copied()
     }
 }

@@ -227,7 +231,7 @@ rustc_index::newtype_index! {
     /// A node in the control-flow graph of CoverageGraph.
     #[orderable]
     #[debug_format = "bcb{}"]
-    pub(super) struct BasicCoverageBlock {
+    pub(crate) struct BasicCoverageBlock {
         const START_BCB = 0;
     }
 }

@@ -259,23 +263,23 @@ rustc_index::newtype_index! {
 /// queries (`dominates()`, `predecessors`, `successors`, etc.) have branch (control flow)
 /// significance.
 #[derive(Debug, Clone)]
-pub(super) struct BasicCoverageBlockData {
-    pub basic_blocks: Vec<BasicBlock>,
+pub(crate) struct BasicCoverageBlockData {
+    pub(crate) basic_blocks: Vec<BasicBlock>,
 }

 impl BasicCoverageBlockData {
-    pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
+    fn from(basic_blocks: Vec<BasicBlock>) -> Self {
         assert!(basic_blocks.len() > 0);
         Self { basic_blocks }
     }

     #[inline(always)]
-    pub fn leader_bb(&self) -> BasicBlock {
+    pub(crate) fn leader_bb(&self) -> BasicBlock {
         self.basic_blocks[0]
     }

     #[inline(always)]
-    pub fn last_bb(&self) -> BasicBlock {
+    pub(crate) fn last_bb(&self) -> BasicBlock {
         *self.basic_blocks.last().unwrap()
     }
 }

@@ -364,7 +368,7 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
 /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
 /// ensures a loop is completely traversed before processing Blocks after the end of the loop.
 #[derive(Debug)]
-pub(super) struct TraversalContext {
+struct TraversalContext {
     /// BCB with one or more incoming loop backedges, indicating which loop
     /// this context is for.
     ///
@@ -375,7 +379,7 @@ pub(super) struct TraversalContext {
     worklist: VecDeque<BasicCoverageBlock>,
 }

-pub(super) struct TraverseCoverageGraphWithLoops<'a> {
+pub(crate) struct TraverseCoverageGraphWithLoops<'a> {
     basic_coverage_blocks: &'a CoverageGraph,
     backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
@@ -384,7 +388,7 @@
 }

 impl<'a> TraverseCoverageGraphWithLoops<'a> {
-    pub(super) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
+    pub(crate) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
         let backedges = find_loop_backedges(basic_coverage_blocks);

         let worklist = VecDeque::from([basic_coverage_blocks.start_node()]);
@@ -400,7 +404,7 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {

     /// For each loop on the loop context stack (top-down), yields a list of BCBs
     /// within that loop that have an outgoing edge back to the loop header.
-    pub(super) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
+    pub(crate) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
         self.context_stack
             .iter()
             .rev()
@@ -408,39 +412,38 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
             .map(|header_bcb| self.backedges[header_bcb].as_slice())
     }

-    pub(super) fn next(&mut self) -> Option<BasicCoverageBlock> {
+    pub(crate) fn next(&mut self) -> Option<BasicCoverageBlock> {
         debug!(
             "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
             self.context_stack.iter().rev().collect::<Vec<_>>()
         );

         while let Some(context) = self.context_stack.last_mut() {
-            if let Some(bcb) = context.worklist.pop_front() {
-                if !self.visited.insert(bcb) {
-                    debug!("Already visited: {bcb:?}");
-                    continue;
-                }
-                debug!("Visiting {bcb:?}");
-
-                if self.backedges[bcb].len() > 0 {
-                    debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
-                    self.context_stack.push(TraversalContext {
-                        loop_header: Some(bcb),
-                        worklist: VecDeque::new(),
-                    });
-                }
-                self.add_successors_to_worklists(bcb);
-                return Some(bcb);
-            } else {
-                // Strip contexts with empty worklists from the top of the stack
+            let Some(bcb) = context.worklist.pop_front() else {
+                // This stack level is exhausted; pop it and try the next one.
                 self.context_stack.pop();
+                continue;
+            };
+
+            if !self.visited.insert(bcb) {
+                debug!("Already visited: {bcb:?}");
+                continue;
+            }
+            debug!("Visiting {bcb:?}");
+
+            if self.backedges[bcb].len() > 0 {
+                debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
+                self.context_stack
+                    .push(TraversalContext { loop_header: Some(bcb), worklist: VecDeque::new() });
             }
+            self.add_successors_to_worklists(bcb);
+            return Some(bcb);
         }
         None
     }

-    pub fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
+    fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
         let successors = &self.basic_coverage_blocks.successors[bcb];
         debug!("{:?} has {} successors:", bcb, successors.len());

@@ -494,11 +497,11 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
         }
     }

-    pub fn is_complete(&self) -> bool {
+    pub(crate) fn is_complete(&self) -> bool {
         self.visited.count() == self.visited.domain_size()
     }

-    pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
+    pub(crate) fn unvisited(&self) -> Vec<BasicCoverageBlock> {
         let mut unvisited_set: BitSet<BasicCoverageBlock> =
             BitSet::new_filled(self.visited.domain_size());
         unvisited_set.subtract(&self.visited);
@@ -506,7 +509,7 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
     }
 }

-pub(super) fn find_loop_backedges(
+fn find_loop_backedges(
     basic_coverage_blocks: &CoverageGraph,
 ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
     let num_bcbs = basic_coverage_blocks.num_nodes();
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 65715253647..25744009be8 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -49,7 +49,7 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {

 /// Query implementation for `coverage_ids_info`.
 fn coverage_ids_info<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance_def: ty::InstanceDef<'tcx>,
+    instance_def: ty::InstanceKind<'tcx>,
 ) -> CoverageIdsInfo {
     let mir_body = tcx.instance_mir(instance_def);
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index bb6a666ff73..84a70d1f02d 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,9 +1,15 @@
+use std::collections::VecDeque;
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
 use rustc_span::Span;

 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 use crate::coverage::mappings;
-use crate::coverage::spans::from_mir::SpanFromMir;
+use crate::coverage::spans::from_mir::{
+    extract_covspans_and_holes_from_mir, ExtractedCovspans, Hole, SpanFromMir,
+};
 use crate::coverage::ExtractedHirInfo;

 mod from_mir;
@@ -19,50 +25,181 @@ pub(super) fn extract_refined_covspans(
     basic_coverage_blocks: &CoverageGraph,
     code_mappings: &mut impl Extend<mappings::CodeMapping>,
 ) {
-    let sorted_span_buckets =
-        from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
-    for bucket in sorted_span_buckets {
-        let refined_spans = refine_sorted_spans(bucket);
-        code_mappings.extend(refined_spans.into_iter().map(|RefinedCovspan { span, bcb }| {
+    let ExtractedCovspans { mut covspans, mut holes } =
+        extract_covspans_and_holes_from_mir(mir_body, hir_info, basic_coverage_blocks);
+
+    // First, perform the passes that need macro information.
+    covspans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
+    remove_unwanted_macro_spans(&mut covspans);
+    split_visible_macro_spans(&mut covspans);
+
+    // We no longer need the extra information in `SpanFromMir`, so convert to `Covspan`.
+    let mut covspans = covspans.into_iter().map(SpanFromMir::into_covspan).collect::<Vec<_>>();
+
+    let compare_covspans = |a: &Covspan, b: &Covspan| {
+        compare_spans(a.span, b.span)
+            // After deduplication, we want to keep only the most-dominated BCB.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
+    };
+    covspans.sort_by(compare_covspans);
+
+    // Among covspans with the same span, keep only one,
+    // preferring the one with the most-dominated BCB.
+    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
+    // requires a lot more complexity in the span refiner, for little benefit.)
+    covspans.dedup_by(|b, a| a.span.source_equal(b.span));
+
+    // Sort the holes, and merge overlapping/adjacent holes.
+    holes.sort_by(|a, b| compare_spans(a.span, b.span));
+    holes.dedup_by(|b, a| a.merge_if_overlapping_or_adjacent(b));
+
+    // Split the covspans into separate buckets that don't overlap any holes.
+    let buckets = divide_spans_into_buckets(covspans, &holes);
+
+    for mut covspans in buckets {
+        // Make sure each individual bucket is internally sorted.
+        covspans.sort_by(compare_covspans);
+        let _span = debug_span!("processing bucket", ?covspans).entered();
+
+        let mut covspans = remove_unwanted_overlapping_spans(covspans);
+        debug!(?covspans, "after removing overlaps");
+
+        // Do one last merge pass, to simplify the output.
+        covspans.dedup_by(|b, a| a.merge_if_eligible(b));
+        debug!(?covspans, "after merge");
+
+        code_mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
             // Each span produced by the refiner represents an ordinary code region.
             mappings::CodeMapping { span, bcb }
         }));
     }
 }

-#[derive(Debug)]
-struct RefinedCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
+/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
+/// multiple condition/consequent blocks that have the span of the whole macro
+/// invocation, which is unhelpful. Keeping only the first such span seems to
+/// give better mappings, so remove the others.
+///
+/// (The input spans should be sorted in BCB dominator order, so that the
+/// retained "first" span is likely to dominate the others.)
+fn remove_unwanted_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut seen_macro_spans = FxHashSet::default();
+    covspans.retain(|covspan| {
+        // Ignore (retain) non-macro-expansion spans.
+        if covspan.visible_macro.is_none() {
+            return true;
+        }
+
+        // Retain only the first macro-expanded covspan with this span.
+        seen_macro_spans.insert(covspan.span)
+    });
 }

-impl RefinedCovspan {
-    fn is_mergeable(&self, other: &Self) -> bool {
-        self.bcb == other.bcb
-    }
+/// When a span corresponds to a macro invocation that is visible from the
+/// function body, split it into two parts. The first part covers just the
+/// macro name plus `!`, and the second part covers the rest of the macro
+/// invocation. This seems to give better results for code that uses macros.
+fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut extra_spans = vec![];

-    fn merge_from(&mut self, other: &Self) {
-        debug_assert!(self.is_mergeable(other));
-        self.span = self.span.to(other.span);
+    covspans.retain(|covspan| {
+        let Some(visible_macro) = covspan.visible_macro else { return true };
+
+        let split_len = visible_macro.as_str().len() as u32 + 1;
+        let (before, after) = covspan.span.split_at(split_len);
+        if !covspan.span.contains(before) || !covspan.span.contains(after) {
+            // Something is unexpectedly wrong with the split point.
+            // The debug assertion in `split_at` will have already caught this,
+            // but in release builds it's safer to do nothing and maybe get a
+            // bug report for unexpected coverage, rather than risk an ICE.
+            return true;
+        }
+
+        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb));
+        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb));
+        false // Discard the original covspan that we just split.
+    });
+
+    // The newly-split spans are added at the end, so any previous sorting
+    // is not preserved.
+    covspans.extend(extra_spans);
+}
+
+/// Uses the holes to divide the given covspans into buckets, such that:
+/// - No span in any hole overlaps a bucket (truncating the spans if necessary).
+/// - The spans in each bucket are strictly after all spans in previous buckets,
+///   and strictly before all spans in subsequent buckets.
+///
+/// The resulting buckets are sorted relative to each other, but might not be
+/// internally sorted.
+#[instrument(level = "debug")]
+fn divide_spans_into_buckets(input_covspans: Vec<Covspan>, holes: &[Hole]) -> Vec<Vec<Covspan>> {
+    debug_assert!(input_covspans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+    debug_assert!(holes.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
+    // Now we're ready to start carving holes out of the initial coverage spans,
+    // and grouping them in buckets separated by the holes.
+
+    let mut input_covspans = VecDeque::from(input_covspans);
+    let mut fragments = vec![];
+
+    // For each hole:
+    // - Identify the spans that are entirely or partly before the hole.
+    // - Put those spans in a corresponding bucket, truncated to the start of the hole.
+    // - If one of those spans also extends after the hole, put the rest of it
+    //   in a "fragments" vector that is processed by the next hole.
+    let mut buckets = (0..holes.len()).map(|_| vec![]).collect::<Vec<_>>();
+    for (hole, bucket) in holes.iter().zip(&mut buckets) {
+        let fragments_from_prev = std::mem::take(&mut fragments);
+
+        // Only inspect spans that precede or overlap this hole,
+        // leaving the rest to be inspected by later holes.
+        // (This relies on the spans and holes both being sorted.)
+        let relevant_input_covspans =
+            drain_front_while(&mut input_covspans, |c| c.span.lo() < hole.span.hi());
+
+        for covspan in fragments_from_prev.into_iter().chain(relevant_input_covspans) {
+            let (before, after) = covspan.split_around_hole_span(hole.span);
+            bucket.extend(before);
+            fragments.extend(after);
+        }
     }
+
+    // After finding the spans before each hole, any remaining fragments/spans
+    // form their own final bucket, after the final hole.
+    // (If there were no holes, this will just be all of the initial spans.)
+    fragments.extend(input_covspans);
+    buckets.push(fragments);
+
+    buckets
+}
+
+/// Similar to `.drain(..)`, but stops just before it would remove an item not
+/// satisfying the predicate.
+fn drain_front_while<'a, T>(
+    queue: &'a mut VecDeque<T>,
+    mut pred_fn: impl FnMut(&T) -> bool,
+) -> impl Iterator<Item = T> + Captures<'a> {
+    std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
 }

 /// Takes one of the buckets of (sorted) spans extracted from MIR, and "refines"
-/// those spans by removing spans that overlap in unwanted ways, and by merging
-/// compatible adjacent spans.
+/// those spans by removing spans that overlap in unwanted ways.
 #[instrument(level = "debug")]
-fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
+fn remove_unwanted_overlapping_spans(sorted_spans: Vec<Covspan>) -> Vec<Covspan> {
+    debug_assert!(sorted_spans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
     // Holds spans that have been read from the input vector, but haven't yet
     // been committed to the output vector.
     let mut pending = vec![];
     let mut refined = vec![];

     for curr in sorted_spans {
-        pending.retain(|prev: &SpanFromMir| {
+        pending.retain(|prev: &Covspan| {
             if prev.span.hi() <= curr.span.lo() {
                 // There's no overlap between the previous/current covspans,
                 // so move the previous one into the refined list.
-                refined.push(RefinedCovspan { span: prev.span, bcb: prev.bcb });
+                refined.push(prev.clone());
                 false
             } else {
                 // Otherwise, retain the previous covspan only if it has the
@@ -75,22 +212,57 @@ fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
     }

     // Drain the rest of the pending list into the refined list.
-    for prev in pending {
-        refined.push(RefinedCovspan { span: prev.span, bcb: prev.bcb });
+    refined.extend(pending);
+    refined
+}
+
+#[derive(Clone, Debug)]
+struct Covspan {
+    span: Span,
+    bcb: BasicCoverageBlock,
+}
+
+impl Covspan {
+    /// Splits this covspan into 0-2 parts:
+    /// - The part that is strictly before the hole span, if any.
+    /// - The part that is strictly after the hole span, if any.
+    fn split_around_hole_span(&self, hole_span: Span) -> (Option<Self>, Option<Self>) {
+        let before = try {
+            let span = self.span.trim_end(hole_span)?;
+            Self { span, ..*self }
+        };
+        let after = try {
+            let span = self.span.trim_start(hole_span)?;
+            Self { span, ..*self }
+        };
+
+        (before, after)
     }

-    // Do one last merge pass, to simplify the output.
-    debug!(?refined, "before merge");
-    refined.dedup_by(|b, a| {
-        if a.is_mergeable(b) {
-            debug!(?a, ?b, "merging list-adjacent refined spans");
-            a.merge_from(b);
-            true
-        } else {
-            false
+    /// If `self` and `other` can be merged (i.e. they have the same BCB),
+    /// mutates `self.span` to also include `other.span` and returns true.
+    ///
+    /// Note that compatible covspans can be merged even if their underlying
+    /// spans are not overlapping/adjacent; any space between them will also be
+    /// part of the merged covspan.
+    fn merge_if_eligible(&mut self, other: &Self) -> bool {
+        if self.bcb != other.bcb {
+            return false;
         }
-    });
-    debug!(?refined, "after merge");
-    refined
+        self.span = self.span.to(other.span);
+        true
+    }
+}
+
+/// Compares two spans in (lo ascending, hi descending) order.
+fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
+    // First sort by span start.
+    Ord::cmp(&a.lo(), &b.lo())
+        // If span starts are the same, sort by span end in reverse order.
+        // This ensures that if spans A and B are adjacent in the list,
+        // and they overlap but are not equal, then either:
+        // - Span A extends further left, or
+        // - Both have the same start and span A extends further right
+        .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index b1f71035dde..09deb7534bf 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -1,7 +1,3 @@
-use std::collections::VecDeque;
-
-use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::mir::{
@@ -13,25 +9,25 @@ use rustc_span::{ExpnKind, MacroKind, Span, Symbol};
 use crate::coverage::graph::{
     BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB,
 };
+use crate::coverage::spans::Covspan;
 use crate::coverage::ExtractedHirInfo;

+pub(crate) struct ExtractedCovspans {
+    pub(crate) covspans: Vec<SpanFromMir>,
+    pub(crate) holes: Vec<Hole>,
+}
+
 /// Traverses the MIR body to produce an initial collection of coverage-relevant
 /// spans, each associated with a node in the coverage graph (BCB) and possibly
 /// other metadata.
-///
-/// The returned spans are divided into one or more buckets, such that:
-/// - The spans in each bucket are strictly after all spans in previous buckets,
-///   and strictly before all spans in subsequent buckets.
-/// - The contents of each bucket are also sorted, in a specific order that is
-///   expected by the subsequent span-refinement step.
-pub(super) fn mir_to_initial_sorted_coverage_spans(
+pub(crate) fn extract_covspans_and_holes_from_mir(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
     basic_coverage_blocks: &CoverageGraph,
-) -> Vec<Vec<SpanFromMir>> {
+) -> ExtractedCovspans {
     let &ExtractedHirInfo { body_span, .. } = hir_info;

-    let mut initial_spans = vec![];
+    let mut covspans = vec![];
     let mut holes = vec![];

     for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
@@ -40,150 +36,21 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
             body_span,
             bcb,
             bcb_data,
-            &mut initial_spans,
+            &mut covspans,
             &mut holes,
         );
     }

     // Only add the signature span if we found at least one span in the body.
-    if !initial_spans.is_empty() || !holes.is_empty() {
+    if !covspans.is_empty() || !holes.is_empty() {
         // If there is no usable signature span, add a fake one (before refinement)
         // to avoid an ugly gap between the body start and the first real span.
         // FIXME: Find a more principled way to solve this problem.
         let fn_sig_span = hir_info.fn_sig_span_extended.unwrap_or_else(|| body_span.shrink_to_lo());
-        initial_spans.push(SpanFromMir::for_fn_sig(fn_sig_span));
-    }
-
-    initial_spans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
-    remove_unwanted_macro_spans(&mut initial_spans);
-    split_visible_macro_spans(&mut initial_spans);
-
-    let compare_covspans = |a: &SpanFromMir, b: &SpanFromMir| {
-        compare_spans(a.span, b.span)
-            // After deduplication, we want to keep only the most-dominated BCB.
-            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
-    };
-    initial_spans.sort_by(compare_covspans);
-
-    // Among covspans with the same span, keep only one,
-    // preferring the one with the most-dominated BCB.
-    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
-    // requires a lot more complexity in the span refiner, for little benefit.)
-    initial_spans.dedup_by(|b, a| a.span.source_equal(b.span));
-
-    // Sort the holes, and merge overlapping/adjacent holes.
-    holes.sort_by(|a, b| compare_spans(a.span, b.span));
-    holes.dedup_by(|b, a| a.merge_if_overlapping_or_adjacent(b));
-
-    // Now we're ready to start carving holes out of the initial coverage spans,
-    // and grouping them in buckets separated by the holes.
-
-    let mut initial_spans = VecDeque::from(initial_spans);
-    let mut fragments: Vec<SpanFromMir> = vec![];
-
-    // For each hole:
-    // - Identify the spans that are entirely or partly before the hole.
-    // - Put those spans in a corresponding bucket, truncated to the start of the hole.
-    // - If one of those spans also extends after the hole, put the rest of it
-    //   in a "fragments" vector that is processed by the next hole.
-    let mut buckets = (0..holes.len()).map(|_| vec![]).collect::<Vec<_>>();
-    for (hole, bucket) in holes.iter().zip(&mut buckets) {
-        let fragments_from_prev = std::mem::take(&mut fragments);
-
-        // Only inspect spans that precede or overlap this hole,
-        // leaving the rest to be inspected by later holes.
-        // (This relies on the spans and holes both being sorted.)
-        let relevant_initial_spans =
-            drain_front_while(&mut initial_spans, |c| c.span.lo() < hole.span.hi());
-
-        for covspan in fragments_from_prev.into_iter().chain(relevant_initial_spans) {
-            let (before, after) = covspan.split_around_hole_span(hole.span);
-            bucket.extend(before);
-            fragments.extend(after);
-        }
+        covspans.push(SpanFromMir::for_fn_sig(fn_sig_span));
     }

-    // After finding the spans before each hole, any remaining fragments/spans
-    // form their own final bucket, after the final hole.
-    // (If there were no holes, this will just be all of the initial spans.)
-    fragments.extend(initial_spans);
-    buckets.push(fragments);
-
-    // Make sure each individual bucket is still internally sorted.
-    for bucket in &mut buckets {
-        bucket.sort_by(compare_covspans);
-    }
-    buckets
-}
-
-fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
-    // First sort by span start.
-    Ord::cmp(&a.lo(), &b.lo())
-        // If span starts are the same, sort by span end in reverse order.
-        // This ensures that if spans A and B are adjacent in the list,
-        // and they overlap but are not equal, then either:
-        // - Span A extends further left, or
-        // - Both have the same start and span A extends further right
-        .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
-}
-
-/// Similar to `.drain(..)`, but stops just before it would remove an item not
-/// satisfying the predicate.
-fn drain_front_while<'a, T>(
-    queue: &'a mut VecDeque<T>,
-    mut pred_fn: impl FnMut(&T) -> bool,
-) -> impl Iterator<Item = T> + Captures<'a> {
-    std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
-}
-
-/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
-/// multiple condition/consequent blocks that have the span of the whole macro
-/// invocation, which is unhelpful. Keeping only the first such span seems to
-/// give better mappings, so remove the others.
-///
-/// (The input spans should be sorted in BCB dominator order, so that the
-/// retained "first" span is likely to dominate the others.)
-fn remove_unwanted_macro_spans(initial_spans: &mut Vec<SpanFromMir>) {
-    let mut seen_macro_spans = FxHashSet::default();
-    initial_spans.retain(|covspan| {
-        // Ignore (retain) non-macro-expansion spans.
-        if covspan.visible_macro.is_none() {
-            return true;
-        }
-
-        // Retain only the first macro-expanded covspan with this span.
-        seen_macro_spans.insert(covspan.span)
-    });
-}
-
-/// When a span corresponds to a macro invocation that is visible from the
-/// function body, split it into two parts. The first part covers just the
-/// macro name plus `!`, and the second part covers the rest of the macro
-/// invocation. This seems to give better results for code that uses macros.
-fn split_visible_macro_spans(initial_spans: &mut Vec<SpanFromMir>) {
-    let mut extra_spans = vec![];
-
-    initial_spans.retain(|covspan| {
-        let Some(visible_macro) = covspan.visible_macro else { return true };
-
-        let split_len = visible_macro.as_str().len() as u32 + 1;
-        let (before, after) = covspan.span.split_at(split_len);
-        if !covspan.span.contains(before) || !covspan.span.contains(after) {
-            // Something is unexpectedly wrong with the split point.
-            // The debug assertion in `split_at` will have already caught this,
-            // but in release builds it's safer to do nothing and maybe get a
-            // bug report for unexpected coverage, rather than risk an ICE.
-            return true;
-        }
-
-        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb));
-        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb));
-        false // Discard the original covspan that we just split.
-    });
-
-    // The newly-split spans are added at the end, so any previous sorting
-    // is not preserved.
-    initial_spans.extend(extra_spans);
+    ExtractedCovspans { covspans, holes }
 }

 // Generate a set of coverage spans from the filtered set of `Statement`s and `Terminator`s of
@@ -402,12 +269,12 @@ fn unexpand_into_body_span_with_prev(
 }

 #[derive(Debug)]
-struct Hole {
-    span: Span,
+pub(crate) struct Hole {
+    pub(crate) span: Span,
 }

 impl Hole {
-    fn merge_if_overlapping_or_adjacent(&mut self, other: &mut Self) -> bool {
+    pub(crate) fn merge_if_overlapping_or_adjacent(&mut self, other: &mut Self) -> bool {
         if !self.span.overlaps_or_adjacent(other.span) {
             return false;
         }
@@ -418,7 +285,7 @@ impl Hole {
 }

 #[derive(Debug)]
-pub(super) struct SpanFromMir {
+pub(crate) struct SpanFromMir {
     /// A span that has been extracted from MIR and then "un-expanded" back to
     /// within the current function's `body_span`. After various intermediate
     /// processing steps, this span is emitted as part of the final coverage
     /// mappings.
     ///
     /// With the exception of `fn_sig_span`, this should always be contained
     /// within `body_span`.
-    pub(super) span: Span,
-    visible_macro: Option<Symbol>,
-    pub(super) bcb: BasicCoverageBlock,
+    pub(crate) span: Span,
+    pub(crate) visible_macro: Option<Symbol>,
+    pub(crate) bcb: BasicCoverageBlock,
 }

 impl SpanFromMir {
@@ -436,23 +303,12 @@ impl SpanFromMir {
         Self::new(fn_sig_span, None, START_BCB)
     }

-    fn new(span: Span, visible_macro: Option<Symbol>, bcb: BasicCoverageBlock) -> Self {
+    pub(crate) fn new(span: Span, visible_macro: Option<Symbol>, bcb: BasicCoverageBlock) -> Self {
         Self { span, visible_macro, bcb }
     }

-    /// Splits this span into 0-2 parts:
-    /// - The part that is strictly before the hole span, if any.
-    /// - The part that is strictly after the hole span, if any.
-    fn split_around_hole_span(&self, hole_span: Span) -> (Option<Self>, Option<Self>) {
-        let before = try {
-            let span = self.span.trim_end(hole_span)?;
-            Self { span, ..*self }
-        };
-        let after = try {
-            let span = self.span.trim_start(hole_span)?;
-            Self { span, ..*self }
-        };
-
-        (before, after)
+    pub(crate) fn into_covspan(self) -> Covspan {
+        let Self { span, visible_macro: _, bcb } = self;
+        Covspan { span, bcb }
     }
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index ca64688e6b8..048547dc9f5 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -24,7 +24,6 @@
 //! globals is comparatively simpler. The easiest way is to wrap the test in a closure argument
 //! to: `rustc_span::create_default_session_globals_then(|| { test_here(); })`.
-use super::counters;
 use super::graph::{self, BasicCoverageBlock};

 use itertools::Itertools;
@@ -551,108 +550,3 @@ fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
     assert_successors(&basic_coverage_blocks, bcb(5), &[bcb(1)]);
     assert_successors(&basic_coverage_blocks, bcb(6), &[bcb(4)]);
 }
-
-#[test]
-fn test_find_loop_backedges_none() {
-    let mir_body = goto_switchint();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    if false {
-        eprintln!(
-            "basic_coverage_blocks = {:?}",
-            basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
-        );
-        eprintln!("successors = {:?}", basic_coverage_blocks.successors);
-    }
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        0,
-        "backedges: {:?}",
-        backedges
-    );
-}
-
-#[test]
-fn test_find_loop_backedges_one() {
-    let mir_body = switchint_then_loop_else_return();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        1,
-        "backedges: {:?}",
-        backedges
-    );
-
-    assert_eq!(backedges[bcb(1)], &[bcb(3)]);
-}
-
-#[test]
-fn test_find_loop_backedges_two() {
-    let mir_body = switchint_loop_then_inner_loop_else_break();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        2,
-        "backedges: {:?}",
-        backedges
-    );
-
-    assert_eq!(backedges[bcb(1)], &[bcb(5)]);
-    assert_eq!(backedges[bcb(4)], &[bcb(6)]);
-}
-
-#[test]
-fn test_traverse_coverage_with_loops() {
-    let mir_body = switchint_loop_then_inner_loop_else_break();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let mut traversed_in_order = Vec::new();
-    let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
-    while let Some(bcb) = traversal.next() {
-        traversed_in_order.push(bcb);
-    }
-
-    // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except*
-    // bcb6 are inside the first loop.
-    assert_eq!(
-        *traversed_in_order.last().expect("should have elements"),
-        bcb(6),
-        "bcb6 should not be visited until all nodes inside the first loop have been visited"
-    );
-}
-
-#[test]
-fn test_make_bcb_counters() {
-    rustc_span::create_default_session_globals_then(|| {
-        let mir_body = goto_switchint();
-        let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-        // Historically this test would use `spans` internals to set up fake
-        // coverage spans for BCBs 1 and 2. Now we skip that step and just tell
-        // BCB counter construction that those BCBs have spans.
-        let bcb_has_coverage_spans = |bcb: BasicCoverageBlock| (1..=2).contains(&bcb.as_usize());
-        let coverage_counters = counters::CoverageCounters::make_bcb_counters(
-            &basic_coverage_blocks,
-            bcb_has_coverage_spans,
-        );
-        assert_eq!(coverage_counters.num_expressions(), 0);
-
-        assert_eq!(
-            0, // bcb1 has a `Counter` with id = 0
-            match coverage_counters.bcb_counter(bcb(1)).expect("should have a counter") {
-                counters::BcbCounter::Counter { id, .. } => id,
-                _ => panic!("expected a Counter"),
-            }
-            .as_u32()
-        );
-
-        assert_eq!(
-            1, // bcb2 has a `Counter` with id = 1
-            match coverage_counters.bcb_counter(bcb(2)).expect("should have a counter") {
-                counters::BcbCounter::Counter { id, .. } => id,
-                _ => panic!("expected a Counter"),
-            }
-            .as_u32()
-        );
-    });
-}
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index fe2237dd2e9..d04bb8d302e 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -10,7 +10,7 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{self, Instance, InstanceKind, ParamEnv, Ty, TyCtxt};
 use rustc_session::config::{DebugInfo, OptLevel};
 use rustc_span::source_map::Spanned;
 use rustc_span::sym;
@@ -225,13 +225,8 @@ impl<'tcx> Inliner<'tcx> {
         // Normally, this shouldn't be required, but trait normalization failure can create a
         // validation ICE.
         let output_type = callee_body.return_ty();
-        if !util::relate_types(
-            self.tcx,
-            self.param_env,
-            ty::Variance::Covariant,
-            output_type,
-            destination_ty,
-        ) {
+        if !util::relate_types(self.tcx, self.param_env, ty::Covariant, output_type, destination_ty)
+        {
             trace!(?output_type, ?destination_ty);
             return Err("failed to normalize return type");
         }
@@ -261,13 +256,8 @@ impl<'tcx> Inliner<'tcx> {
                 self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
             {
                 let input_type = callee_body.local_decls[input].ty;
-                if !util::relate_types(
-                    self.tcx,
-                    self.param_env,
-                    ty::Variance::Covariant,
-                    input_type,
-                    arg_ty,
-                ) {
+                if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
+                {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize tuple argument type");
                 }
@@ -276,13 +266,8 @@ impl<'tcx> Inliner<'tcx> {
             for (arg, input) in args.iter().zip(callee_body.args_iter()) {
                 let input_type = callee_body.local_decls[input].ty;
                 let arg_ty = arg.node.ty(&caller_body.local_decls, self.tcx);
-                if !util::relate_types(
-                    self.tcx,
-                    self.param_env,
-                    ty::Variance::Covariant,
-                    input_type,
-                    arg_ty,
-                ) {
+                if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
+                {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize argument type");
                 }
@@ -308,7 +293,7 @@ impl<'tcx> Inliner<'tcx> {
         }

         match callee.def {
-            InstanceDef::Item(_) => {
+            InstanceKind::Item(_) => {
                 // If there is no MIR available (either because it was not in metadata or
                 // because it has no MIR because it's an extern function), then the inliner
                 // won't cause cycles on this.
@@ -317,24 +302,24 @@ impl<'tcx> Inliner<'tcx> {
                 }
             }
             // These have no own callable MIR.
-            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
+            InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => {
                 return Err("instance without MIR (intrinsic / virtual)");
             }
             // This cannot result in an immediate cycle since the callee MIR is a shim, which does
             // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
             // do not need to catch this here, we can wait until the inliner decides to continue
             // inlining a second time.
-            InstanceDef::VTableShim(_)
-            | InstanceDef::ReifyShim(..)
-            | InstanceDef::FnPtrShim(..)
-            | InstanceDef::ClosureOnceShim { .. }
-            | InstanceDef::ConstructCoroutineInClosureShim { .. }
-            | InstanceDef::CoroutineKindShim { .. }
-            | InstanceDef::DropGlue(..)
-            | InstanceDef::CloneShim(..)
-            | InstanceDef::ThreadLocalShim(..)
-            | InstanceDef::FnPtrAddrShim(..)
-            | InstanceDef::AsyncDropGlueCtorShim(..) => return Ok(()),
+            InstanceKind::VTableShim(_)
+            | InstanceKind::ReifyShim(..)
+            | InstanceKind::FnPtrShim(..)
+            | InstanceKind::ClosureOnceShim { .. }
+            | InstanceKind::ConstructCoroutineInClosureShim { .. }
+            | InstanceKind::CoroutineKindShim { .. }
+            | InstanceKind::DropGlue(..)
+            | InstanceKind::CloneShim(..)
+            | InstanceKind::ThreadLocalShim(..)
+            | InstanceKind::FnPtrAddrShim(..)
+            | InstanceKind::AsyncDropGlueCtorShim(..) => return Ok(()),
         }

         if self.tcx.is_constructor(callee_def_id) {
@@ -387,7 +372,7 @@ impl<'tcx> Inliner<'tcx> {
         let callee =
             Instance::resolve(self.tcx, self.param_env, def_id, args).ok().flatten()?;

-        if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
+        if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def {
             return None;
         }

@@ -399,7 +384,7 @@ impl<'tcx> Inliner<'tcx> {

         // Additionally, check that the body that we're inlining actually agrees
         // with the ABI of the trait that the item comes from.
-        if let InstanceDef::Item(instance_def_id) = callee.def
+        if let InstanceKind::Item(instance_def_id) = callee.def
             && self.tcx.def_kind(instance_def_id) == DefKind::AssocFn
             && let instance_fn_sig = self.tcx.fn_sig(instance_def_id).skip_binder()
             && instance_fn_sig.abi() != fn_sig.abi()
@@ -1078,10 +1063,10 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
 #[instrument(skip(tcx), level = "debug")]
 fn try_instance_mir<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: InstanceDef<'tcx>,
+    instance: InstanceKind<'tcx>,
 ) -> Result<&'tcx Body<'tcx>, &'static str> {
-    if let ty::InstanceDef::DropGlue(_, Some(ty))
-    | ty::InstanceDef::AsyncDropGlueCtorShim(_, Some(ty)) = instance
+    if let ty::InstanceKind::DropGlue(_, Some(ty))
+    | ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)) = instance
         && let ty::Adt(def, args) = ty.kind()
     {
         let fields = def.all_fields();
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 8c5f965108b..35bcd24ce95 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::mir::TerminatorKind;
 use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, GenericArgsRef, InstanceDef, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, InstanceKind, TyCtxt};
 use rustc_session::Limit;
 use rustc_span::sym;

@@ -22,7 +22,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
         "you should not call `mir_callgraph_reachable` on immediate self recursion"
     );
     assert!(
-        matches!(root.def, InstanceDef::Item(_)),
+        matches!(root.def, InstanceKind::Item(_)),
         "you should not call `mir_callgraph_reachable` on shims"
     );
     assert!(
@@ -70,7 +70,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
             }

             match callee.def {
-                InstanceDef::Item(_) => {
+                InstanceKind::Item(_) => {
                     // If there is no MIR available (either because it was not in metadata or
                     // because it has no MIR because it's an extern function), then the inliner
                     // won't cause cycles on this.
@@ -80,24 +80,24 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
                     }
                 }
                 // These have no own callable MIR.
-                InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => continue,
+                InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => continue,
                 // These have MIR and if that MIR is inlined, instantiated and then inlining is run
                 // again, a function item can end up getting inlined. Thus we'll be able to cause
                 // a cycle that way
-                InstanceDef::VTableShim(_)
-                | InstanceDef::ReifyShim(..)
-                | InstanceDef::FnPtrShim(..)
-                | InstanceDef::ClosureOnceShim { .. }
-                | InstanceDef::ConstructCoroutineInClosureShim { .. }
-                | InstanceDef::CoroutineKindShim { .. }
-                | InstanceDef::ThreadLocalShim { .. }
-                | InstanceDef::CloneShim(..) => {}
+                InstanceKind::VTableShim(_)
+                | InstanceKind::ReifyShim(..)
+                | InstanceKind::FnPtrShim(..)
+                | InstanceKind::ClosureOnceShim { .. }
+                | InstanceKind::ConstructCoroutineInClosureShim { .. }
+                | InstanceKind::CoroutineKindShim { .. }
+                | InstanceKind::ThreadLocalShim { .. }
+                | InstanceKind::CloneShim(..) => {}
                 // This shim does not call any other functions, thus there can be no recursion.
-                InstanceDef::FnPtrAddrShim(..) => {
+                InstanceKind::FnPtrAddrShim(..) => {
                     continue;
                 }
-                InstanceDef::DropGlue(..) | InstanceDef::AsyncDropGlueCtorShim(..) => {
+                InstanceKind::DropGlue(..) | InstanceKind::AsyncDropGlueCtorShim(..) => {
                     // FIXME: A not fully instantiated drop shim can cause ICEs if one attempts to
                     // have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this
                     // needs some more analysis.
@@ -151,12 +151,12 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(

 pub(crate) fn mir_inliner_callees<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: ty::InstanceDef<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
 ) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
     let steal;
     let guard;
     let body = match (instance, instance.def_id().as_local()) {
-        (InstanceDef::Item(_), Some(def_id)) => {
+        (InstanceKind::Item(_), Some(def_id)) => {
             steal = tcx.mir_promoted(def_id).0;
             guard = steal.borrow();
             &*guard
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 6a20b46e7f9..8d6c00bbedb 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -706,9 +706,9 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
         self.super_operand(operand, location);
     }

-    fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
-        trace!("visit_constant: {:?}", constant);
-        self.super_constant(constant, location);
+    fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
+        trace!("visit_const_operand: {:?}", constant);
+        self.super_const_operand(constant, location);
         self.eval_constant(constant);
     }
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 3c0f4e9142b..afba6781a70 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -398,7 +398,7 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &
     if is_fn_like {
         // Do not compute the mir call graph without said call graph actually being used.
         if pm::should_run_pass(tcx, &inline::Inline) {
-            tcx.ensure_with_value().mir_inliner_callees(ty::InstanceDef::Item(def.to_def_id()));
+            tcx.ensure_with_value().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
         }
     }
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index ecd1179ca99..ecdca8292b4 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -956,7 +956,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
         }
     }

-    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) {
+    fn visit_const_operand(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) {
         if constant.const_.is_required_const() {
             self.promoted.required_consts.push(*constant);
         }
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
index 71ac929d35e..00bfb5e6600 100644
--- a/compiler/rustc_mir_transform/src/required_consts.rs
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -12,7 +12,7 @@ impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
 }

 impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
-    fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
+    fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
         if constant.const_.is_required_const() {
             self.required_consts.push(*constant);
         }
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 4d2eca57840..5eaa024f846 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -49,14 +49,14 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
     }

     #[inline]
-    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
+    fn visit_const_operand(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
         // We have to use `try_normalize_erasing_regions` here, since it's
         // possible that we visit impossible-to-satisfy where clauses here,
         // see #91745
         if let Ok(c) = self.tcx.try_normalize_erasing_regions(self.param_env, constant.const_) {
             constant.const_ = c;
         }
-        self.super_constant(constant, location);
+        self.super_const_operand(constant, location);
     }

     #[inline]
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index d03c2d18c0c..825f8957187 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -29,16 +29,16 @@ pub fn provide(providers: &mut Providers) {
     providers.mir_shims = make_shim;
 }

-fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body<'tcx> {
     debug!("make_shim({:?})", instance);
     let mut result = match instance {
-        ty::InstanceDef::Item(..) => bug!("item {:?} passed to make_shim", instance),
-        ty::InstanceDef::VTableShim(def_id) => {
+        ty::InstanceKind::Item(..) => bug!("item {:?} passed to make_shim", instance),
+        ty::InstanceKind::VTableShim(def_id) => {
             let adjustment = Adjustment::Deref { source: DerefSource::MutPtr };
             build_call_shim(tcx, instance, Some(adjustment), CallKind::Direct(def_id))
         }
-        ty::InstanceDef::FnPtrShim(def_id, ty) => {
+        ty::InstanceKind::FnPtrShim(def_id, ty) => {
             let trait_ = tcx.trait_of_item(def_id).unwrap();
             // Supports `Fn` or `async Fn` traits.
             let adjustment = match tcx
@@ -58,10 +58,10 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         // a virtual call, or a direct call to a function for which
         // indirect calls must be codegen'd differently than direct ones
         // (such as `#[track_caller]`).
-        ty::InstanceDef::ReifyShim(def_id, _) => {
+        ty::InstanceKind::ReifyShim(def_id, _) => {
             build_call_shim(tcx, instance, None, CallKind::Direct(def_id))
         }
-        ty::InstanceDef::ClosureOnceShim { call_once: _, track_caller: _ } => {
+        ty::InstanceKind::ClosureOnceShim { call_once: _, track_caller: _ } => {
             let fn_mut = tcx.require_lang_item(LangItem::FnMut, None);
             let call_mut = tcx
                 .associated_items(fn_mut)
@@ -73,16 +73,16 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
             build_call_shim(tcx, instance, Some(Adjustment::RefMut), CallKind::Direct(call_mut))
         }

-        ty::InstanceDef::ConstructCoroutineInClosureShim {
+        ty::InstanceKind::ConstructCoroutineInClosureShim {
             coroutine_closure_def_id,
             receiver_by_ref,
         } => build_construct_coroutine_by_move_shim(tcx, coroutine_closure_def_id, receiver_by_ref),

-        ty::InstanceDef::CoroutineKindShim { coroutine_def_id } => {
+        ty::InstanceKind::CoroutineKindShim { coroutine_def_id } => {
             return tcx.optimized_mir(coroutine_def_id).coroutine_by_move_body().unwrap().clone();
         }

-        ty::InstanceDef::DropGlue(def_id, ty) => {
+        ty::InstanceKind::DropGlue(def_id, ty) => {
             // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end
             // of this function. Is this intentional?
             if let Some(ty::Coroutine(coroutine_def_id, args)) = ty.map(Ty::kind) {
@@ -127,16 +127,16 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
             build_drop_shim(tcx, def_id, ty)
         }

-        ty::InstanceDef::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
-        ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
-        ty::InstanceDef::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
-        ty::InstanceDef::AsyncDropGlueCtorShim(def_id, ty) => {
+        ty::InstanceKind::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
+        ty::InstanceKind::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
+        ty::InstanceKind::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
+        ty::InstanceKind::AsyncDropGlueCtorShim(def_id, ty) => {
             async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty)
         }
-        ty::InstanceDef::Virtual(..) => {
-            bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
+        ty::InstanceKind::Virtual(..) => {
+            bug!("InstanceKind::Virtual ({:?}) is for direct calls only", instance)
         }
-        ty::InstanceDef::Intrinsic(_) => {
+        ty::InstanceKind::Intrinsic(_) => {
             bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
         }
     };
@@ -240,7 +240,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
     block(&mut blocks, TerminatorKind::Goto { target: return_block });
     block(&mut blocks, TerminatorKind::Return);

-    let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
+    let source = MirSource::from_instance(ty::InstanceKind::DropGlue(def_id, ty));
     let mut body =
         new_body(source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);

@@ -392,7 +392,10 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
     }
 }

-fn build_thread_local_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+fn build_thread_local_shim<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
+) -> Body<'tcx> {
     let def_id = instance.def_id();
     let span = tcx.def_span(def_id);

@@ -472,7 +475,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
     }

     fn into_mir(self) -> Body<'tcx> {
-        let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
+        let source = MirSource::from_instance(ty::InstanceKind::CloneShim(
             self.def_id,
             self.sig.inputs_and_output[0],
         ));
@@ -682,14 +685,14 @@ impl<'tcx> CloneShimBuilder<'tcx> {
 #[instrument(level = "debug", skip(tcx), ret)]
 fn build_call_shim<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: ty::InstanceDef<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
     rcvr_adjustment: Option<Adjustment>,
     call_kind: CallKind<'tcx>,
 ) -> Body<'tcx> {
     // `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
     // to instantiate into the signature of the shim. It is not necessary for users of this
-    // MIR body to perform further instantiations (see `InstanceDef::has_polymorphic_mir_body`).
-    let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
+    // MIR body to perform further instantiations (see `InstanceKind::has_polymorphic_mir_body`).
+    let (sig_args, untuple_args) = if let ty::InstanceKind::FnPtrShim(_, ty) = instance {
         let sig = tcx.instantiate_bound_regions_with_erased(ty.fn_sig(tcx));
         let untuple_args = sig.inputs();

@@ -741,8 +744,8 @@ fn build_call_shim<'tcx>(
     }

     // FIXME(eddyb) avoid having this snippet both here and in
-    // `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
-    if let ty::InstanceDef::VTableShim(..) = instance {
+    // `Instance::fn_sig` (introduce `InstanceKind::fn_sig`?).
+    if let ty::InstanceKind::VTableShim(..) = instance {
         // Modify fn(self, ...) to fn(self: *mut Self, ...)
         let mut inputs_and_output = sig.inputs_and_output.to_vec();
         let self_arg = &mut inputs_and_output[0];
@@ -1007,7 +1010,7 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t
         terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }),
         is_cleanup: false,
     };
-    let source = MirSource::from_instance(ty::InstanceDef::FnPtrAddrShim(def_id, self_ty));
+    let source = MirSource::from_instance(ty::InstanceKind::FnPtrAddrShim(def_id, self_ty));
     new_body(source, IndexVec::from_elem_n(start_block, 1), locals, sig.inputs().len(), span)
 }

@@ -1087,7 +1090,7 @@ fn build_construct_coroutine_by_move_shim<'tcx>(
         is_cleanup: false,
     };

-    let source = MirSource::from_instance(ty::InstanceDef::ConstructCoroutineInClosureShim {
+    let source = MirSource::from_instance(ty::InstanceKind::ConstructCoroutineInClosureShim {
         coroutine_closure_def_id,
         receiver_by_ref,
     });
diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
index aa9c87d8f80..ea4f5fca59e 100644
--- a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
+++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
@@ -529,7 +529,7 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {

         last_bb.terminator = Some(Terminator { source_info, kind: TerminatorKind::Return });

-        let source = MirSource::from_instance(ty::InstanceDef::AsyncDropGlueCtorShim(
+        let source = MirSource::from_instance(ty::InstanceKind::AsyncDropGlueCtorShim(
             self.def_id,
             self.self_ty,
         ));
@@ -561,7 +561,7 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
         // If projection of Discriminant then compare with `Ty::discriminant_ty`
         if let ty::Alias(ty::Projection, ty::AliasTy { args, def_id, .. }) = expected_ty.kind()
-            && Some(*def_id) == self.tcx.lang_items().discriminant_type()
+            && self.tcx.is_lang_item(*def_id, LangItem::Discriminant)
             && args.first().unwrap().as_type().unwrap().discriminant_ty(self.tcx) == operand_ty
         {
             return;
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index f19c34cae7a..c2108795372 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -1,4 +1,5 @@
 use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
+use rustc_hir::LangItem;
 use rustc_index::bit_set::{BitSet, GrowableBitSet};
 use rustc_index::IndexVec;
 use rustc_middle::bug;
@@ -70,7 +71,7 @@ fn escaping_locals<'tcx>(
             // Exclude #[repr(simd)] types so that they are not de-optimized into an array
             return true;
         }
-        if Some(def.did()) == tcx.lang_items().dyn_metadata() {
+        if tcx.is_lang_item(def.did(), LangItem::DynMetadata) {
             // codegen wants to see the `DynMetadata<T>`,
             // not the inner reference-to-opaque-type.
             return true;
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 586c1254995..2cca1a6f507 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -1,6 +1,7 @@
 //! Validates the MIR to ensure that invariants are upheld.

 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::LangItem;
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_infer::traits::Reveal;
@@ -9,7 +10,7 @@
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::{
-    self, CoroutineArgsExt, InstanceDef, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
+    self, CoroutineArgsExt, InstanceKind, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
     Variance,
 };
 use rustc_middle::{bug, span_bug};
@@ -43,7 +44,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
         // terribly important that they pass the validator. However, I think other passes might
         // still see them, in which case they might be surprised. It would probably be better if we
         // didn't put this through the MIR pipeline at all.
-        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+        if matches!(body.source.instance, InstanceKind::Intrinsic(..) | InstanceKind::Virtual(..)) {
             return;
         }
         let def_id = body.source.def_id();
@@ -94,7 +95,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
         }

         if let MirPhase::Runtime(_) = body.phase {
-            if let ty::InstanceDef::Item(_) = body.source.instance {
+            if let ty::InstanceKind::Item(_) = body.source.instance {
                 if body.has_free_regions() {
                     cfg_checker.fail(
                         Location::START,
@@ -689,7 +690,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             }
             ty::Adt(adt_def, args) => {
                 // see <https://github.com/rust-lang/rust/blob/7601adcc764d42c9f2984082b49948af652df986/compiler/rustc_middle/src/ty/layout.rs#L861-L864>
-                if Some(adt_def.did()) == self.tcx.lang_items().dyn_metadata() {
+                if self.tcx.is_lang_item(adt_def.did(), LangItem::DynMetadata) {
                     self.fail(
                         location,
                         format!(
