Diffstat (limited to 'compiler/rustc_mir_transform/src')
| -rw-r--r-- | compiler/rustc_mir_transform/src/coroutine.rs | 6 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/coverage/spans.rs | 180 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs | 15 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/cross_crate_inline.rs | 9 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/elaborate_drops.rs | 2 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/inline.rs | 8 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/known_panics_lint.rs (renamed from compiler/rustc_mir_transform/src/const_prop_lint.rs) | 32 |
| -rw-r--r-- | compiler/rustc_mir_transform/src/lib.rs | 4 |
8 files changed, 64 insertions, 192 deletions
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index a0851aa557b..54b13a40e92 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -1615,11 +1615,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             (args.discr_ty(tcx), coroutine_kind.movability() == hir::Movability::Movable)
         }
         _ => {
-            tcx.dcx().span_delayed_bug(
-                body.span,
-                format!("unexpected coroutine type {coroutine_ty}"),
-            );
-            return;
+            tcx.dcx().span_bug(body.span, format!("unexpected coroutine type {coroutine_ty}"));
         }
     };
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 934e77e7deb..98fb1d8e1c9 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -61,7 +61,7 @@ pub(super) fn generate_coverage_spans(
         hir_info,
         basic_coverage_blocks,
     );
-    let coverage_spans = SpansRefiner::refine_sorted_spans(basic_coverage_blocks, sorted_spans);
+    let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans);
     mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
         // Each span produced by the generator represents an ordinary code region.
         BcbMapping { kind: BcbMappingKind::Code(bcb), span }
@@ -88,8 +88,6 @@ pub(super) fn generate_coverage_spans(
 
 #[derive(Debug)]
 struct CurrCovspan {
-    /// This is used as the basis for [`PrevCovspan::original_span`], so it must
-    /// not be modified.
     span: Span,
     bcb: BasicCoverageBlock,
     is_closure: bool,
@@ -102,7 +100,7 @@ impl CurrCovspan {
 
     fn into_prev(self) -> PrevCovspan {
         let Self { span, bcb, is_closure } = self;
-        PrevCovspan { original_span: span, span, bcb, merged_spans: vec![span], is_closure }
+        PrevCovspan { span, bcb, merged_spans: vec![span], is_closure }
     }
 
     fn into_refined(self) -> RefinedCovspan {
@@ -115,7 +113,6 @@
 
 #[derive(Debug)]
 struct PrevCovspan {
-    original_span: Span,
     span: Span,
     bcb: BasicCoverageBlock,
     /// List of all the original spans from MIR that have been merged into this
@@ -135,42 +132,17 @@ impl PrevCovspan {
         self.merged_spans.push(other.span);
     }
 
-    fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
+    fn cutoff_statements_at(mut self, cutoff_pos: BytePos) -> Option<RefinedCovspan> {
         self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
         if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
             self.span = self.span.with_hi(max_hi);
         }
-    }
-
-    fn into_dup(self) -> DuplicateCovspan {
-        let Self { original_span, span, bcb, merged_spans: _, is_closure } = self;
-        // Only unmodified spans end up in `pending_dups`.
-        debug_assert_eq!(original_span, span);
-        DuplicateCovspan { span, bcb, is_closure }
-    }
-
-    fn refined_copy(&self) -> RefinedCovspan {
-        let &Self { original_span: _, span, bcb, merged_spans: _, is_closure } = self;
-        RefinedCovspan { span, bcb, is_closure }
-    }
 
-    fn into_refined(self) -> RefinedCovspan {
-        self.refined_copy()
+        if self.merged_spans.is_empty() { None } else { Some(self.into_refined()) }
     }
-}
-
-#[derive(Debug)]
-struct DuplicateCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
-    is_closure: bool,
-}
 
-impl DuplicateCovspan {
-    /// Returns a copy of this covspan, as a [`RefinedCovspan`].
-    /// Should only be called in places that would otherwise clone this covspan.
     fn refined_copy(&self) -> RefinedCovspan {
-        let &Self { span, bcb, is_closure } = self;
+        let &Self { span, bcb, merged_spans: _, is_closure } = self;
         RefinedCovspan { span, bcb, is_closure }
     }
@@ -205,10 +177,7 @@ impl RefinedCovspan {
 /// * Merge spans that represent continuous (both in source code and control flow), non-branching
 ///   execution
 /// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
-struct SpansRefiner<'a> {
-    /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
-    basic_coverage_blocks: &'a CoverageGraph,
-
+struct SpansRefiner {
     /// The initial set of coverage spans, sorted by `Span` (`lo` and `hi`) and by relative
     /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
     sorted_spans_iter: std::vec::IntoIter<SpanFromMir>,
@@ -223,36 +192,22 @@ struct SpansRefiner<'a> {
     /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
     some_prev: Option<PrevCovspan>,
 
-    /// One or more coverage spans with the same `Span` but different `BasicCoverageBlock`s, and
-    /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
-    /// If a new `curr` span also fits this criteria (compared to an existing list of
-    /// `pending_dups`), that `curr` moves to `prev` before possibly being added to
-    /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
-    /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
-    /// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
-    /// `prev` with a matching `Span`)
-    pending_dups: Vec<DuplicateCovspan>,
-
     /// The final coverage spans to add to the coverage map. A `Counter` or `Expression`
     /// will also be injected into the MIR for each BCB that has associated spans.
     refined_spans: Vec<RefinedCovspan>,
 }
 
-impl<'a> SpansRefiner<'a> {
+impl SpansRefiner {
     /// Takes the initial list of (sorted) spans extracted from MIR, and "refines"
     /// them by merging compatible adjacent spans, removing redundant spans,
     /// and carving holes in spans when they overlap in unwanted ways.
-    fn refine_sorted_spans(
-        basic_coverage_blocks: &'a CoverageGraph,
-        sorted_spans: Vec<SpanFromMir>,
-    ) -> Vec<RefinedCovspan> {
+    fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
+        let sorted_spans_len = sorted_spans.len();
         let this = Self {
-            basic_coverage_blocks,
             sorted_spans_iter: sorted_spans.into_iter(),
             some_curr: None,
             some_prev: None,
-            pending_dups: Vec::new(),
-            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
+            refined_spans: Vec::with_capacity(sorted_spans_len),
         };
 
         this.to_refined_spans()
@@ -292,21 +247,11 @@ impl<'a> SpansRefiner<'a> {
                 self.take_curr(); // Discards curr.
             } else if curr.is_closure {
                 self.carve_out_span_for_closure();
-            } else if prev.original_span == prev.span && prev.span == curr.span {
-                // Prev and curr have the same span, and prev's span hasn't
-                // been modified by other spans.
-                self.update_pending_dups();
             } else {
                 self.cutoff_prev_at_overlapping_curr();
             }
         }
 
-        // Drain any remaining dups into the output.
-        for dup in self.pending_dups.drain(..) {
-            debug!("    ...adding at least one pending dup={:?}", dup);
-            self.refined_spans.push(dup.into_refined());
-        }
-
         // There is usually a final span remaining in `prev` after the loop ends,
         // so add it to the output as well.
         if let Some(prev) = self.some_prev.take() {
@@ -359,36 +304,6 @@ impl<'a> SpansRefiner<'a> {
         self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
     }
 
-    /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
-    /// `pending_dups` spans), then one of the following two things happened during the previous
-    /// iteration:
-    ///   * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
-    ///     (in which case there should be at least two spans in `pending_dups`); or
-    ///   * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
-    ///     `pending_dups` could have as few as one span)
-    /// In either case, no more spans will match the span of `pending_dups`, so
-    /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
-    fn maybe_flush_pending_dups(&mut self) {
-        let Some(last_dup) = self.pending_dups.last() else { return };
-        if last_dup.span == self.prev().span {
-            return;
-        }
-
-        debug!(
-            "    SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
-            previous iteration, or prev started a new disjoint span"
-        );
-        if last_dup.span.hi() <= self.curr().span.lo() {
-            for dup in self.pending_dups.drain(..) {
-                debug!("    ...adding at least one pending={:?}", dup);
-                self.refined_spans.push(dup.into_refined());
-            }
-        } else {
-            self.pending_dups.clear();
-        }
-        assert!(self.pending_dups.is_empty());
-    }
-
     /// Advance `prev` to `curr` (if any), and `curr` to the next coverage span in sorted order.
     fn next_coverage_span(&mut self) -> bool {
         if let Some(curr) = self.some_curr.take() {
@@ -408,7 +323,6 @@ impl<'a> SpansRefiner<'a> {
             );
         } else {
             self.some_curr = Some(CurrCovspan::new(curr.span, curr.bcb, curr.is_closure));
-            self.maybe_flush_pending_dups();
             return true;
         }
     }
@@ -433,13 +347,6 @@ impl<'a> SpansRefiner<'a> {
             let mut pre_closure = self.prev().refined_copy();
             pre_closure.span = pre_closure.span.with_hi(left_cutoff);
             debug!("  prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
-
-            for mut dup in self.pending_dups.iter().map(DuplicateCovspan::refined_copy) {
-                dup.span = dup.span.with_hi(left_cutoff);
-                debug!("    ...and at least one pre_closure dup={:?}", dup);
-                self.refined_spans.push(dup);
-            }
-
             self.refined_spans.push(pre_closure);
         }
@@ -448,58 +355,9 @@ impl<'a> SpansRefiner<'a> {
             self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
             debug!("  Mutated prev.span to start after the closure. prev={:?}", self.prev());
 
-            for dup in &mut self.pending_dups {
-                debug!("    ...and at least one overlapping dup={:?}", dup);
-                dup.span = dup.span.with_lo(right_cutoff);
-            }
-
             // Prevent this curr from becoming prev.
             let closure_covspan = self.take_curr().into_refined();
             self.refined_spans.push(closure_covspan); // since self.prev() was already updated
-        } else {
-            self.pending_dups.clear();
-        }
-    }
-
-    /// Called if `curr.span` equals `prev.original_span` (and potentially equal to all
-    /// `pending_dups` spans, if any). Keep in mind, `prev.span()` may have been changed.
-    /// If prev.span() was merged into other spans (with matching BCB, for instance),
-    /// `prev.span.hi()` will be greater than (further right of) `prev.original_span.hi()`.
-    /// If prev.span() was split off to the right of a closure, prev.span().lo() will be
-    /// greater than prev.original_span.lo(). The actual span of `prev.original_span` is
-    /// not as important as knowing that `prev()` **used to have the same span** as `curr()`,
-    /// which means their sort order is still meaningful for determining the dominator
-    /// relationship.
-    ///
-    /// When two coverage spans have the same `Span`, dominated spans can be discarded; but if
-    /// neither coverage span dominates the other, both (or possibly more than two) are held,
-    /// until their disposition is determined. In this latter case, the `prev` dup is moved into
-    /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
-    fn update_pending_dups(&mut self) {
-        let prev_bcb = self.prev().bcb;
-        let curr_bcb = self.curr().bcb;
-
-        // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
-        // impossible for `curr` to dominate any previous coverage span.
-        debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));
-
-        // `prev` is a duplicate of `curr`, so add it to the list of pending dups.
-        // If it dominates `curr`, it will be removed by the subsequent discard step.
-        let prev = self.take_prev().into_dup();
-        debug!(?prev, "adding prev to pending dups");
-        self.pending_dups.push(prev);
-
-        let initial_pending_count = self.pending_dups.len();
-        if initial_pending_count > 0 {
-            self.pending_dups
-                .retain(|dup| !self.basic_coverage_blocks.dominates(dup.bcb, curr_bcb));
-
-            let n_discarded = initial_pending_count - self.pending_dups.len();
-            if n_discarded > 0 {
-                debug!(
-                    "    discarded {n_discarded} of {initial_pending_count} pending_dups that dominated curr",
-                );
-            }
-        }
     }
@@ -516,19 +374,13 @@ impl<'a> SpansRefiner<'a> {
             if it has statements that end before curr; prev={:?}",
             self.prev()
         );
-        if self.pending_dups.is_empty() {
-            let curr_span = self.curr().span;
-            self.prev_mut().cutoff_statements_at(curr_span.lo());
-            if self.prev().merged_spans.is_empty() {
-                debug!("  ... no non-overlapping statements to add");
-            } else {
-                debug!("  ... adding modified prev={:?}", self.prev());
-                let prev = self.take_prev().into_refined();
-                self.refined_spans.push(prev);
-            }
+
+        let curr_span = self.curr().span;
+        if let Some(prev) = self.take_prev().cutoff_statements_at(curr_span.lo()) {
+            debug!("after cutoff, adding {prev:?}");
+            self.refined_spans.push(prev);
         } else {
-            // with `pending_dups`, `prev` cannot have any statements that don't overlap
-            self.pending_dups.clear();
+            debug!("prev was eliminated by cutoff");
         }
     }
 }
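The `cutoff_statements_at` change above is the heart of this refactor: instead of mutating `prev` in place and making the caller re-inspect `merged_spans` afterwards, the method now consumes `prev` and returns `Option<RefinedCovspan>`. A minimal standalone sketch of that by-value pattern, using toy stand-ins for the rustc types (`Span`, `PrevCovspan`, and `RefinedCovspan` here are simplified models, not the real definitions):

```rust
/// Toy stand-in for the rustc span type; only `lo`/`hi` byte positions matter here.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Span { lo: u32, hi: u32 }

struct PrevCovspan { span: Span, merged_spans: Vec<Span> }

#[derive(Debug)]
struct RefinedCovspan { span: Span }

impl PrevCovspan {
    // Consuming `self` means a cut-off `prev` can never be reused by mistake:
    // the caller either gets a finished refined span back, or nothing at all.
    fn cutoff_statements_at(mut self, cutoff_pos: u32) -> Option<RefinedCovspan> {
        // Drop every merged statement span that ends at or after the cutoff...
        self.merged_spans.retain(|span| span.hi <= cutoff_pos);
        // ...and shrink the overall span to end at the last surviving statement.
        if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi).max() {
            self.span.hi = max_hi;
        }
        if self.merged_spans.is_empty() { None } else { Some(RefinedCovspan { span: self.span }) }
    }
}

fn main() {
    let prev = PrevCovspan {
        span: Span { lo: 0, hi: 30 },
        merged_spans: vec![Span { lo: 0, hi: 10 }, Span { lo: 12, hi: 30 }],
    };
    // A `curr` starting at byte 12 cuts off the second statement span.
    let refined = prev.cutoff_statements_at(12);
    assert_eq!(refined.unwrap().span, Span { lo: 0, hi: 10 });
}
```

Taking `self` by value turns the cutoff into a one-way door, which is what lets the caller in `cutoff_prev_at_overlapping_curr` collapse the old two-step "mutate, then check `merged_spans`" dance into a single `if let`.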
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 2db358379fe..b91ab811918 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -52,14 +52,19 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
             //   - Span A extends further left, or
             //   - Both have the same start and span A extends further right
             .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
-            // If both spans are equal, sort the BCBs in dominator order,
-            // so that dominating BCBs come before other BCBs they dominate.
-            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
-            // If two spans are otherwise identical, put closure spans first,
-            // as this seems to be what the refinement step expects.
+            // If two spans have the same lo & hi, put closure spans first,
+            // as they take precedence over non-closure spans.
             .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
+            // After deduplication, we want to keep only the most-dominated BCB.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
     });
 
+    // Among covspans with the same span, keep only one. Closure spans take
+    // precedence, otherwise keep the one with the most-dominated BCB.
+    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
+    // requires a lot more complexity in the span refiner, for little benefit.)
+    initial_spans.dedup_by(|b, a| a.span.source_equal(b.span));
+
     initial_spans
 }
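The reordered comparator and the new `dedup_by` call cooperate: `Vec::dedup_by` removes all but the *first* element of each run it considers equal (its closure receives the candidate `b` and the already-retained `a`), so whichever covspan the sort puts first within a group of identical spans is the one that survives. A self-contained model of that interaction (the `Covspan` struct and its `dominator_rank` field are illustrative stand-ins; the real code compares BCBs with `cmp_in_dominator_order`):

```rust
#[derive(Debug, Clone, Copy)]
struct Covspan {
    span: (u32, u32),    // (lo, hi) stand-in for a real Span
    is_closure: bool,
    dominator_rank: u32, // stand-in for dominator order: higher = more dominated
}

fn main() {
    let mut spans = vec![
        Covspan { span: (0, 10), is_closure: false, dominator_rank: 2 },
        Covspan { span: (0, 10), is_closure: true, dominator_rank: 1 },
        Covspan { span: (5, 20), is_closure: false, dominator_rank: 0 },
    ];

    spans.sort_by(|a, b| {
        Ord::cmp(&a.span.0, &b.span.0)
            // Longer spans sort before shorter ones with the same start.
            .then_with(|| Ord::cmp(&a.span.1, &b.span.1).reverse())
            // Closure spans first within a group of identical spans...
            .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
            // ...then more-dominated BCBs first (reversed dominator order).
            .then_with(|| Ord::cmp(&a.dominator_rank, &b.dominator_rank).reverse())
    });

    // `dedup_by` keeps the first element of each run of "equal" elements,
    // so the closure / most-dominated covspan is the one retained.
    spans.dedup_by(|b, a| a.span == b.span);

    assert_eq!(spans.len(), 2);
    assert!(spans[0].is_closure); // the closure span won its group
}
```

Flipping the dominator comparison to `.reverse()` is what makes the dedup keep the most-dominated BCB rather than the dominating one, which is the behavior the new comment asks for.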
diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
index 5f01b841867..483fd753e70 100644
--- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs
+++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
@@ -9,6 +9,7 @@ use rustc_middle::query::Providers;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::InliningThreshold;
 use rustc_session::config::OptLevel;
+use rustc_span::sym;
 
 pub fn provide(providers: &mut Providers) {
     providers.cross_crate_inlinable = cross_crate_inlinable;
@@ -34,6 +35,14 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
         return true;
     }
 
+    if tcx.has_attr(def_id, sym::rustc_intrinsic) {
+        // Intrinsic fallback bodies are always cross-crate inlineable.
+        // To ensure that the MIR inliner doesn't cluelessly try to inline fallback
+        // bodies even when the backend would implement something better, we stop
+        // the MIR inliner from ever inlining an intrinsic.
+        return true;
+    }
+
     // Obey source annotations first; this is important because it means we can use
     // #[inline(never)] to force code generation.
     match codegen_fn_attrs.inline {
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 8575f552f0a..03d952abad1 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -380,7 +380,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
                 LookupResult::Parent(None) => {}
                 LookupResult::Parent(Some(_)) => {
                     if !replace {
-                        self.tcx.dcx().span_delayed_bug(
+                        self.tcx.dcx().span_bug(
                             terminator.source_info.span,
                             format!("drop of untracked value {bb:?}"),
                         );
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 956d855ab81..2009539d4d0 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -13,6 +13,7 @@ use rustc_middle::ty::TypeVisitableExt;
 use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
 use rustc_session::config::OptLevel;
 use rustc_span::source_map::Spanned;
+use rustc_span::sym;
 use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
@@ -170,6 +171,13 @@ impl<'tcx> Inliner<'tcx> {
         let cross_crate_inlinable = self.tcx.cross_crate_inlinable(callsite.callee.def_id());
         self.check_codegen_attributes(callsite, callee_attrs, cross_crate_inlinable)?;
 
+        // Intrinsic fallback bodies are automatically made cross-crate inlineable,
+        // but at this stage we don't know whether codegen knows the intrinsic,
+        // so just conservatively don't inline it.
+        if self.tcx.has_attr(callsite.callee.def_id(), sym::rustc_intrinsic) {
+            return Err("Callee is an intrinsic, do not inline fallback bodies");
+        }
+
         let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
         let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
         let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty;
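The `cross_crate_inline.rs` and `inline.rs` hunks are two halves of one policy for intrinsics that carry a `#[rustc_intrinsic]` fallback body: export the MIR so downstream crates *can* codegen it, but never let the MIR inliner substitute it, since the backend may know a better native lowering. A toy model of the paired checks (names like `Callee` and `has_rustc_intrinsic_attr` are illustrative, not rustc's API):

```rust
// Simplified model of the two decisions added above. Note that they pull
// in opposite directions on purpose.
struct Callee { has_rustc_intrinsic_attr: bool /* other attributes elided */ }

fn cross_crate_inlinable(callee: &Callee) -> bool {
    // Fallback bodies must be available to downstream crates, so a backend
    // that does NOT recognize the intrinsic can still generate code for it...
    if callee.has_rustc_intrinsic_attr {
        return true;
    }
    false // (other heuristics elided)
}

fn try_inline(callee: &Callee) -> Result<(), &'static str> {
    // ...but the MIR inliner must never substitute the fallback body itself,
    // because the backend might implement something better.
    if callee.has_rustc_intrinsic_attr {
        return Err("Callee is an intrinsic, do not inline fallback bodies");
    }
    Ok(())
}

fn main() {
    let intrinsic = Callee { has_rustc_intrinsic_attr: true };
    assert!(cross_crate_inlinable(&intrinsic));
    assert_eq!(
        try_inline(&intrinsic),
        Err("Callee is an intrinsic, do not inline fallback bodies"),
    );
}
```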
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 6f2ef8f9a4f..a9cd688c315 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -1,5 +1,8 @@
-//! Propagates constants for early reporting of statically known
-//! assertion failures
+//! A lint that checks for known panics like
+//! overflows, division by zero,
+//! out-of-bound access etc.
+//! Uses const propagation to determine the
+//! values of operands during checks.
 
 use std::fmt::Debug;
 
@@ -21,9 +24,9 @@ use crate::dataflow_const_prop::DummyMachine;
 use crate::errors::{AssertLint, AssertLintKind};
 use crate::MirLint;
 
-pub struct ConstPropLint;
+pub struct KnownPanicsLint;
 
-impl<'tcx> MirLint<'tcx> for ConstPropLint {
+impl<'tcx> MirLint<'tcx> for KnownPanicsLint {
     fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
         if body.tainted_by_errors.is_some() {
             return;
@@ -37,31 +40,28 @@ impl<'tcx> MirLint<'tcx> for ConstPropLint {
         // Only run const prop on functions, methods, closures and associated constants
         if !is_fn_like && !is_assoc_const {
             // skip anon_const/statics/consts because they'll be evaluated by miri anyway
-            trace!("ConstPropLint skipped for {:?}", def_id);
+            trace!("KnownPanicsLint skipped for {:?}", def_id);
             return;
         }
 
         // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
         // computing their layout.
         if tcx.is_coroutine(def_id.to_def_id()) {
-            trace!("ConstPropLint skipped for coroutine {:?}", def_id);
+            trace!("KnownPanicsLint skipped for coroutine {:?}", def_id);
             return;
         }
 
-        trace!("ConstPropLint starting for {:?}", def_id);
+        trace!("KnownPanicsLint starting for {:?}", def_id);
 
-        // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
-        // constants, instead of just checking for const-folding succeeding.
-        // That would require a uniform one-def no-mutation analysis
-        // and RPO (or recursing when needing the value of a local).
         let mut linter = ConstPropagator::new(body, tcx);
         linter.visit_body(body);
 
-        trace!("ConstPropLint done for {:?}", def_id);
+        trace!("KnownPanicsLint done for {:?}", def_id);
     }
 }
 
-/// Finds optimization opportunities on the MIR.
+/// Visits MIR nodes, performs const propagation
+/// and runs lint checks as it goes
 struct ConstPropagator<'mir, 'tcx> {
     ecx: InterpCx<'mir, 'tcx, DummyMachine>,
     tcx: TyCtxt<'tcx>,
@@ -238,7 +238,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 // dedicated error variants should be introduced instead.
                 assert!(
                     !error.kind().formatted_string(),
-                    "const-prop encountered formatting error: {}",
+                    "known panics lint encountered formatting error: {}",
                     format_interp_error(self.ecx.tcx.dcx(), error),
                 );
                 None
@@ -253,7 +253,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             return None;
         }
 
-        // Normalization needed b/c const prop lint runs in
+        // Normalization needed b/c known panics lint runs in
         // `mir_drops_elaborated_and_const_checked`, which happens before
         // optimized MIR. Only after optimizing the MIR can we guarantee
         // that the `RevealAll` pass has happened and that the body's consts
@@ -864,6 +864,8 @@ pub enum ConstPropMode {
     NoPropagation,
 }
 
+/// A visitor that determines locals in a MIR body
+/// that can be const propagated
 pub struct CanConstProp {
     can_const_prop: IndexVec<Local, ConstPropMode>,
     // False at the beginning. Once set, no more assignments are allowed to that local.
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 74b36eb5ee8..945c3c662a6 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -59,7 +59,6 @@ mod remove_place_mention;
 mod add_subtyping_projections;
 pub mod cleanup_post_borrowck;
 mod const_debuginfo;
-mod const_prop_lint;
 mod copy_prop;
 mod coroutine;
 mod cost_checker;
@@ -83,6 +82,7 @@ mod gvn;
 pub mod inline;
 mod instsimplify;
 mod jump_threading;
+mod known_panics_lint;
 mod large_enums;
 mod lint;
 mod lower_intrinsics;
@@ -533,7 +533,7 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         &elaborate_box_derefs::ElaborateBoxDerefs,
         &coroutine::StateTransform,
         &add_retag::AddRetag,
-        &Lint(const_prop_lint::ConstPropLint),
+        &Lint(known_panics_lint::KnownPanicsLint),
     ];
     pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
 }
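For context on what the renamed pass actually fires on: `KnownPanicsLint` is the machinery behind deny-by-default diagnostics such as `unconditional_panic`. A small example of the kind of user code it reports (the exact message wording may vary by rustc version):

```rust
fn main() {
    let divisor = 0;
    // The lint const-propagates `divisor` and rejects this at compile time:
    //   error: this operation will panic at runtime: attempt to divide by zero
    // (lint `unconditional_panic`, deny-by-default), so this does not compile.
    let _quotient = 1 / divisor;
}
```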
