Diffstat (limited to 'compiler/rustc_mir_transform/src')
16 files changed, 1183 insertions, 331 deletions
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs index da82f8de781..48a6a83e146 100644 --- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs +++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs @@ -29,7 +29,7 @@ impl<'tcx> MirPass<'tcx> for CleanupPostBorrowck { for statement in basic_block.statements.iter_mut() { match statement.kind { StatementKind::AscribeUserType(..) - | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Fake, _))) + | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Fake(_), _))) | StatementKind::Coverage( // These kinds of coverage statements are markers inserted during // MIR building, and are not needed after InstrumentCoverage. diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index e2a911f0dc7..3008016863e 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -80,6 +80,10 @@ use rustc_span::symbol::sym; use rustc_span::Span; use rustc_target::abi::{FieldIdx, VariantIdx}; use rustc_target::spec::PanicStrategy; +use rustc_trait_selection::infer::TyCtxtInferExt as _; +use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _; +use rustc_trait_selection::traits::ObligationCtxt; +use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode}; use std::{iter, ops}; pub struct StateTransform; @@ -1256,7 +1260,7 @@ fn create_coroutine_drop_shim<'tcx>( } // Replace the return variable - body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(Ty::new_unit(tcx), source_info); + body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.types.unit, source_info); make_coroutine_state_argument_indirect(tcx, &mut body); @@ -1584,10 +1588,46 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>( let (_, coroutine_layout, _) = compute_layout(liveness_info, body); check_suspend_tys(tcx, &coroutine_layout, body); + check_field_tys_sized(tcx, &coroutine_layout, def_id); Some(coroutine_layout) } +fn check_field_tys_sized<'tcx>( + tcx: TyCtxt<'tcx>, + coroutine_layout: &CoroutineLayout<'tcx>, + def_id: LocalDefId, +) { + // No need to check if unsized_locals/unsized_fn_params is disabled, + // since we will error during typeck. 
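+ // E.g. with `#![feature(unsized_locals)]`, a local of type `dyn FnOnce()` held
+ // across an await point would otherwise become an unsized coroutine field.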
+ if !tcx.features().unsized_locals && !tcx.features().unsized_fn_params { + return; + } + + let infcx = tcx.infer_ctxt().ignoring_regions().build(); + let param_env = tcx.param_env(def_id); + + let ocx = ObligationCtxt::new(&infcx); + for field_ty in &coroutine_layout.field_tys { + ocx.register_bound( + ObligationCause::new( + field_ty.source_info.span, + def_id, + ObligationCauseCode::SizedCoroutineInterior(def_id), + ), + param_env, + field_ty.ty, + tcx.require_lang_item(hir::LangItem::Sized, Some(field_ty.source_info.span)), + ); + } + + let errors = ocx.select_all_or_error(); + debug!(?errors); + if !errors.is_empty() { + infcx.err_ctxt().report_fulfillment_errors(errors); + } +} + impl<'tcx> MirPass<'tcx> for StateTransform { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let Some(old_yield_ty) = body.yield_ty() else { diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs new file mode 100644 index 00000000000..d364658efb6 --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs @@ -0,0 +1,275 @@ +use std::collections::BTreeSet; + +use rustc_data_structures::graph::DirectedGraph; +use rustc_index::bit_set::BitSet; +use rustc_middle::mir::coverage::{ + BlockMarkerId, BranchSpan, ConditionInfo, CoverageKind, MCDCBranchSpan, MCDCDecisionSpan, +}; +use rustc_middle::mir::{self, BasicBlock, StatementKind}; +use rustc_span::Span; + +use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB}; +use crate::coverage::spans::{ + extract_refined_covspans, unexpand_into_body_span_with_visible_macro, +}; +use crate::coverage::ExtractedHirInfo; +use rustc_index::IndexVec; + +#[derive(Clone, Debug)] +pub(super) enum BcbMappingKind { + /// Associates an ordinary executable code span with its corresponding BCB. + Code(BasicCoverageBlock), + + // Ordinary branch mappings are stored separately, so they don't have a + // variant in this enum. + // + /// Associates a mcdc branch span with condition info besides fields for normal branch. + MCDCBranch { + true_bcb: BasicCoverageBlock, + false_bcb: BasicCoverageBlock, + /// If `None`, this actually represents a normal branch mapping inserted + /// for code that was too complex for MC/DC. + condition_info: Option<ConditionInfo>, + decision_depth: u16, + }, + /// Associates a mcdc decision with its join BCB. + MCDCDecision { + end_bcbs: BTreeSet<BasicCoverageBlock>, + bitmap_idx: u32, + conditions_num: u16, + decision_depth: u16, + }, +} + +#[derive(Debug)] +pub(super) struct BcbMapping { + pub(super) kind: BcbMappingKind, + pub(super) span: Span, +} + +/// This is separate from [`BcbMappingKind`] to help prepare for larger changes +/// that will be needed for improved branch coverage in the future. +/// (See <https://github.com/rust-lang/rust/pull/124217>.) 
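+///
+/// Associates a branch span with BCBs for its true and false arms.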
+#[derive(Debug)] +pub(super) struct BcbBranchPair { + pub(super) span: Span, + pub(super) true_bcb: BasicCoverageBlock, + pub(super) false_bcb: BasicCoverageBlock, +} + +pub(super) struct CoverageSpans { + bcb_has_mappings: BitSet<BasicCoverageBlock>, + pub(super) mappings: Vec<BcbMapping>, + pub(super) branch_pairs: Vec<BcbBranchPair>, + test_vector_bitmap_bytes: u32, +} + +impl CoverageSpans { + pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool { + self.bcb_has_mappings.contains(bcb) + } + + pub(super) fn test_vector_bitmap_bytes(&self) -> u32 { + self.test_vector_bitmap_bytes + } +} + +/// Extracts coverage-relevant spans from MIR, and associates them with +/// their corresponding BCBs. +/// +/// Returns `None` if no coverage-relevant spans could be extracted. +pub(super) fn generate_coverage_spans( + mir_body: &mir::Body<'_>, + hir_info: &ExtractedHirInfo, + basic_coverage_blocks: &CoverageGraph, +) -> Option<CoverageSpans> { + let mut mappings = vec![]; + let mut branch_pairs = vec![]; + + if hir_info.is_async_fn { + // An async function desugars into a function that returns a future, + // with the user code wrapped in a closure. Any spans in the desugared + // outer function will be unhelpful, so just keep the signature span + // and ignore all of the spans in the MIR body. + if let Some(span) = hir_info.fn_sig_span_extended { + mappings.push(BcbMapping { kind: BcbMappingKind::Code(START_BCB), span }); + } + } else { + extract_refined_covspans(mir_body, hir_info, basic_coverage_blocks, &mut mappings); + + branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, basic_coverage_blocks)); + + mappings.extend(extract_mcdc_mappings(mir_body, hir_info.body_span, basic_coverage_blocks)); + } + + if mappings.is_empty() && branch_pairs.is_empty() { + return None; + } + + // Identify which BCBs have one or more mappings. + let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes()); + let mut insert = |bcb| { + bcb_has_mappings.insert(bcb); + }; + let mut test_vector_bitmap_bytes = 0; + for BcbMapping { kind, span: _ } in &mappings { + match *kind { + BcbMappingKind::Code(bcb) => insert(bcb), + BcbMappingKind::MCDCBranch { true_bcb, false_bcb, .. } => { + insert(true_bcb); + insert(false_bcb); + } + BcbMappingKind::MCDCDecision { bitmap_idx, conditions_num, .. } => { + // `bcb_has_mappings` is used for inject coverage counters + // but they are not needed for decision BCBs. + // While the length of test vector bitmap should be calculated here. + test_vector_bitmap_bytes = test_vector_bitmap_bytes + .max(bitmap_idx + (1_u32 << conditions_num as u32).div_ceil(8)); + } + } + } + for &BcbBranchPair { true_bcb, false_bcb, .. } in &branch_pairs { + insert(true_bcb); + insert(false_bcb); + } + + Some(CoverageSpans { bcb_has_mappings, mappings, branch_pairs, test_vector_bitmap_bytes }) +} + +fn resolve_block_markers( + branch_info: &mir::coverage::BranchInfo, + mir_body: &mir::Body<'_>, +) -> IndexVec<BlockMarkerId, Option<BasicBlock>> { + let mut block_markers = IndexVec::<BlockMarkerId, Option<BasicBlock>>::from_elem_n( + None, + branch_info.num_block_markers, + ); + + // Fill out the mapping from block marker IDs to their enclosing blocks. 
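+ // A marker can remain `None` here, e.g. if its block was removed by an
+ // earlier MIR simplification; mappings that refer to it are discarded later.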
+ for (bb, data) in mir_body.basic_blocks.iter_enumerated() { + for statement in &data.statements { + if let StatementKind::Coverage(CoverageKind::BlockMarker { id }) = statement.kind { + block_markers[id] = Some(bb); + } + } + } + + block_markers +} + +// FIXME: There is currently a lot of redundancy between +// `extract_branch_pairs` and `extract_mcdc_mappings`. This is needed so +// that they can each be modified without interfering with the other, but in +// the long term we should try to bring them together again when branch coverage +// and MC/DC coverage support are more mature. + +pub(super) fn extract_branch_pairs( + mir_body: &mir::Body<'_>, + hir_info: &ExtractedHirInfo, + basic_coverage_blocks: &CoverageGraph, +) -> Vec<BcbBranchPair> { + let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else { return vec![] }; + + let block_markers = resolve_block_markers(branch_info, mir_body); + + branch_info + .branch_spans + .iter() + .filter_map(|&BranchSpan { span: raw_span, true_marker, false_marker }| { + // For now, ignore any branch span that was introduced by + // expansion. This makes things like assert macros less noisy. + if !raw_span.ctxt().outer_expn_data().is_root() { + return None; + } + let (span, _) = + unexpand_into_body_span_with_visible_macro(raw_span, hir_info.body_span)?; + + let bcb_from_marker = + |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?); + + let true_bcb = bcb_from_marker(true_marker)?; + let false_bcb = bcb_from_marker(false_marker)?; + + Some(BcbBranchPair { span, true_bcb, false_bcb }) + }) + .collect::<Vec<_>>() +} + +pub(super) fn extract_mcdc_mappings( + mir_body: &mir::Body<'_>, + body_span: Span, + basic_coverage_blocks: &CoverageGraph, +) -> Vec<BcbMapping> { + let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else { + return vec![]; + }; + + let block_markers = resolve_block_markers(branch_info, mir_body); + + let bcb_from_marker = + |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?); + + let check_branch_bcb = + |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| { + // For now, ignore any branch span that was introduced by + // expansion. This makes things like assert macros less noisy. 
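+ // (A root expansion context means the span was written directly in the
+ // source file rather than produced by a macro.)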
+ if !raw_span.ctxt().outer_expn_data().is_root() { + return None; + } + let (span, _) = unexpand_into_body_span_with_visible_macro(raw_span, body_span)?; + + let true_bcb = bcb_from_marker(true_marker)?; + let false_bcb = bcb_from_marker(false_marker)?; + Some((span, true_bcb, false_bcb)) + }; + + let mcdc_branch_filter_map = |&MCDCBranchSpan { + span: raw_span, + true_marker, + false_marker, + condition_info, + decision_depth, + }| { + check_branch_bcb(raw_span, true_marker, false_marker).map(|(span, true_bcb, false_bcb)| { + BcbMapping { + kind: BcbMappingKind::MCDCBranch { + true_bcb, + false_bcb, + condition_info, + decision_depth, + }, + span, + } + }) + }; + + let mut next_bitmap_idx = 0; + + let decision_filter_map = |decision: &MCDCDecisionSpan| { + let (span, _) = unexpand_into_body_span_with_visible_macro(decision.span, body_span)?; + + let end_bcbs = decision + .end_markers + .iter() + .map(|&marker| bcb_from_marker(marker)) + .collect::<Option<_>>()?; + + let bitmap_idx = next_bitmap_idx; + next_bitmap_idx += (1_u32 << decision.conditions_num).div_ceil(8); + + Some(BcbMapping { + kind: BcbMappingKind::MCDCDecision { + end_bcbs, + bitmap_idx, + conditions_num: decision.conditions_num as u16, + decision_depth: decision.decision_depth, + }, + span, + }) + }; + + std::iter::empty() + .chain(branch_info.mcdc_branch_spans.iter().filter_map(mcdc_branch_filter_map)) + .chain(branch_info.mcdc_decision_spans.iter().filter_map(decision_filter_map)) + .collect::<Vec<_>>() +} diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 9e8648b0f93..9edde666246 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -2,14 +2,14 @@ pub mod query; mod counters; mod graph; +mod mappings; mod spans; - #[cfg(test)] mod tests; use self::counters::{CounterIncrementSite, CoverageCounters}; use self::graph::{BasicCoverageBlock, CoverageGraph}; -use self::spans::{BcbMapping, BcbMappingKind, CoverageSpans}; +use self::mappings::{BcbBranchPair, BcbMapping, BcbMappingKind, CoverageSpans}; use crate::MirPass; @@ -71,7 +71,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir: //////////////////////////////////////////////////// // Compute coverage spans from the `CoverageGraph`. let Some(coverage_spans) = - spans::generate_coverage_spans(mir_body, &hir_info, &basic_coverage_blocks) + mappings::generate_coverage_spans(mir_body, &hir_info, &basic_coverage_blocks) else { // No relevant spans were found in MIR, so skip instrumenting this function. return; @@ -102,12 +102,23 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir: inject_mcdc_statements(mir_body, &basic_coverage_blocks, &coverage_spans); + let mcdc_max_decision_depth = coverage_spans + .mappings + .iter() + .filter_map(|bcb_mapping| match bcb_mapping.kind { + BcbMappingKind::MCDCDecision { decision_depth, .. 
} => Some(decision_depth), + _ => None, + }) + .max() + .unwrap_or(0); + mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo { function_source_hash: hir_info.function_source_hash, num_counters: coverage_counters.num_counters(), mcdc_bitmap_bytes: coverage_spans.test_vector_bitmap_bytes(), expressions: coverage_counters.into_expressions(), mappings, + mcdc_max_decision_depth, })); } @@ -141,21 +152,26 @@ fn create_mappings<'tcx>( let mut mappings = Vec::new(); - mappings.extend(coverage_spans.all_bcb_mappings().filter_map( + mappings.extend(coverage_spans.mappings.iter().filter_map( |BcbMapping { kind: bcb_mapping_kind, span }| { let kind = match *bcb_mapping_kind { BcbMappingKind::Code(bcb) => MappingKind::Code(term_for_bcb(bcb)), - BcbMappingKind::Branch { true_bcb, false_bcb } => MappingKind::Branch { + BcbMappingKind::MCDCBranch { + true_bcb, false_bcb, condition_info: None, .. + } => MappingKind::Branch { true_term: term_for_bcb(true_bcb), false_term: term_for_bcb(false_bcb), }, - BcbMappingKind::MCDCBranch { true_bcb, false_bcb, condition_info } => { - MappingKind::MCDCBranch { - true_term: term_for_bcb(true_bcb), - false_term: term_for_bcb(false_bcb), - mcdc_params: condition_info, - } - } + BcbMappingKind::MCDCBranch { + true_bcb, + false_bcb, + condition_info: Some(mcdc_params), + .. + } => MappingKind::MCDCBranch { + true_term: term_for_bcb(true_bcb), + false_term: term_for_bcb(false_bcb), + mcdc_params, + }, BcbMappingKind::MCDCDecision { bitmap_idx, conditions_num, .. } => { MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, conditions_num }) } @@ -165,6 +181,16 @@ fn create_mappings<'tcx>( }, )); + mappings.extend(coverage_spans.branch_pairs.iter().filter_map( + |&BcbBranchPair { span, true_bcb, false_bcb }| { + let true_term = term_for_bcb(true_bcb); + let false_term = term_for_bcb(false_bcb); + let kind = MappingKind::Branch { true_term, false_term }; + let code_region = make_code_region(source_map, file_name, span, body_span)?; + Some(Mapping { kind, code_region }) + }, + )); + mappings } @@ -232,24 +258,28 @@ fn inject_mcdc_statements<'tcx>( } // Inject test vector update first because `inject_statement` always insert new statement at head. - for (end_bcbs, bitmap_idx) in - coverage_spans.all_bcb_mappings().filter_map(|mapping| match &mapping.kind { - BcbMappingKind::MCDCDecision { end_bcbs, bitmap_idx, .. } => { - Some((end_bcbs, *bitmap_idx)) + for (end_bcbs, bitmap_idx, decision_depth) in + coverage_spans.mappings.iter().filter_map(|mapping| match &mapping.kind { + BcbMappingKind::MCDCDecision { end_bcbs, bitmap_idx, decision_depth, .. 
} => { + Some((end_bcbs, *bitmap_idx, *decision_depth)) } _ => None, }) { for end in end_bcbs { let end_bb = basic_coverage_blocks[*end].leader_bb(); - inject_statement(mir_body, CoverageKind::TestVectorBitmapUpdate { bitmap_idx }, end_bb); + inject_statement( + mir_body, + CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth }, + end_bb, + ); } } - for (true_bcb, false_bcb, condition_id) in - coverage_spans.all_bcb_mappings().filter_map(|mapping| match mapping.kind { - BcbMappingKind::MCDCBranch { true_bcb, false_bcb, condition_info } => { - Some((true_bcb, false_bcb, condition_info.condition_id)) + for (true_bcb, false_bcb, condition_id, decision_depth) in + coverage_spans.mappings.iter().filter_map(|mapping| match mapping.kind { + BcbMappingKind::MCDCBranch { true_bcb, false_bcb, condition_info, decision_depth } => { + Some((true_bcb, false_bcb, condition_info?.condition_id, decision_depth)) } _ => None, }) @@ -257,13 +287,13 @@ fn inject_mcdc_statements<'tcx>( let true_bb = basic_coverage_blocks[true_bcb].leader_bb(); inject_statement( mir_body, - CoverageKind::CondBitmapUpdate { id: condition_id, value: true }, + CoverageKind::CondBitmapUpdate { id: condition_id, value: true, decision_depth }, true_bb, ); let false_bb = basic_coverage_blocks[false_bcb].leader_bb(); inject_statement( mir_body, - CoverageKind::CondBitmapUpdate { id: condition_id, value: false }, + CoverageKind::CondBitmapUpdate { id: condition_id, value: false, decision_depth }, false_bb, ); } diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index a4cd8a38c66..d6432e2e9d4 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -1,125 +1,31 @@ -use rustc_data_structures::graph::DirectedGraph; -use rustc_index::bit_set::BitSet; use rustc_middle::mir; -use rustc_middle::mir::coverage::ConditionInfo; use rustc_span::{BytePos, Span}; -use std::collections::BTreeSet; -use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB}; +use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph}; +use crate::coverage::mappings::{BcbMapping, BcbMappingKind}; use crate::coverage::spans::from_mir::SpanFromMir; use crate::coverage::ExtractedHirInfo; mod from_mir; -#[derive(Clone, Debug)] -pub(super) enum BcbMappingKind { - /// Associates an ordinary executable code span with its corresponding BCB. - Code(BasicCoverageBlock), - /// Associates a branch span with BCBs for its true and false arms. - Branch { true_bcb: BasicCoverageBlock, false_bcb: BasicCoverageBlock }, - /// Associates a mcdc branch span with condition info besides fields for normal branch. - MCDCBranch { - true_bcb: BasicCoverageBlock, - false_bcb: BasicCoverageBlock, - condition_info: ConditionInfo, - }, - /// Associates a mcdc decision with its join BCB. 
- MCDCDecision { end_bcbs: BTreeSet<BasicCoverageBlock>, bitmap_idx: u32, conditions_num: u16 }, -} - -#[derive(Debug)] -pub(super) struct BcbMapping { - pub(super) kind: BcbMappingKind, - pub(super) span: Span, -} - -pub(super) struct CoverageSpans { - bcb_has_mappings: BitSet<BasicCoverageBlock>, - mappings: Vec<BcbMapping>, - test_vector_bitmap_bytes: u32, -} - -impl CoverageSpans { - pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool { - self.bcb_has_mappings.contains(bcb) - } - - pub(super) fn all_bcb_mappings(&self) -> impl Iterator<Item = &BcbMapping> { - self.mappings.iter() - } - - pub(super) fn test_vector_bitmap_bytes(&self) -> u32 { - self.test_vector_bitmap_bytes - } -} +// FIXME(#124545) It's awkward that we have to re-export this, because it's an +// internal detail of `from_mir` that is also needed when handling branch and +// MC/DC spans. Ideally we would find a more natural home for it. +pub(super) use from_mir::unexpand_into_body_span_with_visible_macro; -/// Extracts coverage-relevant spans from MIR, and associates them with -/// their corresponding BCBs. -/// -/// Returns `None` if no coverage-relevant spans could be extracted. -pub(super) fn generate_coverage_spans( +pub(super) fn extract_refined_covspans( mir_body: &mir::Body<'_>, hir_info: &ExtractedHirInfo, basic_coverage_blocks: &CoverageGraph, -) -> Option<CoverageSpans> { - let mut mappings = vec![]; - - if hir_info.is_async_fn { - // An async function desugars into a function that returns a future, - // with the user code wrapped in a closure. Any spans in the desugared - // outer function will be unhelpful, so just keep the signature span - // and ignore all of the spans in the MIR body. - if let Some(span) = hir_info.fn_sig_span_extended { - mappings.push(BcbMapping { kind: BcbMappingKind::Code(START_BCB), span }); - } - } else { - let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans( - mir_body, - hir_info, - basic_coverage_blocks, - ); - let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans); - mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| { - // Each span produced by the generator represents an ordinary code region. - BcbMapping { kind: BcbMappingKind::Code(bcb), span } - })); - - mappings.extend(from_mir::extract_branch_mappings( - mir_body, - hir_info.body_span, - basic_coverage_blocks, - )); - } - - if mappings.is_empty() { - return None; - } - - // Identify which BCBs have one or more mappings. - let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes()); - let mut insert = |bcb| { - bcb_has_mappings.insert(bcb); - }; - let mut test_vector_bitmap_bytes = 0; - for BcbMapping { kind, span: _ } in &mappings { - match *kind { - BcbMappingKind::Code(bcb) => insert(bcb), - BcbMappingKind::Branch { true_bcb, false_bcb } - | BcbMappingKind::MCDCBranch { true_bcb, false_bcb, .. } => { - insert(true_bcb); - insert(false_bcb); - } - BcbMappingKind::MCDCDecision { bitmap_idx, conditions_num, .. } => { - // `bcb_has_mappings` is used for inject coverage counters - // but they are not needed for decision BCBs. - // While the length of test vector bitmap should be calculated here. 
- test_vector_bitmap_bytes = test_vector_bitmap_bytes - .max(bitmap_idx + (1_u32 << conditions_num as u32).div_ceil(8)); - } - } - } - - Some(CoverageSpans { bcb_has_mappings, mappings, test_vector_bitmap_bytes }) + mappings: &mut impl Extend<BcbMapping>, +) { + let sorted_spans = + from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks); + let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans); + mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| { + // Each span produced by the generator represents an ordinary code region. + BcbMapping { kind: BcbMappingKind::Code(bcb), span } + })); } #[derive(Debug)] diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs index b9919a2ae88..4ce37b5defc 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs @@ -1,11 +1,8 @@ use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashSet; -use rustc_index::IndexVec; -use rustc_middle::mir::coverage::{ - BlockMarkerId, BranchSpan, CoverageKind, MCDCBranchSpan, MCDCDecisionSpan, -}; +use rustc_middle::mir::coverage::CoverageKind; use rustc_middle::mir::{ - self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator, + self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, }; use rustc_span::{ExpnKind, MacroKind, Span, Symbol}; @@ -13,7 +10,6 @@ use rustc_span::{ExpnKind, MacroKind, Span, Symbol}; use crate::coverage::graph::{ BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB, }; -use crate::coverage::spans::{BcbMapping, BcbMappingKind}; use crate::coverage::ExtractedHirInfo; /// Traverses the MIR body to produce an initial collection of coverage-relevant @@ -287,7 +283,7 @@ fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> { /// /// [^1]Expansions result from Rust syntax including macros, syntactic sugar, /// etc.). -fn unexpand_into_body_span_with_visible_macro( +pub(crate) fn unexpand_into_body_span_with_visible_macro( original_span: Span, body_span: Span, ) -> Option<(Span, Option<Symbol>)> { @@ -365,92 +361,3 @@ impl SpanFromMir { Self { span, visible_macro, bcb, is_hole } } } - -pub(super) fn extract_branch_mappings( - mir_body: &mir::Body<'_>, - body_span: Span, - basic_coverage_blocks: &CoverageGraph, -) -> Vec<BcbMapping> { - let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else { - return vec![]; - }; - - let mut block_markers = IndexVec::<BlockMarkerId, Option<BasicBlock>>::from_elem_n( - None, - branch_info.num_block_markers, - ); - - // Fill out the mapping from block marker IDs to their enclosing blocks. - for (bb, data) in mir_body.basic_blocks.iter_enumerated() { - for statement in &data.statements { - if let StatementKind::Coverage(CoverageKind::BlockMarker { id }) = statement.kind { - block_markers[id] = Some(bb); - } - } - } - - let bcb_from_marker = - |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?); - - let check_branch_bcb = - |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| { - // For now, ignore any branch span that was introduced by - // expansion. This makes things like assert macros less noisy. 
- if !raw_span.ctxt().outer_expn_data().is_root() { - return None; - } - let (span, _) = unexpand_into_body_span_with_visible_macro(raw_span, body_span)?; - - let true_bcb = bcb_from_marker(true_marker)?; - let false_bcb = bcb_from_marker(false_marker)?; - Some((span, true_bcb, false_bcb)) - }; - - let branch_filter_map = |&BranchSpan { span: raw_span, true_marker, false_marker }| { - check_branch_bcb(raw_span, true_marker, false_marker).map(|(span, true_bcb, false_bcb)| { - BcbMapping { kind: BcbMappingKind::Branch { true_bcb, false_bcb }, span } - }) - }; - - let mcdc_branch_filter_map = - |&MCDCBranchSpan { span: raw_span, true_marker, false_marker, condition_info }| { - check_branch_bcb(raw_span, true_marker, false_marker).map( - |(span, true_bcb, false_bcb)| BcbMapping { - kind: BcbMappingKind::MCDCBranch { true_bcb, false_bcb, condition_info }, - span, - }, - ) - }; - - let mut next_bitmap_idx = 0; - - let decision_filter_map = |decision: &MCDCDecisionSpan| { - let (span, _) = unexpand_into_body_span_with_visible_macro(decision.span, body_span)?; - - let end_bcbs = decision - .end_markers - .iter() - .map(|&marker| bcb_from_marker(marker)) - .collect::<Option<_>>()?; - - let bitmap_idx = next_bitmap_idx; - next_bitmap_idx += (1_u32 << decision.conditions_num).div_ceil(8); - - Some(BcbMapping { - kind: BcbMappingKind::MCDCDecision { - end_bcbs, - bitmap_idx, - conditions_num: decision.conditions_num as u16, - }, - span, - }) - }; - - branch_info - .branch_spans - .iter() - .filter_map(branch_filter_map) - .chain(branch_info.mcdc_branch_spans.iter().filter_map(mcdc_branch_filter_map)) - .chain(branch_info.mcdc_decision_spans.iter().filter_map(decision_filter_map)) - .collect::<Vec<_>>() -} diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index ca63f5550ae..370e930b740 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -160,7 +160,7 @@ pub fn deduced_param_attrs<'tcx>( return &[]; } - // If the Freeze language item isn't present, then don't bother. + // If the Freeze lang item isn't present, then don't bother. if tcx.lang_items().freeze_trait().is_none() { return &[]; } diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 625d8f53939..7a3b08f82f6 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -332,7 +332,8 @@ impl<'tcx> Inliner<'tcx> { | InstanceDef::DropGlue(..) | InstanceDef::CloneShim(..) | InstanceDef::ThreadLocalShim(..) - | InstanceDef::FnPtrAddrShim(..) => return Ok(()), + | InstanceDef::FnPtrAddrShim(..) + | InstanceDef::AsyncDropGlueCtorShim(..) => return Ok(()), } if self.tcx.is_constructor(callee_def_id) { @@ -719,18 +720,12 @@ impl<'tcx> Inliner<'tcx> { kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) }, }); - // Copy only unevaluated constants from the callee_body into the caller_body. - // Although we are only pushing `ConstKind::Unevaluated` consts to - // `required_consts`, here we may not only have `ConstKind::Unevaluated` - // because we are calling `instantiate_and_normalize_erasing_regions`. - caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter( - |&ct| match ct.const_ { - Const::Ty(_) => { - bug!("should never encounter ty::UnevaluatedConst in `required_consts`") - } - Const::Val(..) | Const::Unevaluated(..) 
=> true, - }, - )); + // Copy required constants from the callee_body into the caller_body. Although we are only + // pushing unevaluated consts to `required_consts`, here they may have been evaluated + // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again. + caller_body.required_consts.extend( + callee_body.required_consts.into_iter().filter(|ct| ct.const_.is_required_const()), + ); // Now that we incorporated the callee's `required_consts`, we can remove the callee from // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does // some extra work here to save the monomorphization collector work later. It helps a lot, @@ -746,8 +741,9 @@ impl<'tcx> Inliner<'tcx> { caller_body.mentioned_items.remove(idx); caller_body.mentioned_items.extend(callee_body.mentioned_items); } else { - // If we can't find the callee, there's no point in adding its items. - // Probably it already got removed by being inlined elsewhere in the same function. + // If we can't find the callee, there's no point in adding its items. Probably it + // already got removed by being inlined elsewhere in the same function, so we already + // took its items. } } @@ -1083,7 +1079,8 @@ fn try_instance_mir<'tcx>( tcx: TyCtxt<'tcx>, instance: InstanceDef<'tcx>, ) -> Result<&'tcx Body<'tcx>, &'static str> { - if let ty::InstanceDef::DropGlue(_, Some(ty)) = instance + if let ty::InstanceDef::DropGlue(_, Some(ty)) + | ty::InstanceDef::AsyncDropGlueCtorShim(_, Some(ty)) = instance && let ty::Adt(def, args) = ty.kind() { let fields = def.all_fields(); diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs index 99c7b616f1b..8c5f965108b 100644 --- a/compiler/rustc_mir_transform/src/inline/cycle.rs +++ b/compiler/rustc_mir_transform/src/inline/cycle.rs @@ -94,8 +94,10 @@ pub(crate) fn mir_callgraph_reachable<'tcx>( | InstanceDef::CloneShim(..) => {} // This shim does not call any other functions, thus there can be no recursion. - InstanceDef::FnPtrAddrShim(..) => continue, - InstanceDef::DropGlue(..) => { + InstanceDef::FnPtrAddrShim(..) => { + continue; + } + InstanceDef::DropGlue(..) | InstanceDef::AsyncDropGlueCtorShim(..) => { // FIXME: A not fully instantiated drop shim can cause ICEs if one attempts to // have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this // needs some more analysis. diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index b8dbf8a759f..90c1c7ba63b 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -583,33 +583,21 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { val.into() } - Aggregate(ref kind, ref fields) => { - // Do not const prop union fields as they can be - // made to produce values that don't match their - // underlying layout's type (see ICE #121534). 
- // If the last element of the `Adt` tuple - // is `Some` it indicates the ADT is a union - if let AggregateKind::Adt(_, _, _, _, Some(_)) = **kind { - return None; - }; - Value::Aggregate { - fields: fields - .iter() - .map(|field| { - self.eval_operand(field).map_or(Value::Uninit, Value::Immediate) - }) - .collect(), - variant: match **kind { - AggregateKind::Adt(_, variant, _, _, _) => variant, - AggregateKind::Array(_) - | AggregateKind::Tuple - | AggregateKind::RawPtr(_, _) - | AggregateKind::Closure(_, _) - | AggregateKind::Coroutine(_, _) - | AggregateKind::CoroutineClosure(_, _) => VariantIdx::ZERO, - }, - } - } + Aggregate(ref kind, ref fields) => Value::Aggregate { + fields: fields + .iter() + .map(|field| self.eval_operand(field).map_or(Value::Uninit, Value::Immediate)) + .collect(), + variant: match **kind { + AggregateKind::Adt(_, variant, _, _, _) => variant, + AggregateKind::Array(_) + | AggregateKind::Tuple + | AggregateKind::RawPtr(_, _) + | AggregateKind::Closure(_, _) + | AggregateKind::Coroutine(_, _) + | AggregateKind::CoroutineClosure(_, _) => VariantIdx::ZERO, + }, + }, Repeat(ref op, n) => { trace!(?op, ?n); @@ -896,13 +884,20 @@ impl CanConstProp { }; for (local, val) in cpv.can_const_prop.iter_enumerated_mut() { let ty = body.local_decls[local].ty; - match tcx.layout_of(param_env.and(ty)) { - Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {} - // Either the layout fails to compute, then we can't use this local anyway - // or the local is too large, then we don't want to. - _ => { - *val = ConstPropMode::NoPropagation; - continue; + if ty.is_union() { + // Unions are incompatible with the current implementation of + // const prop because Rust has no concept of an active + // variant of a union + *val = ConstPropMode::NoPropagation; + } else { + match tcx.layout_of(param_env.and(ty)) { + Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {} + // Either the layout fails to compute, then we can't use this local anyway + // or the local is too large, then we don't want to. + _ => { + *val = ConstPropMode::NoPropagation; + continue; + } } } } diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index e477c068229..e69c5da757e 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -4,7 +4,6 @@ #![feature(cow_is_borrowed)] #![feature(decl_macro)] #![feature(impl_trait_in_assoc_type)] -#![feature(inline_const)] #![feature(is_sorted)] #![feature(let_chains)] #![feature(map_try_insert)] @@ -333,6 +332,8 @@ fn mir_promoted( body.tainted_by_errors = Some(error_reported); } + // Collect `required_consts` *before* promotion, so if there are any consts being promoted + // we still add them to the list in the outer MIR body. 
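+ // Promoteds that involve a function call separately push themselves onto
+ // `required_consts`; see the `Promoter` changes in `promote_consts.rs` below.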
let mut required_consts = Vec::new(); let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts); for (bb, bb_data) in traversal::reverse_postorder(&body) { diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs index 8137525a332..2267a621a83 100644 --- a/compiler/rustc_mir_transform/src/lower_slice_len.rs +++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs @@ -21,7 +21,7 @@ impl<'tcx> MirPass<'tcx> for LowerSliceLenCalls { pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let language_items = tcx.lang_items(); let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else { - // there is no language item to compare to :) + // there is no lang item to compare to :) return; }; diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 1f4af0ec63d..689a547689a 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -13,6 +13,7 @@ //! move analysis runs after promotion on broken MIR. use either::{Left, Right}; +use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_middle::mir; use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor}; @@ -175,6 +176,12 @@ fn collect_temps_and_candidates<'tcx>( struct Validator<'a, 'tcx> { ccx: &'a ConstCx<'a, 'tcx>, temps: &'a mut IndexSlice<Local, TempState>, + /// For backwards compatibility, we are promoting function calls in `const`/`static` + /// initializers. But we want to avoid evaluating code that might panic and that otherwise would + /// not have been evaluated, so we only promote such calls in basic blocks that are guaranteed + /// to execute. In other words, we only promote such calls in basic blocks that are definitely + /// not dead code. Here we cache the result of computing that set of basic blocks. + promotion_safe_blocks: Option<FxHashSet<BasicBlock>>, } impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> { @@ -260,7 +267,9 @@ impl<'tcx> Validator<'_, 'tcx> { self.validate_rvalue(rhs) } Right(terminator) => match &terminator.kind { - TerminatorKind::Call { func, args, .. } => self.validate_call(func, args), + TerminatorKind::Call { func, args, .. } => { + self.validate_call(func, args, loc.block) + } TerminatorKind::Yield { .. } => Err(Unpromotable), kind => { span_bug!(terminator.source_info.span, "{:?} not promotable", kind); @@ -384,7 +393,7 @@ impl<'tcx> Validator<'_, 'tcx> { match kind { // Reject these borrow types just to be safe. // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase. - BorrowKind::Fake | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => { + BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => { return Err(Unpromotable); } @@ -588,29 +597,79 @@ impl<'tcx> Validator<'_, 'tcx> { Ok(()) } + /// Computes the sets of blocks of this MIR that are definitely going to be executed + /// if the function returns successfully. That makes it safe to promote calls in them + /// that might fail. + fn promotion_safe_blocks(body: &mir::Body<'tcx>) -> FxHashSet<BasicBlock> { + let mut safe_blocks = FxHashSet::default(); + let mut safe_block = START_BLOCK; + loop { + safe_blocks.insert(safe_block); + // Let's see if we can find another safe block. 
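+ // Each of the terminator kinds below reaches its listed target on every
+ // successful execution; any other terminator kind ends the walk.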
+ safe_block = match body.basic_blocks[safe_block].terminator().kind { + TerminatorKind::Goto { target } => target, + TerminatorKind::Call { target: Some(target), .. } + | TerminatorKind::Drop { target, .. } => { + // This calls a function or the destructor. `target` does not get executed if + // the callee loops or panics. But in both cases the const already fails to + // evaluate, so we are fine considering `target` a safe block for promotion. + target + } + TerminatorKind::Assert { target, .. } => { + // Similar to above, we only consider successful execution. + target + } + _ => { + // No next safe block. + break; + } + }; + } + safe_blocks + } + + /// Returns whether the block is "safe" for promotion, which means it cannot be dead code. + /// We use this to avoid promoting operations that can fail in dead code. + fn is_promotion_safe_block(&mut self, block: BasicBlock) -> bool { + let body = self.body; + let safe_blocks = + self.promotion_safe_blocks.get_or_insert_with(|| Self::promotion_safe_blocks(body)); + safe_blocks.contains(&block) + } + fn validate_call( &mut self, callee: &Operand<'tcx>, args: &[Spanned<Operand<'tcx>>], + block: BasicBlock, ) -> Result<(), Unpromotable> { + // Validate the operands. If they fail, there's no question -- we cannot promote. + self.validate_operand(callee)?; + for arg in args { + self.validate_operand(&arg.node)?; + } + + // Functions marked `#[rustc_promotable]` are explicitly allowed to be promoted, so we can + // accept them at this point. let fn_ty = callee.ty(self.body, self.tcx); + if let ty::FnDef(def_id, _) = *fn_ty.kind() { + if self.tcx.is_promotable_const_fn(def_id) { + return Ok(()); + } + } - // Inside const/static items, we promote all (eligible) function calls. - // Everywhere else, we require `#[rustc_promotable]` on the callee. - let promote_all_const_fn = matches!( + // Ideally, we'd stop here and reject the rest. + // But for backward compatibility, we have to accept some promotion in const/static + // initializers. Inline consts are explicitly excluded, they are more recent so we have no + // backwards compatibility reason to allow more promotion inside of them. + let promote_all_fn = matches!( self.const_kind, Some(hir::ConstContext::Static(_) | hir::ConstContext::Const { inline: false }) ); - if !promote_all_const_fn { - if let ty::FnDef(def_id, _) = *fn_ty.kind() { - // Never promote runtime `const fn` calls of - // functions without `#[rustc_promotable]`. - if !self.tcx.is_promotable_const_fn(def_id) { - return Err(Unpromotable); - } - } + if !promote_all_fn { + return Err(Unpromotable); } - + // Make sure the callee is a `const fn`. let is_const_fn = match *fn_ty.kind() { ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id), _ => false, @@ -618,23 +677,23 @@ impl<'tcx> Validator<'_, 'tcx> { if !is_const_fn { return Err(Unpromotable); } - - self.validate_operand(callee)?; - for arg in args { - self.validate_operand(&arg.node)?; + // The problem is, this may promote calls to functions that panic. + // We don't want to introduce compilation errors if there's a panic in a call in dead code. + // So we ensure that this is not dead code. + if !self.is_promotion_safe_block(block) { + return Err(Unpromotable); } - + // This passed all checks, so let's accept. Ok(()) } } -// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`. 
fn validate_candidates( ccx: &ConstCx<'_, '_>, temps: &mut IndexSlice<Local, TempState>, candidates: &[Candidate], ) -> Vec<Candidate> { - let mut validator = Validator { ccx, temps }; + let mut validator = Validator { ccx, temps, promotion_safe_blocks: None }; candidates .iter() @@ -653,6 +712,10 @@ struct Promoter<'a, 'tcx> { /// If true, all nested temps are also kept in the /// source MIR, not moved to the promoted MIR. keep_original: bool, + + /// If true, add the new const (the promoted) to the required_consts of the parent MIR. + /// This is initially false and then set by the visitor when it encounters a `Call` terminator. + add_to_required: bool, } impl<'a, 'tcx> Promoter<'a, 'tcx> { @@ -755,6 +818,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { TerminatorKind::Call { mut func, mut args, call_source: desugar, fn_span, .. } => { + // This promoted involves a function call, so it may fail to evaluate. + // Let's make sure it is added to `required_consts` so that that failure cannot get lost. + self.add_to_required = true; + self.visit_operand(&mut func, loc); for arg in &mut args { self.visit_operand(&mut arg.node, loc); @@ -789,7 +856,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { fn promote_candidate(mut self, candidate: Candidate, next_promoted_id: usize) -> Body<'tcx> { let def = self.source.source.def_id(); - let mut rvalue = { + let (mut rvalue, promoted_op) = { let promoted = &mut self.promoted; let promoted_id = Promoted::new(next_promoted_id); let tcx = self.tcx; @@ -799,11 +866,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def)); let uneval = mir::UnevaluatedConst { def, args, promoted: Some(promoted_id) }; - Operand::Constant(Box::new(ConstOperand { - span, - user_ty: None, - const_: Const::Unevaluated(uneval, ty), - })) + ConstOperand { span, user_ty: None, const_: Const::Unevaluated(uneval, ty) } }; let blocks = self.source.basic_blocks.as_mut(); @@ -836,22 +899,26 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let promoted_ref = local_decls.push(promoted_ref); assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref); + let promoted_operand = promoted_operand(ref_ty, span); let promoted_ref_statement = Statement { source_info: statement.source_info, kind: StatementKind::Assign(Box::new(( Place::from(promoted_ref), - Rvalue::Use(promoted_operand(ref_ty, span)), + Rvalue::Use(Operand::Constant(Box::new(promoted_operand))), ))), }; self.extra_statements.push((loc, promoted_ref_statement)); - Rvalue::Ref( - tcx.lifetimes.re_erased, - *borrow_kind, - Place { - local: mem::replace(&mut place.local, promoted_ref), - projection: List::empty(), - }, + ( + Rvalue::Ref( + tcx.lifetimes.re_erased, + *borrow_kind, + Place { + local: mem::replace(&mut place.local, promoted_ref), + projection: List::empty(), + }, + ), + promoted_operand, ) }; @@ -863,6 +930,12 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let span = self.promoted.span; self.assign(RETURN_PLACE, rvalue, span); + + // Now that we did promotion, we know whether we'll want to add this to `required_consts`. 
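+ // (`add_to_required` was set while visiting a `Call` terminator, i.e.
+ // exactly when evaluating this promoted can fail.)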
+ if self.add_to_required { + self.source.required_consts.push(promoted_op); + } + self.promoted } } @@ -878,6 +951,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> { *local = self.promote_temp(*local); } } + + fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) { + if constant.const_.is_required_const() { + self.promoted.required_consts.push(*constant); + } + + // Skipping `super_constant` as the visitor is otherwise only looking for locals. + } } fn promote_candidates<'tcx>( @@ -931,8 +1012,10 @@ fn promote_candidates<'tcx>( temps: &mut temps, extra_statements: &mut extra_statements, keep_original: false, + add_to_required: false, }; + // `required_consts` of the promoted itself gets filled while building the MIR body. let mut promoted = promoter.promote_candidate(candidate, promotions.len()); promoted.source.promoted = Some(promotions.next_index()); promotions.push(promoted); diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs index abde6a47e83..71ac929d35e 100644 --- a/compiler/rustc_mir_transform/src/required_consts.rs +++ b/compiler/rustc_mir_transform/src/required_consts.rs @@ -1,6 +1,5 @@ use rustc_middle::mir::visit::Visitor; -use rustc_middle::mir::{Const, ConstOperand, Location}; -use rustc_middle::ty::ConstKind; +use rustc_middle::mir::{ConstOperand, Location}; pub struct RequiredConstsVisitor<'a, 'tcx> { required_consts: &'a mut Vec<ConstOperand<'tcx>>, @@ -14,14 +13,8 @@ impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> { impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> { fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _: Location) { - let const_ = constant.const_; - match const_ { - Const::Ty(c) => match c.kind() { - ConstKind::Param(_) | ConstKind::Error(_) | ConstKind::Value(_) => {} - _ => bug!("only ConstKind::Param/Value should be encountered here, got {:#?}", c), - }, - Const::Unevaluated(..) => self.required_consts.push(*constant), - Const::Val(..) => {} + if constant.const_.is_required_const() { + self.required_consts.push(*constant); } } } diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index fa6906bdd55..1c85a604053 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -22,6 +22,8 @@ use crate::{ use rustc_middle::mir::patch::MirPatch; use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle}; +mod async_destructor_ctor; + pub fn provide(providers: &mut Providers) { providers.mir_shims = make_shim; } @@ -127,6 +129,9 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<' ty::InstanceDef::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance), ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty), ty::InstanceDef::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty), + ty::InstanceDef::AsyncDropGlueCtorShim(def_id, ty) => { + async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty) + } ty::InstanceDef::Virtual(..) 
=> { bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance) } diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs new file mode 100644 index 00000000000..80eadb9abdc --- /dev/null +++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs @@ -0,0 +1,618 @@ +use std::iter; + +use itertools::Itertools; +use rustc_ast::Mutability; +use rustc_const_eval::interpret; +use rustc_hir::def_id::DefId; +use rustc_hir::lang_items::LangItem; +use rustc_index::{Idx, IndexVec}; +use rustc_middle::mir::{ + BasicBlock, BasicBlockData, Body, CallSource, CastKind, Const, ConstOperand, ConstValue, Local, + LocalDecl, MirSource, Operand, Place, PlaceElem, Rvalue, SourceInfo, Statement, StatementKind, + Terminator, TerminatorKind, UnwindAction, UnwindTerminateReason, RETURN_PLACE, +}; +use rustc_middle::ty::adjustment::PointerCoercion; +use rustc_middle::ty::util::Discr; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::source_map::respan; +use rustc_span::{Span, Symbol}; +use rustc_target::abi::{FieldIdx, VariantIdx}; +use rustc_target::spec::PanicStrategy; + +use super::{local_decls_for_sig, new_body}; + +pub fn build_async_destructor_ctor_shim<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + ty: Option<Ty<'tcx>>, +) -> Body<'tcx> { + debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty); + + AsyncDestructorCtorShimBuilder::new(tcx, def_id, ty).build() +} + +/// Builder for async_drop_in_place shim. Functions as a stack machine +/// to build up an expression using combinators. Stack contains pairs +/// of locals and types. Combinator is a not yet instantiated pair of a +/// function and a type, is considered to be an operator which consumes +/// operands from the stack by instantiating its function and its type +/// with operand types and moving locals into the function call. Top +/// pair is considered to be the last operand. 
+// FIXME: add mir-opt tests +struct AsyncDestructorCtorShimBuilder<'tcx> { + tcx: TyCtxt<'tcx>, + def_id: DefId, + self_ty: Option<Ty<'tcx>>, + span: Span, + source_info: SourceInfo, + param_env: ty::ParamEnv<'tcx>, + + stack: Vec<Operand<'tcx>>, + last_bb: BasicBlock, + top_cleanup_bb: Option<BasicBlock>, + + locals: IndexVec<Local, LocalDecl<'tcx>>, + bbs: IndexVec<BasicBlock, BasicBlockData<'tcx>>, +} + +#[derive(Clone, Copy)] +enum SurfaceDropKind { + Async, + Sync, +} + +impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> { + const SELF_PTR: Local = Local::from_u32(1); + const INPUT_COUNT: usize = 1; + const MAX_STACK_LEN: usize = 2; + + fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Option<Ty<'tcx>>) -> Self { + let args = if let Some(ty) = self_ty { + tcx.mk_args(&[ty.into()]) + } else { + ty::GenericArgs::identity_for_item(tcx, def_id) + }; + let sig = tcx.fn_sig(def_id).instantiate(tcx, args); + let sig = tcx.instantiate_bound_regions_with_erased(sig); + let span = tcx.def_span(def_id); + + let source_info = SourceInfo::outermost(span); + + debug_assert_eq!(sig.inputs().len(), Self::INPUT_COUNT); + let locals = local_decls_for_sig(&sig, span); + + // Usual case: noop() + unwind resume + return + let mut bbs = IndexVec::with_capacity(3); + let param_env = tcx.param_env_reveal_all_normalized(def_id); + AsyncDestructorCtorShimBuilder { + tcx, + def_id, + self_ty, + span, + source_info, + param_env, + + stack: Vec::with_capacity(Self::MAX_STACK_LEN), + last_bb: bbs.push(BasicBlockData::new(None)), + top_cleanup_bb: match tcx.sess.panic_strategy() { + PanicStrategy::Unwind => { + // Don't drop input arg because it's just a pointer + Some(bbs.push(BasicBlockData { + statements: Vec::new(), + terminator: Some(Terminator { + source_info, + kind: TerminatorKind::UnwindResume, + }), + is_cleanup: true, + })) + } + PanicStrategy::Abort => None, + }, + + locals, + bbs, + } + } + + fn build(self) -> Body<'tcx> { + let (tcx, def_id, Some(self_ty)) = (self.tcx, self.def_id, self.self_ty) else { + return self.build_zst_output(); + }; + + let surface_drop_kind = || { + let param_env = tcx.param_env_reveal_all_normalized(def_id); + if self_ty.has_surface_async_drop(tcx, param_env) { + Some(SurfaceDropKind::Async) + } else if self_ty.has_surface_drop(tcx, param_env) { + Some(SurfaceDropKind::Sync) + } else { + None + } + }; + + match self_ty.kind() { + ty::Array(elem_ty, _) => self.build_slice(true, *elem_ty), + ty::Slice(elem_ty) => self.build_slice(false, *elem_ty), + + ty::Tuple(elem_tys) => self.build_chain(None, elem_tys.iter()), + ty::Adt(adt_def, args) if adt_def.is_struct() => { + let field_tys = adt_def.non_enum_variant().fields.iter().map(|f| f.ty(tcx, args)); + self.build_chain(surface_drop_kind(), field_tys) + } + ty::Closure(_, args) => self.build_chain(None, args.as_closure().upvar_tys().iter()), + ty::CoroutineClosure(_, args) => { + self.build_chain(None, args.as_coroutine_closure().upvar_tys().iter()) + } + + ty::Adt(adt_def, args) if adt_def.is_enum() => { + self.build_enum(*adt_def, *args, surface_drop_kind()) + } + + ty::Adt(adt_def, _) => { + assert!(adt_def.is_union()); + match surface_drop_kind().unwrap() { + SurfaceDropKind::Async => self.build_fused_async_surface(), + SurfaceDropKind::Sync => self.build_fused_sync_surface(), + } + } + + ty::Bound(..) + | ty::Foreign(_) + | ty::Placeholder(_) + | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) | ty::TyVar(_)) + | ty::Param(_) + | ty::Alias(..) 
=> { + bug!("Building async destructor for unexpected type: {self_ty:?}") + } + + _ => { + bug!( + "Building async destructor constructor shim is not yet implemented for type: {self_ty:?}" + ) + } + } + } + + fn build_enum( + mut self, + adt_def: ty::AdtDef<'tcx>, + args: ty::GenericArgsRef<'tcx>, + surface_drop: Option<SurfaceDropKind>, + ) -> Body<'tcx> { + let tcx = self.tcx; + + let surface = match surface_drop { + None => None, + Some(kind) => { + self.put_self(); + Some(match kind { + SurfaceDropKind::Async => self.combine_async_surface(), + SurfaceDropKind::Sync => self.combine_sync_surface(), + }) + } + }; + + let mut other = None; + for (variant_idx, discr) in adt_def.discriminants(tcx) { + let variant = adt_def.variant(variant_idx); + + let mut chain = None; + for (field_idx, field) in variant.fields.iter_enumerated() { + let field_ty = field.ty(tcx, args); + self.put_variant_field(variant.name, variant_idx, field_idx, field_ty); + let defer = self.combine_defer(field_ty); + chain = Some(match chain { + None => defer, + Some(chain) => self.combine_chain(chain, defer), + }) + } + let variant_dtor = chain.unwrap_or_else(|| self.put_noop()); + + other = Some(match other { + None => variant_dtor, + Some(other) => { + self.put_self(); + self.put_discr(discr); + self.combine_either(other, variant_dtor) + } + }); + } + let variants_dtor = other.unwrap_or_else(|| self.put_noop()); + + let dtor = match surface { + None => variants_dtor, + Some(surface) => self.combine_chain(surface, variants_dtor), + }; + self.combine_fuse(dtor); + self.return_() + } + + fn build_chain<I>(mut self, surface_drop: Option<SurfaceDropKind>, elem_tys: I) -> Body<'tcx> + where + I: Iterator<Item = Ty<'tcx>> + ExactSizeIterator, + { + let surface = match surface_drop { + None => None, + Some(kind) => { + self.put_self(); + Some(match kind { + SurfaceDropKind::Async => self.combine_async_surface(), + SurfaceDropKind::Sync => self.combine_sync_surface(), + }) + } + }; + + let mut chain = None; + for (field_idx, field_ty) in elem_tys.enumerate().map(|(i, ty)| (FieldIdx::new(i), ty)) { + self.put_field(field_idx, field_ty); + let defer = self.combine_defer(field_ty); + chain = Some(match chain { + None => defer, + Some(chain) => self.combine_chain(chain, defer), + }) + } + let chain = chain.unwrap_or_else(|| self.put_noop()); + + let dtor = match surface { + None => chain, + Some(surface) => self.combine_chain(surface, chain), + }; + self.combine_fuse(dtor); + self.return_() + } + + fn build_zst_output(mut self) -> Body<'tcx> { + self.put_zst_output(); + self.return_() + } + + fn build_fused_async_surface(mut self) -> Body<'tcx> { + self.put_self(); + let surface = self.combine_async_surface(); + self.combine_fuse(surface); + self.return_() + } + + fn build_fused_sync_surface(mut self) -> Body<'tcx> { + self.put_self(); + let surface = self.combine_sync_surface(); + self.combine_fuse(surface); + self.return_() + } + + fn build_slice(mut self, is_array: bool, elem_ty: Ty<'tcx>) -> Body<'tcx> { + if is_array { + self.put_array_as_slice(elem_ty) + } else { + self.put_self() + } + let dtor = self.combine_slice(elem_ty); + self.combine_fuse(dtor); + self.return_() + } + + fn put_zst_output(&mut self) { + let return_ty = self.locals[RETURN_PLACE].ty; + self.put_operand(Operand::Constant(Box::new(ConstOperand { + span: self.span, + user_ty: None, + const_: Const::zero_sized(return_ty), + }))); + } + + /// Puts `to_drop: *mut Self` on top of the stack. 
+ fn put_self(&mut self) {
        self.put_operand(Operand::Copy(Self::SELF_PTR.into()))
    }

    /// Given that `Self` is `[ElemTy; N]`, puts `to_drop: *mut [ElemTy]`
    /// on top of the stack.
    fn put_array_as_slice(&mut self, elem_ty: Ty<'tcx>) {
        let slice_ptr_ty = Ty::new_mut_ptr(self.tcx, Ty::new_slice(self.tcx, elem_ty));
        self.put_temp_rvalue(Rvalue::Cast(
            CastKind::PointerCoercion(PointerCoercion::Unsize),
            Operand::Copy(Self::SELF_PTR.into()),
            slice_ptr_ty,
        ))
    }

    /// Given that `Self` is a struct, puts `to_drop: *mut FieldTy` for the
    /// given field on top of the stack.
    fn put_field(&mut self, field: FieldIdx, field_ty: Ty<'tcx>) {
        let place = Place {
            local: Self::SELF_PTR,
            projection: self
                .tcx
                .mk_place_elems(&[PlaceElem::Deref, PlaceElem::Field(field, field_ty)]),
        };
        self.put_temp_rvalue(Rvalue::AddressOf(Mutability::Mut, place))
    }

    /// Given that `Self` is an enum, puts `to_drop: *mut FieldTy` for the
    /// given variant field on top of the stack.
    fn put_variant_field(
        &mut self,
        variant_sym: Symbol,
        variant: VariantIdx,
        field: FieldIdx,
        field_ty: Ty<'tcx>,
    ) {
        let place = Place {
            local: Self::SELF_PTR,
            projection: self.tcx.mk_place_elems(&[
                PlaceElem::Deref,
                PlaceElem::Downcast(Some(variant_sym), variant),
                PlaceElem::Field(field, field_ty),
            ]),
        };
        self.put_temp_rvalue(Rvalue::AddressOf(Mutability::Mut, place))
    }

    /// Puts the discriminant of the matched enum variant on top of the
    /// stack, as a constant of the enum's discriminant type.
    fn put_discr(&mut self, discr: Discr<'tcx>) {
        let (size, _) = discr.ty.int_size_and_signed(self.tcx);
        self.put_operand(Operand::const_from_scalar(
            self.tcx,
            discr.ty,
            interpret::Scalar::from_uint(discr.val, size),
            self.span,
        ));
    }

    /// Puts `x: RvalueType` on top of the stack.
    fn put_temp_rvalue(&mut self, rvalue: Rvalue<'tcx>) {
        let last_bb = &mut self.bbs[self.last_bb];
        debug_assert!(last_bb.terminator.is_none());
        let source_info = self.source_info;

        let local_ty = rvalue.ty(&self.locals, self.tcx);
        // We need to create a new local to be able to "consume" it with
        // a combinator.
        let local = self.locals.push(LocalDecl::with_source_info(local_ty, source_info));
        last_bb.statements.extend_from_slice(&[
            Statement { source_info, kind: StatementKind::StorageLive(local) },
            Statement {
                source_info,
                kind: StatementKind::Assign(Box::new((local.into(), rvalue))),
            },
        ]);

        self.put_operand(Operand::Move(local.into()));
    }

    /// Puts the given operand on top of the stack.
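    ///
    /// Also maintains the parallel stack of cleanup blocks: a `Move` of a
    /// local that needs dropping gets a `Drop` cleanup block, while copies
    /// and constants get a pass-through `Goto` block, so that unwinding
    /// mid-construction still drops everything already on the stack.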
+ fn put_operand(&mut self, operand: Operand<'tcx>) {
        if let Some(top_cleanup_bb) = &mut self.top_cleanup_bb {
            let source_info = self.source_info;
            match &operand {
                Operand::Copy(_) | Operand::Constant(_) => {
                    *top_cleanup_bb = self.bbs.push(BasicBlockData {
                        statements: Vec::new(),
                        terminator: Some(Terminator {
                            source_info,
                            kind: TerminatorKind::Goto { target: *top_cleanup_bb },
                        }),
                        is_cleanup: true,
                    });
                }
                Operand::Move(place) => {
                    let local = place.as_local().unwrap();
                    *top_cleanup_bb = self.bbs.push(BasicBlockData {
                        statements: Vec::new(),
                        terminator: Some(Terminator {
                            source_info,
                            kind: if self.locals[local].ty.needs_drop(self.tcx, self.param_env) {
                                TerminatorKind::Drop {
                                    place: local.into(),
                                    target: *top_cleanup_bb,
                                    unwind: UnwindAction::Terminate(
                                        UnwindTerminateReason::InCleanup,
                                    ),
                                    replace: false,
                                }
                            } else {
                                TerminatorKind::Goto { target: *top_cleanup_bb }
                            },
                        }),
                        is_cleanup: true,
                    });
                }
            };
        }
        self.stack.push(operand);
    }

    /// Puts `noop: async_drop::Noop` on top of the stack.
    fn put_noop(&mut self) -> Ty<'tcx> {
        self.apply_combinator(0, LangItem::AsyncDropNoop, &[])
    }

    fn combine_async_surface(&mut self) -> Ty<'tcx> {
        self.apply_combinator(1, LangItem::SurfaceAsyncDropInPlace, &[self.self_ty.unwrap().into()])
    }

    fn combine_sync_surface(&mut self) -> Ty<'tcx> {
        self.apply_combinator(
            1,
            LangItem::AsyncDropSurfaceDropInPlace,
            &[self.self_ty.unwrap().into()],
        )
    }

    fn combine_fuse(&mut self, inner_future_ty: Ty<'tcx>) -> Ty<'tcx> {
        self.apply_combinator(1, LangItem::AsyncDropFuse, &[inner_future_ty.into()])
    }

    fn combine_slice(&mut self, elem_ty: Ty<'tcx>) -> Ty<'tcx> {
        self.apply_combinator(1, LangItem::AsyncDropSlice, &[elem_ty.into()])
    }

    fn combine_defer(&mut self, to_drop_ty: Ty<'tcx>) -> Ty<'tcx> {
        self.apply_combinator(1, LangItem::AsyncDropDefer, &[to_drop_ty.into()])
    }

    fn combine_chain(&mut self, first: Ty<'tcx>, second: Ty<'tcx>) -> Ty<'tcx> {
        self.apply_combinator(2, LangItem::AsyncDropChain, &[first.into(), second.into()])
    }

    fn combine_either(&mut self, other: Ty<'tcx>, matched: Ty<'tcx>) -> Ty<'tcx> {
        self.apply_combinator(
            4,
            LangItem::AsyncDropEither,
            &[other.into(), matched.into(), self.self_ty.unwrap().into()],
        )
    }

    fn return_(mut self) -> Body<'tcx> {
        let last_bb = &mut self.bbs[self.last_bb];
        debug_assert!(last_bb.terminator.is_none());
        let source_info = self.source_info;

        let (1, Some(output)) = (self.stack.len(), self.stack.pop()) else {
            span_bug!(
                self.span,
                "async destructor ctor shim builder finished with invalid number of stack items: expected 1, found {}",
                self.stack.len(),
            )
        };
        #[cfg(debug_assertions)]
        if let Some(ty) = self.self_ty {
            debug_assert_eq!(
                output.ty(&self.locals, self.tcx),
                ty.async_destructor_ty(self.tcx, self.param_env),
                "output async destructor types did not match for type: {ty:?}",
            );
        }

        let dead_storage = match &output {
            Operand::Move(place) => Some(Statement {
                source_info,
                kind: StatementKind::StorageDead(place.as_local().unwrap()),
            }),
            _ => None,
        };

        last_bb.statements.extend(
            iter::once(Statement {
                source_info,
                kind: StatementKind::Assign(Box::new((RETURN_PLACE.into(), Rvalue::Use(output)))),
            })
            .chain(dead_storage),
        );

        last_bb.terminator = Some(Terminator { source_info, kind: TerminatorKind::Return });

        let source = MirSource::from_instance(ty::InstanceDef::AsyncDropGlueCtorShim(
            self.def_id,
            self.self_ty,
        ));
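        // All blocks and locals are in place; assemble them into the final
        // MIR body for this `AsyncDropGlueCtorShim` instance.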
+ new_body(source, self.bbs, self.locals, Self::INPUT_COUNT, self.span)
    }

    fn apply_combinator(
        &mut self,
        arity: usize,
        function: LangItem,
        args: &[ty::GenericArg<'tcx>],
    ) -> Ty<'tcx> {
        let function = self.tcx.require_lang_item(function, Some(self.span));
        let operands_split = self
            .stack
            .len()
            .checked_sub(arity)
            .expect("async destructor ctor shim combinator tried to consume too many items");
        let operands = &self.stack[operands_split..];

        let func_ty = Ty::new_fn_def(self.tcx, function, args.iter().copied());
        let func_sig = func_ty.fn_sig(self.tcx).no_bound_vars().unwrap();
        #[cfg(debug_assertions)]
        operands.iter().zip(func_sig.inputs()).for_each(|(operand, expected_ty)| {
            let operand_ty = operand.ty(&self.locals, self.tcx);
            if operand_ty == *expected_ty {
                return;
            }

            // If the expected type is the `Discriminant` projection, compare
            // against `Ty::discriminant_ty` instead.
            if let ty::Alias(ty::AliasKind::Projection, ty::AliasTy { args, def_id, .. }) =
                expected_ty.kind()
                && Some(*def_id) == self.tcx.lang_items().discriminant_type()
                && args.first().unwrap().as_type().unwrap().discriminant_ty(self.tcx) == operand_ty
            {
                return;
            }

            span_bug!(
                self.span,
                "Operand type and combinator argument type are not equal.
    operand_ty: {:?}
    argument_ty: {:?}
",
                operand_ty,
                expected_ty
            );
        });

        let target = self.bbs.push(BasicBlockData {
            statements: operands
                .iter()
                .rev()
                .filter_map(|o| {
                    if let Operand::Move(Place { local, projection }) = o {
                        assert!(projection.is_empty());
                        Some(Statement {
                            source_info: self.source_info,
                            kind: StatementKind::StorageDead(*local),
                        })
                    } else {
                        None
                    }
                })
                .collect(),
            terminator: None,
            is_cleanup: false,
        });

        let dest_ty = func_sig.output();
        let dest =
            self.locals.push(LocalDecl::with_source_info(dest_ty, self.source_info).immutable());

        let unwind = if let Some(top_cleanup_bb) = &mut self.top_cleanup_bb {
            for _ in 0..arity {
                *top_cleanup_bb =
                    self.bbs[*top_cleanup_bb].terminator().successors().exactly_one().ok().unwrap();
            }
            UnwindAction::Cleanup(*top_cleanup_bb)
        } else {
            UnwindAction::Unreachable
        };

        let last_bb = &mut self.bbs[self.last_bb];
        debug_assert!(last_bb.terminator.is_none());
        last_bb.statements.push(Statement {
            source_info: self.source_info,
            kind: StatementKind::StorageLive(dest),
        });
        last_bb.terminator = Some(Terminator {
            source_info: self.source_info,
            kind: TerminatorKind::Call {
                func: Operand::Constant(Box::new(ConstOperand {
                    span: self.span,
                    user_ty: None,
                    const_: Const::Val(ConstValue::ZeroSized, func_ty),
                })),
                destination: dest.into(),
                target: Some(target),
                unwind,
                call_source: CallSource::Misc,
                fn_span: self.span,
                args: self.stack.drain(operands_split..).map(|o| respan(self.span, o)).collect(),
            },
        });

        self.put_operand(Operand::Move(dest.into()));
        self.last_bb = target;

        dest_ty
    }
}
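To make the combinator stack concrete, here is a minimal standalone sketch (not part of the diff) of what `build_chain` composes for a two-field struct with no surface drop. `Defer`, `Chain`, `Fuse` and the helper functions are hypothetical stand-ins for the `AsyncDropDefer`, `AsyncDropChain` and `AsyncDropFuse` lang items; the real shim emits MIR calls to those combinators rather than ordinary function calls.

// Toy model of the combinator composition performed by the builder above.
// The types and helpers here are illustrative stand-ins, not compiler code.
use std::ptr::addr_of_mut;

struct Defer<T>(*mut T);
struct Chain<F, S>(F, S);
struct Fuse<F>(F);

// Stand-ins for the `AsyncDropDefer`/`AsyncDropChain`/`AsyncDropFuse`
// combinators that `apply_combinator` calls through lang items.
fn defer<T>(to_drop: *mut T) -> Defer<T> { Defer(to_drop) }
fn chain<F, S>(first: F, second: S) -> Chain<F, S> { Chain(first, second) }
fn fuse<F>(inner: F) -> Fuse<F> { Fuse(inner) }

struct Foo { a: String, b: Vec<u8> }

// `build_chain` walks the fields in order, pushing `defer(field_ptr)` for
// each one and folding them left-to-right with `chain`, then wraps the
// result in `fuse` before returning it from the shim:
fn foo_async_dtor(foo: *mut Foo) -> Fuse<Chain<Defer<String>, Defer<Vec<u8>>>> {
    unsafe { fuse(chain(defer(addr_of_mut!((*foo).a)), defer(addr_of_mut!((*foo).b)))) }
}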
