Diffstat (limited to 'compiler/rustc_mir_transform/src')
22 files changed, 768 insertions, 396 deletions
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs index d889bc90c9d..84c8a91b082 100644 --- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs +++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs @@ -51,11 +51,20 @@ impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls { // This will filter to functions with `extern "C-unwind"` ABIs, for // example. for block in body.basic_blocks.as_mut() { + let Some(terminator) = &mut block.terminator else { continue }; + let span = terminator.source_info.span; + + // If we see an `UnwindResume` terminator inside a function that cannot unwind, we need + // to replace it with `UnwindTerminate`. + if let TerminatorKind::UnwindResume = &terminator.kind + && !body_can_unwind + { + terminator.kind = TerminatorKind::UnwindTerminate(UnwindTerminateReason::Abi); + } + if block.is_cleanup { continue; } - let Some(terminator) = &block.terminator else { continue }; - let span = terminator.source_info.span; let call_can_unwind = match &terminator.kind { TerminatorKind::Call { func, .. } => { @@ -87,14 +96,18 @@ impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls { if !call_can_unwind { // If this function call can't unwind, then there's no need for it // to have a landing pad. This means that we can remove any cleanup - // registered for it. + // registered for it (and turn it into `UnwindAction::Unreachable`). let cleanup = block.terminator_mut().unwind_mut().unwrap(); *cleanup = UnwindAction::Unreachable; - } else if !body_can_unwind { + } else if !body_can_unwind + && matches!(terminator.unwind(), Some(UnwindAction::Continue)) + { // Otherwise if this function can unwind, then if the outer function // can also unwind there's nothing to do. If the outer function - // can't unwind, however, we need to change the landing pad for this - // function call to one that aborts. + // can't unwind, however, we need to ensure that any `UnwindAction::Continue` + // is replaced with terminate. For those with `UnwindAction::Cleanup`, + // cleanup will still happen, and terminate will happen afterwards, handled by + // the `UnwindResume` -> `UnwindTerminate` terminator replacement. let cleanup = block.terminator_mut().unwind_mut().unwrap(); *cleanup = UnwindAction::Terminate(UnwindTerminateReason::Abi); } diff --git a/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs b/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs new file mode 100644 index 00000000000..8ba14a1158e --- /dev/null +++ b/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs @@ -0,0 +1,77 @@ +use rustc_middle::mir::visit::Visitor; +use rustc_middle::mir::{Body, Location, Operand, Terminator, TerminatorKind}; +use rustc_middle::ty::{AssocItem, AssocKind, TyCtxt}; +use rustc_session::lint::builtin::PTR_TO_INTEGER_TRANSMUTE_IN_CONSTS; +use rustc_span::sym; + +use crate::errors; + +/// Check for transmutes that exhibit undefined behavior. +/// For example, transmuting pointers to integers in a const context. +pub(super) struct CheckUndefinedTransmutes; + +impl<'tcx> crate::MirLint<'tcx> for CheckUndefinedTransmutes { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + let mut checker = UndefinedTransmutesChecker { body, tcx }; + checker.visit_body(body); + } +} + +struct UndefinedTransmutesChecker<'a, 'tcx> { + body: &'a Body<'tcx>, + tcx: TyCtxt<'tcx>, +} + +impl<'a, 'tcx> UndefinedTransmutesChecker<'a, 'tcx> { + // This function checks two things: + // 1. 
`function` takes a raw pointer as input and returns an integer as output. + // 2. `function` is called from a const function or an associated constant. + // + // Why do we consider const functions and associated constants only? + // + // Generally, undefined behavior in const items is handled by the evaluator. + // But const functions and associated constants are evaluated only when referenced. + // This can result in undefined behavior in a library going unnoticed until + // the function or constant is actually used. + // + // Therefore, we only consider const functions and associated constants here and leave + // other const items to be handled by the evaluator. + fn is_ptr_to_int_in_const(&self, function: &Operand<'tcx>) -> bool { + let def_id = self.body.source.def_id(); + + if self.tcx.is_const_fn(def_id) + || matches!( + self.tcx.opt_associated_item(def_id), + Some(AssocItem { kind: AssocKind::Const, .. }) + ) + { + let fn_sig = function.ty(self.body, self.tcx).fn_sig(self.tcx).skip_binder(); + if let [input] = fn_sig.inputs() { + return input.is_unsafe_ptr() && fn_sig.output().is_integral(); + } + } + false + } +} + +impl<'tcx> Visitor<'tcx> for UndefinedTransmutesChecker<'_, 'tcx> { + // Check each block's terminator for calls to pointer-to-integer transmutes + // in const functions or associated constants and emit a lint. + fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { + if let TerminatorKind::Call { func, .. } = &terminator.kind + && let Some((func_def_id, _)) = func.const_fn_def() + && self.tcx.is_intrinsic(func_def_id, sym::transmute) + && self.is_ptr_to_int_in_const(func) + && let Some(call_id) = self.body.source.def_id().as_local() + { + let hir_id = self.tcx.local_def_id_to_hir_id(call_id); + let span = self.body.source_info(location).span; + self.tcx.emit_node_span_lint( + PTR_TO_INTEGER_TRANSMUTE_IN_CONSTS, + hir_id, + span, + errors::UndefinedTransmute, + ); + } + } +} diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index 8f032728f6b..cd291058977 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -1497,7 +1497,7 @@ fn check_field_tys_sized<'tcx>( ) { // No need to check if unsized_locals/unsized_fn_params is disabled, // since we will error during typeck. - if !tcx.features().unsized_locals && !tcx.features().unsized_fn_params { + if !tcx.features().unsized_locals() && !tcx.features().unsized_fn_params() { return; } @@ -1957,7 +1957,8 @@ fn check_must_not_suspend_ty<'tcx>( let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix); check_must_not_suspend_ty(tcx, ty, hir_id, param_env, SuspendCheckData { descr_pre, - plural_len: len.try_eval_target_usize(tcx, param_env).unwrap_or(0) as usize + 1, + // FIXME(must_not_suspend): This is wrong. We should handle printing unevaluated consts. + plural_len: len.try_to_target_usize(tcx).unwrap_or(0) as usize + 1, ..data }) } diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs index 65442877d2d..2c622b1927e 100644 --- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs +++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs @@ -223,14 +223,15 @@ pub(crate) fn coroutine_by_move_body_def_id<'tcx>( // Inherited from the by-ref coroutine. 
body_def.codegen_fn_attrs(tcx.codegen_fn_attrs(coroutine_def_id).clone()); - body_def.constness(tcx.constness(coroutine_def_id).clone()); - body_def.coroutine_kind(tcx.coroutine_kind(coroutine_def_id).clone()); + body_def.coverage_attr_on(tcx.coverage_attr_on(coroutine_def_id)); + body_def.constness(tcx.constness(coroutine_def_id)); + body_def.coroutine_kind(tcx.coroutine_kind(coroutine_def_id)); body_def.def_ident_span(tcx.def_ident_span(coroutine_def_id)); body_def.def_span(tcx.def_span(coroutine_def_id)); - body_def.explicit_predicates_of(tcx.explicit_predicates_of(coroutine_def_id).clone()); + body_def.explicit_predicates_of(tcx.explicit_predicates_of(coroutine_def_id)); body_def.generics_of(tcx.generics_of(coroutine_def_id).clone()); - body_def.param_env(tcx.param_env(coroutine_def_id).clone()); - body_def.predicates_of(tcx.predicates_of(coroutine_def_id).clone()); + body_def.param_env(tcx.param_env(coroutine_def_id)); + body_def.predicates_of(tcx.predicates_of(coroutine_def_id)); // The type of the coroutine is the `by_move_coroutine_ty`. body_def.type_of(ty::EarlyBinder::bind(by_move_coroutine_ty)); diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index ef4031c5c03..9a533ea024d 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -4,7 +4,7 @@ use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph::DirectedGraph; use rustc_index::IndexVec; -use rustc_middle::bug; +use rustc_index::bit_set::BitSet; use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId, Op}; use tracing::{debug, debug_span, instrument}; @@ -13,13 +13,13 @@ use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, TraverseCoverage /// The coverage counter or counter expression associated with a particular /// BCB node or BCB edge. #[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub(super) enum BcbCounter { +enum BcbCounter { Counter { id: CounterId }, Expression { id: ExpressionId }, } impl BcbCounter { - pub(super) fn as_term(&self) -> CovTerm { + fn as_term(&self) -> CovTerm { match *self { BcbCounter::Counter { id, .. } => CovTerm::Counter(id), BcbCounter::Expression { id, .. } => CovTerm::Expression(id), @@ -57,13 +57,13 @@ pub(super) struct CoverageCounters { counter_increment_sites: IndexVec<CounterId, CounterIncrementSite>, /// Coverage counters/expressions that are associated with individual BCBs. - bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>, + node_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>, /// Coverage counters/expressions that are associated with the control-flow /// edge between two BCBs. /// /// We currently don't iterate over this map, but if we do in the future, /// switch it back to `FxIndexMap` to avoid query stability hazards. - bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, + edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, /// Table of expression data, associating each expression ID with its /// corresponding operator (+ or -) and its LHS/RHS operands. @@ -77,22 +77,23 @@ impl CoverageCounters { /// Ensures that each BCB node needing a counter has one, by creating physical /// counters or counter expressions for nodes and edges as required. 
pub(super) fn make_bcb_counters( - basic_coverage_blocks: &CoverageGraph, - bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool, + graph: &CoverageGraph, + bcb_needs_counter: &BitSet<BasicCoverageBlock>, ) -> Self { - let num_bcbs = basic_coverage_blocks.num_nodes(); + let mut builder = CountersBuilder::new(graph, bcb_needs_counter); + builder.make_bcb_counters(); - let mut this = Self { + builder.counters + } + + fn with_num_bcbs(num_bcbs: usize) -> Self { + Self { counter_increment_sites: IndexVec::new(), - bcb_counters: IndexVec::from_elem_n(None, num_bcbs), - bcb_edge_counters: FxHashMap::default(), + node_counters: IndexVec::from_elem_n(None, num_bcbs), + edge_counters: FxHashMap::default(), expressions: IndexVec::new(), expressions_memo: FxHashMap::default(), - }; - - MakeBcbCounters::new(&mut this, basic_coverage_blocks).make_bcb_counters(bcb_needs_counter); - - this + } } /// Shared helper used by [`Self::make_phys_node_counter`] and @@ -102,24 +103,18 @@ impl CoverageCounters { BcbCounter::Counter { id } } - /// Creates a new physical counter attached a BCB node. - /// The node must not already have a counter. + /// Creates a new physical counter for a BCB node. fn make_phys_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { - let counter = self.make_counter_inner(CounterIncrementSite::Node { bcb }); - debug!(?bcb, ?counter, "node gets a physical counter"); - self.set_bcb_counter(bcb, counter) + self.make_counter_inner(CounterIncrementSite::Node { bcb }) } - /// Creates a new physical counter attached to a BCB edge. - /// The edge must not already have a counter. + /// Creates a new physical counter for a BCB edge. fn make_phys_edge_counter( &mut self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, ) -> BcbCounter { - let counter = self.make_counter_inner(CounterIncrementSite::Edge { from_bcb, to_bcb }); - debug!(?from_bcb, ?to_bcb, ?counter, "edge gets a physical counter"); - self.set_bcb_edge_counter(from_bcb, to_bcb, counter) + self.make_counter_inner(CounterIncrementSite::Edge { from_bcb, to_bcb }) } fn make_expression(&mut self, lhs: BcbCounter, op: Op, rhs: BcbCounter) -> BcbCounter { @@ -191,35 +186,31 @@ impl CoverageCounters { self.counter_increment_sites.len() } - fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> BcbCounter { - if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) { - bug!( - "attempt to set a BasicCoverageBlock coverage counter more than once; \ - {bcb:?} already had counter {replaced:?}", - ); - } else { - counter_kind - } + fn set_node_counter(&mut self, bcb: BasicCoverageBlock, counter: BcbCounter) -> BcbCounter { + let existing = self.node_counters[bcb].replace(counter); + assert!( + existing.is_none(), + "node {bcb:?} already has a counter: {existing:?} => {counter:?}" + ); + counter } - fn set_bcb_edge_counter( + fn set_edge_counter( &mut self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, - counter_kind: BcbCounter, + counter: BcbCounter, ) -> BcbCounter { - if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) { - bug!( - "attempt to set an edge counter more than once; from_bcb: \ - {from_bcb:?} already had counter {replaced:?}", - ); - } else { - counter_kind - } + let existing = self.edge_counters.insert((from_bcb, to_bcb), counter); + assert!( + existing.is_none(), + "edge ({from_bcb:?} -> {to_bcb:?}) already has a counter: {existing:?} => {counter:?}" + ); + counter } - pub(super) fn bcb_counter(&self, bcb: 
BasicCoverageBlock) -> Option<BcbCounter> { - self.bcb_counters[bcb] + pub(super) fn term_for_bcb(&self, bcb: BasicCoverageBlock) -> Option<CovTerm> { + self.node_counters[bcb].map(|counter| counter.as_term()) } /// Returns an iterator over all the nodes/edges in the coverage graph that @@ -236,7 +227,7 @@ impl CoverageCounters { pub(super) fn bcb_nodes_with_coverage_expressions( &self, ) -> impl Iterator<Item = (BasicCoverageBlock, ExpressionId)> + Captures<'_> { - self.bcb_counters.iter_enumerated().filter_map(|(bcb, &counter_kind)| match counter_kind { + self.node_counters.iter_enumerated().filter_map(|(bcb, &counter)| match counter { // Yield the BCB along with its associated expression ID. Some(BcbCounter::Expression { id }) => Some((bcb, id)), // This BCB is associated with a counter or nothing, so skip it. @@ -263,21 +254,25 @@ impl CoverageCounters { } } -/// Helper struct that allows counter creation to inspect the BCB graph. -struct MakeBcbCounters<'a> { - coverage_counters: &'a mut CoverageCounters, - basic_coverage_blocks: &'a CoverageGraph, +/// Helper struct that allows counter creation to inspect the BCB graph, and +/// the set of nodes that need counters. +struct CountersBuilder<'a> { + counters: CoverageCounters, + graph: &'a CoverageGraph, + bcb_needs_counter: &'a BitSet<BasicCoverageBlock>, } -impl<'a> MakeBcbCounters<'a> { - fn new( - coverage_counters: &'a mut CoverageCounters, - basic_coverage_blocks: &'a CoverageGraph, - ) -> Self { - Self { coverage_counters, basic_coverage_blocks } +impl<'a> CountersBuilder<'a> { + fn new(graph: &'a CoverageGraph, bcb_needs_counter: &'a BitSet<BasicCoverageBlock>) -> Self { + assert_eq!(graph.num_nodes(), bcb_needs_counter.domain_size()); + Self { + counters: CoverageCounters::with_num_bcbs(graph.num_nodes()), + graph, + bcb_needs_counter, + } } - fn make_bcb_counters(&mut self, bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool) { + fn make_bcb_counters(&mut self) { debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock"); // Traverse the coverage graph, ensuring that every node that needs a @@ -287,10 +282,10 @@ impl<'a> MakeBcbCounters<'a> { // nodes within the loop are visited before visiting any nodes outside // the loop. It also keeps track of which loop(s) the traversal is // currently inside. - let mut traversal = TraverseCoverageGraphWithLoops::new(self.basic_coverage_blocks); + let mut traversal = TraverseCoverageGraphWithLoops::new(self.graph); while let Some(bcb) = traversal.next() { let _span = debug_span!("traversal", ?bcb).entered(); - if bcb_needs_counter(bcb) { + if self.bcb_needs_counter.contains(bcb) { self.make_node_counter_and_out_edge_counters(&traversal, bcb); } } @@ -314,25 +309,35 @@ impl<'a> MakeBcbCounters<'a> { // We might also use that counter to compute one of the out-edge counters. let node_counter = self.get_or_make_node_counter(from_bcb); - let successors = self.basic_coverage_blocks.successors[from_bcb].as_slice(); + let successors = self.graph.successors[from_bcb].as_slice(); // If this node's out-edges won't sum to the node's counter, // then there's no reason to create edge counters here. - if !self.basic_coverage_blocks[from_bcb].is_out_summable { + if !self.graph[from_bcb].is_out_summable { return; } - // Determine the set of out-edges that don't yet have edge counters. 
- let candidate_successors = self.basic_coverage_blocks.successors[from_bcb] + // When choosing which out-edge should be given a counter expression, ignore edges that + // already have counters, or could use the existing counter of their target node. + let out_edge_has_counter = |to_bcb| { + if self.counters.edge_counters.contains_key(&(from_bcb, to_bcb)) { + return true; + } + self.graph.sole_predecessor(to_bcb) == Some(from_bcb) + && self.counters.node_counters[to_bcb].is_some() + }; + + // Determine the set of out-edges that could benefit from being given an expression. + let candidate_successors = self.graph.successors[from_bcb] .iter() .copied() - .filter(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb)) + .filter(|&to_bcb| !out_edge_has_counter(to_bcb)) .collect::<Vec<_>>(); debug!(?candidate_successors); // If there are out-edges without counters, choose one to be given an expression // (computed from this node and the other out-edges) instead of a physical counter. - let Some(expression_to_bcb) = + let Some(target_bcb) = self.choose_out_edge_for_expression(traversal, &candidate_successors) else { return; @@ -345,43 +350,44 @@ impl<'a> MakeBcbCounters<'a> { .iter() .copied() // Skip the chosen edge, since we'll calculate its count from this sum. - .filter(|&to_bcb| to_bcb != expression_to_bcb) + .filter(|&edge_target_bcb| edge_target_bcb != target_bcb) .map(|to_bcb| self.get_or_make_edge_counter(from_bcb, to_bcb)) .collect::<Vec<_>>(); - let Some(sum_of_all_other_out_edges) = - self.coverage_counters.make_sum(&other_out_edge_counters) + let Some(sum_of_all_other_out_edges) = self.counters.make_sum(&other_out_edge_counters) else { return; }; // Now create an expression for the chosen edge, by taking the counter // for its source node and subtracting the sum of its sibling out-edges. - let expression = self.coverage_counters.make_expression( - node_counter, - Op::Subtract, - sum_of_all_other_out_edges, - ); + let expression = + self.counters.make_expression(node_counter, Op::Subtract, sum_of_all_other_out_edges); - debug!("{expression_to_bcb:?} gets an expression: {expression:?}"); - if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(expression_to_bcb) { - // This edge normally wouldn't get its own counter, so attach the expression - // to its target node instead, so that `edge_has_no_counter` can see it. - assert_eq!(sole_pred, from_bcb); - self.coverage_counters.set_bcb_counter(expression_to_bcb, expression); - } else { - self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression); - } + debug!("{target_bcb:?} gets an expression: {expression:?}"); + self.counters.set_edge_counter(from_bcb, target_bcb, expression); } #[instrument(level = "debug", skip(self))] fn get_or_make_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { // If the BCB already has a counter, return it. - if let Some(counter_kind) = self.coverage_counters.bcb_counters[bcb] { - debug!("{bcb:?} already has a counter: {counter_kind:?}"); - return counter_kind; + if let Some(counter) = self.counters.node_counters[bcb] { + debug!("{bcb:?} already has a counter: {counter:?}"); + return counter; } - let predecessors = self.basic_coverage_blocks.predecessors[bcb].as_slice(); + let counter = self.make_node_counter_inner(bcb); + self.counters.set_node_counter(bcb, counter) + } + + fn make_node_counter_inner(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { + // If the node's sole in-edge already has a counter, use that. 
+ if let Some(sole_pred) = self.graph.sole_predecessor(bcb) + && let Some(&edge_counter) = self.counters.edge_counters.get(&(sole_pred, bcb)) + { + return edge_counter; + } + + let predecessors = self.graph.predecessors[bcb].as_slice(); // Handle cases where we can't compute a node's count from its in-edges: // - START_BCB has no in-edges, so taking the sum would panic (or be wrong). @@ -390,7 +396,9 @@ impl<'a> MakeBcbCounters<'a> { // leading to infinite recursion. if predecessors.len() <= 1 || predecessors.contains(&bcb) { debug!(?bcb, ?predecessors, "node has <=1 predecessors or is its own predecessor"); - return self.coverage_counters.make_phys_node_counter(bcb); + let counter = self.counters.make_phys_node_counter(bcb); + debug!(?bcb, ?counter, "node gets a physical counter"); + return counter; } // A BCB with multiple incoming edges can compute its count by ensuring that counters @@ -400,13 +408,11 @@ impl<'a> MakeBcbCounters<'a> { .copied() .map(|from_bcb| self.get_or_make_edge_counter(from_bcb, bcb)) .collect::<Vec<_>>(); - let sum_of_in_edges: BcbCounter = self - .coverage_counters - .make_sum(&in_edge_counters) - .expect("there must be at least one in-edge"); + let sum_of_in_edges: BcbCounter = + self.counters.make_sum(&in_edge_counters).expect("there must be at least one in-edge"); debug!("{bcb:?} gets a new counter (sum of predecessor counters): {sum_of_in_edges:?}"); - self.coverage_counters.set_bcb_counter(bcb, sum_of_in_edges) + sum_of_in_edges } #[instrument(level = "debug", skip(self))] @@ -415,9 +421,24 @@ impl<'a> MakeBcbCounters<'a> { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, ) -> BcbCounter { + // If the edge already has a counter, return it. + if let Some(&counter) = self.counters.edge_counters.get(&(from_bcb, to_bcb)) { + debug!("Edge {from_bcb:?}->{to_bcb:?} already has a counter: {counter:?}"); + return counter; + } + + let counter = self.make_edge_counter_inner(from_bcb, to_bcb); + self.counters.set_edge_counter(from_bcb, to_bcb, counter) + } + + fn make_edge_counter_inner( + &mut self, + from_bcb: BasicCoverageBlock, + to_bcb: BasicCoverageBlock, + ) -> BcbCounter { // If the target node has exactly one in-edge (i.e. this one), then just // use the node's counter, since it will have the same value. - if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) { + if let Some(sole_pred) = self.graph.sole_predecessor(to_bcb) { assert_eq!(sole_pred, from_bcb); // This call must take care not to invoke `get_or_make_edge` for // this edge, since that would result in infinite recursion! @@ -426,21 +447,15 @@ impl<'a> MakeBcbCounters<'a> { // If the source node has exactly one out-edge (i.e. this one) and would have // the same execution count as that edge, then just use the node's counter. - if let Some(simple_succ) = self.basic_coverage_blocks.simple_successor(from_bcb) { + if let Some(simple_succ) = self.graph.simple_successor(from_bcb) { assert_eq!(simple_succ, to_bcb); return self.get_or_make_node_counter(from_bcb); } - // If the edge already has a counter, return it. - if let Some(&counter_kind) = - self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb)) - { - debug!("Edge {from_bcb:?}->{to_bcb:?} already has a counter: {counter_kind:?}"); - return counter_kind; - } - // Make a new counter to count this edge. 
- self.coverage_counters.make_phys_edge_counter(from_bcb, to_bcb) + let counter = self.counters.make_phys_edge_counter(from_bcb, to_bcb); + debug!(?from_bcb, ?to_bcb, ?counter, "edge gets a physical counter"); + counter } /// Given a set of candidate out-edges (represented by their successor node), @@ -485,9 +500,9 @@ impl<'a> MakeBcbCounters<'a> { for &target_bcb in candidate_successors { // An edge is a reloop edge if its target dominates any BCB that has // an edge back to the loop header. (Otherwise it's an exit edge.) - let is_reloop_edge = reloop_bcbs.iter().any(|&reloop_bcb| { - self.basic_coverage_blocks.dominates(target_bcb, reloop_bcb) - }); + let is_reloop_edge = reloop_bcbs + .iter() + .any(|&reloop_bcb| self.graph.dominates(target_bcb, reloop_bcb)); if is_reloop_edge { // We found a good out-edge to be given an expression. return Some(target_bcb); @@ -500,21 +515,4 @@ impl<'a> MakeBcbCounters<'a> { None } - - #[inline] - fn edge_has_no_counter( - &self, - from_bcb: BasicCoverageBlock, - to_bcb: BasicCoverageBlock, - ) -> bool { - let edge_counter = - if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) { - assert_eq!(sole_pred, from_bcb); - self.coverage_counters.bcb_counters[to_bcb] - } else { - self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb)).copied() - }; - - edge_counter.is_none() - } } diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs index d839f46cfbd..930fa129ef2 100644 --- a/compiler/rustc_mir_transform/src/coverage/graph.rs +++ b/compiler/rustc_mir_transform/src/coverage/graph.rs @@ -21,6 +21,10 @@ pub(crate) struct CoverageGraph { pub(crate) successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>, pub(crate) predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>, dominators: Option<Dominators<BasicCoverageBlock>>, + /// Allows nodes to be compared in some total order such that _if_ + /// `a` dominates `b`, then `a < b`. If neither node dominates the other, + /// their relative order is consistent but arbitrary. + dominator_order_rank: IndexVec<BasicCoverageBlock, u32>, } impl CoverageGraph { @@ -54,10 +58,27 @@ impl CoverageGraph { } } - let mut this = Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }; + let num_nodes = bcbs.len(); + let mut this = Self { + bcbs, + bb_to_bcb, + successors, + predecessors, + dominators: None, + dominator_order_rank: IndexVec::from_elem_n(0, num_nodes), + }; + assert_eq!(num_nodes, this.num_nodes()); this.dominators = Some(dominators::dominators(&this)); + // The dominator rank of each node is just its index in a reverse-postorder traversal. + let reverse_post_order = graph::iterate::reverse_post_order(&this, this.start_node()); + // The coverage graph is created by traversal, so all nodes are reachable. + assert_eq!(reverse_post_order.len(), this.num_nodes()); + for (rank, bcb) in (0u32..).zip(reverse_post_order) { + this.dominator_order_rank[bcb] = rank; + } + // The coverage graph's entry-point node (bcb0) always starts with bb0, // which never has predecessors. Any other blocks merged into bcb0 can't // have multiple (coverage-relevant) predecessors, so bcb0 always has @@ -162,7 +183,7 @@ impl CoverageGraph { a: BasicCoverageBlock, b: BasicCoverageBlock, ) -> Ordering { - self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b) + self.dominator_order_rank[a].cmp(&self.dominator_order_rank[b]) } /// Returns the source of this node's sole in-edge, if it has exactly one. 
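The graph.rs hunk above trades a dominator-tree query for a precomputed rank: ranks are assigned by reverse post-order, and in a reverse post-order traversal of a flowgraph a dominator is always visited before any node it dominates, so `cmp_in_dominator_order` reduces to comparing two cached integers. A minimal standalone sketch of that property, using a plain adjacency-list graph rather than the compiler's `CoverageGraph` (all names below are hypothetical, not compiler internals):

// Assign each node its index in a reverse post-order traversal.
fn reverse_post_order(successors: &[Vec<usize>], start: usize) -> Vec<usize> {
    let n = successors.len();
    let mut visited = vec![false; n];
    let mut post_order = Vec::with_capacity(n);
    // Iterative DFS; each stack frame tracks the next successor to try.
    let mut stack = vec![(start, 0usize)];
    visited[start] = true;
    while let Some(frame) = stack.last_mut() {
        let (node, next) = *frame;
        if next < successors[node].len() {
            frame.1 += 1;
            let succ = successors[node][next];
            if !visited[succ] {
                visited[succ] = true;
                stack.push((succ, 0));
            }
        } else {
            post_order.push(node);
            stack.pop();
        }
    }
    post_order.reverse();
    post_order
}

fn main() {
    // Diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3. Node 0 dominates every node.
    let successors = vec![vec![1, 2], vec![3], vec![3], vec![]];
    let mut rank = vec![0u32; successors.len()];
    for (r, node) in (0u32..).zip(reverse_post_order(&successors, 0)) {
        rank[node] = r;
    }
    // The dominator (node 0) gets the smallest rank; the relative order of
    // the incomparable siblings 1 and 2 is consistent but arbitrary.
    assert!(rank.iter().all(|&r| rank[0] <= r));
    println!("dominator-order ranks: {rank:?}");
}

Computing the ranks once per graph turns each later comparison into two array loads instead of a walk over the dominator tree.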
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs index ec5ba354805..2db7c6cf1d6 100644 --- a/compiler/rustc_mir_transform/src/coverage/mappings.rs +++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs @@ -1,10 +1,11 @@ use std::collections::BTreeSet; +use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::graph::DirectedGraph; use rustc_index::IndexVec; use rustc_index::bit_set::BitSet; use rustc_middle::mir::coverage::{ - BlockMarkerId, BranchSpan, ConditionInfo, CoverageInfoHi, CoverageKind, + BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind, }; use rustc_middle::mir::{self, BasicBlock, StatementKind}; use rustc_middle::ty::TyCtxt; @@ -14,6 +15,7 @@ use crate::coverage::ExtractedHirInfo; use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB}; use crate::coverage::spans::extract_refined_covspans; use crate::coverage::unexpand::unexpand_into_body_span; +use crate::errors::MCDCExceedsTestVectorLimit; /// Associates an ordinary executable code span with its corresponding BCB. #[derive(Debug)] @@ -38,10 +40,11 @@ pub(super) struct MCDCBranch { pub(super) span: Span, pub(super) true_bcb: BasicCoverageBlock, pub(super) false_bcb: BasicCoverageBlock, - /// If `None`, this actually represents a normal branch mapping inserted - /// for code that was too complex for MC/DC. - pub(super) condition_info: Option<ConditionInfo>, - pub(super) decision_depth: u16, + pub(super) condition_info: ConditionInfo, + // Offset added to the test vector index if this branch evaluates to true. + pub(super) true_index: usize, + // Offset added to the test vector index if this branch evaluates to false. + pub(super) false_index: usize, } /// Associates an MC/DC decision with its join BCBs. @@ -49,11 +52,15 @@ pub(super) struct MCDCDecision { pub(super) span: Span, pub(super) end_bcbs: BTreeSet<BasicCoverageBlock>, - pub(super) bitmap_idx: u32, - pub(super) num_conditions: u16, + pub(super) bitmap_idx: usize, + pub(super) num_test_vectors: usize, pub(super) decision_depth: u16, } +// LLVM uses `i32` to index the bitmap. Thus `i32::MAX` is the hard limit on the number of test vectors in a function. 
+const MCDC_MAX_BITMAP_SIZE: usize = i32::MAX as usize; + #[derive(Default)] pub(super) struct ExtractedMappings { /// Store our own copy of [`CoverageGraph::num_nodes`], so that we don't @@ -62,9 +69,9 @@ pub(super) struct ExtractedMappings { pub(super) num_bcbs: usize, pub(super) code_mappings: Vec<CodeMapping>, pub(super) branch_pairs: Vec<BranchPair>, - pub(super) mcdc_bitmap_bytes: u32, - pub(super) mcdc_branches: Vec<MCDCBranch>, - pub(super) mcdc_decisions: Vec<MCDCDecision>, + pub(super) mcdc_bitmap_bits: usize, + pub(super) mcdc_degraded_branches: Vec<MCDCBranch>, + pub(super) mcdc_mappings: Vec<(MCDCDecision, Vec<MCDCBranch>)>, } /// Extracts coverage-relevant spans from MIR, and associates them with @@ -77,9 +84,9 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>( ) -> ExtractedMappings { let mut code_mappings = vec![]; let mut branch_pairs = vec![]; - let mut mcdc_bitmap_bytes = 0; - let mut mcdc_branches = vec![]; - let mut mcdc_decisions = vec![]; + let mut mcdc_bitmap_bits = 0; + let mut mcdc_degraded_branches = vec![]; + let mut mcdc_mappings = vec![]; if hir_info.is_async_fn || tcx.sess.coverage_no_mir_spans() { // An async function desugars into a function that returns a future, @@ -102,20 +109,21 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>( extract_mcdc_mappings( mir_body, + tcx, hir_info.body_span, basic_coverage_blocks, - &mut mcdc_bitmap_bytes, - &mut mcdc_branches, - &mut mcdc_decisions, + &mut mcdc_bitmap_bits, + &mut mcdc_degraded_branches, + &mut mcdc_mappings, ); ExtractedMappings { num_bcbs: basic_coverage_blocks.num_nodes(), code_mappings, branch_pairs, - mcdc_bitmap_bytes, - mcdc_branches, - mcdc_decisions, + mcdc_bitmap_bits, + mcdc_degraded_branches, + mcdc_mappings, } } @@ -126,9 +134,9 @@ impl ExtractedMappings { num_bcbs, code_mappings, branch_pairs, - mcdc_bitmap_bytes: _, - mcdc_branches, - mcdc_decisions, + mcdc_bitmap_bits: _, + mcdc_degraded_branches, + mcdc_mappings, } = self; // Identify which BCBs have one or more mappings. @@ -144,7 +152,10 @@ impl ExtractedMappings { insert(true_bcb); insert(false_bcb); } - for &MCDCBranch { true_bcb, false_bcb, .. } in mcdc_branches { + for &MCDCBranch { true_bcb, false_bcb, .. } in mcdc_degraded_branches + .iter() + .chain(mcdc_mappings.iter().map(|(_, branches)| branches.into_iter()).flatten()) + { insert(true_bcb); insert(false_bcb); } @@ -152,8 +163,8 @@ impl ExtractedMappings { // MC/DC decisions refer to BCBs, but don't require those BCBs to have counters. 
if bcbs_with_counter_mappings.is_empty() { debug_assert!( - mcdc_decisions.is_empty(), - "A function with no counter mappings shouldn't have any decisions: {mcdc_decisions:?}", + mcdc_mappings.is_empty(), + "A function with no counter mappings shouldn't have any decisions: {mcdc_mappings:?}", ); } @@ -230,11 +241,12 @@ pub(super) fn extract_branch_pairs( pub(super) fn extract_mcdc_mappings( mir_body: &mir::Body<'_>, + tcx: TyCtxt<'_>, body_span: Span, basic_coverage_blocks: &CoverageGraph, - mcdc_bitmap_bytes: &mut u32, - mcdc_branches: &mut impl Extend<MCDCBranch>, - mcdc_decisions: &mut impl Extend<MCDCDecision>, + mcdc_bitmap_bits: &mut usize, + mcdc_degraded_branches: &mut impl Extend<MCDCBranch>, + mcdc_mappings: &mut impl Extend<(MCDCDecision, Vec<MCDCBranch>)>, ) { let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return }; @@ -257,43 +269,146 @@ pub(super) fn extract_mcdc_mappings( Some((span, true_bcb, false_bcb)) }; - mcdc_branches.extend(coverage_info_hi.mcdc_branch_spans.iter().filter_map( - |&mir::coverage::MCDCBranchSpan { - span: raw_span, - condition_info, - true_marker, - false_marker, - decision_depth, - }| { - let (span, true_bcb, false_bcb) = - check_branch_bcb(raw_span, true_marker, false_marker)?; - Some(MCDCBranch { span, true_bcb, false_bcb, condition_info, decision_depth }) - }, - )); - - mcdc_decisions.extend(coverage_info_hi.mcdc_decision_spans.iter().filter_map( - |decision: &mir::coverage::MCDCDecisionSpan| { - let span = unexpand_into_body_span(decision.span, body_span)?; - - let end_bcbs = decision - .end_markers - .iter() - .map(|&marker| bcb_from_marker(marker)) - .collect::<Option<_>>()?; - - // Each decision containing N conditions needs 2^N bits of space in - // the bitmap, rounded up to a whole number of bytes. - // The decision's "bitmap index" points to its first byte in the bitmap. 
- let bitmap_idx = *mcdc_bitmap_bytes; - *mcdc_bitmap_bytes += (1_u32 << decision.num_conditions).div_ceil(8); - - Some(MCDCDecision { + let to_mcdc_branch = |&mir::coverage::MCDCBranchSpan { + span: raw_span, + condition_info, + true_marker, + false_marker, + }| { + let (span, true_bcb, false_bcb) = check_branch_bcb(raw_span, true_marker, false_marker)?; + Some(MCDCBranch { + span, + true_bcb, + false_bcb, + condition_info, + true_index: usize::MAX, + false_index: usize::MAX, + }) + }; + + let mut get_bitmap_idx = |num_test_vectors: usize| -> Option<usize> { + let bitmap_idx = *mcdc_bitmap_bits; + let next_bitmap_bits = bitmap_idx.saturating_add(num_test_vectors); + (next_bitmap_bits <= MCDC_MAX_BITMAP_SIZE).then(|| { + *mcdc_bitmap_bits = next_bitmap_bits; + bitmap_idx + }) + }; + mcdc_degraded_branches + .extend(coverage_info_hi.mcdc_degraded_branch_spans.iter().filter_map(to_mcdc_branch)); + + mcdc_mappings.extend(coverage_info_hi.mcdc_spans.iter().filter_map(|(decision, branches)| { + if branches.len() == 0 { + return None; + } + let decision_span = unexpand_into_body_span(decision.span, body_span)?; + + let end_bcbs = decision + .end_markers + .iter() + .map(|&marker| bcb_from_marker(marker)) + .collect::<Option<_>>()?; + let mut branch_mappings: Vec<_> = branches.into_iter().filter_map(to_mcdc_branch).collect(); + if branch_mappings.len() != branches.len() { + mcdc_degraded_branches.extend(branch_mappings); + return None; + } + let num_test_vectors = calc_test_vectors_index(&mut branch_mappings); + let Some(bitmap_idx) = get_bitmap_idx(num_test_vectors) else { + tcx.dcx().emit_warn(MCDCExceedsTestVectorLimit { + span: decision_span, + max_num_test_vectors: MCDC_MAX_BITMAP_SIZE, + }); + mcdc_degraded_branches.extend(branch_mappings); + return None; + }; + // LLVM requires that the span of the decision contain all spans of its conditions. + // Usually the decision span meets the requirement, but in cases like macros it may not. + let span = branch_mappings + .iter() + .map(|branch| branch.span) + .reduce(|lhs, rhs| lhs.to(rhs)) + .map( + |joint_span| { + if decision_span.contains(joint_span) { decision_span } else { joint_span } + }, + ) + .expect("branch mappings are ensured to be non-empty as checked above"); + Some(( + MCDCDecision { span, end_bcbs, bitmap_idx, - num_conditions: decision.num_conditions as u16, + num_test_vectors, decision_depth: decision.decision_depth, - }) - }, - )); + }, + branch_mappings, + )) + })); +} + +// LLVM checks the executed test vector by accumulating the indices of tested branches. +// We calculate the number of all possible test vectors for the decision and assign indices +// to branches here. +// See [the rfc](https://discourse.llvm.org/t/rfc-coverage-new-algorithm-and-file-format-for-mc-dc/76798/) +// for more details about the algorithm. +// This function largely mirrors [`TVIdxBuilder::TvIdxBuilder`](https://github.com/llvm/llvm-project/blob/d594d9f7f4dc6eb748b3261917db689fdc348b96/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp#L226) +fn calc_test_vectors_index(conditions: &mut Vec<MCDCBranch>) -> usize { + let mut indegree_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len()); + // `num_paths` is the `width` described in the LLVM RFC, which indicates how many paths reach the condition node. 
+ let mut num_paths_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len()); + let mut next_conditions = conditions + .iter_mut() + .map(|branch| { + let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info; + [true_next_id, false_next_id] + .into_iter() + .flatten() + .for_each(|next_id| indegree_stats[next_id] += 1); + (condition_id, branch) + }) + .collect::<FxIndexMap<_, _>>(); + + let mut queue = std::collections::VecDeque::from_iter( + next_conditions.swap_remove(&ConditionId::START).into_iter(), + ); + num_paths_stats[ConditionId::START] = 1; + let mut decision_end_nodes = Vec::new(); + while let Some(branch) = queue.pop_front() { + let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info; + let (false_index, true_index) = (&mut branch.false_index, &mut branch.true_index); + let this_paths_count = num_paths_stats[condition_id]; + // Note: check the false next first, to ensure conditions are touched in the same order as in llvm-cov. + for (next, index) in [(false_next_id, false_index), (true_next_id, true_index)] { + if let Some(next_id) = next { + let next_paths_count = &mut num_paths_stats[next_id]; + *index = *next_paths_count; + *next_paths_count = next_paths_count.saturating_add(this_paths_count); + let next_indegree = &mut indegree_stats[next_id]; + *next_indegree -= 1; + if *next_indegree == 0 { + queue.push_back(next_conditions.swap_remove(&next_id).expect( + "conditions with non-zero indegree before must be in next_conditions", + )); + } + } else { + decision_end_nodes.push((this_paths_count, condition_id, index)); + } + } + } + assert!(next_conditions.is_empty(), "the decision tree has untouched nodes"); + let mut cur_idx = 0; + // LLVM expects the end nodes to be sorted in descending order by `num_paths` so that it can + // optimize the bitmap size for decisions in tree form such as `a && b && c && d && ...`. + decision_end_nodes.sort_by_key(|(num_paths, _, _)| usize::MAX - *num_paths); + for (num_paths, condition_id, index) in decision_end_nodes { + assert_eq!( + num_paths, num_paths_stats[condition_id], + "end nodes should not be updated since they were visited" + ); + assert_eq!(*index, usize::MAX, "end nodes should not be assigned index before"); + *index = cur_idx; + cur_idx += num_paths; + } + cur_idx } diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 104f340c8d6..2e4c503f3ce 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -94,9 +94,8 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir: return; } - let bcb_has_counter_mappings = |bcb| bcbs_with_counter_mappings.contains(bcb); let coverage_counters = - CoverageCounters::make_bcb_counters(&basic_coverage_blocks, bcb_has_counter_mappings); + CoverageCounters::make_bcb_counters(&basic_coverage_blocks, &bcbs_with_counter_mappings); let mappings = create_mappings(tcx, &hir_info, &extracted_mappings, &coverage_counters); if mappings.is_empty() { @@ -115,16 +114,16 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir: inject_mcdc_statements(mir_body, &basic_coverage_blocks, &extracted_mappings); let mcdc_num_condition_bitmaps = extracted_mappings - .mcdc_decisions + .mcdc_mappings .iter() - .map(|&mappings::MCDCDecision { decision_depth, .. }| decision_depth) + .map(|&(mappings::MCDCDecision { decision_depth, .. 
}, _)| decision_depth) .max() .map_or(0, |max| usize::from(max) + 1); mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo { function_source_hash: hir_info.function_source_hash, num_counters: coverage_counters.num_counters(), - mcdc_bitmap_bytes: extracted_mappings.mcdc_bitmap_bytes, + mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits, expressions: coverage_counters.into_expressions(), mappings, mcdc_num_condition_bitmaps, @@ -153,12 +152,8 @@ fn create_mappings<'tcx>( &source_file.name.for_scope(tcx.sess, RemapPathScopeComponents::MACRO).to_string_lossy(), ); - let term_for_bcb = |bcb| { - coverage_counters - .bcb_counter(bcb) - .expect("all BCBs with spans were given counters") - .as_term() - }; + let term_for_bcb = + |bcb| coverage_counters.term_for_bcb(bcb).expect("all BCBs with spans were given counters"); let region_for_span = |span: Span| make_source_region(source_map, file_name, span, body_span); // Fully destructure the mappings struct to make sure we don't miss any kinds. @@ -166,9 +161,9 @@ fn create_mappings<'tcx>( num_bcbs: _, code_mappings, branch_pairs, - mcdc_bitmap_bytes: _, - mcdc_branches, - mcdc_decisions, + mcdc_bitmap_bits: _, + mcdc_degraded_branches, + mcdc_mappings, } = extracted_mappings; let mut mappings = Vec::new(); @@ -191,26 +186,79 @@ fn create_mappings<'tcx>( }, )); - mappings.extend(mcdc_branches.iter().filter_map( - |&mappings::MCDCBranch { span, true_bcb, false_bcb, condition_info, decision_depth: _ }| { + let term_for_bcb = + |bcb| coverage_counters.term_for_bcb(bcb).expect("all BCBs with spans were given counters"); + + // MC/DC branches whose decision was ignored are emitted as plain branch mappings. + mappings.extend(mcdc_degraded_branches.iter().filter_map( + |&mappings::MCDCBranch { + span, + true_bcb, + false_bcb, + condition_info: _, + true_index: _, + false_index: _, + }| { let source_region = region_for_span(span)?; let true_term = term_for_bcb(true_bcb); let false_term = term_for_bcb(false_bcb); - let kind = match condition_info { - Some(mcdc_params) => MappingKind::MCDCBranch { true_term, false_term, mcdc_params }, - None => MappingKind::Branch { true_term, false_term }, - }; - Some(Mapping { kind, source_region }) + Some(Mapping { kind: MappingKind::Branch { true_term, false_term }, source_region }) }, )); - mappings.extend(mcdc_decisions.iter().filter_map( - |&mappings::MCDCDecision { span, bitmap_idx, num_conditions, .. }| { - let source_region = region_for_span(span)?; - let kind = MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, num_conditions }); - Some(Mapping { kind, source_region }) - }, - )); + for (decision, branches) in mcdc_mappings { + let num_conditions = branches.len() as u16; + let conditions = branches + .into_iter() + .filter_map( + |&mappings::MCDCBranch { + span, + true_bcb, + false_bcb, + condition_info, + true_index: _, + false_index: _, + }| { + let source_region = region_for_span(span)?; + let true_term = term_for_bcb(true_bcb); + let false_term = term_for_bcb(false_bcb); + Some(Mapping { + kind: MappingKind::MCDCBranch { + true_term, + false_term, + mcdc_params: condition_info, + }, + source_region, + }) + }, + ) + .collect::<Vec<_>>(); + + if conditions.len() == num_conditions as usize + && let Some(source_region) = region_for_span(decision.span) + { + // LLVM requires the end index for counter mapping regions. 
+ let kind = MappingKind::MCDCDecision(DecisionInfo { + bitmap_idx: (decision.bitmap_idx + decision.num_test_vectors) as u32, + num_conditions, + }); + mappings.extend( + std::iter::once(Mapping { kind, source_region }).chain(conditions.into_iter()), + ); + } else { + mappings.extend(conditions.into_iter().map(|mapping| { + let MappingKind::MCDCBranch { true_term, false_term, mcdc_params: _ } = + mapping.kind + else { + unreachable!("all mappings here are MCDCBranch as shown above"); + }; + Mapping { + kind: MappingKind::Branch { true_term, false_term }, + source_region: mapping.source_region, + } + })) + } + } mappings } @@ -279,44 +327,41 @@ fn inject_mcdc_statements<'tcx>( basic_coverage_blocks: &CoverageGraph, extracted_mappings: &ExtractedMappings, ) { - // Inject test vector update first because `inject_statement` always insert new statement at - // head. - for &mappings::MCDCDecision { - span: _, - ref end_bcbs, - bitmap_idx, - num_conditions: _, - decision_depth, - } in &extracted_mappings.mcdc_decisions - { - for end in end_bcbs { - let end_bb = basic_coverage_blocks[*end].leader_bb(); + for (decision, conditions) in &extracted_mappings.mcdc_mappings { + // Inject the test vector update first because `inject_statement` always inserts new statements at the head. + for &end in &decision.end_bcbs { + let end_bb = basic_coverage_blocks[end].leader_bb(); inject_statement( mir_body, - CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth }, + CoverageKind::TestVectorBitmapUpdate { + bitmap_idx: decision.bitmap_idx as u32, + decision_depth: decision.decision_depth, + }, end_bb, ); } - } - for &mappings::MCDCBranch { span: _, true_bcb, false_bcb, condition_info, decision_depth } in - &extracted_mappings.mcdc_branches - { - let Some(condition_info) = condition_info else { continue }; - let id = condition_info.condition_id; - - let true_bb = basic_coverage_blocks[true_bcb].leader_bb(); - inject_statement( - mir_body, - CoverageKind::CondBitmapUpdate { id, value: true, decision_depth }, - true_bb, - ); - let false_bb = basic_coverage_blocks[false_bcb].leader_bb(); - inject_statement( - mir_body, - CoverageKind::CondBitmapUpdate { id, value: false, decision_depth }, - false_bb, - ); + for &mappings::MCDCBranch { + span: _, + true_bcb, + false_bcb, + condition_info: _, + true_index, + false_index, + } in conditions + { + for (index, bcb) in [(false_index, false_bcb), (true_index, true_bcb)] { + let bb = basic_coverage_blocks[bcb].leader_bb(); + inject_statement( + mir_body, + CoverageKind::CondBitmapUpdate { + index: index as u32, + decision_depth: decision.decision_depth, + }, + bb, + ); + } + } } } @@ -479,6 +524,11 @@ fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHir // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back // to HIR for it. + // HACK: For synthetic MIR bodies (async closures), use the def id of the HIR body. 
+ if tcx.is_synthetic_mir(def_id) { + return extract_hir_info(tcx, tcx.local_parent(def_id)); + } + let hir_node = tcx.hir_node_by_def_id(def_id); let fn_body_id = hir_node.body_id().expect("HIR node is a function with body"); let hir_body = tcx.hir().body(fn_body_id); diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index e65a5fdd5e7..df151f8cca3 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -63,7 +63,8 @@ fn coverage_attr_on(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { Some([item]) if item.has_name(sym::on) => return true, Some(_) | None => { // Other possibilities should have been rejected by `rustc_parse::validate_attr`. - tcx.dcx().span_bug(attr.span, "unexpected value of coverage attribute"); + // Use `span_delayed_bug` to avoid an ICE in failing builds (#127880). + tcx.dcx().span_delayed_bug(attr.span, "unexpected value of coverage attribute"); } } } diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index da7d20cf19a..085c738f1f9 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -87,7 +87,7 @@ fn remove_unwanted_expansion_spans(covspans: &mut Vec<SpanFromMir>) { covspans.retain(|covspan| { match covspan.expn_kind { // Retain only the first await-related or macro-expanded covspan with this span. - Some(ExpnKind::Desugaring(kind)) if kind == DesugaringKind::Await => { + Some(ExpnKind::Desugaring(DesugaringKind::Await)) => { deduplicated_spans.insert(covspan.span) } Some(ExpnKind::Macro(MacroKind::Bang, _)) => deduplicated_spans.insert(covspan.span), diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 88dc8e74a8c..002216f50f2 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -3,7 +3,9 @@ //! Currently, this pass only propagates scalar values. 
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str}; -use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable}; +use rustc_const_eval::interpret::{ + ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok, +}; use rustc_data_structures::fx::FxHashMap; use rustc_hir::def::DefKind; use rustc_middle::bug; @@ -236,6 +238,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(op) => self .ecx .int_to_int_or_float(&op, layout) + .discard_err() .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -249,6 +252,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(op) => self .ecx .float_to_float_or_int(&op, layout) + .discard_err() .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -271,6 +275,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { FlatSet::Elem(value) => self .ecx .unary_op(*op, &value) + .discard_err() .map_or(FlatSet::Top, |val| self.wrap_immediate(*val)), FlatSet::Bottom => FlatSet::Bottom, FlatSet::Top => FlatSet::Top, @@ -364,8 +369,8 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } } Operand::Constant(box constant) => { - if let Ok(constant) = - self.ecx.eval_mir_constant(&constant.const_, constant.span, None) + if let Some(constant) = + self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err() { self.assign_constant(state, place, constant, &[]); } @@ -387,7 +392,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { for &(mut proj_elem) in projection { if let PlaceElem::Index(index) = proj_elem { if let FlatSet::Elem(index) = state.get(index.into(), &self.map) - && let Ok(offset) = index.to_target_usize(&self.tcx) + && let Some(offset) = index.to_target_usize(&self.tcx).discard_err() && let Some(min_length) = offset.checked_add(1) { proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false }; @@ -395,7 +400,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { return; } } - operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) { + operand = if let Some(operand) = self.ecx.project(&operand, proj_elem).discard_err() { operand } else { return; @@ -406,24 +411,24 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { place, operand, &mut |elem, op| match elem { - TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(), - TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(), + TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(), + TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(), TrackElem::Discriminant => { - let variant = self.ecx.read_discriminant(op).ok()?; + let variant = self.ecx.read_discriminant(op).discard_err()?; let discr_value = - self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?; + self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?; Some(discr_value.into()) } TrackElem::DerefLen => { - let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into(); - let len_usize = op.len(&self.ecx).ok()?; + let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into(); + let len_usize = op.len(&self.ecx).discard_err()?; let layout = self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap(); Some(ImmTy::from_uint(len_usize, layout).into()) } }, &mut |place, op| { - if let Ok(imm) = self.ecx.read_immediate_raw(op) + if let Some(imm) = 
self.ecx.read_immediate_raw(op).discard_err() && let Some(imm) = imm.right() { let elem = self.wrap_immediate(*imm); @@ -447,11 +452,11 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { (FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom), // Both sides are known, do the actual computation. (FlatSet::Elem(left), FlatSet::Elem(right)) => { - match self.ecx.binary_op(op, &left, &right) { + match self.ecx.binary_op(op, &left, &right).discard_err() { // Ideally this would return an Immediate, since it's sometimes // a pair and sometimes not. But as a hack we always return a pair // and just make the 2nd component `Bottom` when it does not exist. - Ok(val) => { + Some(val) => { if matches!(val.layout.abi, Abi::ScalarPair(..)) { let (val, overflow) = val.to_scalar_pair(); (FlatSet::Elem(val), FlatSet::Elem(overflow)) @@ -470,7 +475,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } let arg_scalar = const_arg.to_scalar(); - let Ok(arg_value) = arg_scalar.to_bits(layout.size) else { + let Some(arg_value) = arg_scalar.to_bits(layout.size).discard_err() else { return (FlatSet::Top, FlatSet::Top); }; @@ -519,7 +524,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { } let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?; let discr_value = - self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).ok()?; + self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).discard_err()?; Some(discr_value.to_scalar()) } @@ -595,7 +600,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { .intern_with_temp_alloc(layout, |ecx, dest| { try_write_constant(ecx, dest, place, ty, state, map) }) - .ok()?; + .discard_err()?; return Some(Const::Val(ConstValue::Indirect { alloc_id, offset: Size::ZERO }, ty)); } @@ -632,7 +637,7 @@ fn try_write_constant<'tcx>( // Fast path for ZSTs. if layout.is_zst() { - return Ok(()); + return interp_ok(()); } // Fast path for scalars. @@ -717,7 +722,7 @@ fn try_write_constant<'tcx>( ty::Error(_) | ty::Infer(..) | ty::CoroutineWitness(..) => bug!(), } - Ok(()) + interp_ok(()) } impl<'mir, 'tcx> @@ -830,7 +835,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> { if let PlaceElem::Index(local) = elem { let offset = self.before_effect.get(&(location, local.into()))?; let offset = offset.try_to_scalar()?; - let offset = offset.to_target_usize(&self.tcx).ok()?; + let offset = offset.to_target_usize(&self.tcx).discard_err()?; let min_length = offset.checked_add(1)?; Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false }) } else { diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index c35aec42408..30e1ac05e03 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -58,8 +58,7 @@ impl<'tcx> crate::MirPass<'tcx> for ElaborateDrops { let param_env = tcx.param_env_reveal_all_normalized(def_id); // For types that do not need dropping, the behaviour is trivial. So we only need to track // init/uninit for types that do need dropping. 
- let move_data = - MoveData::gather_moves(body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env)); + let move_data = MoveData::gather_moves(body, tcx, |ty| ty.needs_drop(tcx, param_env)); let elaborate_patch = { let env = MoveDataParamEnv { move_data, param_env }; @@ -133,7 +132,7 @@ impl InitializationData<'_, '_> { } fn maybe_live_dead(&self, path: MovePathIndex) -> (bool, bool) { - (self.inits.contains(path), self.uninits.contains(path)) + (self.inits.get().contains(path), self.uninits.get().contains(path)) } } diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs index 84d44c2ab4c..8b309147c64 100644 --- a/compiler/rustc_mir_transform/src/errors.rs +++ b/compiler/rustc_mir_transform/src/errors.rs @@ -89,6 +89,14 @@ pub(crate) struct FnItemRef { pub ident: String, } +#[derive(Diagnostic)] +#[diag(mir_transform_exceeds_mcdc_test_vector_limit)] +pub(crate) struct MCDCExceedsTestVectorLimit { + #[primary_span] + pub(crate) span: Span, + pub(crate) max_num_test_vectors: usize, +} + pub(crate) struct MustNotSupend<'a, 'tcx> { pub tcx: TyCtxt<'tcx>, pub yield_sp: Span, @@ -121,3 +129,10 @@ pub(crate) struct MustNotSuspendReason { pub span: Span, pub reason: String, } + +#[derive(LintDiagnostic)] +#[diag(mir_transform_undefined_transmute)] +#[note] +#[note(mir_transform_note2)] +#[help] +pub(crate) struct UndefinedTransmute; diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index f735d08fca5..79c62372df0 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -103,7 +103,7 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::DUMMY_SP; use rustc_span::def_id::DefId; -use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx}; +use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx}; use smallvec::SmallVec; use tracing::{debug, instrument, trace}; @@ -288,7 +288,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { values: FxIndexSet::with_capacity_and_hasher(num_values, Default::default()), evaluated: IndexVec::with_capacity(num_values), next_opaque: Some(1), - feature_unsized_locals: tcx.features().unsized_locals, + feature_unsized_locals: tcx.features().unsized_locals(), ssa, dominators, reused_locals: BitSet::new_empty(local_decls.len()), @@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Repeat(..) => return None, Constant { ref value, disambiguator: _ } => { - self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()? + self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()? } Aggregate(kind, variant, ref fields) => { let fields = fields @@ -419,29 +419,32 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ImmTy::uninit(ty).into() } else if matches!(kind, AggregateTy::RawPtr { .. }) { // Pointers don't have fields, so don't `project_field` them. - let data = self.ecx.read_pointer(fields[0]).ok()?; + let data = self.ecx.read_pointer(fields[0]).discard_err()?; let meta = if fields[1].layout.is_zst() { MemPlaceMeta::None } else { - MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?) + MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?) }; let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); ImmTy::from_immediate(ptr_imm, ty).into() } else if matches!(ty.abi, Abi::Scalar(..) 
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index 84d44c2ab4c..8b309147c64 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -89,6 +89,14 @@ pub(crate) struct FnItemRef {
     pub ident: String,
 }
 
+#[derive(Diagnostic)]
+#[diag(mir_transform_exceeds_mcdc_test_vector_limit)]
+pub(crate) struct MCDCExceedsTestVectorLimit {
+    #[primary_span]
+    pub(crate) span: Span,
+    pub(crate) max_num_test_vectors: usize,
+}
+
 pub(crate) struct MustNotSupend<'a, 'tcx> {
     pub tcx: TyCtxt<'tcx>,
     pub yield_sp: Span,
@@ -121,3 +129,10 @@ pub(crate) struct MustNotSuspendReason {
     pub span: Span,
     pub reason: String,
 }
+
+#[derive(LintDiagnostic)]
+#[diag(mir_transform_undefined_transmute)]
+#[note]
+#[note(mir_transform_note2)]
+#[help]
+pub(crate) struct UndefinedTransmute;
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index f735d08fca5..79c62372df0 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -103,7 +103,7 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
 use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
+use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use smallvec::SmallVec;
 use tracing::{debug, instrument, trace};
@@ -288,7 +288,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             values: FxIndexSet::with_capacity_and_hasher(num_values, Default::default()),
             evaluated: IndexVec::with_capacity(num_values),
             next_opaque: Some(1),
-            feature_unsized_locals: tcx.features().unsized_locals,
+            feature_unsized_locals: tcx.features().unsized_locals(),
             ssa,
             dominators,
             reused_locals: BitSet::new_empty(local_decls.len()),
@@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             Repeat(..) => return None,
 
             Constant { ref value, disambiguator: _ } => {
-                self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()?
+                self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
             }
             Aggregate(kind, variant, ref fields) => {
                 let fields = fields
@@ -419,29 +419,32 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     ImmTy::uninit(ty).into()
                 } else if matches!(kind, AggregateTy::RawPtr { .. }) {
                     // Pointers don't have fields, so don't `project_field` them.
-                    let data = self.ecx.read_pointer(fields[0]).ok()?;
+                    let data = self.ecx.read_pointer(fields[0]).discard_err()?;
                     let meta = if fields[1].layout.is_zst() {
                         MemPlaceMeta::None
                     } else {
-                        MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
+                        MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?)
                     };
                     let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
                     ImmTy::from_immediate(ptr_imm, ty).into()
                 } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
-                    let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
+                    let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
                     let variant_dest = if let Some(variant) = variant {
-                        self.ecx.project_downcast(&dest, variant).ok()?
+                        self.ecx.project_downcast(&dest, variant).discard_err()?
                     } else {
                         dest.clone()
                     };
                     for (field_index, op) in fields.into_iter().enumerate() {
-                        let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?;
-                        self.ecx.copy_op(op, &field_dest).ok()?;
+                        let field_dest =
+                            self.ecx.project_field(&variant_dest, field_index).discard_err()?;
+                        self.ecx.copy_op(op, &field_dest).discard_err()?;
                     }
-                    self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
+                    self.ecx
+                        .write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest)
+                        .discard_err()?;
                     self.ecx
                         .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
-                        .ok()?;
+                        .discard_err()?;
                     dest.into()
                 } else {
                     return None;
@@ -467,7 +470,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 // This should have been replaced by a `ConstantIndex` earlier.
                 ProjectionElem::Index(_) => return None,
             };
-            self.ecx.project(value, elem).ok()?
+            self.ecx.project(value, elem).discard_err()?
         }
         Address { place, kind, provenance: _ } => {
             if !place.is_indirect_first_projection() {
                 return None;
             }
             let local = self.locals[place.local]?;
             let pointer = self.evaluated[local].as_ref()?;
-            let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
+            let mut mplace = self.ecx.deref_pointer(pointer).discard_err()?;
             for proj in place.projection.iter().skip(1) {
                 // We have no call stack to associate a local with a value, so we cannot
                 // interpret indexing.
                 if matches!(proj, ProjectionElem::Index(_)) {
                     return None;
                 }
-                mplace = self.ecx.project(&mplace, proj).ok()?;
+                mplace = self.ecx.project(&mplace, proj).discard_err()?;
             }
             let pointer = mplace.to_ref(&self.ecx);
             let ty = match kind {
@@ -500,15 +503,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         Discriminant(base) => {
             let base = self.evaluated[base].as_ref()?;
-            let variant = self.ecx.read_discriminant(base).ok()?;
+            let variant = self.ecx.read_discriminant(base).discard_err()?;
             let discr_value =
-                self.ecx.discriminant_for_variant(base.layout.ty, variant).ok()?;
+                self.ecx.discriminant_for_variant(base.layout.ty, variant).discard_err()?;
             discr_value.into()
         }
         Len(slice) => {
             let slice = self.evaluated[slice].as_ref()?;
             let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
-            let len = slice.len(&self.ecx).ok()?;
+            let len = slice.len(&self.ecx).discard_err()?;
             let imm = ImmTy::from_uint(len, usize_layout);
             imm.into()
         }
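The aggregate branch above only materializes values whose ABI is a single scalar or a scalar pair, i.e. values small enough to live in at most two registers; anything larger is left alone. A user-level illustration of the kinds of values that qualify (ordinary Rust, not the pass itself):

    fn main() {
        // A two-field struct of register-sized parts: typically a scalar pair.
        #[derive(Debug, PartialEq, Clone, Copy)]
        struct Pair {
            a: u64,
            b: u64,
        }

        let p = Pair { a: 1, b: 2 };
        assert_eq!(p, Pair { a: 1, b: 2 });

        // Overflowing arithmetic results are morally (value, overflow) pairs,
        // which is also how the const analysis above models `BinOp` results.
        let (val, overflow) = 250u8.overflowing_add(10);
        assert_eq!((val, overflow), (4, true));
    }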
@@ -535,67 +538,83 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         }
         UnaryOp(un_op, operand) => {
             let operand = self.evaluated[operand].as_ref()?;
-            let operand = self.ecx.read_immediate(operand).ok()?;
-            let val = self.ecx.unary_op(un_op, &operand).ok()?;
+            let operand = self.ecx.read_immediate(operand).discard_err()?;
+            let val = self.ecx.unary_op(un_op, &operand).discard_err()?;
             val.into()
         }
         BinaryOp(bin_op, lhs, rhs) => {
             let lhs = self.evaluated[lhs].as_ref()?;
-            let lhs = self.ecx.read_immediate(lhs).ok()?;
+            let lhs = self.ecx.read_immediate(lhs).discard_err()?;
             let rhs = self.evaluated[rhs].as_ref()?;
-            let rhs = self.ecx.read_immediate(rhs).ok()?;
-            let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
+            let rhs = self.ecx.read_immediate(rhs).discard_err()?;
+            let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?;
             val.into()
         }
         Cast { kind, value, from: _, to } => match kind {
             CastKind::IntToInt | CastKind::IntToFloat => {
                 let value = self.evaluated[value].as_ref()?;
-                let value = self.ecx.read_immediate(value).ok()?;
+                let value = self.ecx.read_immediate(value).discard_err()?;
                 let to = self.ecx.layout_of(to).ok()?;
-                let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
+                let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
                 res.into()
             }
             CastKind::FloatToFloat | CastKind::FloatToInt => {
                 let value = self.evaluated[value].as_ref()?;
-                let value = self.ecx.read_immediate(value).ok()?;
+                let value = self.ecx.read_immediate(value).discard_err()?;
                 let to = self.ecx.layout_of(to).ok()?;
-                let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
+                let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
                 res.into()
             }
             CastKind::Transmute => {
                 let value = self.evaluated[value].as_ref()?;
                 let to = self.ecx.layout_of(to).ok()?;
-                // `offset` for immediates only supports scalar/scalar-pair ABIs,
-                // so bail out if the target is not one.
+                // `offset` for immediates generally only supports projections that match the
+                // type of the immediate. However, as a HACK, we exploit that it can also do
+                // limited transmutes: it only works between types with the same layout, and
+                // cannot transmute pointers to integers.
                 if value.as_mplace_or_imm().is_right() {
-                    match (value.layout.abi, to.abi) {
-                        (Abi::Scalar(..), Abi::Scalar(..)) => {}
-                        (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
-                        _ => return None,
+                    let can_transmute = match (value.layout.abi, to.abi) {
+                        (Abi::Scalar(s1), Abi::Scalar(s2)) => {
+                            s1.size(&self.ecx) == s2.size(&self.ecx)
+                                && !matches!(s1.primitive(), Primitive::Pointer(..))
+                        }
+                        (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+                            a1.size(&self.ecx) == a2.size(&self.ecx) &&
+                            b1.size(&self.ecx) == b2.size(&self.ecx) &&
+                            // The alignment of the second component determines its offset, so that also needs to match.
+                            b1.align(&self.ecx) == b2.align(&self.ecx) &&
+                            // None of the inputs may be a pointer.
+                            !matches!(a1.primitive(), Primitive::Pointer(..))
+                                && !matches!(b1.primitive(), Primitive::Pointer(..))
+                        }
+                        _ => false,
+                    };
+                    if !can_transmute {
+                        return None;
                    }
                 }
-                value.offset(Size::ZERO, to, &self.ecx).ok()?
+                value.offset(Size::ZERO, to, &self.ecx).discard_err()?
             }
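The new `can_transmute` check widens the old scalar/scalar-pair gate into a layout test: component sizes (and, for pairs, the second component's alignment, which fixes its offset) must match, and no input may be a pointer, because pointer provenance cannot survive a reinterpretation as an integer. A user-level analogy of the two cases in ordinary Rust:

    fn main() {
        // Same size, both non-pointer scalars: safe to reinterpret in place.
        let bits: u32 = f32::to_bits(1.5);
        let back = f32::from_bits(bits);
        assert_eq!(back, 1.5);

        // A pointer carries provenance that an integer cannot represent,
        // which is why the pass refuses to fold pointer-to-integer transmutes.
        let x = 42u8;
        let p: *const u8 = &x;
        let addr = p as usize; // a legal cast, but the reverse direction loses provenance
        let _ = addr;
    }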
             CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => {
                 let src = self.evaluated[value].as_ref()?;
                 let to = self.ecx.layout_of(to).ok()?;
-                let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?;
-                self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?;
+                let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?;
+                self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?;
                 self.ecx
                     .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
-                    .ok()?;
+                    .discard_err()?;
                 dest.into()
             }
             CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
                 let src = self.evaluated[value].as_ref()?;
-                let src = self.ecx.read_immediate(src).ok()?;
+                let src = self.ecx.read_immediate(src).discard_err()?;
                 let to = self.ecx.layout_of(to).ok()?;
-                let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
+                let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?;
                 ret.into()
             }
             CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => {
                 let src = self.evaluated[value].as_ref()?;
-                let src = self.ecx.read_immediate(src).ok()?;
+                let src = self.ecx.read_immediate(src).discard_err()?;
                 let to = self.ecx.layout_of(to).ok()?;
                 ImmTy::from_immediate(*src, to).into()
             }
@@ -708,7 +727,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             && let Some(idx) = self.locals[idx_local]
         {
             if let Some(offset) = self.evaluated[idx].as_ref()
-                && let Ok(offset) = self.ecx.read_target_usize(offset)
+                && let Some(offset) = self.ecx.read_target_usize(offset).discard_err()
                 && let Some(min_length) = offset.checked_add(1)
             {
                 projection.to_mut()[i] =
@@ -868,7 +887,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             && let DefKind::Enum = self.tcx.def_kind(enum_did)
         {
             let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
-            let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?;
+            let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?;
             return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
         }
@@ -1134,7 +1153,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
                 return Some(fields[1]);
             }
-            // We have an unsizing cast, which assigns the length to fat pointer metadata.
+            // We have an unsizing cast, which assigns the length to wide pointer metadata.
             (
                 UnOp::PtrMetadata,
                 Value::Cast {
@@ -1223,8 +1242,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         let as_bits = |value| {
             let constant = self.evaluated[value].as_ref()?;
             if layout.abi.is_scalar() {
-                let scalar = self.ecx.read_scalar(constant).ok()?;
-                scalar.to_bits(constant.layout.size).ok()
+                let scalar = self.ecx.read_scalar(constant).discard_err()?;
+                scalar.to_bits(constant.layout.size).discard_err()
             } else {
                 // `constant` is a wide pointer. Do not evaluate to bits.
                 None
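The fat-pointer → wide-pointer comment updates above track the invariant these simplifications rely on: an unsizing coercion writes the length into the wide pointer's metadata, and reborrows copy that metadata along with the address. In plain Rust:

    fn main() {
        let array: [u8; 4] = [1, 2, 3, 4];
        // Unsizing coercion `&[u8; 4] -> &[u8]` stores 4 as pointer metadata.
        let slice: &[u8] = &array;
        assert_eq!(slice.len(), 4);

        // Reborrowing copies the metadata together with the address.
        let reborrow: &[u8] = &*slice;
        assert_eq!(reborrow.len(), 4);
    }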
@@ -1418,7 +1437,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         let mut inner = self.simplify_place_value(place, location)?;
 
-        // The length information is stored in the fat pointer.
+        // The length information is stored in the wide pointer.
         // Reborrowing copies length information from one pointer to the other.
         while let Value::Address { place: borrowed, .. } = self.get(inner)
             && let [PlaceElem::Deref] = borrowed.projection[..]
         {
             inner = borrowed;
         }
 
-        // We have an unsizing cast, which assigns the length to fat pointer metadata.
+        // We have an unsizing cast, which assigns the length to wide pointer metadata.
         if let Value::Cast { kind, from, to, .. } = self.get(inner)
             && let CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) = kind
             && let Some(from) = from.builtin_deref(true)
@@ -1484,7 +1503,7 @@ fn op_to_prop_const<'tcx>(
 
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
     if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
-        && let Ok(scalar) = ecx.read_scalar(op)
+        && let Some(scalar) = ecx.read_scalar(op).discard_err()
     {
         if !scalar.try_to_scalar_int().is_ok() {
             // Check that we do not leak a pointer.
@@ -1498,12 +1517,12 @@ fn op_to_prop_const<'tcx>(
     // If this constant is already represented as an `Allocation`,
     // try putting it into global memory to return it.
     if let Either::Left(mplace) = op.as_mplace_or_imm() {
-        let (size, _align) = ecx.size_and_align_of_mplace(&mplace).ok()??;
+        let (size, _align) = ecx.size_and_align_of_mplace(&mplace).discard_err()??;
 
         // Do not try interning a value that contains provenance.
         // Due to https://github.com/rust-lang/rust/issues/79738, doing so could lead to bugs.
         // FIXME: remove this hack once that issue is fixed.
-        let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).ok()??;
+        let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).discard_err()??;
         if alloc_ref.has_provenance() {
             return None;
         }
@@ -1511,7 +1530,7 @@ fn op_to_prop_const<'tcx>(
         let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
         let (prov, offset) = pointer.into_parts();
         let alloc_id = prov.alloc_id();
-        intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
+        intern_const_alloc_for_constprop(ecx, alloc_id).discard_err()?;
 
         // `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything
         // but `GlobalAlloc::Memory`, so do fall through to copying if needed.
@@ -1526,7 +1545,8 @@ fn op_to_prop_const<'tcx>(
     }
 
     // Everything failed: create a new allocation to hold the data.
-    let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?;
+    let alloc_id =
+        ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).discard_err()?;
     let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO };
 
     // Check that we do not leak a pointer.
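`op_to_prop_const` above is a decision ladder: prefer a scalar representation, fall back to referencing an allocation const-eval already interned, and only then copy into a fresh allocation. A hedged sketch of that ladder with simplified stand-in types (the real function also checks provenance and statics, as the hunks show):

    enum PropConst {
        Scalar(u128),
        Indirect { alloc_id: u32, offset: u64 },
    }

    fn choose(scalar: Option<u128>, existing_alloc: Option<u32>) -> PropConst {
        if let Some(bits) = scalar {
            // Cheapest representation: no allocation at all.
            PropConst::Scalar(bits)
        } else if let Some(alloc_id) = existing_alloc {
            // Reuse memory that was already interned.
            PropConst::Indirect { alloc_id, offset: 0 }
        } else {
            // "Everything failed": copy into a new allocation (id 0 is a dummy here).
            PropConst::Indirect { alloc_id: 0, offset: 0 }
        }
    }

    fn main() {
        assert!(matches!(choose(Some(7), None), PropConst::Scalar(7)));
        assert!(matches!(choose(None, Some(3)), PropConst::Indirect { alloc_id: 3, .. }));
    }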
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 9d85b5ba5a7..9b9b0b705bf 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -200,7 +200,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
         debug!(?discr, ?bb);
 
         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
+            return;
+        };
         let Some(discr) = self.map.find(discr.as_ref()) else { return };
         debug!(?discr);
@@ -388,24 +390,24 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
             lhs,
             constant,
             &mut |elem, op| match elem {
-                TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
-                TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
+                TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(),
+                TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(),
                 TrackElem::Discriminant => {
-                    let variant = self.ecx.read_discriminant(op).ok()?;
+                    let variant = self.ecx.read_discriminant(op).discard_err()?;
                     let discr_value =
-                        self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
+                        self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?;
                     Some(discr_value.into())
                 }
                 TrackElem::DerefLen => {
-                    let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
-                    let len_usize = op.len(&self.ecx).ok()?;
+                    let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into();
+                    let len_usize = op.len(&self.ecx).discard_err()?;
                     let layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                     Some(ImmTy::from_uint(len_usize, layout).into())
                 }
             },
             &mut |place, op| {
                 if let Some(conditions) = state.try_get_idx(place, &self.map)
-                    && let Ok(imm) = self.ecx.read_immediate_raw(op)
+                    && let Some(imm) = self.ecx.read_immediate_raw(op).discard_err()
                     && let Some(imm) = imm.right()
                     && let Immediate::Scalar(Scalar::Int(int)) = *imm
                 {
@@ -429,8 +431,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
         match rhs {
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
             Operand::Constant(constant) => {
-                let Ok(constant) =
-                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
+                let Some(constant) =
+                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err()
                 else {
                     return;
                 };
@@ -469,8 +471,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
             AggregateKind::Adt(.., Some(_)) => return,
             AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
                 if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
-                    && let Ok(discr_value) =
-                        self.ecx.discriminant_for_variant(agg_ty, *variant_index)
+                    && let Some(discr_value) = self
+                        .ecx
+                        .discriminant_for_variant(agg_ty, *variant_index)
+                        .discard_err()
                 {
                     self.process_immediate(bb, discr_target, discr_value, state);
                 }
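For orientation, the opportunity these helpers feed: once a tracked condition pins down a discriminant or constant, a later switch on it becomes predictable and the intermediate blocks can be threaded. A source-level picture of such an opportunity (illustrative only; the pass itself works on MIR, not surface syntax):

    fn main() {
        let x = Some(5); // discriminant known here: `Some`
        // A sufficiently smart pass can thread straight to the `Some` arm.
        match x {
            Some(v) => assert_eq!(v, 5),
            None => unreachable!(),
        }
    }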
@@ -490,8 +494,16 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
             }
             // Transfer the conditions on the copy rhs, after inverting polarity.
             Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
+                if !place.ty(self.body, self.tcx).ty.is_bool() {
+                    // Constructing the conditions by inverting the polarity
+                    // of equality is only correct for bools. That is to say,
+                    // `!a == b` is not `a != b` for integers greater than 1 bit.
+                    return;
+                }
                 let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return };
                 let Some(place) = self.map.find(place.as_ref()) else { return };
+                // FIXME: this could be generalized to non-bool types if we
+                // actually performed a logical not on the condition's value.
                 let conds = conditions.map(self.arena, Condition::inv);
                 state.insert_value_idx(place, conds, &self.map);
             }
@@ -516,9 +528,7 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
                 // Avoid handling them, though this could be extended in the future.
                 return;
             }
-            let Some(value) =
-                value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()
-            else {
+            let Some(value) = value.const_.try_eval_scalar_int(self.tcx, self.param_env) else {
                 return;
             };
             let conds = conditions.map(self.arena, |c| Condition {
@@ -557,7 +567,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
             // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
             // of a niche encoding. If we cannot ensure that we write to the discriminant, do
             // nothing.
-            let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
+            let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else {
+                return;
+            };
             let writes_discriminant = match enum_layout.variants {
                 Variants::Single { index } => {
                     assert_eq!(index, *variant_index);
@@ -570,7 +582,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
                 } => *variant_index != untagged_variant,
             };
             if writes_discriminant {
-                let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
+                let Some(discr) =
+                    self.ecx.discriminant_for_variant(enum_ty, *variant_index).discard_err()
                 else {
                     return;
                 };
@@ -647,7 +660,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
         let Some(discr) = discr.place() else { return };
 
         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
+            return;
+        };
         let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return };
 
         if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
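A concrete counterexample behind the new `is_bool` guard: swapping the polarity of an equality condition is only sound for booleans, because bitwise not on a wider integer does not behave like logical negation.

    fn main() {
        // For bools, `!r == a` and `r != a` always agree.
        for r in [false, true] {
            for a in [false, true] {
                assert_eq!(!r == a, r != a);
            }
        }

        // For u8 they diverge: with r = 1 and a = 0,
        // `!r == a` is `254 == 0` (false) while `r != a` is true.
        let (r, a): (u8, u8) = (1, 0);
        assert_ne!(!r == a, r != a);
    }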
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 783e7aabe85..08923748eb2 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -6,7 +6,7 @@ use std::fmt::Debug;
 
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
-    ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error,
+    ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
 };
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir::HirId;
@@ -101,7 +101,7 @@ impl<'tcx> Value<'tcx> {
             }
             (PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
                 let idx = prop.get_const(idx.into())?.immediate()?;
-                let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?;
+                let idx = prop.ecx.read_target_usize(idx).discard_err()?.try_into().ok()?;
                 if idx <= FieldIdx::MAX_AS_U32 {
                     fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit)
                 } else {
@@ -231,21 +231,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
     where
         F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
     {
-        match f(self) {
-            Ok(val) => Some(val),
-            Err(error) => {
-                trace!("InterpCx operation failed: {:?}", error);
+        f(self)
+            .map_err_info(|err| {
+                trace!("InterpCx operation failed: {:?}", err);
                 // Some errors shouldn't come up because creating them causes
                 // an allocation, which we should avoid. When that happens,
                 // dedicated error variants should be introduced instead.
                 assert!(
-                    !error.kind().formatted_string(),
+                    !err.kind().formatted_string(),
                     "known panics lint encountered formatting error: {}",
-                    format_interp_error(self.ecx.tcx.dcx(), error),
+                    format_interp_error(self.ecx.tcx.dcx(), err),
                 );
-                None
-            }
-        }
+                err
+            })
+            .discard_err()
     }
 
     /// Returns the value, if any, of evaluating `c`.
@@ -315,7 +314,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 .ecx
                 .binary_op(BinOp::SubWithOverflow, &ImmTy::from_int(0, arg.layout), &arg)?
                 .to_scalar_pair();
-            Ok((arg, overflow.to_bool()?))
+            interp_ok((arg, overflow.to_bool()?))
         })?;
         if overflow {
             self.report_assert_as_lint(
@@ -349,7 +348,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             let left_ty = left.ty(self.local_decls(), self.tcx);
             let left_size = self.ecx.layout_of(left_ty).ok()?.size;
             let right_size = r.layout.size;
-            let r_bits = r.to_scalar().to_bits(right_size).ok();
+            let r_bits = r.to_scalar().to_bits(right_size).discard_err();
             if r_bits.is_some_and(|b| b >= left_size.bits() as u128) {
                 debug!("check_binary_op: reporting assert for {:?}", location);
                 let panic = AssertKind::Overflow(
@@ -496,7 +495,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             // This can be `None` if the lhs wasn't const propagated and we just
             // triggered the assert on the value of the rhs.
             self.eval_operand(op)
-                .and_then(|op| self.ecx.read_immediate(&op).ok())
+                .and_then(|op| self.ecx.read_immediate(&op).discard_err())
                 .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
         };
         let msg = match msg {
@@ -601,13 +600,15 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             }
 
             Len(place) => {
-                let len = match self.get_const(place)? {
-                    Value::Immediate(src) => src.len(&self.ecx).ok()?,
-                    Value::Aggregate { fields, .. } => fields.len() as u64,
-                    Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() {
-                        ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?,
-                        _ => return None,
-                    },
+                let len = if let ty::Array(_, n) = place.ty(self.local_decls(), self.tcx).ty.kind()
+                {
+                    n.try_to_target_usize(self.tcx)?
+                } else {
+                    match self.get_const(place)? {
+                        Value::Immediate(src) => src.len(&self.ecx).discard_err()?,
+                        Value::Aggregate { fields, .. } => fields.len() as u64,
+                        Value::Uninit => return None,
+                    }
                 };
                 ImmTy::from_scalar(Scalar::from_target_usize(len, self), layout).into()
             }
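The reordered `Len` logic above tries the type first: an array's length is part of its type, so it can be read off without knowing the value at all, and only non-array places need an actually known value. A user-level view of the distinction:

    fn main() {
        // Known from the type alone, regardless of the contents:
        let arr: [u16; 8] = Default::default();
        assert_eq!(arr.len(), 8); // `[u16; 8]` carries the 8 statically

        // For a slice, the length is a runtime value (wide-pointer metadata),
        // so a constant-propagation pass needs a known value to resolve it.
        let slice: &[u16] = &arr[..3];
        assert_eq!(slice.len(), 3);
    }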
@@ -615,7 +616,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
             Ref(..) | RawPtr(..) => return None,
 
             NullaryOp(ref null_op, ty) => {
-                let op_layout = self.use_ecx(|this| this.ecx.layout_of(ty))?;
+                let op_layout = self.ecx.layout_of(ty).ok()?;
                 let val = match null_op {
                     NullOp::SizeOf => op_layout.size.bytes(),
                     NullOp::AlignOf => op_layout.align.abi.bytes(),
@@ -633,16 +634,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             Cast(ref kind, ref value, to) => match kind {
                 CastKind::IntToInt | CastKind::IntToFloat => {
                     let value = self.eval_operand(value)?;
-                    let value = self.ecx.read_immediate(&value).ok()?;
+                    let value = self.ecx.read_immediate(&value).discard_err()?;
                     let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
+                    let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
                     res.into()
                 }
                 CastKind::FloatToFloat | CastKind::FloatToInt => {
                     let value = self.eval_operand(value)?;
-                    let value = self.ecx.read_immediate(&value).ok()?;
+                    let value = self.ecx.read_immediate(&value).discard_err()?;
                     let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
+                    let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
                     res.into()
                 }
                 CastKind::Transmute => {
@@ -656,7 +657,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                         _ => return None,
                     }
 
-                    value.offset(Size::ZERO, to, &self.ecx).ok()?.into()
+                    value.offset(Size::ZERO, to, &self.ecx).discard_err()?.into()
                 }
                 _ => return None,
             },
@@ -781,7 +782,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
                    && let Some(constant) = value_const.to_bits(value_const.size()).discard_err()
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 4c090665992..d184328748f 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -51,6 +51,7 @@ mod add_subtyping_projections;
 mod check_alignment;
 mod check_const_item_mutation;
 mod check_packed_ref;
+mod check_undefined_transmutes;
 // This pass is public to allow external drivers to perform MIR cleanup
 pub mod cleanup_post_borrowck;
 mod copy_prop;
@@ -298,6 +299,7 @@ fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
         &Lint(check_packed_ref::CheckPackedRef),
         &Lint(check_const_item_mutation::CheckConstItemMutation),
         &Lint(function_item_references::FunctionItemReferences),
+        &Lint(check_undefined_transmutes::CheckUndefinedTransmutes),
         // What we need to do constant evaluation.
         &simplify::SimplifyCfg::Initial,
         &Lint(sanity_check::SanityCheck),
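For context on the `CheckUndefinedTransmutes` lint wired in above: during const evaluation a pointer's address is symbolic, so reading it as an integer has no defined value. The cast below is well-defined at runtime; performing the equivalent transmute inside const evaluation is what the lint flags (runtime-only demonstration, since the const form would be rejected):

    fn main() {
        let x = 0u8;
        let addr = &x as *const u8 as usize; // fine at runtime
        assert!(addr != 0);
    }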
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index d963ca5c485..86c4b241a2b 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -329,7 +329,7 @@ impl<'tcx> Validator<'_, 'tcx> {
             // Determine the type of the thing we are indexing.
             && let ty::Array(_, len) = place_base.ty(self.body, self.tcx).ty.kind()
             // It's an array; determine its length.
-            && let Some(len) = len.try_eval_target_usize(self.tcx, self.param_env)
+            && let Some(len) = len.try_to_target_usize(self.tcx)
             // If the index is in-bounds, go ahead.
             && idx < len
         {
@@ -407,7 +407,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                     // mutably without consequences. However, only &mut []
                     // is allowed right now.
                     if let ty::Array(_, len) = ty.kind() {
-                        match len.try_eval_target_usize(self.tcx, self.param_env) {
+                        match len.try_to_target_usize(self.tcx) {
                             Some(0) => {}
                             _ => return Err(Unpromotable),
                         }
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index a62a892716f..53e53d9d5ba 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -179,7 +179,7 @@ fn compute_replacement<'tcx>(
                 } else {
                     // This is a proper dereference. We can only allow it if `target` is live.
                     maybe_dead.seek_after_primary_effect(loc);
-                    let maybe_dead = maybe_dead.contains(target.local);
+                    let maybe_dead = maybe_dead.get().contains(target.local);
                     !maybe_dead
                 }
             };
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index e6647edf3f5..09969a4c7cc 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -19,8 +19,7 @@ pub(super) struct RemoveUninitDrops;
 impl<'tcx> crate::MirPass<'tcx> for RemoveUninitDrops {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env(body.source.def_id());
-        let move_data =
-            MoveData::gather_moves(body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
+        let move_data = MoveData::gather_moves(body, tcx, |ty| ty.needs_drop(tcx, param_env));
 
         let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &move_data)
             .into_engine(tcx, body)
diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs
index 9884b6dd1c3..277a33c0311 100644
--- a/compiler/rustc_mir_transform/src/single_use_consts.rs
+++ b/compiler/rustc_mir_transform/src/single_use_consts.rs
@@ -185,15 +185,14 @@ impl<'tcx> MutVisitor<'tcx> for LocalReplacer<'tcx> {
             && let Some(local) = place.as_local()
             && local == self.local
         {
-            let const_op = self
+            let const_op = *self
                 .operand
                 .as_ref()
                 .unwrap_or_else(|| {
                     bug!("the operand was already stolen");
                 })
                 .constant()
-                .unwrap()
-                .clone();
+                .unwrap();
             var_debug_info.value = VarDebugInfoContents::Const(const_op);
         }
    }
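The `single_use_consts` tweak above replaces a `.clone()` with a dereferencing copy, which suffices because the constant operand type is `Copy`. A minimal demonstration of the same idiom with a stand-in type:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct ConstOperand(u32);

    fn main() {
        let stored: Option<ConstOperand> = Some(ConstOperand(7));
        // Before: `stored.as_ref().unwrap().clone()`; after: a plain deref-copy.
        let const_op = *stored.as_ref().unwrap();
        assert_eq!(const_op, ConstOperand(7));
    }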
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index eda0b8c75f3..25e68f44456 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -4,7 +4,8 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_hir::LangItem;
 use rustc_index::IndexVec;
 use rustc_index::bit_set::BitSet;
-use rustc_infer::traits::Reveal;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{Obligation, ObligationCause, Reveal};
 use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
@@ -16,6 +17,8 @@ use rustc_middle::ty::{
 use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{FIRST_VARIANT, Size};
 use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::ObligationCtxt;
+use rustc_type_ir::Upcast;
 
 use crate::util::{is_within_packed, relate_types};
 
@@ -586,6 +589,33 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
         crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
     }
+
+    /// Check that the given predicate definitely holds in the param-env of this MIR
+    /// body.
+    fn predicate_must_hold_modulo_regions(
+        &self,
+        pred: impl Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>>,
+    ) -> bool {
+        let pred: ty::Predicate<'tcx> = pred.upcast(self.tcx);
+
+        // We sometimes have to use `defining_opaque_types` for predicates
+        // to succeed here and figuring out how exactly that should work
+        // is annoying. It is harmless enough to just not validate anything
+        // in that case. We still check this after analysis as all opaque
+        // types have been revealed at this point.
+        if pred.has_opaque_types() {
+            return true;
+        }
+
+        let infcx = self.tcx.infer_ctxt().build();
+        let ocx = ObligationCtxt::new(&infcx);
+        ocx.register_obligation(Obligation::new(
+            self.tcx,
+            ObligationCause::dummy(),
+            self.param_env,
+            pred,
+        ));
+        ocx.select_all_or_error().is_empty()
+    }
 }
 
 impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
@@ -1202,8 +1232,18 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     }
                 }
                 CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
-                    // This is used for all `CoerceUnsized` types,
-                    // not just pointers/references, so is hard to check.
+                    // Pointers being unsize coerced should at least implement
+                    // `CoerceUnsized`.
+                    if !self.predicate_must_hold_modulo_regions(ty::TraitRef::new(
+                        self.tcx,
+                        self.tcx.require_lang_item(
+                            LangItem::CoerceUnsized,
+                            Some(self.body.source_info(location).span),
+                        ),
+                        [op_ty, *target_type],
+                    )) {
+                        self.fail(location, format!("Unsize coercion, but `{op_ty}` isn't coercible to `{target_type}`"));
+                    }
                 }
                 CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
                     // FIXME(dyn-star): make sure nothing needs to be done here.
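The predicate the validator now registers is the same `CoerceUnsized` relation that governs user-visible unsizing coercions. Two examples of coercions that satisfy it, via the built-in impls (ordinary Rust, shown only to illustrate which operand/target pairs the new check accepts):

    fn main() {
        // `&[i32; 3] -> &[i32]`: the classic unsize coercion.
        let a = [1, 2, 3];
        let s: &[i32] = &a;
        assert_eq!(s.len(), 3);

        // `&i32 -> &dyn std::fmt::Display`: unsizing to a trait object.
        let n = 5;
        let d: &dyn std::fmt::Display = &n;
        assert_eq!(d.to_string(), "5");
    }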
