| author    | Ralf Jung <post@ralfj.de>                | 2024-12-12 12:24:31 +0100 |
|-----------|------------------------------------------|---------------------------|
| committer | Ralf Jung <post@ralfj.de>                | 2024-12-12 12:24:31 +0100 |
| commit    | f590fa92140b5cc375bd4bc16ca6e930c5bf9e33 |                           |
| tree      | 47d0fb5e03dc56be5e33f9b84f668b540b990e31 | compiler/rustc_mir_transform/src |
| parent    | cc797a2b741be734289ba857e99dc72be908d92d |                           |
| parent    | a4ef751e26a90dd3b6b35fdbfef1e4854f9d80e1 |                           |
Merge from rustc
Diffstat (limited to 'compiler/rustc_mir_transform/src')
6 files changed, 263 insertions(+), 179 deletions(-)
```diff
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 46efdd16ee8..9e80f1f1c4a 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -9,7 +9,7 @@ use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId, Op};
 use tracing::{debug, debug_span, instrument};
 
-use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, TraverseCoverageGraphWithLoops};
+use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, ReadyFirstTraversal};
 
 #[cfg(test)]
 mod tests;
@@ -236,23 +236,12 @@ impl<'a> CountersBuilder<'a> {
         // Traverse the coverage graph, ensuring that every node that needs a
         // coverage counter has one.
-        //
-        // The traversal tries to ensure that, when a loop is encountered, all
-        // nodes within the loop are visited before visiting any nodes outside
-        // the loop.
-        let mut traversal = TraverseCoverageGraphWithLoops::new(self.graph);
-        while let Some(bcb) = traversal.next() {
+        for bcb in ReadyFirstTraversal::new(self.graph) {
             let _span = debug_span!("traversal", ?bcb).entered();
             if self.bcb_needs_counter.contains(bcb) {
                 self.make_node_counter_and_out_edge_counters(bcb);
             }
         }
-
-        assert!(
-            traversal.is_complete(),
-            "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
-            traversal.unvisited(),
-        );
     }
 
     /// Make sure the given node has a node counter, and then make sure each of
```
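A note on the dropped assertion: with the traversal now exposed as a plain `Iterator` (see the `graph.rs` hunk below), the `for` loop simply runs it to exhaustion, so `is_complete`/`unvisited` have no remaining callers. If that sanity check were ever wanted again, a sketch along these lines (not part of this commit; it assumes `CoverageGraph` and `ReadyFirstTraversal` are in scope as in `counters.rs`) would recover it:

```rust
// Hypothetical debug-only check, not part of this commit: every node of the
// coverage graph should be yielded exactly once by the traversal.
#[cfg(debug_assertions)]
fn assert_traversal_is_exhaustive(graph: &CoverageGraph) {
    // `Iterator::count` drives the traversal to exhaustion.
    assert_eq!(ReadyFirstTraversal::new(graph).count(), graph.num_nodes());
}
```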
```diff
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 092bce1de2c..ad6774fccd6 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -9,7 +9,6 @@ use rustc_data_structures::graph::dominators::Dominators;
 use rustc_data_structures::graph::{self, DirectedGraph, StartNode};
 use rustc_index::IndexVec;
 use rustc_index::bit_set::BitSet;
-use rustc_middle::bug;
 use rustc_middle::mir::{self, BasicBlock, Terminator, TerminatorKind};
 use tracing::debug;
@@ -462,138 +461,6 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
     CoverageSuccessors { targets, is_yield }
 }
 
-/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
-/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
-/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
-#[derive(Debug)]
-struct TraversalContext {
-    /// BCB with one or more incoming loop backedges, indicating which loop
-    /// this context is for.
-    ///
-    /// If `None`, this is the non-loop context for the function as a whole.
-    loop_header: Option<BasicCoverageBlock>,
-
-    /// Worklist of BCBs to be processed in this context.
-    worklist: VecDeque<BasicCoverageBlock>,
-}
-
-pub(crate) struct TraverseCoverageGraphWithLoops<'a> {
-    basic_coverage_blocks: &'a CoverageGraph,
-
-    context_stack: Vec<TraversalContext>,
-    visited: BitSet<BasicCoverageBlock>,
-}
-
-impl<'a> TraverseCoverageGraphWithLoops<'a> {
-    pub(crate) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
-        let worklist = VecDeque::from([basic_coverage_blocks.start_node()]);
-        let context_stack = vec![TraversalContext { loop_header: None, worklist }];
-
-        // `context_stack` starts with a `TraversalContext` for the main function context (beginning
-        // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
-        // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
-        // exhausted.
-        let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
-        Self { basic_coverage_blocks, context_stack, visited }
-    }
-
-    pub(crate) fn next(&mut self) -> Option<BasicCoverageBlock> {
-        debug!(
-            "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
-            self.context_stack.iter().rev().collect::<Vec<_>>()
-        );
-
-        while let Some(context) = self.context_stack.last_mut() {
-            let Some(bcb) = context.worklist.pop_front() else {
-                // This stack level is exhausted; pop it and try the next one.
-                self.context_stack.pop();
-                continue;
-            };
-
-            if !self.visited.insert(bcb) {
-                debug!("Already visited: {bcb:?}");
-                continue;
-            }
-            debug!("Visiting {bcb:?}");
-
-            if self.basic_coverage_blocks.is_loop_header.contains(bcb) {
-                debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
-                self.context_stack
-                    .push(TraversalContext { loop_header: Some(bcb), worklist: VecDeque::new() });
-            }
-            self.add_successors_to_worklists(bcb);
-            return Some(bcb);
-        }
-
-        None
-    }
-
-    fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
-        let successors = &self.basic_coverage_blocks.successors[bcb];
-        debug!("{:?} has {} successors:", bcb, successors.len());
-
-        for &successor in successors {
-            if successor == bcb {
-                debug!(
-                    "{:?} has itself as its own successor. (Note, the compiled code will \
-                    generate an infinite loop.)",
-                    bcb
-                );
-                // Don't re-add this successor to the worklist. We are already processing it.
-                // FIXME: This claims to skip just the self-successor, but it actually skips
-                // all other successors as well. Does that matter?
-                break;
-            }
-
-            // Add successors of the current BCB to the appropriate context. Successors that
-            // stay within a loop are added to the BCBs context worklist. Successors that
-            // exit the loop (they are not dominated by the loop header) must be reachable
-            // from other BCBs outside the loop, and they will be added to a different
-            // worklist.
-            //
-            // Branching blocks (with more than one successor) must be processed before
-            // blocks with only one successor, to prevent unnecessarily complicating
-            // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
-            // branching block would have given an `Expression` (or vice versa).
-
-            let context = self
-                .context_stack
-                .iter_mut()
-                .rev()
-                .find(|context| match context.loop_header {
-                    Some(loop_header) => {
-                        self.basic_coverage_blocks.dominates(loop_header, successor)
-                    }
-                    None => true,
-                })
-                .unwrap_or_else(|| bug!("should always fall back to the root non-loop context"));
-            debug!("adding to worklist for {:?}", context.loop_header);
-
-            // FIXME: The code below had debug messages claiming to add items to a
-            // particular end of the worklist, but was confused about which end was
-            // which. The existing behaviour has been preserved for now, but it's
-            // unclear what the intended behaviour was.
-
-            if self.basic_coverage_blocks.successors[successor].len() > 1 {
-                context.worklist.push_back(successor);
-            } else {
-                context.worklist.push_front(successor);
-            }
-        }
-    }
-
-    pub(crate) fn is_complete(&self) -> bool {
-        self.visited.count() == self.visited.domain_size()
-    }
-
-    pub(crate) fn unvisited(&self) -> Vec<BasicCoverageBlock> {
-        let mut unvisited_set: BitSet<BasicCoverageBlock> =
-            BitSet::new_filled(self.visited.domain_size());
-        unvisited_set.subtract(&self.visited);
-        unvisited_set.iter().collect::<Vec<_>>()
-    }
-}
-
 /// Wrapper around a [`mir::BasicBlocks`] graph that restricts each node's
 /// successors to only the ones considered "relevant" when building a coverage
 /// graph.
@@ -622,3 +489,126 @@ impl<'a, 'tcx> graph::Successors for CoverageRelevantSubgraph<'a, 'tcx> {
         self.coverage_successors(bb).into_iter()
     }
 }
+
+/// State of a node in the coverage graph during ready-first traversal.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+enum ReadyState {
+    /// This node has not yet been added to the fallback queue or ready queue.
+    Unqueued,
+    /// This node is currently in the fallback queue.
+    InFallbackQueue,
+    /// This node's predecessors have all been visited, so it is in the ready queue.
+    /// (It might also have a stale entry in the fallback queue.)
+    InReadyQueue,
+    /// This node has been visited.
+    /// (It might also have a stale entry in the fallback queue.)
+    Visited,
+}
+
+/// Iterator that visits nodes in the coverage graph, in an order that always
+/// prefers "ready" nodes whose predecessors have already been visited.
+pub(crate) struct ReadyFirstTraversal<'a> {
+    graph: &'a CoverageGraph,
+
+    /// For each node, the number of its predecessor nodes that haven't been visited yet.
+    n_unvisited_preds: IndexVec<BasicCoverageBlock, u32>,
+    /// Indicates whether a node has been visited, or which queue it is in.
+    state: IndexVec<BasicCoverageBlock, ReadyState>,
+
+    /// Holds unvisited nodes whose predecessors have all been visited.
+    ready_queue: VecDeque<BasicCoverageBlock>,
+    /// Holds unvisited nodes with some unvisited predecessors.
+    /// Also contains stale entries for nodes that were upgraded to ready.
+    fallback_queue: VecDeque<BasicCoverageBlock>,
+}
+
+impl<'a> ReadyFirstTraversal<'a> {
+    pub(crate) fn new(graph: &'a CoverageGraph) -> Self {
+        let num_nodes = graph.num_nodes();
+
+        let n_unvisited_preds =
+            IndexVec::from_fn_n(|node| graph.predecessors[node].len() as u32, num_nodes);
+        let mut state = IndexVec::from_elem_n(ReadyState::Unqueued, num_nodes);
+
+        // We know from coverage graph construction that the start node is the
+        // only node with no predecessors.
+        debug_assert!(
+            n_unvisited_preds.iter_enumerated().all(|(node, &n)| (node == START_BCB) == (n == 0))
+        );
+        let ready_queue = VecDeque::from(vec![START_BCB]);
+        state[START_BCB] = ReadyState::InReadyQueue;
+
+        Self { graph, state, n_unvisited_preds, ready_queue, fallback_queue: VecDeque::new() }
+    }
+
+    /// Returns the next node from the ready queue, or else the next unvisited
+    /// node from the fallback queue.
+    fn next_inner(&mut self) -> Option<BasicCoverageBlock> {
+        // Always prefer to yield a ready node if possible.
+        if let Some(node) = self.ready_queue.pop_front() {
+            assert_eq!(self.state[node], ReadyState::InReadyQueue);
+            return Some(node);
+        }
+
+        while let Some(node) = self.fallback_queue.pop_front() {
+            match self.state[node] {
+                // This entry in the fallback queue is not stale, so yield it.
+                ReadyState::InFallbackQueue => return Some(node),
+                // This node was added to the fallback queue, but later became
+                // ready and was visited via the ready queue. Ignore it here.
+                ReadyState::Visited => {}
+                // Unqueued nodes can't be in the fallback queue, by definition.
+                // We know that the ready queue is empty at this point.
+                ReadyState::Unqueued | ReadyState::InReadyQueue => unreachable!(
+                    "unexpected state for {node:?} in the fallback queue: {:?}",
+                    self.state[node]
+                ),
+            }
+        }
+
+        None
+    }
+
+    fn mark_visited_and_enqueue_successors(&mut self, node: BasicCoverageBlock) {
+        assert!(self.state[node] < ReadyState::Visited);
+        self.state[node] = ReadyState::Visited;
+
+        // For each of this node's successors, decrease the successor's
+        // "unvisited predecessors" count, and enqueue it if appropriate.
+        for &succ in &self.graph.successors[node] {
+            let is_unqueued = match self.state[succ] {
+                ReadyState::Unqueued => true,
+                ReadyState::InFallbackQueue => false,
+                ReadyState::InReadyQueue => {
+                    unreachable!("nodes in the ready queue have no unvisited predecessors")
+                }
+                // The successor was already visited via one of its other predecessors.
+                ReadyState::Visited => continue,
+            };
+
+            self.n_unvisited_preds[succ] -= 1;
+            if self.n_unvisited_preds[succ] == 0 {
+                // This node's predecessors have all been visited, so add it to
+                // the ready queue. If it's already in the fallback queue, that
+                // fallback entry will be ignored later.
+                self.state[succ] = ReadyState::InReadyQueue;
+                self.ready_queue.push_back(succ);
+            } else if is_unqueued {
+                // This node has unvisited predecessors, so add it to the
+                // fallback queue in case we run out of ready nodes later.
+                self.state[succ] = ReadyState::InFallbackQueue;
+                self.fallback_queue.push_back(succ);
+            }
+        }
+    }
+}
+
+impl<'a> Iterator for ReadyFirstTraversal<'a> {
+    type Item = BasicCoverageBlock;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let node = self.next_inner()?;
+        self.mark_visited_and_enqueue_successors(node);
+        Some(node)
+    }
+}
```
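The new `ReadyFirstTraversal` is essentially Kahn's topological sort with an escape hatch for cycles: keep yielding nodes whose predecessors are all visited, and when a loop backedge leaves no node "ready", pull the oldest queued node to break the cycle. The sketch below is a self-contained illustration of the same scheme (my own code, not the compiler's): it uses plain `usize` indices and a linear scan of the fallback queue in place of the per-node `ReadyState` bookkeeping above.

```rust
use std::collections::VecDeque;

/// Ready-first order over an adjacency list, assuming node 0 is the start
/// node and the only node without predecessors.
fn ready_first_order(successors: &[Vec<usize>]) -> Vec<usize> {
    let num_nodes = successors.len();

    // Count each node's unvisited predecessors (loop backedges included).
    let mut n_unvisited_preds = vec![0u32; num_nodes];
    for succs in successors {
        for &s in succs {
            n_unvisited_preds[s] += 1;
        }
    }

    let mut visited = vec![false; num_nodes];
    let mut queued = vec![false; num_nodes];
    let mut ready = VecDeque::from([0]);
    let mut fallback: VecDeque<usize> = VecDeque::new();
    queued[0] = true;

    let mut order = Vec::with_capacity(num_nodes);
    loop {
        // Prefer a ready node; otherwise take the oldest non-stale fallback
        // entry, which breaks a cycle that left no node "ready".
        let node = if let Some(node) = ready.pop_front() {
            node
        } else if let Some(i) = fallback.iter().position(|&n| !visited[n]) {
            fallback.remove(i).unwrap()
        } else {
            break;
        };
        if visited[node] {
            continue;
        }
        visited[node] = true;
        order.push(node);

        for &succ in &successors[node] {
            if visited[succ] {
                continue;
            }
            n_unvisited_preds[succ] -= 1;
            if n_unvisited_preds[succ] == 0 {
                ready.push_back(succ); // may leave a stale fallback entry
            } else if !queued[succ] {
                fallback.push_back(succ);
            }
            queued[succ] = true;
        }
    }
    order
}

fn main() {
    // 0 -> 1, 1 -> {2, 3}, 2 -> 1 (loop backedge), 3 is the exit.
    let graph = vec![vec![1], vec![2, 3], vec![1], vec![]];
    // Node 1 never becomes ready (its predecessor 2 sits behind the backedge),
    // so it is taken from the fallback queue, which then unblocks 2 and 3.
    assert_eq!(ready_first_order(&graph), vec![0, 1, 2, 3]);
}
```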
```diff
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 0090f6f3040..edaec3c7965 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -1,8 +1,11 @@
 use rustc_data_structures::captures::Captures;
 use rustc_index::bit_set::BitSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::coverage::{CovTerm, CoverageKind, MappingKind};
-use rustc_middle::mir::{Body, CoverageIdsInfo, Statement, StatementKind};
+use rustc_middle::mir::coverage::{
+    CounterId, CovTerm, CoverageIdsInfo, CoverageKind, Expression, ExpressionId,
+    FunctionCoverageInfo, MappingKind, Op,
+};
+use rustc_middle::mir::{Body, Statement, StatementKind};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::util::Providers;
@@ -87,10 +90,10 @@ fn coverage_ids_info<'tcx>(
 ) -> CoverageIdsInfo {
     let mir_body = tcx.instance_mir(instance_def);
 
-    let Some(fn_cov_info) = mir_body.function_coverage_info.as_ref() else {
+    let Some(fn_cov_info) = mir_body.function_coverage_info.as_deref() else {
         return CoverageIdsInfo {
             counters_seen: BitSet::new_empty(0),
-            expressions_seen: BitSet::new_empty(0),
+            zero_expressions: BitSet::new_empty(0),
         };
     };
@@ -123,7 +126,10 @@ fn coverage_ids_info<'tcx>(
         }
     }
 
-    CoverageIdsInfo { counters_seen, expressions_seen }
+    let zero_expressions =
+        identify_zero_expressions(fn_cov_info, &counters_seen, &expressions_seen);
+
+    CoverageIdsInfo { counters_seen, zero_expressions }
 }
 
 fn all_coverage_in_mir_body<'a, 'tcx>(
@@ -141,3 +147,94 @@ fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
     let scope_data = &body.source_scopes[statement.source_info.scope];
     scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
 }
+
+/// Identify expressions that will always have a value of zero, and note
+/// their IDs in a `BitSet`. Mappings that refer to a zero expression
+/// can instead become mappings to a constant zero value.
+///
+/// This function mainly exists to preserve the simplifications that were
+/// already being performed by the Rust-side expression renumbering, so that
+/// the resulting coverage mappings don't get worse.
+fn identify_zero_expressions(
+    fn_cov_info: &FunctionCoverageInfo,
+    counters_seen: &BitSet<CounterId>,
+    expressions_seen: &BitSet<ExpressionId>,
+) -> BitSet<ExpressionId> {
+    // The set of expressions that either were optimized out entirely, or
+    // have zero as both of their operands, and will therefore always have
+    // a value of zero. Other expressions that refer to these as operands
+    // can have those operands replaced with `CovTerm::Zero`.
+    let mut zero_expressions = BitSet::new_empty(fn_cov_info.expressions.len());
+
+    // Simplify a copy of each expression based on lower-numbered expressions,
+    // and then update the set of always-zero expressions if necessary.
+    // (By construction, expressions can only refer to other expressions
+    // that have lower IDs, so one pass is sufficient.)
+    for (id, expression) in fn_cov_info.expressions.iter_enumerated() {
+        if !expressions_seen.contains(id) {
+            // If an expression was not seen, it must have been optimized away,
+            // so any operand that refers to it can be replaced with zero.
+            zero_expressions.insert(id);
+            continue;
+        }
+
+        // We don't need to simplify the actual expression data in the
+        // expressions list; we can just simplify a temporary copy and then
+        // use that to update the set of always-zero expressions.
+        let Expression { mut lhs, op, mut rhs } = *expression;
+
+        // If an expression has an operand that is also an expression, the
+        // operand's ID must be strictly lower. This is what lets us find
+        // all zero expressions in one pass.
+        let assert_operand_expression_is_lower = |operand_id: ExpressionId| {
+            assert!(
+                operand_id < id,
+                "Operand {operand_id:?} should be less than {id:?} in {expression:?}",
+            )
+        };
+
+        // If an operand refers to a counter or expression that is always
+        // zero, then that operand can be replaced with `CovTerm::Zero`.
+        let maybe_set_operand_to_zero = |operand: &mut CovTerm| {
+            if let CovTerm::Expression(id) = *operand {
+                assert_operand_expression_is_lower(id);
+            }
+
+            if is_zero_term(&counters_seen, &zero_expressions, *operand) {
+                *operand = CovTerm::Zero;
+            }
+        };
+        maybe_set_operand_to_zero(&mut lhs);
+        maybe_set_operand_to_zero(&mut rhs);
+
+        // Coverage counter values cannot be negative, so if an expression
+        // involves subtraction from zero, assume that its RHS must also be zero.
+        // (Do this after simplifications that could set the LHS to zero.)
+        if lhs == CovTerm::Zero && op == Op::Subtract {
+            rhs = CovTerm::Zero;
+        }
+
+        // After the above simplifications, if both operands are zero, then
+        // we know that this expression is always zero too.
+        if lhs == CovTerm::Zero && rhs == CovTerm::Zero {
+            zero_expressions.insert(id);
+        }
+    }
+
+    zero_expressions
+}
+
+/// Returns `true` if the given term is known to have a value of zero, taking
+/// into account knowledge of which counters are unused and which expressions
+/// are always zero.
+fn is_zero_term(
+    counters_seen: &BitSet<CounterId>,
+    zero_expressions: &BitSet<ExpressionId>,
+    term: CovTerm,
+) -> bool {
+    match term {
+        CovTerm::Zero => true,
+        CovTerm::Counter(id) => !counters_seen.contains(id),
+        CovTerm::Expression(id) => zero_expressions.contains(id),
+    }
+}
```
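The effect of `identify_zero_expressions` is easiest to see on a small input. This standalone sketch reproduces the single forward pass on simplified stand-in types (`Term` and `Op` here play the role of `CovTerm`, `Op`, and the ID types; it is an illustration, not the compiler's code):

```rust
/// One-pass zero-expression propagation. Expressions may only refer to
/// lower-numbered expressions, so a single forward pass suffices.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Term {
    Zero,
    Counter(usize),
    Expr(usize),
}

#[derive(Clone, Copy, PartialEq)]
enum Op {
    Add,
    Subtract,
}

fn zero_expressions(
    exprs: &[(Term, Op, Term)],
    counter_seen: &[bool],
    expr_seen: &[bool],
) -> Vec<bool> {
    let mut zero = vec![false; exprs.len()];
    // A term is known-zero if it is literally zero, an uninstrumented
    // counter, or an expression already marked as always-zero.
    let is_zero = |zero: &[bool], term: Term| match term {
        Term::Zero => true,
        Term::Counter(id) => !counter_seen[id],
        Term::Expr(id) => zero[id],
    };

    for (id, &(mut lhs, op, mut rhs)) in exprs.iter().enumerate() {
        if !expr_seen[id] {
            zero[id] = true; // optimized out entirely
            continue;
        }
        if is_zero(&zero, lhs) {
            lhs = Term::Zero;
        }
        if is_zero(&zero, rhs) {
            rhs = Term::Zero;
        }
        // Counters can't go negative, so `0 - rhs` forces `rhs` to zero too.
        if lhs == Term::Zero && op == Op::Subtract {
            rhs = Term::Zero;
        }
        if lhs == Term::Zero && rhs == Term::Zero {
            zero[id] = true;
        }
    }
    zero
}

fn main() {
    // e0 = c0 - c0, where counter c0 was never seen: always zero.
    // e1 = e0 + c1, where counter c1 was seen: stays live.
    let exprs = [
        (Term::Counter(0), Op::Subtract, Term::Counter(0)),
        (Term::Expr(0), Op::Add, Term::Counter(1)),
    ];
    assert_eq!(zero_expressions(&exprs, &[false, true], &[true, true]), vec![true, false]);
}
```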
```diff
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index d017202f48b..b94c925b1db 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -534,8 +534,13 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             // This allows the set of visited edges to grow monotonically with the lattice.
             FlatSet::Bottom => TerminatorEdges::None,
             FlatSet::Elem(scalar) => {
-                let choice = scalar.assert_scalar_int().to_bits_unchecked();
-                TerminatorEdges::Single(targets.target_for_value(choice))
+                if let Ok(scalar_int) = scalar.try_to_scalar_int() {
+                    TerminatorEdges::Single(
+                        targets.target_for_value(scalar_int.to_bits_unchecked()),
+                    )
+                } else {
+                    TerminatorEdges::SwitchInt { discr, targets }
+                }
             }
             FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
         }
```
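The `dataflow_const_prop.rs` hunk replaces a hard `assert_scalar_int()`, which panics when the known discriminant is a provenance-carrying pointer rather than a plain integer, with a graceful fallback to the conservative edge set. A sketch of the pattern with stand-in types (illustrative only; the real code calls `Scalar::try_to_scalar_int` and keeps `TerminatorEdges::SwitchInt` on failure):

```rust
#[derive(Clone, Copy)]
enum Scalar {
    Int(u128),
    Ptr, // provenance-carrying pointer value: has no meaningful raw bits
}

enum Edges {
    Single(usize), // only the matching switch target stays reachable
    AllTargets,    // conservative: keep every outgoing edge
}

fn switch_edges(discr: Scalar, target_for_value: impl Fn(u128) -> usize) -> Edges {
    match discr {
        Scalar::Int(bits) => Edges::Single(target_for_value(bits)),
        // The old `assert_scalar_int()` would panic (ICE) here instead.
        Scalar::Ptr => Edges::AllTargets,
    }
}

fn main() {
    let target_for_value = |v: u128| if v == 0 { 1 } else { 2 };
    assert!(matches!(switch_edges(Scalar::Int(0), target_for_value), Edges::Single(1)));
    assert!(matches!(switch_edges(Scalar::Ptr, target_for_value), Edges::AllTargets));
}
```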
An occupied entry at `block` in `self.visited` signals that we + // have visited `block` before. let dropped_local_here = Rc::clone(self.visited.entry(block).or_insert_with(make_new_path_set)); - // We could have invoked reverse lookup for a `MovePathIndex` every time, but unfortunately it is expensive. - // Let's cache them in `self.block_drop_value_info`. + // We could have invoked reverse lookup for a `MovePathIndex` every time, but unfortunately + // it is expensive. Let's cache them in `self.block_drop_value_info`. match self.block_drop_value_info[block] { MovePathIndexAtBlock::Some(dropped) => { dropped_local_here.borrow_mut().insert(dropped); @@ -76,23 +76,24 @@ impl<'a, 'mir, 'tcx> DropsReachable<'a, 'mir, 'tcx> { && let LookupResult::Exact(idx) | LookupResult::Parent(Some(idx)) = self.move_data.rev_lookup.find(place.as_ref()) { - // Since we are working with MIRs at a very early stage, - // observing a `drop` terminator is not indicative enough that - // the drop will definitely happen. - // That is decided in the drop elaboration pass instead. - // Therefore, we need to consult with the maybe-initialization information. + // Since we are working with MIRs at a very early stage, observing a `drop` + // terminator is not indicative enough that the drop will definitely happen. + // That is decided in the drop elaboration pass instead. Therefore, we need to + // consult with the maybe-initialization information. self.maybe_init.seek_before_primary_effect(Location { block, statement_index: data.statements.len(), }); - // Check if the drop of `place` under inspection is really in effect. - // This is true only when `place` may have been initialized along a control flow path from a BID to the drop program point today. - // In other words, this is where the drop of `place` will happen in the future instead. + // Check if the drop of `place` under inspection is really in effect. This is + // true only when `place` may have been initialized along a control flow path + // from a BID to the drop program point today. In other words, this is where + // the drop of `place` will happen in the future instead. if let MaybeReachable::Reachable(maybe_init) = self.maybe_init.get() && maybe_init.contains(idx) { - // We also cache the drop information, so that we do not need to check on data-flow cursor again + // We also cache the drop information, so that we do not need to check on + // data-flow cursor again. self.block_drop_value_info[block] = MovePathIndexAtBlock::Some(idx); dropped_local_here.borrow_mut().insert(idx); } else { @@ -139,8 +140,9 @@ impl<'a, 'mir, 'tcx> DropsReachable<'a, 'mir, 'tcx> { // Let's check the observed dropped places in. self.collected_drops.union(&*dropped_local_there.borrow()); if self.drop_span.is_none() { - // FIXME(@dingxiangfei2009): it turns out that `self.body.source_scopes` are still a bit wonky. - // There is a high chance that this span still points to a block rather than a statement semicolon. + // FIXME(@dingxiangfei2009): it turns out that `self.body.source_scopes` are + // still a bit wonky. There is a high chance that this span still points to a + // block rather than a statement semicolon. 
```diff
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index f786c676e9e..e955d8277a4 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -1,5 +1,5 @@
 use rustc_abi::FieldIdx;
-use rustc_index::bit_set::ChunkedBitSet;
+use rustc_index::bit_set::MixedBitSet;
 use rustc_middle::mir::{Body, TerminatorKind};
 use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, VariantDef};
 use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
@@ -67,7 +67,7 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveUninitDrops {
 fn is_needs_drop_and_init<'tcx>(
     tcx: TyCtxt<'tcx>,
     typing_env: ty::TypingEnv<'tcx>,
-    maybe_inits: &ChunkedBitSet<MovePathIndex>,
+    maybe_inits: &MixedBitSet<MovePathIndex>,
     move_data: &MoveData<'tcx>,
     ty: Ty<'tcx>,
     mpi: MovePathIndex,
```
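Three of the files in this commit only swap `ChunkedBitSet` for `MixedBitSet`. The idea behind the mixed representation, as used here, is to pick a dense bit set for small domains and a chunked one for large domains. The toy below sketches that split; the real `rustc_index::bit_set::MixedBitSet` differs in details, and the threshold shown is an assumption:

```rust
const CHUNK_THRESHOLD: usize = 2048; // illustrative, not the real constant

enum ToyMixedBitSet {
    Small(Vec<u64>),                // dense words: fast and allocation-light
    Large(Vec<Option<Box<[u64]>>>), // lazily materialized fixed-size chunks
}

impl ToyMixedBitSet {
    fn new_empty(domain_size: usize) -> Self {
        if domain_size <= CHUNK_THRESHOLD {
            ToyMixedBitSet::Small(vec![0; domain_size.div_ceil(64)])
        } else {
            ToyMixedBitSet::Large(vec![None; domain_size.div_ceil(CHUNK_THRESHOLD)])
        }
    }

    fn insert(&mut self, bit: usize) {
        match self {
            ToyMixedBitSet::Small(words) => words[bit / 64] |= 1 << (bit % 64),
            ToyMixedBitSet::Large(chunks) => {
                // Only touched chunks are ever allocated.
                let chunk = chunks[bit / CHUNK_THRESHOLD]
                    .get_or_insert_with(|| vec![0u64; CHUNK_THRESHOLD / 64].into_boxed_slice());
                let in_chunk = bit % CHUNK_THRESHOLD;
                chunk[in_chunk / 64] |= 1 << (in_chunk % 64);
            }
        }
    }

    fn contains(&self, bit: usize) -> bool {
        match self {
            ToyMixedBitSet::Small(words) => words[bit / 64] & (1 << (bit % 64)) != 0,
            ToyMixedBitSet::Large(chunks) => match &chunks[bit / CHUNK_THRESHOLD] {
                Some(chunk) => {
                    let in_chunk = bit % CHUNK_THRESHOLD;
                    chunk[in_chunk / 64] & (1 << (in_chunk % 64)) != 0
                }
                None => false,
            },
        }
    }
}

fn main() {
    let mut set = ToyMixedBitSet::new_empty(10_000); // large domain: chunked
    set.insert(9_999);
    assert!(set.contains(9_999) && !set.contains(0));
}
```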
