| author | bors <bors@rust-lang.org> | 2022-04-30 05:06:47 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2022-04-30 05:06:47 +0000 |
| commit | 9a98c63b30bd8419be662b8fb3f3b22ba33d72c5 (patch) | |
| tree | 1603f7b9476f6194d86bb9db61a7b8b1f3aea1a8 /compiler | |
| parent | 0c8e520b3e0d9fbdafb8311f1b18a1f847531051 (diff) | |
| parent | 7149bbcdc5739e4ed1fa3697c1b4d441f1d4441c (diff) | |
Auto merge of #96500 - SparrowLii:rpo, r=tmiasko
Reduce duplication of RPO calculation of MIR

Computing the reverse postorder (RPO) of a MIR body is not cheap, yet it is duplicated in many places; in particular, the `iterate_to_fixpoint` method recomputes it every time it is called during dataflow analysis. This PR caches the postorder on the MIR body so the RPO is recalculated as rarely as possible, which should save some compile time.
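The caching pattern the PR introduces is small enough to sketch standalone. Below is a minimal, self-contained illustration in plain Rust, not the rustc code itself: the toy `Graph` type and its field names are made up for the example, and `std::cell::OnceCell` stands in for `rustc_data_structures::sync::OnceCell`. The idea is the same: compute the postorder once, hand out reverse-postorder iterators backed by the cached vector, and reset the cache whenever the graph is mutated.

```rust
use std::cell::OnceCell;

/// Toy control-flow graph: block `i` has edges to `succs[i]`.
/// (Illustrative type; the real code lives in `rustc_middle::mir::traversal`.)
struct Graph {
    succs: Vec<Vec<usize>>,
    postorder: OnceCell<Vec<usize>>,
}

impl Graph {
    fn new(succs: Vec<Vec<usize>>) -> Self {
        Graph { succs, postorder: OnceCell::new() }
    }

    /// Compute the postorder from block 0 once; later calls reuse the cache.
    fn postorder(&self) -> &[usize] {
        self.postorder.get_or_init(|| {
            let mut visited = vec![false; self.succs.len()];
            let mut order = Vec::new();
            self.visit(0, &mut visited, &mut order);
            order
        })
    }

    fn visit(&self, bb: usize, visited: &mut Vec<bool>, order: &mut Vec<usize>) {
        if std::mem::replace(&mut visited[bb], true) {
            return; // already visited
        }
        for &succ in &self.succs[bb] {
            self.visit(succ, visited, order);
        }
        order.push(bb); // emit after all successors: postorder
    }

    /// Reverse postorder is just the cached postorder walked backwards.
    fn reverse_postorder(&self) -> impl Iterator<Item = usize> + '_ {
        self.postorder().iter().rev().copied()
    }

    /// Any mutation of the CFG must invalidate the cache.
    fn add_edge(&mut self, from: usize, to: usize) {
        self.succs[from].push(to);
        self.postorder = OnceCell::new();
    }
}

fn main() {
    let mut g = Graph::new(vec![vec![1, 2], vec![3], vec![3], vec![]]);
    // Every query after the first reuses the cached postorder.
    let rpo: Vec<usize> = g.reverse_postorder().collect();
    assert_eq!(rpo[0], 0); // RPO visits the entry block first
    // Mutating the CFG drops the cache; the next query recomputes it.
    g.add_edge(1, 2);
    println!("{:?}", g.reverse_postorder().collect::<Vec<_>>());
}
```

The real implementation in `compiler/rustc_middle/src/mir/traversal.rs` (the last file in the diff below) follows the same shape: the cache is stored in a `PostorderCache` field on `Body` and invalidated by the `basic_blocks_mut` family of methods.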
Diffstat (limited to 'compiler')
| compiler/rustc_codegen_ssa/src/mir/mod.rs                 |  1 |
| compiler/rustc_const_eval/src/transform/promote_consts.rs |  4 |
| compiler/rustc_middle/src/mir/mod.rs                      |  8 |
| compiler/rustc_middle/src/mir/traversal.rs                | 90 |
4 files changed, 96 insertions, 7 deletions
```diff
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 6c139df0a85..0c958de64fa 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -244,7 +244,6 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     fx.debug_introduce_locals(&mut bx);
 
     // Codegen the body of each block using reverse postorder
-    // FIXME(eddyb) reuse RPO iterator between `analysis` and this.
     for (bb, _) in traversal::reverse_postorder(&mir) {
         fx.codegen_block(bb);
     }
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index faea2111d92..1052d588fad 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -13,7 +13,7 @@
 //! move analysis runs after promotion on broken MIR.
 
 use rustc_hir as hir;
-use rustc_middle::mir::traversal::ReversePostorder;
+use rustc_middle::mir::traversal::ReversePostorderIter;
 use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::cast::CastTy;
@@ -170,7 +170,7 @@ impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
 
 pub fn collect_temps_and_candidates<'tcx>(
     ccx: &ConstCx<'_, 'tcx>,
-    rpo: &mut ReversePostorder<'_, 'tcx>,
+    rpo: &mut ReversePostorderIter<'_, 'tcx>,
 ) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
     let mut collector = Collector {
         temps: IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls),
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 883fc72cd56..45999f87658 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -62,7 +62,9 @@ pub mod spanview;
 mod switch_sources;
 pub mod tcx;
 pub mod terminator;
+use crate::mir::traversal::PostorderCache;
 pub use terminator::*;
+
 pub mod traversal;
 mod type_foldable;
 pub mod visit;
@@ -323,6 +325,7 @@ pub struct Body<'tcx> {
     predecessor_cache: PredecessorCache,
     switch_source_cache: SwitchSourceCache,
     is_cyclic: GraphIsCyclicCache,
+    postorder_cache: PostorderCache,
 
     pub tainted_by_errors: Option<ErrorGuaranteed>,
 }
@@ -372,6 +375,7 @@ impl<'tcx> Body<'tcx> {
             predecessor_cache: PredecessorCache::new(),
             switch_source_cache: SwitchSourceCache::new(),
             is_cyclic: GraphIsCyclicCache::new(),
+            postorder_cache: PostorderCache::new(),
             tainted_by_errors,
         };
         body.is_polymorphic = body.has_param_types_or_consts();
@@ -401,6 +405,7 @@
             predecessor_cache: PredecessorCache::new(),
             switch_source_cache: SwitchSourceCache::new(),
             is_cyclic: GraphIsCyclicCache::new(),
+            postorder_cache: PostorderCache::new(),
             tainted_by_errors: None,
         };
         body.is_polymorphic = body.has_param_types_or_consts();
@@ -422,6 +427,7 @@
         self.predecessor_cache.invalidate();
         self.switch_source_cache.invalidate();
         self.is_cyclic.invalidate();
+        self.postorder_cache.invalidate();
         &mut self.basic_blocks
     }
 
@@ -432,6 +438,7 @@
         self.predecessor_cache.invalidate();
         self.switch_source_cache.invalidate();
         self.is_cyclic.invalidate();
+        self.postorder_cache.invalidate();
         (&mut self.basic_blocks, &mut self.local_decls)
     }
 
@@ -446,6 +453,7 @@
         self.predecessor_cache.invalidate();
         self.switch_source_cache.invalidate();
         self.is_cyclic.invalidate();
+        self.postorder_cache.invalidate();
         (&mut self.basic_blocks, &mut self.local_decls, &mut self.var_debug_info)
     }
 
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
index d08bede1d73..8d831cc73b8 100644
--- a/compiler/rustc_middle/src/mir/traversal.rs
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -1,4 +1,7 @@
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
 use rustc_index::bit_set::BitSet;
+use rustc_serialize as serialize;
 
 use super::*;
 
@@ -268,10 +271,6 @@ impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
     }
 }
 
-pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorder<'a, 'tcx> {
-    ReversePostorder::new(body, START_BLOCK)
-}
-
 impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
     type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
 
@@ -307,3 +306,86 @@ pub fn reachable_as_bitset<'tcx>(body: &Body<'tcx>) -> BitSet<BasicBlock> {
     (&mut iter).for_each(drop);
     iter.visited
 }
+
+#[derive(Clone)]
+pub struct ReversePostorderIter<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    blocks: &'a Vec<BasicBlock>,
+    idx: usize,
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorderIter<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        if self.idx == 0 {
+            return None;
+        }
+        self.idx -= 1;
+
+        self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.idx, Some(self.idx))
+    }
+}
+
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorderIter<'a, 'tcx> {}
+
+pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorderIter<'a, 'tcx> {
+    let blocks = body.postorder_cache.compute(body);
+
+    let len = blocks.len();
+
+    ReversePostorderIter { body, blocks, idx: len }
+}
+
+#[derive(Clone, Debug)]
+pub(super) struct PostorderCache {
+    cache: OnceCell<Vec<BasicBlock>>,
+}
+
+impl PostorderCache {
+    #[inline]
+    pub(super) fn new() -> Self {
+        PostorderCache { cache: OnceCell::new() }
+    }
+
+    /// Invalidates the postorder cache.
+    #[inline]
+    pub(super) fn invalidate(&mut self) {
+        self.cache = OnceCell::new();
+    }
+
+    /// Returns the &Vec<BasicBlocks> represents the postorder graph for this MIR.
+    #[inline]
+    pub(super) fn compute(&self, body: &Body<'_>) -> &Vec<BasicBlock> {
+        self.cache.get_or_init(|| Postorder::new(body, START_BLOCK).map(|(bb, _)| bb).collect())
+    }
+}
+
+impl<S: serialize::Encoder> serialize::Encodable<S> for PostorderCache {
+    #[inline]
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        s.emit_unit()
+    }
+}
+
+impl<D: serialize::Decoder> serialize::Decodable<D> for PostorderCache {
+    #[inline]
+    fn decode(_: &mut D) -> Self {
+        Self::new()
+    }
+}
+
+impl<CTX> HashStable<CTX> for PostorderCache {
+    #[inline]
+    fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+        // do nothing
+    }
+}
+
+TrivialTypeFoldableAndLiftImpls! {
+    PostorderCache,
+}
```
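One detail worth noting at the tail of the patch: `PostorderCache` is deliberately invisible to serialization and stable hashing. It encodes as a unit, decodes to a fresh empty cache, and its `hash_stable` is a no-op, because the cache is derived data that can always be recomputed from the basic blocks themselves. As a hedged analogy outside rustc (illustrative names, standard-library traits standing in for `rustc_serialize` and `HashStable`), the same principle looks like this:

```rust
use std::cell::OnceCell;
use std::hash::{Hash, Hasher};

/// A value carrying derived, recomputable state. Comparisons and hashes must
/// ignore the cache so that two logically equal values stay equal whether or
/// not their caches happen to be populated.
#[derive(Debug)]
struct Cached {
    data: Vec<u32>,
    sum: OnceCell<u64>,
}

impl Cached {
    fn new(data: Vec<u32>) -> Self {
        Cached { data, sum: OnceCell::new() }
    }

    /// Compute the sum once and cache it.
    fn sum(&self) -> u64 {
        *self.sum.get_or_init(|| self.data.iter().map(|&x| u64::from(x)).sum())
    }
}

impl PartialEq for Cached {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data // `sum` deliberately ignored
    }
}

impl Eq for Cached {}

impl Hash for Cached {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.data.hash(state); // the cache contributes nothing to the hash
    }
}

fn main() {
    let a = Cached::new(vec![1, 2, 3]);
    let b = Cached::new(vec![1, 2, 3]);
    assert_eq!(b.sum(), 6); // populate only b's cache
    assert_eq!(a, b); // still equal: the cache is invisible
}
```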
