author    Mazdak Farrokhzad <twingoow@gmail.com>  2019-06-30 21:57:22 +0200
committer GitHub <noreply@github.com>  2019-06-30 21:57:22 +0200
commit    543c4648bd2028672d091cec378a50034c8709a0 (patch)
tree      16fc04064e9db91b985f21b7fb17ead864277f42
parent    0af8e872ea5ac77effa59f8d3f8794f12cb8865c (diff)
parent    07c5e2b310bf20fdccedc6a927f1417cb9ddc7fa (diff)
Rollup merge of #62062 - ecstatic-morse:dataflow-order, r=nagisa
Use a more efficient iteration order for forward dataflow

Currently, dataflow begins by visiting each block in order of ID (`BasicBlock(0)`, `BasicBlock(1)`, etc.). This PR changes that initial iteration to reverse post-order (see [this blog post](https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#data-flow-analysis) for more info). If the CFG has no back-edges, this ensures that the effects of all predecessors are applied before a basic block is visited, and it should result in fewer total iterations even when back-edges exist. This should not change the results of dataflow analysis.
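
For reference, here is a minimal sketch of reverse post-order on a plain adjacency-list CFG. The `succs` representation, `usize` block IDs, and the free-standing `reverse_postorder` function are illustrative only; rustc's real traversal is `traversal::reverse_postorder` over MIR, used in the diff below.

```rust
// Sketch: reverse post-order via iterative DFS from the entry block (0).
fn reverse_postorder(succs: &[Vec<usize>]) -> Vec<usize> {
    let n = succs.len();
    let mut visited = vec![false; n];
    let mut postorder = Vec::with_capacity(n);
    // Each stack frame is (block, index of the next successor to explore);
    // a block is pushed onto `postorder` only after all successors finish.
    let mut stack = vec![(0usize, 0usize)];
    visited[0] = true;
    while let Some(top) = stack.last_mut() {
        let bb = top.0;
        if let Some(&next) = succs[bb].get(top.1) {
            top.1 += 1;
            if !visited[next] {
                visited[next] = true;
                stack.push((next, 0));
            }
        } else {
            stack.pop();
            postorder.push(bb);
        }
    }
    // Reversed post-order: in an acyclic CFG, every block appears
    // after all of its predecessors.
    postorder.reverse();
    postorder
}

fn main() {
    // Diamond CFG: 0 -> {1, 2}, 1 -> {3}, 2 -> {3}, 3 -> {}.
    let succs = vec![vec![1, 2], vec![3], vec![3], vec![]];
    // Prints [0, 2, 1, 3]: both predecessors of 3 come before it.
    println!("{:?}", reverse_postorder(&succs));
}
```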

The current ordering for basic blocks may already be quite close to RPO (`BasicBlock(0)` is already the start block), in which case the cost of doing the traversal up front will outweigh the efficiency gains. A perf run is needed to check this.

r? @pnkfelix (I think).
 src/librustc_mir/dataflow/mod.rs | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs
index 80f65a9c8d0..6cdd9de8b95 100644
--- a/src/librustc_mir/dataflow/mod.rs
+++ b/src/librustc_mir/dataflow/mod.rs
@@ -228,9 +228,25 @@ where
     BD: BitDenotation<'tcx>,
 {
     fn walk_cfg(&mut self, in_out: &mut BitSet<BD::Idx>) {
-        let mut dirty_queue: WorkQueue<mir::BasicBlock> =
-            WorkQueue::with_all(self.builder.body.basic_blocks().len());
         let body = self.builder.body;
+
+        // Initialize the dirty queue in reverse post-order. This makes it more likely that the
+        // entry state for each basic block will have the effects of its predecessors applied
+        // before it is processed. In fact, for CFGs without back edges, this guarantees that
+        // dataflow will converge in exactly `N` iterations, where `N` is the number of basic
+        // blocks.
+        let mut dirty_queue: WorkQueue<mir::BasicBlock> =
+            WorkQueue::with_none(body.basic_blocks().len());
+        for (bb, _) in traversal::reverse_postorder(body) {
+            dirty_queue.insert(bb);
+        }
+
+        // Add blocks which are not reachable from START_BLOCK to the work queue. These blocks will
+        // be processed after the ones added above.
+        for bb in body.basic_blocks().indices() {
+            dirty_queue.insert(bb);
+        }
+
         while let Some(bb) = dirty_queue.pop() {
             let (on_entry, trans) = self.builder.flow_state.sets.get_mut(bb.index());
             debug_assert!(in_out.words().len() == on_entry.words().len());
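
For context on the second loop in the patch: `WorkQueue::insert` deduplicates, so blindly inserting every block index is harmless. Blocks already queued by the RPO traversal are skipped, and only blocks unreachable from `START_BLOCK` are appended, after all the reachable ones. Below is a minimal sketch of that queue behavior; the `DedupQueue` type is illustrative, not rustc's actual `WorkQueue` (which tracks membership with a bit set rather than a `Vec<bool>`).

```rust
use std::collections::VecDeque;

// Sketch: a FIFO work queue over elements 0..len that never holds
// the same element twice at once.
struct DedupQueue {
    deque: VecDeque<usize>,
    enqueued: Vec<bool>,
}

impl DedupQueue {
    // An empty queue able to hold elements in 0..len.
    fn with_none(len: usize) -> Self {
        DedupQueue {
            deque: VecDeque::with_capacity(len),
            enqueued: vec![false; len],
        }
    }

    // Enqueues `elem` unless it is already pending; returns whether it was added.
    fn insert(&mut self, elem: usize) -> bool {
        if self.enqueued[elem] {
            return false; // already queued: re-inserting changes nothing
        }
        self.enqueued[elem] = true;
        self.deque.push_back(elem);
        true
    }

    fn pop(&mut self) -> Option<usize> {
        let elem = self.deque.pop_front()?;
        self.enqueued[elem] = false;
        Some(elem)
    }
}
```

Note that `pop` clears the membership flag, so a block whose entry state changes later in the fixpoint loop can be re-queued as dirty.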