about summary refs log tree commit diff
path: root/src/librustc_mir/dataflow
diff options
context:
space:
mode:
author: Dylan MacKenzie <ecstaticmorse@gmail.com> 2019-06-22 13:00:17 -0700
committer: Dylan MacKenzie <ecstaticmorse@gmail.com> 2019-06-27 11:38:52 -0700
commit: 07c5e2b310bf20fdccedc6a927f1417cb9ddc7fa (patch)
tree: adc11640e181a0bc90162d9f356b5b8a66592cf6 /src/librustc_mir/dataflow
parent: a5b17298f2d1c5994c73e540ce7c44830af0d4dc (diff)
download: rust-07c5e2b310bf20fdccedc6a927f1417cb9ddc7fa.tar.gz
download: rust-07c5e2b310bf20fdccedc6a927f1417cb9ddc7fa.zip
Use a more efficient iteration order for forward dataflow
Currently, dataflow begins by visiting each block in order of ID
(`BasicBlock(0)`, `BasicBlock(1)`, etc.). This PR changes that initial
iteration to reverse post-order. This ensures that the effects of all
predecessors will be applied before a basic block is visited if the CFG
has no back-edges, and should result in fewer total iterations even when
back-edges exist. This should not change the results of dataflow
analysis.

The current ordering for basic blocks is pretty close to RPO
already--`BasicBlock(0)` is already the start block, so the gains from
this are pretty small, especially since we need to do an extra traversal
up front.

Note that some basic blocks are unreachable from the `START_BLOCK`
during dataflow. We add these blocks to the work queue as well to
preserve the original behavior.
Diffstat (limited to 'src/librustc_mir/dataflow')
-rw-r--r-- src/librustc_mir/dataflow/mod.rs | 20
1 file changed, 18 insertions, 2 deletions
diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs
index 80f65a9c8d0..6cdd9de8b95 100644
--- a/src/librustc_mir/dataflow/mod.rs
+++ b/src/librustc_mir/dataflow/mod.rs
@@ -228,9 +228,25 @@ where
     BD: BitDenotation<'tcx>,
 {
     fn walk_cfg(&mut self, in_out: &mut BitSet<BD::Idx>) {
-        let mut dirty_queue: WorkQueue<mir::BasicBlock> =
-            WorkQueue::with_all(self.builder.body.basic_blocks().len());
         let body = self.builder.body;
+
+        // Initialize the dirty queue in reverse post-order. This makes it more likely that the
+        // entry state for each basic block will have the effects of its predecessors applied
+        // before it is processed. In fact, for CFGs without back edges, this guarantees that
+        // dataflow will converge in exactly `N` iterations, where `N` is the number of basic
+        // blocks.
+        let mut dirty_queue: WorkQueue<mir::BasicBlock> =
+            WorkQueue::with_none(body.basic_blocks().len());
+        for (bb, _) in traversal::reverse_postorder(body) {
+            dirty_queue.insert(bb);
+        }
+
+        // Add blocks which are not reachable from START_BLOCK to the work queue. These blocks will
+        // be processed after the ones added above.
+        for bb in body.basic_blocks().indices() {
+            dirty_queue.insert(bb);
+        }
+
         while let Some(bb) = dirty_queue.pop() {
             let (on_entry, trans) = self.builder.flow_state.sets.get_mut(bb.index());
             debug_assert!(in_out.words().len() == on_entry.words().len());