author     Dylan MacKenzie <ecstaticmorse@gmail.com>  2019-06-22 13:27:16 -0700
committer  Dylan MacKenzie <ecstaticmorse@gmail.com>  2019-06-27 11:40:35 -0700
commit     e2479e263ef1135dcd1318cce51ee8fafeae62ae (patch)
tree       19828079e1b9e1d8977aa2d79a9a45bd8ffa6c51 /src
parent     a1e954e82d209d9cfe1291be6fc32983c9fc9fab (diff)
Use more efficient iteration order for backward dataflow
This applies the same basic principle as #62062 to the backward dataflow
analysis used to compute liveness information. The approach is functionally
equivalent, except that post-order is used instead of reverse post-order,
since liveness propagates facts against the direction of control flow.
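To make the iteration-order idea concrete, here is a minimal self-contained
sketch of a worklist-based liveness analysis whose queue is seeded in
post-order, so that a block's successors are usually processed before the
block itself. Everything below (`Cfg`, `postorder`, `WorkQueue`, the gen/kill
encoding) is a simplified stand-in, not rustc's actual code:

    use std::collections::{BTreeSet, VecDeque};

    /// Toy CFG: block `b`'s successors are `succs[b]`; block 0 is the entry.
    struct Cfg {
        succs: Vec<Vec<usize>>,
        /// Per-block transfer function, as (gen, kill) sets of variable ids.
        gen_kill: Vec<(BTreeSet<u32>, BTreeSet<u32>)>,
    }

    /// Post-order DFS over forward edges, starting from the entry block.
    fn postorder(cfg: &Cfg) -> Vec<usize> {
        fn visit(cfg: &Cfg, b: usize, seen: &mut [bool], out: &mut Vec<usize>) {
            if std::mem::replace(&mut seen[b], true) { return; }
            for &s in &cfg.succs[b] { visit(cfg, s, seen, out); }
            out.push(b);
        }
        let (mut seen, mut out) = (vec![false; cfg.succs.len()], Vec::new());
        visit(cfg, 0, &mut seen, &mut out);
        out
    }

    /// Deduplicating FIFO queue in the spirit of rustc's `WorkQueue`.
    struct WorkQueue { deque: VecDeque<usize>, queued: Vec<bool> }

    impl WorkQueue {
        fn with_none(len: usize) -> Self {
            WorkQueue { deque: VecDeque::new(), queued: vec![false; len] }
        }
        /// No-op if `b` is already in the queue.
        fn insert(&mut self, b: usize) {
            if !std::mem::replace(&mut self.queued[b], true) {
                self.deque.push_back(b);
            }
        }
        fn pop(&mut self) -> Option<usize> {
            let b = self.deque.pop_front()?;
            self.queued[b] = false;
            Some(b)
        }
    }

    /// Iterate live-in(b) = gen(b) ∪ (live-out(b) − kill(b)) to a fixpoint,
    /// where live-out(b) is the union of live-in over b's successors.
    fn liveness(cfg: &Cfg) -> Vec<BTreeSet<u32>> {
        let n = cfg.succs.len();
        let mut live_in: Vec<BTreeSet<u32>> = vec![BTreeSet::new(); n];

        // Seed in post-order: successors tend to be processed before their
        // predecessors, so most entry sets are up to date on the first pass.
        let mut dirty = WorkQueue::with_none(n);
        for b in postorder(cfg) { dirty.insert(b); }

        while let Some(b) = dirty.pop() {
            let mut bits = BTreeSet::new();
            for &s in &cfg.succs[b] { bits.extend(live_in[s].iter().copied()); }
            let (gen_b, kill_b) = &cfg.gen_kill[b];
            let new_in = &(&bits - kill_b) | gen_b;
            if new_in != live_in[b] {
                live_in[b] = new_in;
                // b's entry set changed, so its predecessors must be re-run.
                // (A real implementation would precompute a predecessor map.)
                for p in 0..n {
                    if cfg.succs[p].contains(&b) { dirty.insert(p); }
                }
            }
        }
        live_in
    }

    fn main() {
        // Diamond CFG 0 -> {1, 2} -> 3; variable 7 is read in 3, written in 0.
        let cfg = Cfg {
            succs: vec![vec![1, 2], vec![3], vec![3], vec![]],
            gen_kill: vec![
                (BTreeSet::new(), BTreeSet::from([7])),
                (BTreeSet::new(), BTreeSet::new()),
                (BTreeSet::new(), BTreeSet::new()),
                (BTreeSet::from([7]), BTreeSet::new()),
            ],
        };
        let live = liveness(&cfg);
        assert!(live[1].contains(&7) && live[2].contains(&7) && !live[0].contains(&7));
    }

For the acyclic example in `main`, the fixpoint is reached without processing
any block twice: every re-insert triggered by a changed entry set hits a block
that the post-order seeding had already queued.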

Some `mir::Body`s contain basic blocks which are not reachable from the
`START_BLOCK`. We need to add them to the work queue as well to preserve
the original semantics.
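What makes the second loop in the patch correct is that `insert` is a no-op
for blocks already queued. A small hedged sketch (the `WorkQueue` below is the
same simplified stand-in as above, and the block numbering is invented) of how
blindly inserting every index appends only the unreachable blocks, after the
post-order ones:

    use std::collections::VecDeque;

    /// Same deduplicating queue shape as in the sketch above.
    struct WorkQueue { deque: VecDeque<usize>, queued: Vec<bool> }

    impl WorkQueue {
        fn with_none(len: usize) -> Self {
            WorkQueue { deque: VecDeque::new(), queued: vec![false; len] }
        }
        fn insert(&mut self, b: usize) {
            if !std::mem::replace(&mut self.queued[b], true) {
                self.deque.push_back(b);
            }
        }
    }

    fn main() {
        // Five blocks; suppose only 0, 1 and 3 are reachable and the
        // post-order traversal yielded [3, 1, 0].
        let mut dirty = WorkQueue::with_none(5);
        for b in [3, 1, 0] { dirty.insert(b); }
        // Inserting every index is a no-op for queued blocks, so only the
        // unreachable blocks 2 and 4 are appended, after the post-order ones.
        for b in 0..5 { dirty.insert(b); }
        assert_eq!(Vec::from(dirty.deque), vec![3, 1, 0, 2, 4]);
    }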
Diffstat (limited to 'src')
-rw-r--r--  src/librustc_mir/util/liveness.rs  21
1 file changed, 18 insertions, 3 deletions
diff --git a/src/librustc_mir/util/liveness.rs b/src/librustc_mir/util/liveness.rs
index cf0fc09472b..8ead571d966 100644
--- a/src/librustc_mir/util/liveness.rs
+++ b/src/librustc_mir/util/liveness.rs
@@ -75,9 +75,24 @@ pub fn liveness_of_locals<'tcx>(
 
     let mut bits = LiveVarSet::new_empty(num_live_vars);
 
-    // queue of things that need to be re-processed, and a set containing
-    // the things currently in the queue
-    let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_all(body.basic_blocks().len());
+    // The dirty queue contains the set of basic blocks whose entry sets have changed since they
+    // were last processed. At the start of the analysis, we initialize the queue in post-order to
+    // make it more likely that the entry set for a given basic block will have the effects of all
+    // its successors in the CFG applied before it is processed.
+    //
+    // FIXME(ecstaticmorse): Reverse post-order on the reverse CFG may generate a better iteration
+    // order when cycles are present, but the overhead of computing the reverse CFG may outweigh
+    // any benefits. Benchmark this and find out.
+    let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_none(body.basic_blocks().len());
+    for (bb, _) in traversal::postorder(body) {
+        dirty_queue.insert(bb);
+    }
+
+    // Add blocks which are not reachable from START_BLOCK to the work queue; `insert` skips
+    // blocks that are already queued, so this appends only the unreachable ones, after those
+    for bb in body.basic_blocks().indices() {
+        dirty_queue.insert(bb);
+    }
 
     let predecessors = body.predecessors();
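The FIXME in the new comment asks whether seeding in reverse post-order on the
reverse CFG would beat plain post-order when loops are present. As a rough
illustration of what that alternative involves (a hypothetical sketch, not a
rustc API; note the extra pass to build predecessor lists, which is exactly
the overhead the FIXME wants benchmarked):

    /// Illustrative only: reverse post-order on the reverse CFG. `succs[b]`
    /// lists block b's successors; blocks with no successors are treated as
    /// the roots of the reverse CFG, so blocks with no path to an exit
    /// (e.g. infinite loops) are skipped in this sketch.
    fn reverse_cfg_rpo(succs: &[Vec<usize>]) -> Vec<usize> {
        let n = succs.len();
        // Building the predecessor lists is the up-front cost the FIXME
        // suggests weighing against any convergence win.
        let mut preds = vec![Vec::new(); n];
        for (b, ss) in succs.iter().enumerate() {
            for &s in ss { preds[s].push(b); }
        }
        // DFS over predecessor edges from each exit, collecting post-order.
        fn visit(preds: &[Vec<usize>], b: usize, seen: &mut [bool], out: &mut Vec<usize>) {
            if std::mem::replace(&mut seen[b], true) { return; }
            for &p in &preds[b] { visit(preds, p, seen, out); }
            out.push(b);
        }
        let (mut seen, mut out) = (vec![false; n], Vec::new());
        for b in 0..n {
            if succs[b].is_empty() { visit(&preds, b, &mut seen, &mut out); }
        }
        out.reverse(); // post-order on the reverse CFG, reversed, is its RPO
        out
    }

    fn main() {
        // Diamond CFG 0 -> {1, 2} -> 3: block 3 must come first, block 0 last.
        let order = reverse_cfg_rpo(&[vec![1, 2], vec![3], vec![3], vec![]]);
        assert_eq!(order[0], 3);
        assert_eq!(order[3], 0);
    }

On an acyclic CFG, both orderings place every block after all of its
successors, so per the FIXME any difference would show up only when cycles
are present.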