about summary refs log tree commit diff
diff options
context:
space:
mode:
author:    bors <bors@rust-lang.org>  2019-05-22 04:42:20 +0000
committer: bors <bors@rust-lang.org>  2019-05-22 04:42:20 +0000
commit:    1cc822c261f5c94a41eb725755fdda7ca6efbda2 (patch)
tree:      7cef8fc95f0d6c858dcebd6741f51db0b1be441e
parent:    dbfe70dfcdb0eab5e1e21f419c718e58cf62029b (diff)
parent:    26c37d7b168781d8be0236fdcdcb502b6c2b8ee2 (diff)
download:  rust-1cc822c261f5c94a41eb725755fdda7ca6efbda2.tar.gz
           rust-1cc822c261f5c94a41eb725755fdda7ca6efbda2.zip
Auto merge of #60840 - tmandry:preserve-scope-in-generator-mir, r=cramertj
Preserve local scopes in generator MIR

Part of #52924, depended upon by the generator layout optimization #60187.

This PR adds `StorageDead` statements in more places in generators, so we can see when non-`Drop` locals have gone out of scope and recover their storage.

The reason this is only done for generators is compiler performance. See https://github.com/rust-lang/rust/pull/60187#issuecomment-485637811 for what happens when we do this for all functions.

For `Drop` locals, we modify the `MaybeStorageLive` analysis to use `drop` to indicate that storage is no longer live for the local. Once `drop` returns or unwinds to our function, we implicitly assume that the local is `StorageDead`.

Instead of using `drop`, it is possible to emit more `StorageDead` statements in the MIR for `Drop` locals so we can handle all locals the same. I am fine with doing it that way, but this was the simplest approach for my purposes. It is also likely to be more performant.

r? @Zoxc (feel free to reassign)
cc @cramertj @eddyb @RalfJung @rust-lang/wg-async-await
-rw-r--r--  src/librustc_mir/build/mod.rs                          11
-rw-r--r--  src/librustc_mir/build/scope.rs                       114
-rw-r--r--  src/librustc_mir/dataflow/impls/storage_liveness.rs    11
-rw-r--r--  src/test/mir-opt/generator-drop-cleanup.rs              1
4 files changed, 93 insertions(+), 44 deletions(-)
diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs
index 16ab233bd2e..e46c5153555 100644
--- a/src/librustc_mir/build/mod.rs
+++ b/src/librustc_mir/build/mod.rs
@@ -343,6 +343,7 @@ struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
 
     fn_span: Span,
     arg_count: usize,
+    is_generator: bool,
 
     /// The current set of scopes, updated as we traverse;
     /// see the `scope` module for more details.
@@ -689,7 +690,8 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>,
         return_ty,
         return_ty_span,
         upvar_debuginfo,
-        upvar_mutbls);
+        upvar_mutbls,
+        body.is_generator);
 
     let call_site_scope = region::Scope {
         id: body.value.hir_id.local_id,
@@ -759,6 +761,7 @@ fn construct_const<'a, 'gcx, 'tcx>(
         const_ty_span,
         vec![],
         vec![],
+        false,
     );
 
     let mut block = START_BLOCK;
@@ -788,7 +791,7 @@ fn construct_error<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>,
     let owner_id = hir.tcx().hir().body_owner(body_id);
     let span = hir.tcx().hir().span(owner_id);
     let ty = hir.tcx().types.err;
-    let mut builder = Builder::new(hir, span, 0, Safety::Safe, ty, span, vec![], vec![]);
+    let mut builder = Builder::new(hir, span, 0, Safety::Safe, ty, span, vec![], vec![], false);
     let source_info = builder.source_info(span);
     builder.cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
     builder.finish(None)
@@ -802,7 +805,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
            return_ty: Ty<'tcx>,
            return_span: Span,
            __upvar_debuginfo_codegen_only_do_not_use: Vec<UpvarDebuginfo>,
-           upvar_mutbls: Vec<Mutability>)
+           upvar_mutbls: Vec<Mutability>,
+           is_generator: bool)
            -> Builder<'a, 'gcx, 'tcx> {
         let lint_level = LintLevel::Explicit(hir.root_lint_level);
         let mut builder = Builder {
@@ -810,6 +814,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             cfg: CFG { basic_blocks: IndexVec::new() },
             fn_span: span,
             arg_count,
+            is_generator,
             scopes: vec![],
             block_context: BlockContext::new(),
             source_scopes: IndexVec::new(),
diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index 4aa463b37ab..3b11e335fb8 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -83,9 +83,10 @@ use rustc::middle::region;
 use rustc::ty::Ty;
 use rustc::hir;
 use rustc::mir::*;
-use syntax_pos::{Span};
+use syntax_pos::{Span, DUMMY_SP};
 use rustc_data_structures::fx::FxHashMap;
 use std::collections::hash_map::Entry;
+use std::mem;
 
 #[derive(Debug)]
 pub struct Scope<'tcx> {
@@ -107,6 +108,8 @@ pub struct Scope<'tcx> {
     ///  * polluting the cleanup MIR with StorageDead creates
     ///    landing pads even though there's no actual destructors
     ///  * freeing up stack space has no effect during unwinding
+    /// Note that for generators we do emit StorageDeads, for the
+    /// use of optimizations in the MIR generator transform.
     needs_cleanup: bool,
 
     /// set of places to drop when exiting this scope. This starts
@@ -466,10 +469,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     /// This path terminates in GeneratorDrop. Returns the start of the path.
     /// None indicates there’s no cleanup to do at this point.
     pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
-        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
-            return None;
-        }
-
         // Fill in the cache for unwinds
         self.diverge_cleanup_gen(true);
 
@@ -480,7 +479,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let result = block;
 
         while let Some(scope) = scopes.next() {
-            if !scope.needs_cleanup {
+            if !scope.needs_cleanup && !self.is_generator {
                 continue;
             }
 
@@ -802,7 +801,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         for scope in self.scopes[first_uncached..].iter_mut() {
             target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
-                                         scope, target, generator_drop);
+                                         scope, target, generator_drop, self.is_generator);
         }
 
         target
@@ -900,12 +899,6 @@ fn build_scope_drops<'tcx>(
     // drops panic (panicking while unwinding will abort, so there's no need for
     // another set of arrows). The drops for the unwind path should have already
     // been generated by `diverge_cleanup_gen`.
-    //
-    // The code in this function reads from right to left.
-    // Storage dead drops have to be done left to right (since we can only push
-    // to the end of a Vec). So, we find the next drop and then call
-    // push_storage_deads which will iterate backwards through them so that
-    // they are added in the correct order.
 
     let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
         if let DropKind::Value { cached_block } = drop_data.kind {
@@ -936,11 +929,6 @@ fn build_scope_drops<'tcx>(
                 block = next;
             }
             DropKind::Storage => {
-                // We do not need to emit StorageDead for generator drops
-                if generator_drop {
-                    continue
-                }
-
                 // Drop the storage for both value and storage drops.
                 // Only temps and vars need their storage dead.
                 match drop_data.location {
@@ -962,7 +950,8 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
                              span: Span,
                              scope: &mut Scope<'tcx>,
                              mut target: BasicBlock,
-                             generator_drop: bool)
+                             generator_drop: bool,
+                             is_generator: bool)
                              -> BasicBlock
 {
     // Build up the drops in **reverse** order. The end result will
@@ -981,41 +970,90 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
         scope: source_scope
     };
 
-    // Next, build up the drops. Here we iterate the vector in
+    // We keep track of StorageDead statements to prepend to our current block
+    // and store them here, in reverse order.
+    let mut storage_deads = vec![];
+
+    let mut target_built_by_us = false;
+
+    // Build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
     for (j, drop_data) in scope.drops.iter_mut().enumerate() {
         debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
         // Only full value drops are emitted in the diverging path,
-        // not StorageDead.
+        // not StorageDead, except in the case of generators.
         //
         // Note: This may not actually be what we desire (are we
         // "freeing" stack storage as we unwind, or merely observing a
         // frozen stack)? In particular, the intent may have been to
         // match the behavior of clang, but on inspection eddyb says
         // this is not what clang does.
-        let cached_block = match drop_data.kind {
-            DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
-            DropKind::Storage => continue
-        };
-        target = if let Some(cached_block) = *cached_block {
-            cached_block
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            cfg.terminate(block, source_info(drop_data.span),
-                          TerminatorKind::Drop {
-                              location: drop_data.location.clone(),
-                              target,
-                              unwind: None
-                          });
-            *cached_block = Some(block);
-            block
+        match drop_data.kind {
+            DropKind::Storage if is_generator => {
+                // Only temps and vars need their storage dead.
+                match drop_data.location {
+                    Place::Base(PlaceBase::Local(index)) => {
+                        storage_deads.push(Statement {
+                            source_info: source_info(drop_data.span),
+                            kind: StatementKind::StorageDead(index)
+                        });
+                    }
+                    _ => unreachable!(),
+                };
+            }
+            DropKind::Storage => {}
+            DropKind::Value { ref mut cached_block } => {
+                let cached_block = cached_block.ref_mut(generator_drop);
+                target = if let Some(cached_block) = *cached_block {
+                    storage_deads.clear();
+                    target_built_by_us = false;
+                    cached_block
+                } else {
+                    push_storage_deads(
+                        cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
+                    let block = cfg.start_new_cleanup_block();
+                    cfg.terminate(block, source_info(drop_data.span),
+                                  TerminatorKind::Drop {
+                                      location: drop_data.location.clone(),
+                                      target,
+                                      unwind: None
+                                  });
+                    *cached_block = Some(block);
+                    target_built_by_us = true;
+                    block
+                };
+            }
         };
     }
-
+    push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope);
     *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
+    assert!(storage_deads.is_empty());
     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
 
     target
 }
+
+fn push_storage_deads(cfg: &mut CFG<'tcx>,
+                      target: &mut BasicBlock,
+                      storage_deads: &mut Vec<Statement<'tcx>>,
+                      target_built_by_us: bool,
+                      source_scope: SourceScope) {
+    if storage_deads.is_empty() { return; }
+    if !target_built_by_us {
+        // We cannot add statements to an existing block, so we create a new
+        // block for our StorageDead statements.
+        let block = cfg.start_new_cleanup_block();
+        let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope };
+        cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target });
+        *target = block;
+    }
+    let statements = &mut cfg.block_data_mut(*target).statements;
+    storage_deads.reverse();
+    debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}",
+           *target, storage_deads, statements);
+    storage_deads.append(statements);
+    mem::swap(statements, storage_deads);
+    assert!(storage_deads.is_empty());
+}
diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs
index 6b8eb6f17f6..3bf11c57379 100644
--- a/src/librustc_mir/dataflow/impls/storage_liveness.rs
+++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs
@@ -43,9 +43,14 @@ impl<'a, 'tcx> BitDenotation<'tcx> for MaybeStorageLive<'a, 'tcx> {
     }
 
     fn terminator_effect(&self,
-                         _sets: &mut BlockSets<'_, Local>,
-                         _loc: Location) {
-        // Terminators have no effect
+                         sets: &mut BlockSets<'_, Local>,
+                         loc: Location) {
+        match &self.mir[loc.block].terminator().kind {
+            TerminatorKind::Drop { location, .. } => if let Some(l) = location.local() {
+                sets.kill(l);
+            }
+            _ => (),
+        }
     }
 
     fn propagate_call_return(
diff --git a/src/test/mir-opt/generator-drop-cleanup.rs b/src/test/mir-opt/generator-drop-cleanup.rs
index 9cc4272fafa..30f6d0deb91 100644
--- a/src/test/mir-opt/generator-drop-cleanup.rs
+++ b/src/test/mir-opt/generator-drop-cleanup.rs
@@ -17,6 +17,7 @@ fn main() {
 //     switchInt(move _5) -> [0u32: bb4, 3u32: bb7, otherwise: bb8];
 // }
 // bb1: {
+//     StorageDead(_3);
 //     goto -> bb5;
 // }
 // bb2: {