author    Niko Matsakis <nikomat@amazon.com>  2024-12-19 13:53:35 +0000
committer Josh Stone <jistone@redhat.com>     2025-01-02 14:28:32 -0800
commit    8018b401dacf25aa92b6bd196b2cf9c4a20a4c6f (patch)
tree      a6130e0404f24134a399166c26298f614e91d4b9
parent    432bd9de3259e5cd73b0e798a05ddffe8eea7759 (diff)
explain how `build_scope_drops` works
(cherry picked from commit b5350610608b724a3f152ccd889a78f82860ed69)
-rw-r--r--  compiler/rustc_mir_build/src/build/scope.rs | 40
1 file changed, 38 insertions, 2 deletions
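
For readers unfamiliar with MIR building, the drop behavior documented by the comments below is the one visible in ordinary Rust code: leaving a scope runs the scope's drops in reverse declaration order, and a panic that unwinds out of the scope runs drops for the locals that are live at that point (roughly the subset that `unwind_to` tracks). A minimal stand-alone illustration, not part of this commit and not compiler code (`Noisy`, `normal_exit`, and `panicking_exit` are made-up names):

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn normal_exit() {
    let _a = Noisy("a");
    let _b = Noisy("b");
    // Leaving the scope drops `_b` first, then `_a`: the drop chain that
    // `build_scope_drops` emits runs the scheduled drops in reverse order.
}

fn panicking_exit() {
    let _a = Noisy("a");
    let _b = Noisy("b");
    // Unwinding out of the scope also drops `_b` and then `_a`; these are the
    // drops reached through the unwind path rather than the normal exit path.
    panic!("boom");
}

fn main() {
    normal_exit();
    let _ = std::panic::catch_unwind(panicking_exit);
}

This should print "dropping b" and "dropping a" twice, once for the normal exit and once while unwinding past the `panic!`.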
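
The comments added to the body of `build_scope_drops` below describe how `block` is threaded through the loop. Here is a toy sketch of that shape, with hypothetical `ToyCfg` and `Terminator` types standing in for rustc's `CFG`, `BasicBlock`, and drop terminators (illustration only, not the compiler's data structures): each drop is emitted in execution order, terminates the current block with a branch to a fresh successor, and `block` advances to that successor so the next drop lands in the block the previous one branches to; the final value of `block` is where execution continues once the drops are done.

// Hypothetical, simplified stand-ins for the compiler's CFG types.
#[derive(Debug)]
enum Terminator {
    Drop { local: &'static str, target: usize }, // branch taken once the drop succeeds
    Return,
}

#[derive(Debug, Default)]
struct ToyCfg {
    blocks: Vec<Option<Terminator>>, // one terminator slot per basic block
}

impl ToyCfg {
    fn start_new_block(&mut self) -> usize {
        self.blocks.push(None);
        self.blocks.len() - 1
    }
    fn terminate(&mut self, block: usize, terminator: Terminator) {
        self.blocks[block] = Some(terminator);
    }
}

// Mirrors the shape of the loop in `build_scope_drops`: walk the scheduled drops in
// reverse, emit each drop in the current `block`, then advance `block` to a fresh
// successor so the next drop lands in the block the previous drop branches to.
fn build_drop_chain(cfg: &mut ToyCfg, mut block: usize, scheduled: &[&'static str]) -> usize {
    for &local in scheduled.iter().rev() {
        let next = cfg.start_new_block();
        cfg.terminate(block, Terminator::Drop { local, target: next });
        block = next;
    }
    block // where execution continues once all drops are done
}

fn main() {
    let mut cfg = ToyCfg::default();
    let entry = cfg.start_new_block();
    // Locals scheduled in declaration order `a`, `b`, `c`; the chain drops `c`, then `b`, then `a`.
    let after = build_drop_chain(&mut cfg, entry, &["a", "b", "c"]);
    cfg.terminate(after, Terminator::Return);
    for (i, t) in cfg.blocks.iter().enumerate() {
        println!("bb{i}: {t:?}");
    }
}

The printed chain shows bb0 dropping `c` and branching to bb1, bb1 dropping `b`, bb2 dropping `a`, and bb3 returning, which is the order the comments below describe: the last scheduled drop first, `drops[0]` last, then the continuation block.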
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index 4530df00c72..7e7e38832e8 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -1396,12 +1396,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 }
 
 /// Builds drops for `pop_scope` and `leave_top_scope`.
+///
+/// # Parameters
+///
+/// * `unwind_drops`, the drop tree data structure storing what needs to be cleaned up if unwinding occurs
+/// * `scope`, describes the drops that will occur on exiting the scope in regular execution
+/// * `block`, the block into which the drop code is emitted; the caller receives back the block where execution continues once the drops are complete (assuming no unwind occurs)
+/// * `unwind_to`, describes the drops that would occur at this point in the code if a
+///   panic occurred (a subset of the drops in `scope`, since we sometimes elide `StorageDead` and other
+///   instructions on unwinding)
+/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
+/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
     scope: &Scope,
-    mut block: BasicBlock,
-    mut unwind_to: DropIdx,
+    block: BasicBlock,
+    unwind_to: DropIdx,
     storage_dead_on_unwind: bool,
     arg_count: usize,
 ) -> BlockAnd<()> {
@@ -1425,6 +1436,18 @@ fn build_scope_drops<'tcx>(
     // statement. For other functions we don't worry about StorageDead. The
     // drops for the unwind path should have already been generated by
     // `diverge_cleanup_gen`.
+
+    // `unwind_to` indicates what needs to be dropped should unwinding occur.
+    // This is a subset of what needs to be dropped when exiting the scope.
+    // As we emit the scope's drops, we also move `unwind_to` backwards to match,
+    // so that we can use it should a destructor panic.
+    let mut unwind_to = unwind_to;
+
+    // The block we are currently emitting drops into. We start by emitting the first drop in execution
+    // order (`drops[n]` in the diagram above) into the block we were given, then emit the drops that
+    // follow it (`drops[n-1]`, ..., `drops[0]`). Each drop branches to a fresh block, and `block` advances
+    // to that fresh block, so that each drop is emitted in the block its predecessor branches to.
+    let mut block = block;
 
     for drop_data in scope.drops.iter().rev() {
         let source_info = drop_data.source_info;
@@ -1435,6 +1458,9 @@ fn build_scope_drops<'tcx>(
                 // `unwind_to` should drop the value that we're about to
                 // schedule. If dropping this value panics, then we continue
                 // with the *next* value on the unwind path.
+                //
+                // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
+                // because `drops[n]` should unwind to `drops[n-1]`.
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;
@@ -1466,6 +1492,11 @@ fn build_scope_drops<'tcx>(
                     continue;
                 }
 
+                // As in the `DropKind::Storage` case below:
+                // normally lint-related drops are not emitted on the unwind
+                // path, so we can just leave `unwind_to` unmodified, but in
+                // some cases we emit these drops on the unwind path too, and
+                // then we need to adjust `unwind_to` accordingly.
                 if storage_dead_on_unwind {
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
@@ -1481,6 +1512,11 @@ fn build_scope_drops<'tcx>(
                 });
             }
             DropKind::Storage => {
+                // Ordinarily, `StorageDead` is not emitted on the unwind path, so we don't
+                // need to adjust `unwind_to` here. However, in some specific cases we *do*
+                // emit `StorageDead` on the unwind path; in that case, now that the
+                // `StorageDead` has been emitted, we advance the `unwind_to` pointer past it
+                // so that the unwind paths of any drops we emit later do not include it again.
                 if storage_dead_on_unwind {
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);