diff options
| author | bors <bors@rust-lang.org> | 2013-05-03 22:06:37 -0700 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2013-05-03 22:06:37 -0700 |
| commit | c3ab74b8b933a1bc2c5f207ae5c023cf3e7aeb58 (patch) | |
| tree | 806e916682d9782c60fc639f3a74846511836c69 | |
| parent | 821979f9282decc1ba92391249140f49a7102319 (diff) | |
| parent | c15fa3a02aa3e7e5111f0410abf7321387a7a97f (diff) | |
| download | rust-c3ab74b8b933a1bc2c5f207ae5c023cf3e7aeb58.tar.gz rust-c3ab74b8b933a1bc2c5f207ae5c023cf3e7aeb58.zip | |
auto merge of #6227 : graydon/rust/issue-6112-box-annihilator, r=graydon
Be more careful about when the `next` field of a box is read (before or after the callback) during task annihilation, since it is easy to tread on freed memory.
| -rw-r--r-- | src/libcore/cleanup.rs | 31 |
1 file changed, 25 insertions, 6 deletions
diff --git a/src/libcore/cleanup.rs b/src/libcore/cleanup.rs index a07c6b4811b..aca49c94644 100644 --- a/src/libcore/cleanup.rs +++ b/src/libcore/cleanup.rs @@ -126,14 +126,17 @@ struct AnnihilateStats { n_bytes_freed: uint } -unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) { +unsafe fn each_live_alloc(read_next_before: bool, + f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) { + //! Walks the internal list of allocations + use managed; let task: *Task = transmute(rustrt::rust_get_task()); let box = (*task).boxed_region.live_allocs; let mut box: *mut BoxRepr = transmute(copy box); while box != mut_null() { - let next = transmute(copy (*box).header.next); + let next_before = transmute(copy (*box).header.next); let uniq = (*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE; @@ -141,7 +144,11 @@ unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) { break } - box = next + if read_next_before { + box = next_before; + } else { + box = transmute(copy (*box).header.next); + } } } @@ -173,7 +180,10 @@ pub unsafe fn annihilate() { }; // Pass 1: Make all boxes immortal. - for each_live_alloc |box, uniq| { + // + // In this pass, nothing gets freed, so it does not matter whether + // we read the next field before or after the callback. + for each_live_alloc(true) |box, uniq| { stats.n_total_boxes += 1; if uniq { stats.n_unique_boxes += 1; @@ -183,7 +193,11 @@ pub unsafe fn annihilate() { } // Pass 2: Drop all boxes. - for each_live_alloc |box, uniq| { + // + // In this pass, unique-managed boxes may get freed, but not + // managed boxes, so we must read the `next` field *after* the + // callback, as the original value may have been freed. + for each_live_alloc(false) |box, uniq| { if !uniq { let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc); let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0)); @@ -192,7 +206,12 @@ pub unsafe fn annihilate() { } // Pass 3: Free all boxes. 
- for each_live_alloc |box, uniq| { + // + // In this pass, managed boxes may get freed (but not + // unique-managed boxes, though I think that none of those are + // left), so we must read the `next` field before, since it will + // not be valid after. + for each_live_alloc(true) |box, uniq| { if !uniq { stats.n_bytes_freed += (*((*box).header.type_desc)).size |
