| field | value | date |
|---|---|---|
| author | Daniel Micay <danielmicay@gmail.com> | 2014-01-15 07:47:37 -0500 |
| committer | Daniel Micay <danielmicay@gmail.com> | 2014-01-15 08:22:59 -0500 |
| commit | 29840addd46b6ae01b61ee93247164d5818f09e0 (patch) | |
| tree | 2dbf462276fb745a0790ba733f13bdef806b0585 | |
| parent | 197fe67e11af002033bb2dc7b5a48ef433a7b103 (diff) | |
| download | rust-29840addd46b6ae01b61ee93247164d5818f09e0.tar.gz rust-29840addd46b6ae01b61ee93247164d5818f09e0.zip | |
remove the concept of managed-unique from libstd
Closes #11545
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libstd/cleanup.rs | 44 |
| -rw-r--r-- | src/libstd/managed.rs | 3 |

2 files changed, 16 insertions, 31 deletions
```diff
diff --git a/src/libstd/cleanup.rs b/src/libstd/cleanup.rs
index fd9040423a3..ab374ebccfe 100644
--- a/src/libstd/cleanup.rs
+++ b/src/libstd/cleanup.rs
@@ -17,6 +17,8 @@ use unstable::raw;
 
 type DropGlue<'a> = 'a |**TyDesc, *c_void|;
 
+static RC_IMMORTAL : uint = 0x77777777;
+
 /*
  * Box annihilation
  *
@@ -25,24 +27,21 @@ type DropGlue<'a> = 'a |**TyDesc, *c_void|;
 
 struct AnnihilateStats {
     n_total_boxes: uint,
-    n_unique_boxes: uint,
     n_bytes_freed: uint
 }
 
 unsafe fn each_live_alloc(read_next_before: bool,
-                          f: |alloc: *mut raw::Box<()>, uniq: bool| -> bool)
+                          f: |alloc: *mut raw::Box<()>| -> bool)
                           -> bool {
     //! Walks the internal list of allocations
 
-    use managed;
     use rt::local_heap;
 
     let mut alloc = local_heap::live_allocs();
     while alloc != ptr::mut_null() {
         let next_before = (*alloc).next;
-        let uniq = (*alloc).ref_count == managed::RC_MANAGED_UNIQUE;
 
-        if !f(alloc, uniq) {
+        if !f(alloc) {
             return false;
         }
 
@@ -70,11 +69,9 @@ fn debug_mem() -> bool {
 pub unsafe fn annihilate() {
     use rt::local_heap::local_free;
     use mem;
-    use managed;
 
     let mut stats = AnnihilateStats {
         n_total_boxes: 0,
-        n_unique_boxes: 0,
         n_bytes_freed: 0
     };
 
@@ -82,13 +79,9 @@ pub unsafe fn annihilate() {
     //
     // In this pass, nothing gets freed, so it does not matter whether
     // we read the next field before or after the callback.
-    each_live_alloc(true, |alloc, uniq| {
+    each_live_alloc(true, |alloc| {
         stats.n_total_boxes += 1;
-        if uniq {
-            stats.n_unique_boxes += 1;
-        } else {
-            (*alloc).ref_count = managed::RC_IMMORTAL;
-        }
+        (*alloc).ref_count = RC_IMMORTAL;
         true
     });
 
@@ -97,12 +90,10 @@ pub unsafe fn annihilate() {
     // In this pass, unique-managed boxes may get freed, but not
     // managed boxes, so we must read the `next` field *after* the
    // callback, as the original value may have been freed.
-    each_live_alloc(false, |alloc, uniq| {
-        if !uniq {
-            let tydesc = (*alloc).type_desc;
-            let data = &(*alloc).data as *();
-            ((*tydesc).drop_glue)(data as *i8);
-        }
+    each_live_alloc(false, |alloc| {
+        let tydesc = (*alloc).type_desc;
+        let data = &(*alloc).data as *();
+        ((*tydesc).drop_glue)(data as *i8);
         true
     });
 
@@ -112,13 +103,11 @@ pub unsafe fn annihilate() {
     // unique-managed boxes, though I think that none of those are
     // left), so we must read the `next` field before, since it will
     // not be valid after.
-    each_live_alloc(true, |alloc, uniq| {
-        if !uniq {
-            stats.n_bytes_freed +=
-                (*((*alloc).type_desc)).size
-                + mem::size_of::<raw::Box<()>>();
-            local_free(alloc as *i8);
-        }
+    each_live_alloc(true, |alloc| {
+        stats.n_bytes_freed +=
+            (*((*alloc).type_desc)).size
+            + mem::size_of::<raw::Box<()>>();
+        local_free(alloc as *i8);
         true
     });
 
@@ -126,8 +115,7 @@ pub unsafe fn annihilate() {
         // We do logging here w/o allocation.
         debug!("annihilator stats:\n \
                 total boxes: {}\n \
-                unique boxes: {}\n \
                 bytes freed: {}",
-               stats.n_total_boxes, stats.n_unique_boxes, stats.n_bytes_freed);
+               stats.n_total_boxes, stats.n_bytes_freed);
     }
 }
diff --git a/src/libstd/managed.rs b/src/libstd/managed.rs
index c5705665896..914cc25250c 100644
--- a/src/libstd/managed.rs
+++ b/src/libstd/managed.rs
@@ -14,9 +14,6 @@ use ptr::to_unsafe_ptr;
 
 #[cfg(not(test))] use cmp::*;
 
-pub static RC_MANAGED_UNIQUE : uint = (-2) as uint;
-pub static RC_IMMORTAL : uint = 0x77777777;
-
 /// Returns the refcount of a shared box (as just before calling this)
 #[inline]
 pub fn refcount<T>(t: @T) -> uint {
```
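To make the shape of the change easier to see, here is a minimal sketch in modern Rust of the three-pass flow that `annihilate()` is left with once the managed-unique special case is gone. It is an illustration only, not the libstd code: `LocalBox`, `LocalHeap`, and `noop_drop` are hypothetical stand-ins for `raw::Box<()>`, the task-local heap's allocation list, and a type descriptor's drop glue.

```rust
const RC_IMMORTAL: usize = 0x7777_7777;

/// Hypothetical stand-in for `raw::Box<()>`: a refcount plus a type-erased
/// destructor ("drop glue") and a pointer to the payload.
struct LocalBox {
    ref_count: usize,
    drop_glue: fn(*mut u8),
    data: *mut u8,
}

/// Hypothetical stand-in for the task-local heap's allocation list.
struct LocalHeap {
    live_allocs: Vec<LocalBox>,
    bytes_freed: usize,
}

impl LocalHeap {
    /// The three passes the patched annihilator performs, with no special
    /// case for "unique" boxes any more.
    fn annihilate(&mut self) {
        // Pass 1: pin every box by marking it immortal, so destructors run
        // in pass 2 cannot free a box the walker has not reached yet.
        for alloc in &mut self.live_allocs {
            alloc.ref_count = RC_IMMORTAL;
        }

        // Pass 2: run every box's drop glue; no memory is released yet.
        for alloc in &self.live_allocs {
            (alloc.drop_glue)(alloc.data);
        }

        // Pass 3: release the memory itself and account for the bytes freed
        // (the real code calls local_free() per box instead of clear()).
        self.bytes_freed += self.live_allocs.len() * std::mem::size_of::<LocalBox>();
        self.live_allocs.clear();
    }
}

fn main() {
    fn noop_drop(_data: *mut u8) {}

    let mut heap = LocalHeap {
        live_allocs: vec![
            LocalBox { ref_count: 1, drop_glue: noop_drop, data: std::ptr::null_mut() },
            LocalBox { ref_count: 3, drop_glue: noop_drop, data: std::ptr::null_mut() },
        ],
        bytes_freed: 0,
    };
    let total_boxes = heap.live_allocs.len();
    heap.annihilate();
    println!("annihilator stats: total boxes: {}, bytes freed: {}",
             total_boxes, heap.bytes_freed);
}
```

The reason pass 1 exists is the same as in the patch: with every refcount forced to `RC_IMMORTAL`, destructors running in pass 2 cannot cause a box to be freed out from under the walker, so the `next` pointers read in pass 1 remain safe to follow.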
