about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Daniel Micay <danielmicay@gmail.com> 2014-05-12 02:51:00 -0400
committer Daniel Micay <danielmicay@gmail.com> 2014-05-12 02:52:32 -0400
commit   8b912bc56be35149a405752f134b8b659366a35c (patch)
tree     2bed14bec41fcbba6737ef3199623b81e679f0ac
parent   72fc4a5eb72b8ba96dba66400c7eecac93b0b252 (diff)
download rust-8b912bc56be35149a405752f134b8b659366a35c.tar.gz
         rust-8b912bc56be35149a405752f134b8b659366a35c.zip
register snapshots
-rw-r--r--  src/libarena/lib.rs              | 22
-rw-r--r--  src/libcore/should_not_exist.rs  | 16
-rw-r--r--  src/libstd/rt/heap.rs            | 38
-rw-r--r--  src/libstd/slice.rs              | 48
-rw-r--r--  src/snapshots.txt                |  8
5 files changed, 10 insertions, 122 deletions
diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index af9ffd5a4b6..61b87c77163 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -341,32 +341,10 @@ struct TypedArenaChunk<T> {
 }
 
 impl<T> TypedArenaChunk<T> {
-    #[cfg(stage0)]
     #[inline]
     fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
            -> Box<TypedArenaChunk<T>> {
         let mut size = mem::size_of::<TypedArenaChunk<T>>();
-        size = round_up(size, min_align_of::<T>());
-        let elem_size = mem::size_of::<T>();
-        let elems_size = elem_size.checked_mul(&capacity).unwrap();
-        size = size.checked_add(&elems_size).unwrap();
-
-        let mut chunk = unsafe {
-            let chunk = exchange_malloc(size);
-            let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
-            mem::move_val_init(&mut chunk.next, next);
-            chunk
-        };
-
-        chunk.capacity = capacity;
-        chunk
-    }
-
-    #[inline]
-    #[cfg(not(stage0))]
-    fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-           -> Box<TypedArenaChunk<T>> {
-        let mut size = mem::size_of::<TypedArenaChunk<T>>();
         size = round_up(size, mem::min_align_of::<T>());
         let elem_size = mem::size_of::<T>();
         let elems_size = elem_size.checked_mul(&capacity).unwrap();
diff --git a/src/libcore/should_not_exist.rs b/src/libcore/should_not_exist.rs
index d8df20218c5..7ecf53a6348 100644
--- a/src/libcore/should_not_exist.rs
+++ b/src/libcore/should_not_exist.rs
@@ -28,26 +28,10 @@ use str::StrSlice;
 
 #[allow(ctypes)]
 extern {
-    #[cfg(stage0)]
-    fn rust_malloc(size: uint) -> *u8;
-    #[cfg(not(stage0))]
     fn rust_malloc(size: uint, align: uint) -> *u8;
     fn rust_free(ptr: *u8, size: uint, align: uint);
 }
 
-#[cfg(stage0)]
-unsafe fn alloc(cap: uint) -> *mut Vec<()> {
-    let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
-    let ret = rust_malloc(cap) as *mut Vec<()>;
-    if ret.is_null() {
-        intrinsics::abort();
-    }
-    (*ret).fill = 0;
-    (*ret).alloc = cap;
-    ret
-}
-
-#[cfg(not(stage0))]
 unsafe fn alloc(cap: uint) -> *mut Vec<()> {
     let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
     // this should use the real alignment, but the new representation will take care of that
diff --git a/src/libstd/rt/heap.rs b/src/libstd/rt/heap.rs
index b729fb38035..e616b9b8beb 100644
--- a/src/libstd/rt/heap.rs
+++ b/src/libstd/rt/heap.rs
@@ -114,15 +114,7 @@ pub fn stats_print() {
 }
 
 /// The allocator for unique pointers.
-#[cfg(stage0)]
-#[lang="exchange_malloc"]
-#[inline(always)]
-pub unsafe fn exchange_malloc_(size: uint) -> *mut u8 {
-    exchange_malloc(size)
-}
-
-/// The allocator for unique pointers.
-#[cfg(not(test), not(stage0))]
+#[cfg(not(test))]
 #[lang="exchange_malloc"]
 #[inline(always)]
 pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
@@ -130,23 +122,6 @@ pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
 }
 
 /// The allocator for unique pointers.
-#[cfg(stage0)]
-#[inline]
-pub unsafe fn exchange_malloc(size: uint) -> *mut u8 {
-    // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
-    // allocations can point to this `static`. It would be incorrect to use a null
-    // pointer, due to enums assuming types like unique pointers are never null.
-    static EMPTY: () = ();
-
-    if size == 0 {
-        &EMPTY as *() as *mut u8
-    } else {
-        allocate(size, 8)
-    }
-}
-
-/// The allocator for unique pointers.
-#[cfg(not(stage0))]
 #[inline]
 pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
     // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
@@ -187,16 +162,7 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uin
 #[no_mangle]
 #[doc(hidden)]
 #[deprecated]
-#[cfg(stage0, not(test))]
-pub unsafe extern "C" fn rust_malloc(size: uint) -> *mut u8 {
-    exchange_malloc(size)
-}
-
-// hack for libcore
-#[no_mangle]
-#[doc(hidden)]
-#[deprecated]
-#[cfg(not(stage0), not(test))]
+#[cfg(not(test))]
 pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
     exchange_malloc(size, align)
 }
diff --git a/src/libstd/slice.rs b/src/libstd/slice.rs
index 3c0d0efa766..66471ee3923 100644
--- a/src/libstd/slice.rs
+++ b/src/libstd/slice.rs
@@ -291,54 +291,6 @@ pub trait CloneableVector<T> {
 impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
     /// Returns a copy of `v`.
     #[inline]
-    #[cfg(stage0)]
-    fn to_owned(&self) -> ~[T] {
-        use RawVec = core::raw::Vec;
-        use num::{CheckedAdd, CheckedMul};
-        use option::Expect;
-
-        let len = self.len();
-        let data_size = len.checked_mul(&mem::size_of::<T>());
-        let data_size = data_size.expect("overflow in to_owned()");
-        let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
-        let size = size.expect("overflow in to_owned()");
-
-        unsafe {
-            // this should pass the real required alignment
-            let ret = exchange_malloc(size) as *mut RawVec<()>;
-
-            (*ret).fill = len * mem::nonzero_size_of::<T>();
-            (*ret).alloc = len * mem::nonzero_size_of::<T>();
-
-            // Be careful with the following loop. We want it to be optimized
-            // to a memcpy (or something similarly fast) when T is Copy. LLVM
-            // is easily confused, so any extra operations during the loop can
-            // prevent this optimization.
-            let mut i = 0;
-            let p = &mut (*ret).data as *mut _ as *mut T;
-            try_finally(
-                &mut i, (),
-                |i, ()| while *i < len {
-                    mem::move_val_init(
-                        &mut(*p.offset(*i as int)),
-                        self.unsafe_ref(*i).clone());
-                    *i += 1;
-                },
-                |i| if *i < len {
-                    // we must be failing, clean up after ourselves
-                    for j in range(0, *i as int) {
-                        ptr::read(&*p.offset(j));
-                    }
-                    // FIXME: #13994 (should pass align and size here)
-                    deallocate(ret as *mut u8, 0, 8);
-                });
-            mem::transmute(ret)
-        }
-    }
-
-    /// Returns a copy of `v`.
-    #[inline]
-    #[cfg(not(stage0))]
     fn to_owned(&self) -> ~[T] {
         use RawVec = core::raw::Vec;
         use num::{CheckedAdd, CheckedMul};
diff --git a/src/snapshots.txt b/src/snapshots.txt
index 7abfee775df..e88ae02c1c6 100644
--- a/src/snapshots.txt
+++ b/src/snapshots.txt
@@ -1,3 +1,11 @@
+S 2014-05-11 72fc4a5
+  freebsd-x86_64 82db6355b0b7c8023c8845a74e2f224da2831b50
+  linux-i386 91901299d5f86f5b67377d940073908a1f0e4e82
+  linux-x86_64 2a80e40bb8d832dba307ad6a43bb63081627c22c
+  macos-i386 3d7ce9b9201f07cecddae6f1b8025e9c28b10bbf
+  macos-x86_64 4cfe69a0499d486a7bfdb9cd05c52845ad607dcb
+  winnt-i386 328d13aeb6c573125c57d7103a12bebd34fadd1f
+
 S 2014-05-09 47ecc2e
   freebsd-x86_64 5c085972690e1f9412c3c0c7ec64f6b148fe04fd
   linux-i386 690d2e310c025f10c54b1f2b9f32c65ea34575ed