author     Mathieu David <mathieudavid@mathieudavid.org>  2015-07-27 20:46:01 +0200
committer  Mathieu David <mathieudavid@mathieudavid.org>  2015-07-27 20:46:01 +0200
commit     f6e9240a99e86d2c799dc29f179dd2870e51f71d (patch)
tree       a7e5ba20745b16949a45a4612b2708e262693a7b /src/liballoc
parent     003c3eaa62981b791f9eb7bcad015baa1e00d98c (diff)
parent     3351afeecffcc9ebaeb1188a5cde976da8e4a5aa (diff)
Fix the relative path issue by including the files using include_bytes!
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/arc.rs        | 264
-rw-r--r--  src/liballoc/boxed.rs      | 117
-rw-r--r--  src/liballoc/boxed_test.rs |   8
-rw-r--r--  src/liballoc/lib.rs        |  22
-rw-r--r--  src/liballoc/raw_vec.rs    | 453
-rw-r--r--  src/liballoc/rc.rs         |  10
6 files changed, 757 insertions(+), 117 deletions(-)
diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index 7bfeaec36d7..2a47fd29bd6 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -77,13 +77,15 @@ use core::atomic;
 use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
 use core::fmt;
 use core::cmp::Ordering;
-use core::mem::{min_align_of_val, size_of_val};
+use core::mem::{align_of_val, size_of_val};
 use core::intrinsics::drop_in_place;
 use core::mem;
 use core::nonzero::NonZero;
 use core::ops::{Deref, CoerceUnsized};
+use core::ptr;
 use core::marker::Unsize;
 use core::hash::{Hash, Hasher};
+use core::usize;
 use heap::deallocate;
 
 /// An atomically reference counted wrapper for shared state.
@@ -145,6 +147,8 @@ pub struct Weak<T: ?Sized> {
 unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> { }
 unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> { }
 
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -154,7 +158,12 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
 
 struct ArcInner<T: ?Sized> {
     strong: atomic::AtomicUsize,
+
+    // the value usize::MAX acts as a sentinel for temporarily "locking" the
+    // ability to upgrade weak pointers or downgrade strong ones; this is used
+    // to avoid races in `make_unique` and `get_mut`.
     weak: atomic::AtomicUsize,
+
     data: T,
 }
 
@@ -201,9 +210,25 @@ impl<T: ?Sized> Arc<T> {
     #[unstable(feature = "arc_weak",
                reason = "Weak pointers may not belong in this module.")]
     pub fn downgrade(&self) -> Weak<T> {
-        // See the clone() impl for why this is relaxed
-        self.inner().weak.fetch_add(1, Relaxed);
-        Weak { _ptr: self._ptr }
+        loop {
+            // This Relaxed is OK because we're checking the value in the CAS
+            // below.
+            let cur = self.inner().weak.load(Relaxed);
+
+            // check if the weak counter is currently "locked"; if so, spin.
+            if cur == usize::MAX { continue }
+
+            // NOTE: this code currently ignores the possibility of overflow
+            // into usize::MAX; in general both Rc and Arc need to be adjusted
+            // to deal with overflow.
+
+            // Unlike with Clone(), we need this to be an Acquire read to
+            // synchronize with the write coming from `is_unique`, so that the
+            // events prior to that write happen before this read.
+            if self.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
+                return Weak { _ptr: self._ptr }
+            }
+        }
     }
 
     /// Get the number of weak references to this value.
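
The loop above is an instance of a sentinel-locked counter: usize::MAX marks the weak count as locked by `is_unique`, and every increment spins until the lock is released. A minimal standalone sketch of that pattern, written with today's std::sync::atomic names (compare_exchange) rather than the 2015-era core::atomic::compare_and_swap the patch uses:

    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Increments `counter` unless it is "locked" at usize::MAX,
    /// spinning until the lock is released.
    fn increment_unless_locked(counter: &AtomicUsize) -> usize {
        loop {
            // Relaxed is fine: the value is re-validated by the CAS below.
            let cur = counter.load(Ordering::Relaxed);
            if cur == usize::MAX {
                continue; // locked; spin
            }
            // Acquire on success pairs with the Release store that unlocks.
            if counter
                .compare_exchange(cur, cur + 1, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return cur + 1;
            }
        }
    }

    fn main() {
        let weak = AtomicUsize::new(1);
        assert_eq!(increment_unless_locked(&weak), 2);
    }
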
@@ -241,7 +266,7 @@ impl<T: ?Sized> Arc<T> {
 
         if self.inner().weak.fetch_sub(1, Release) == 1 {
             atomic::fence(Acquire);
-            deallocate(ptr as *mut u8, size_of_val(&*ptr), min_align_of_val(&*ptr))
+            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
         }
     }
 }
@@ -258,51 +283,6 @@ pub fn weak_count<T: ?Sized>(this: &Arc<T>) -> usize { Arc::weak_count(this) }
 #[deprecated(since = "1.2.0", reason = "renamed to Arc::strong_count")]
 pub fn strong_count<T: ?Sized>(this: &Arc<T>) -> usize { Arc::strong_count(this) }
 
-
-/// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
-///
-/// Returns `None` if the `Arc<T>` is not unique.
-///
-/// This function is marked **unsafe** because it is racy if weak pointers
-/// are active.
-///
-/// # Examples
-///
-/// ```
-/// # #![feature(arc_unique, alloc)]
-/// extern crate alloc;
-/// # fn main() {
-/// use alloc::arc::{Arc, get_mut};
-///
-/// # unsafe {
-/// let mut x = Arc::new(3);
-/// *get_mut(&mut x).unwrap() = 4;
-/// assert_eq!(*x, 4);
-///
-/// let _y = x.clone();
-/// assert!(get_mut(&mut x).is_none());
-/// # }
-/// # }
-/// ```
-#[inline]
-#[unstable(feature = "arc_unique")]
-#[deprecated(since = "1.2.0",
-             reason = "this function is unsafe with weak pointers")]
-pub unsafe fn get_mut<T: ?Sized>(this: &mut Arc<T>) -> Option<&mut T> {
-    // FIXME(#24880) potential race with upgraded weak pointers here
-    if Arc::strong_count(this) == 1 && Arc::weak_count(this) == 0 {
-        // This unsafety is ok because we're guaranteed that the pointer
-        // returned is the *only* pointer that will ever be returned to T. Our
-        // reference count is guaranteed to be 1 at this point, and we required
-        // the Arc itself to be `mut`, so we're returning the only possible
-        // reference to the inner data.
-        let inner = &mut **this._ptr;
-        Some(&mut inner.data)
-    } else {
-        None
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> Clone for Arc<T> {
     /// Makes a clone of the `Arc<T>`.
@@ -350,10 +330,9 @@ impl<T: Clone> Arc<T> {
     /// Make a mutable reference from the given `Arc<T>`.
     ///
     /// This is also referred to as a copy-on-write operation because the inner
-    /// data is cloned if the reference count is greater than one.
-    ///
-    /// This method is marked **unsafe** because it is racy if weak pointers
-    /// are active.
+    /// data is cloned if the (strong) reference count is greater than one. If
+    /// we hold the only strong reference, any existing weak references will no
+    /// longer be upgradeable.
     ///
     /// # Examples
     ///
@@ -361,33 +340,140 @@ impl<T: Clone> Arc<T> {
     /// # #![feature(arc_unique)]
     /// use std::sync::Arc;
     ///
-    /// # unsafe {
     /// let mut five = Arc::new(5);
     ///
-    /// let mut_five = five.make_unique();
-    /// # }
+    /// let mut_five = Arc::make_unique(&mut five);
     /// ```
     #[inline]
     #[unstable(feature = "arc_unique")]
-    #[deprecated(since = "1.2.0",
-                 reason = "this function is unsafe with weak pointers")]
-    pub unsafe fn make_unique(&mut self) -> &mut T {
-        // FIXME(#24880) potential race with upgraded weak pointers here
+    pub fn make_unique(this: &mut Arc<T>) -> &mut T {
+        // Note that we hold both a strong reference and a weak reference.
+        // Thus, releasing our strong reference only will not, by itself, cause
+        // the memory to be deallocated.
         //
-        // Note that we hold a strong reference, which also counts as a weak
-        // reference, so we only clone if there is an additional reference of
-        // either kind.
-        if self.inner().strong.load(SeqCst) != 1 ||
-           self.inner().weak.load(SeqCst) != 1 {
-            *self = Arc::new((**self).clone())
+        // Use Acquire to ensure that we see any writes to `weak` that happen
+        // before release writes (i.e., decrements) to `strong`. Since we hold a
+        // weak count, there's no chance the ArcInner itself could be
+        // deallocated.
+        if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
+            // Another strong pointer exists; clone
+            *this = Arc::new((**this).clone());
+        } else if this.inner().weak.load(Relaxed) != 1 {
+            // Relaxed suffices in the above because this is fundamentally an
+            // optimization: we are always racing with weak pointers being
+            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
+
+            // We removed the last strong ref, but there are additional weak
+            // refs remaining. We'll move the contents to a new Arc, and
+            // invalidate the other weak refs.
+
+            // Note that it is not possible for the read of `weak` to yield
+            // usize::MAX (i.e., locked), since the weak count can only be
+            // locked by a thread with a strong reference.
+
+            // Materialize our own implicit weak pointer, so that it can clean
+            // up the ArcInner as needed.
+            let weak = Weak { _ptr: this._ptr };
+
+            // mark the data itself as already deallocated
+            unsafe {
+                // there is no data race in the implicit write caused by `read`
+                // here (due to zeroing) because data is no longer accessed by
+                // other threads (due to there being no more strong refs at this
+                // point).
+                let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
+                mem::swap(this, &mut swap);
+                mem::forget(swap);
+            }
+        } else {
+            // We were the sole reference of either kind; bump back up the
+            // strong ref count.
+            this.inner().strong.store(1, Release);
         }
+
         // As with `get_mut()`, the unsafety is ok because our reference was
         // either unique to begin with, or became one upon cloning the contents.
-        let inner = &mut **self._ptr;
-        &mut inner.data
+        unsafe {
+            let inner = &mut **this._ptr;
+            &mut inner.data
+        }
     }
 }
 
+impl<T: ?Sized> Arc<T> {
+    /// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
+    ///
+    /// Returns `None` if the `Arc<T>` is not unique.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![feature(arc_unique, alloc)]
+    /// extern crate alloc;
+    /// # fn main() {
+    /// use alloc::arc::Arc;
+    ///
+    /// let mut x = Arc::new(3);
+    /// *Arc::get_mut(&mut x).unwrap() = 4;
+    /// assert_eq!(*x, 4);
+    ///
+    /// let _y = x.clone();
+    /// assert!(Arc::get_mut(&mut x).is_none());
+    /// # }
+    /// ```
+    #[inline]
+    #[unstable(feature = "arc_unique")]
+    pub fn get_mut(this: &mut Arc<T>) -> Option<&mut T> {
+        if this.is_unique() {
+            // This unsafety is ok because we're guaranteed that the pointer
+            // returned is the *only* pointer that will ever be returned to T. Our
+            // reference count is guaranteed to be 1 at this point, and we required
+            // the Arc itself to be `mut`, so we're returning the only possible
+            // reference to the inner data.
+            unsafe {
+                let inner = &mut **this._ptr;
+                Some(&mut inner.data)
+            }
+        } else {
+            None
+        }
+    }
+
+    /// Determine whether this is the unique reference (including weak refs) to
+    /// the underlying data.
+    ///
+    /// Note that this requires locking the weak ref count.
+    fn is_unique(&mut self) -> bool {
+        // lock the weak pointer count if we appear to be the sole weak pointer
+        // holder.
+        //
+        // The Acquire ordering here ensures a happens-before relationship with any
+        // writes to `strong` prior to decrements of the `weak` count (via drop,
+        // which uses Release).
+        if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
+            // Due to the previous acquire read, this will observe any writes to
+            // `strong` that were due to upgrading weak pointers; only strong
+            // clones remain, which require that the strong count is > 1 anyway.
+            let unique = self.inner().strong.load(Relaxed) == 1;
+
+            // The release write here synchronizes with a read in `downgrade`,
+            // effectively preventing the above read of `strong` from happening
+            // after the write.
+            self.inner().weak.store(1, Release); // release the lock
+            unique
+        } else {
+            false
+        }
+    }
+}
+
+#[inline]
+#[unstable(feature = "arc_unique")]
+#[deprecated(since = "1.2.0", reason = "use Arc::get_mut instead")]
+pub fn get_mut<T: ?Sized>(this: &mut Arc<T>) -> Option<&mut T> {
+    Arc::get_mut(this)
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> Drop for Arc<T> {
     /// Drops the `Arc<T>`.
@@ -483,9 +569,15 @@ impl<T: ?Sized> Weak<T> {
         // fetch_add because once the count hits 0 it must never be above 0.
         let inner = self.inner();
         loop {
-            let n = inner.strong.load(SeqCst);
+            // Relaxed load because any write of 0 that we can observe
+            // leaves the field in a permanently zero state (so a
+            // "stale" read of 0 is fine), and any other value is
+            // confirmed via the CAS below.
+            let n = inner.strong.load(Relaxed);
             if n == 0 { return None }
-            let old = inner.strong.compare_and_swap(n, n + 1, SeqCst);
+
+            // Relaxed is valid for the same reason it is on Arc's Clone impl
+            let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
             if old == n { return Some(Arc { _ptr: self._ptr }) }
         }
     }
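
For orientation, the behaviour this loop implements, exercised through today's stable std::sync API: upgrade succeeds while any strong reference survives, and permanently returns None once the strong count has hit zero.

    use std::sync::{Arc, Weak};

    fn main() {
        let strong = Arc::new(5);
        let weak: Weak<i32> = Arc::downgrade(&strong);
        assert_eq!(weak.upgrade().as_deref(), Some(&5));

        drop(strong); // the strong count reaches zero and stays there
        assert!(weak.upgrade().is_none());
    }
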
@@ -516,9 +608,12 @@ impl<T: ?Sized> Clone for Weak<T> {
     /// ```
     #[inline]
     fn clone(&self) -> Weak<T> {
-        // See comments in Arc::clone() for why this is relaxed
+        // See comments in Arc::clone() for why this is relaxed.  This can use a
+        // fetch_add (ignoring the lock) because the weak count is only locked
+        // when there are *no other* weak pointers in existence. (So we can't be
+        // running this code in that case).
         self.inner().weak.fetch_add(1, Relaxed);
-        Weak { _ptr: self._ptr }
+        return Weak { _ptr: self._ptr }
     }
 }
 
@@ -561,11 +656,16 @@ impl<T: ?Sized> Drop for Weak<T> {
         // If we find out that we were the last weak pointer, then it's time to
         // deallocate the data entirely. See the discussion in Arc::drop() about
         // the memory orderings
+        //
+        // It's not necessary to check for the locked state here, because the
+        // weak count can only be locked if there was precisely one weak ref,
+        // meaning that drop could only subsequently run ON that remaining weak
+        // ref, which can only happen after the lock is released.
         if self.inner().weak.fetch_sub(1, Release) == 1 {
             atomic::fence(Acquire);
             unsafe { deallocate(ptr as *mut u8,
                                 size_of_val(&*ptr),
-                                min_align_of_val(&*ptr)) }
+                                align_of_val(&*ptr)) }
         }
     }
 }
@@ -792,13 +892,13 @@ mod tests {
             let mut cow1 = cow0.clone();
             let mut cow2 = cow1.clone();
 
-            assert!(75 == *cow0.make_unique());
-            assert!(75 == *cow1.make_unique());
-            assert!(75 == *cow2.make_unique());
+            assert!(75 == *Arc::make_unique(&mut cow0));
+            assert!(75 == *Arc::make_unique(&mut cow1));
+            assert!(75 == *Arc::make_unique(&mut cow2));
 
-            *cow0.make_unique() += 1;
-            *cow1.make_unique() += 2;
-            *cow2.make_unique() += 3;
+            *Arc::make_unique(&mut cow0) += 1;
+            *Arc::make_unique(&mut cow1) += 2;
+            *Arc::make_unique(&mut cow2) += 3;
 
             assert!(76 == *cow0);
             assert!(77 == *cow1);
@@ -822,7 +922,7 @@ mod tests {
         assert!(75 == *cow2);
 
         unsafe {
-            *cow0.make_unique() += 1;
+            *Arc::make_unique(&mut cow0) += 1;
         }
 
         assert!(76 == *cow0);
@@ -845,7 +945,7 @@ mod tests {
         assert!(75 == *cow1_weak.upgrade().unwrap());
 
         unsafe {
-            *cow0.make_unique() += 1;
+            *Arc::make_unique(&mut cow0) += 1;
         }
 
         assert!(76 == *cow0);
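
Taken together, the arc.rs changes turn make_unique and get_mut into safe associated functions. A small sketch of the resulting copy-on-write flow, using the names under which these APIs eventually stabilized (Arc::make_unique was later renamed Arc::make_mut):

    use std::sync::Arc;

    fn main() {
        let mut data = Arc::new(75);
        let other = data.clone();

        // Two strong refs exist, so make_mut clones the inner value first.
        *Arc::make_mut(&mut data) += 1;
        assert_eq!(*data, 76);
        assert_eq!(*other, 75); // the shared allocation is untouched

        drop(other);
        // Now unique, so get_mut hands out a direct mutable reference.
        *Arc::get_mut(&mut data).unwrap() += 1;
        assert_eq!(*data, 77);
    }
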
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 1039756363e..acf22094233 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -55,14 +55,17 @@
 
 use core::prelude::*;
 
+use heap;
+
 use core::any::Any;
 use core::cmp::Ordering;
 use core::fmt;
 use core::hash::{self, Hash};
-use core::marker::Unsize;
+use core::marker::{self, Unsize};
 use core::mem;
 use core::ops::{CoerceUnsized, Deref, DerefMut};
-use core::ptr::{Unique};
+use core::ops::{Placer, Boxed, Place, InPlace, BoxPlace};
+use core::ptr::Unique;
 use core::raw::{TraitObject};
 
 /// A value that represents the heap. This is the default place that the `box`
@@ -72,7 +75,7 @@ use core::raw::{TraitObject};
 ///
 /// ```
 /// # #![feature(box_heap)]
-/// #![feature(box_syntax)]
+/// #![feature(box_syntax, placement_in_syntax)]
 /// use std::boxed::HEAP;
 ///
 /// fn main() {
@@ -83,7 +86,12 @@ use core::raw::{TraitObject};
 #[lang = "exchange_heap"]
 #[unstable(feature = "box_heap",
            reason = "may be renamed; uncertain about custom allocator design")]
-pub const HEAP: () = ();
+pub const HEAP: ExchangeHeapSingleton =
+    ExchangeHeapSingleton { _force_singleton: () };
+
+/// This is the singleton type used solely for `boxed::HEAP`.
+#[derive(Copy, Clone)]
+pub struct ExchangeHeapSingleton { _force_singleton: () }
 
 /// A pointer type for heap allocation.
 ///
@@ -91,7 +99,97 @@ pub const HEAP: () = ();
 #[lang = "owned_box"]
 #[stable(feature = "rust1", since = "1.0.0")]
 #[fundamental]
-pub struct Box<T>(Unique<T>);
+pub struct Box<T: ?Sized>(Unique<T>);
+
+/// `IntermediateBox` represents uninitialized backing storage for `Box`.
+///
+/// FIXME (pnkfelix): Ideally we would just reuse `Box<T>` instead of
+/// introducing a separate `IntermediateBox<T>`; but then you hit
+/// issues when you e.g. attempt to destructure an instance of `Box`,
+/// since it is a lang item and so it gets special handling by the
+/// compiler.  Easier just to make this parallel type for now.
+///
+/// FIXME (pnkfelix): Currently the `box` protocol only supports
+/// creating instances of sized types. This IntermediateBox is
+/// designed to be forward-compatible with a future protocol that
+/// supports creating instances of unsized types; that is why the type
+/// parameter has the `?Sized` generalization marker, and is also why
+/// this carries an explicit size. However, it probably does not need
+/// to carry the explicit alignment; that is just a work-around for
+/// the fact that the `align_of` intrinsic currently requires the
+/// input type to be Sized (which I do not think is strictly
+/// necessary).
+#[unstable(feature = "placement_in", reason = "placement box design is still being worked out.")]
+pub struct IntermediateBox<T: ?Sized>{
+    ptr: *mut u8,
+    size: usize,
+    align: usize,
+    marker: marker::PhantomData<*mut T>,
+}
+
+impl<T> Place<T> for IntermediateBox<T> {
+    fn pointer(&mut self) -> *mut T {
+        unsafe { ::core::mem::transmute(self.ptr) }
+    }
+}
+
+unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> {
+    let p = b.ptr as *mut T;
+    mem::forget(b);
+    mem::transmute(p)
+}
+
+fn make_place<T>() -> IntermediateBox<T> {
+    let size = mem::size_of::<T>();
+    let align = mem::align_of::<T>();
+
+    let p = if size == 0 {
+        heap::EMPTY as *mut u8
+    } else {
+        let p = unsafe {
+            heap::allocate(size, align)
+        };
+        if p.is_null() {
+            panic!("Box make_place allocation failure.");
+        }
+        p
+    };
+
+    IntermediateBox { ptr: p, size: size, align: align, marker: marker::PhantomData }
+}
+
+impl<T> BoxPlace<T> for IntermediateBox<T> {
+    fn make_place() -> IntermediateBox<T> { make_place() }
+}
+
+impl<T> InPlace<T> for IntermediateBox<T> {
+    type Owner = Box<T>;
+    unsafe fn finalize(self) -> Box<T> { finalize(self) }
+}
+
+impl<T> Boxed for Box<T> {
+    type Data = T;
+    type Place = IntermediateBox<T>;
+    unsafe fn finalize(b: IntermediateBox<T>) -> Box<T> { finalize(b) }
+}
+
+impl<T> Placer<T> for ExchangeHeapSingleton {
+    type Place = IntermediateBox<T>;
+
+    fn make_place(self) -> IntermediateBox<T> {
+        make_place()
+    }
+}
+
+impl<T: ?Sized> Drop for IntermediateBox<T> {
+    fn drop(&mut self) {
+        if self.size > 0 {
+            unsafe {
+                heap::deallocate(self.ptr, self.size, self.align)
+            }
+        }
+    }
+}
 
 impl<T> Box<T> {
     /// Allocates memory on the heap and then moves `x` into it.
@@ -116,7 +214,7 @@ impl<T : ?Sized> Box<T> {
     /// of `T` and releases memory. Since the way `Box` allocates and
     /// releases memory is unspecified, the only valid pointer to pass
     /// to this function is the one taken from another `Box` with
-    /// `boxed::into_raw` function.
+    /// the `Box::into_raw` function.
     ///
     /// Function is unsafe, because improper use of this function may
     /// lead to memory problems like double-free, for example if the
@@ -140,10 +238,8 @@ impl<T : ?Sized> Box<T> {
     /// # Examples
     /// ```
     /// # #![feature(box_raw)]
-    /// use std::boxed;
-    ///
     /// let seventeen = Box::new(17u32);
-    /// let raw = boxed::into_raw(seventeen);
+    /// let raw = Box::into_raw(seventeen);
     /// let boxed_again = unsafe { Box::from_raw(raw) };
     /// ```
     #[unstable(feature = "box_raw", reason = "may be renamed")]
@@ -201,8 +297,7 @@ impl<T: Clone> Clone for Box<T> {
     /// let y = x.clone();
     /// ```
     #[inline]
-    fn clone(&self) -> Box<T> { box {(**self).clone()} }
-
+    fn clone(&self) -> Box<T> { box (HEAP) {(**self).clone()} }
     /// Copies `source`'s contents into `self` without creating a new allocation.
     ///
     /// # Examples
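
The placer machinery above is what placement expressions lower to: make_place() allocates an IntermediateBox, the value is written through its raw pointer, and finalize() converts the place into a Box<T>. A sketch of the surface syntax as it looked on 2015 nightlies; placement syntax was never stabilized and was later removed, so this is illustrative only:

    #![feature(box_heap, box_syntax, placement_in_syntax)]

    use std::boxed::HEAP;

    fn main() {
        // `box (HEAP) expr` names the placer explicitly; plain `box expr`
        // defaults to the same exchange heap.
        let five: Box<i32> = box (HEAP) 5;
        assert_eq!(*five, 5);
    }
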
diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs
index fc44ac4eac6..2ef23b26a56 100644
--- a/src/liballoc/boxed_test.rs
+++ b/src/liballoc/boxed_test.rs
@@ -76,9 +76,9 @@ fn deref() {
 
 #[test]
 fn raw_sized() {
+    let x = Box::new(17);
+    let p = Box::into_raw(x);
     unsafe {
-        let x = Box::new(17);
-        let p = boxed::into_raw(x);
         assert_eq!(17, *p);
         *p = 19;
         let y = Box::from_raw(p);
@@ -105,9 +105,9 @@ fn raw_trait() {
         }
     }
 
+    let x: Box<Foo> = Box::new(Bar(17));
+    let p = Box::into_raw(x);
     unsafe {
-        let x: Box<Foo> = Box::new(Bar(17));
-        let p = boxed::into_raw(x);
         assert_eq!(17, (*p).get());
         (*p).set(19);
         let y: Box<Foo> = Box::from_raw(p);
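
These tests hoist the Box construction and the into_raw call out of the unsafe block, since neither operation is unsafe by itself; only dereferencing the raw pointer and from_raw are. The round trip they exercise, written against today's stable API:

    fn main() {
        let boxed = Box::new(17u32);
        let raw: *mut u32 = Box::into_raw(boxed);
        unsafe {
            assert_eq!(17, *raw);
            *raw = 19;
            // from_raw reclaims ownership, so the allocation is freed normally.
            let again = Box::from_raw(raw);
            assert_eq!(19, *again);
        }
    }
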
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 7dcf7a76da0..f66495c4057 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -70,6 +70,8 @@
        test(no_crate_inject))]
 #![no_std]
 
+// SNAP d4432b3
+#![allow(unused_features)] // until feature(placement_in_syntax) is in snap
 #![feature(allocator)]
 #![feature(box_syntax)]
 #![feature(coerce_unsized)]
@@ -82,12 +84,15 @@
 #![feature(no_std)]
 #![feature(nonzero)]
 #![feature(optin_builtin_traits)]
+#![feature(placement_in_syntax)]
+#![feature(placement_new_protocol)]
 #![feature(raw)]
 #![feature(staged_api)]
 #![feature(unboxed_closures)]
 #![feature(unique)]
 #![feature(unsafe_no_drop_flag, filling_drop)]
 #![feature(unsize)]
+#![feature(core_slice_ext)]
 
 #![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
 #![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
@@ -122,6 +127,7 @@ mod boxed { pub use std::boxed::{Box, HEAP}; }
 mod boxed_test;
 pub mod arc;
 pub mod rc;
+pub mod raw_vec;
 
 /// Common out-of-memory routine
 #[cold]
@@ -133,19 +139,3 @@ pub fn oom() -> ! {
     //                allocate.
     unsafe { core::intrinsics::abort() }
 }
-
-// FIXME(#14344): When linking liballoc with libstd, this library will be linked
-//                as an rlib (it only exists as an rlib). It turns out that an
-//                optimized standard library doesn't actually use *any* symbols
-//                from this library. Everything is inlined and optimized away.
-//                This means that linkers will actually omit the object for this
-//                file, even though it may be needed in the future.
-//
-//                To get around this for now, we define a dummy symbol which
-//                will never get inlined so the stdlib can call it. The stdlib's
-//                reference to this symbol will cause this library's object file
-//                to get linked in to libstd successfully (the linker won't
-//                optimize it out).
-#[doc(hidden)]
-#[unstable(feature = "issue_14344_fixme")]
-pub fn fixme_14344_be_sure_to_link_to_collections() {}
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
new file mode 100644
index 00000000000..9311f44d9df
--- /dev/null
+++ b/src/liballoc/raw_vec.rs
@@ -0,0 +1,453 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::ptr::Unique;
+use core::mem;
+use core::slice::{self, SliceExt};
+use heap;
+use super::oom;
+use super::boxed::Box;
+use core::ops::Drop;
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces heap::EMPTY on zero-sized types
+/// * Produces heap::EMPTY on zero-length allocations
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes
+/// * Guards against overflowing your length
+/// * Aborts on OOM
+/// * Avoids freeing heap::EMPTY
+/// * Contains a ptr::Unique and thus endows the user with all related benefits
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
+/// to handle the actual things *stored* inside of a RawVec.
+///
+/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
+/// This enables you to use capacity-growing logic to catch the overflows in your length
+/// that might occur with zero-sized types.
+///
+/// However this means that you need to be careful when roundtripping this type
+/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
+/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
+/// field. This allows zero-sized types to not be special-cased by consumers of
+/// this type.
+#[unsafe_no_drop_flag]
+pub struct RawVec<T> {
+    ptr: Unique<T>,
+    cap: usize,
+}
+
+impl<T> RawVec<T> {
+    /// Creates the biggest possible RawVec without allocating. If T has positive
+    /// size, then this makes a RawVec with capacity 0. If T has 0 size, then
+    /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing
+    /// delayed allocation.
+    pub fn new() -> Self {
+        unsafe {
+            // !0 is usize::MAX. This branch should be stripped at compile time.
+            let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
+
+            // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
+            RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
+        }
+    }
+
+    /// Creates a RawVec with exactly the capacity and alignment requirements
+    /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
+    /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
+    /// get a RawVec with the requested capacity!
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    pub fn with_capacity(cap: usize) -> Self {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+
+            let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
+            alloc_guard(alloc_size);
+
+            // handles ZSTs and `cap = 0` alike
+            let ptr = if alloc_size == 0 {
+                heap::EMPTY as *mut u8
+            } else {
+                let align = mem::align_of::<T>();
+                let ptr = heap::allocate(alloc_size, align);
+                if ptr.is_null() { oom() }
+                ptr
+            };
+
+            RawVec { ptr: Unique::new(ptr as *mut _), cap: cap }
+        }
+    }
+
+    /// Reconstitutes a RawVec from a pointer and capacity.
+    ///
+    /// # Undefined Behaviour
+    ///
+    /// The ptr must be allocated, and with the given capacity. The
+    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+    /// If the ptr and capacity come from a RawVec, then this is guaranteed.
+    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
+        RawVec { ptr: Unique::new(ptr), cap: cap }
+    }
+
+    /// Converts a `Box<[T]>` into a `RawVec<T>`.
+    pub fn from_box(mut slice: Box<[T]>) -> Self {
+        unsafe {
+            let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
+            mem::forget(slice);
+            result
+        }
+    }
+}
+
+impl<T> RawVec<T> {
+    /// Gets a raw pointer to the start of the allocation. Note that this is
+    /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
+    /// be careful.
+    pub fn ptr(&self) -> *mut T {
+        *self.ptr
+    }
+
+    /// Gets the capacity of the allocation.
+    ///
+    /// This will always be `usize::MAX` if `T` is zero-sized.
+    pub fn cap(&self) -> usize {
+        if mem::size_of::<T>() == 0 { !0 } else { self.cap }
+    }
+
+    /// Doubles the size of the type's backing allocation. This operation is common
+    /// enough that it's easiest to just have a dedicated method. Slightly
+    /// more efficient logic can be provided for this than the general case.
+    ///
+    /// This function is ideal when pushing elements one at a time because
+    /// you don't need to incur the costs of the more general computations
+    /// reserve needs to do to guard against overflow. You do however need to
+    /// manually check if your `len == cap`.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
+    ///   all `usize::MAX` slots in your imaginary buffer.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    ///
+    /// # Examples
+    ///
+    /// ```ignore
+    /// struct MyVec<T> {
+    ///     buf: RawVec<T>,
+    ///     len: usize,
+    /// }
+    ///
+    /// impl<T> MyVec<T> {
+    ///     pub fn push(&mut self, elem: T) {
+    ///         if self.len == self.buf.cap() { self.buf.double(); }
+    ///         // double would have aborted or panicked if the len exceeded
+    ///         // `isize::MAX` so this is safe to do unchecked now.
+    ///         unsafe {
+    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
+    ///         }
+    ///         self.len += 1;
+    ///     }
+    /// }
+    /// ```
+    #[inline(never)]
+    #[cold]
+    pub fn double(&mut self) {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+
+            // since we set the capacity to usize::MAX when elem_size is
+            // 0, getting to here necessarily means the RawVec is overfull.
+            assert!(elem_size != 0, "capacity overflow");
+
+            let align = mem::align_of::<T>();
+
+            let (new_cap, ptr) = if self.cap == 0 {
+                // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
+                let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
+                let ptr = heap::allocate(new_cap * elem_size, align);
+                (new_cap, ptr)
+            } else {
+                // Since we guarantee that we never allocate more than isize::MAX bytes,
+                // `elem_size * self.cap <= isize::MAX` holds as a precondition, so this can't overflow
+                let new_cap = 2 * self.cap;
+                let new_alloc_size = new_cap * elem_size;
+                alloc_guard(new_alloc_size);
+                let ptr = heap::reallocate(self.ptr() as *mut _,
+                                           self.cap * elem_size,
+                                           new_alloc_size,
+                                           align);
+                (new_cap, ptr)
+            };
+
+            // If allocate or reallocate fail, we'll get `null` back
+            if ptr.is_null() { oom() }
+
+            self.ptr = Unique::new(ptr as *mut _);
+            self.cap = new_cap;
+        }
+    }
+
+    /// Ensures that the buffer contains at least enough space to hold
+    /// `used_cap + needed_extra_cap` elements. If it doesn't already, it
+    /// will reallocate the minimum possible amount of memory necessary.
+    /// Generally this will be exactly the amount of memory necessary,
+    /// but in principle the allocator is free to give back more than
+    /// we asked for.
+    ///
+    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe
+    /// code *you* write that relies on the behaviour of this function may break.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+            let align = mem::align_of::<T>();
+
+            // NOTE: we don't early branch on ZSTs here because we want this
+            // to actually catch "asking for more than usize::MAX" in that case.
+            // If we make it past the first branch then we are guaranteed to
+            // panic.
+
+            // Don't actually need any more capacity.
+            // Wrapping in case they gave a bad `used_cap`.
+            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
+
+            // Nothing we can really do about these checks :(
+            let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
+            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
+            alloc_guard(new_alloc_size);
+
+            let ptr = if self.cap == 0 {
+                heap::allocate(new_alloc_size, align)
+            } else {
+                heap::reallocate(self.ptr() as *mut _,
+                                 self.cap * elem_size,
+                                 new_alloc_size,
+                                 align)
+            };
+
+            // If allocate or reallocate fail, we'll get `null` back
+            if ptr.is_null() { oom() }
+
+            self.ptr = Unique::new(ptr as *mut _);
+            self.cap = new_cap;
+        }
+    }
+
+    /// Ensures that the buffer contains at least enough space to hold
+    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
+    /// enough capacity, it will reallocate enough space plus comfortable slack
+    /// space to get amortized `O(1)` behaviour. Will limit this behaviour
+    /// if it would needlessly cause itself to panic.
+    ///
+    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe
+    /// code *you* write that relies on the behaviour of this function may break.
+    ///
+    /// This is ideal for implementing a bulk-push operation like `extend`.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    ///
+    /// # Examples
+    ///
+    /// ```ignore
+    /// struct MyVec<T> {
+    ///     buf: RawVec<T>,
+    ///     len: usize,
+    /// }
+    ///
+    /// impl<T> MyVec<T> {
+    ///     pub fn push_all(&mut self, elems: &[T]) {
+    ///         self.buf.reserve(self.len, elems.len());
+    ///         // reserve would have aborted or panicked if the len exceeded
+    ///         // `isize::MAX` so this is safe to do unchecked now.
+    ///         for x in elems {
+    ///             unsafe {
+    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
+    ///             }
+    ///             self.len += 1;
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+            let align = mem::align_of::<T>();
+
+            // NOTE: we don't early branch on ZSTs here because we want this
+            // to actually catch "asking for more than usize::MAX" in that case.
+            // If we make it past the first branch then we are guaranteed to
+            // panic.
+
+            // Don't actually need any more capacity.
+            // Wrapping in case they give a bad `used_cap`.
+            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
+
+            // Nothing we can really do about these checks :(
+            let new_cap = used_cap.checked_add(needed_extra_cap)
+                                  .and_then(|cap| cap.checked_mul(2))
+                                  .expect("capacity overflow");
+            let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
+            // FIXME: may crash and burn on over-reserve
+            alloc_guard(new_alloc_size);
+
+            let ptr = if self.cap == 0 {
+                heap::allocate(new_alloc_size, align)
+            } else {
+                heap::reallocate(self.ptr() as *mut _,
+                                 self.cap * elem_size,
+                                 new_alloc_size,
+                                 align)
+            };
+
+            // If allocate or reallocate fail, we'll get `null` back
+            if ptr.is_null() { oom() }
+
+            self.ptr = Unique::new(ptr as *mut _);
+            self.cap = new_cap;
+        }
+    }
+
+    /// Shrinks the allocation down to the specified amount. If the given amount
+    /// is 0, actually completely deallocates.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the given amount is *larger* than the current capacity.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM.
+    pub fn shrink_to_fit(&mut self, amount: usize) {
+        let elem_size = mem::size_of::<T>();
+        let align = mem::align_of::<T>();
+
+        // Set the `cap` because they might be about to promote to a `Box<[T]>`
+        if elem_size == 0 {
+            self.cap = amount;
+            return;
+        }
+
+        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
+        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
+
+        if amount == 0 {
+            mem::replace(self, RawVec::new());
+        } else if self.cap != amount {
+            unsafe {
+                // Overflow check is unnecessary as the vector is already at
+                // least this large.
+                let ptr = heap::reallocate(self.ptr() as *mut _,
+                                           self.cap * elem_size,
+                                           amount * elem_size,
+                                           align);
+                if ptr.is_null() { oom() }
+                self.ptr = Unique::new(ptr as *mut _);
+            }
+            self.cap = amount;
+        }
+    }
+
+    /// Converts the entire buffer into `Box<[T]>`.
+    ///
+    /// While it is not *strictly* Undefined Behaviour to call
+    /// this procedure while some of the RawVec is uninitialized,
+    /// it certainly makes it trivial to trigger it.
+    ///
+    /// Note that this will correctly reconstitute any `cap` changes
+    /// that may have been performed. (see description of type for details)
+    pub unsafe fn into_box(self) -> Box<[T]> {
+        // NOTE: not calling `cap()` here, actually using the real `cap` field!
+        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
+        let output: Box<[T]> = Box::from_raw(slice);
+        mem::forget(self);
+        output
+    }
+
+    /// This is a stupid name in the hopes that someone will find this in the
+    /// not too distant future and remove it with the rest of
+    /// #[unsafe_no_drop_flag]
+    pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
+        self.cap != mem::POST_DROP_USIZE
+    }
+}
+
+impl<T> Drop for RawVec<T> {
+    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
+    fn drop(&mut self) {
+        let elem_size = mem::size_of::<T>();
+        if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
+            let align = mem::align_of::<T>();
+
+            let num_bytes = elem_size * self.cap;
+            unsafe {
+                heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+            }
+        }
+    }
+}
+
+
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects
+// * We don't overflow `usize::MAX` and actually allocate too little
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
+// guard for this in case we're running on a platform which can use all 4GB in
+// user-space. e.g. PAE or x32
+
+#[inline]
+#[cfg(target_pointer_width = "64")]
+fn alloc_guard(_alloc_size: usize) { }
+
+#[inline]
+#[cfg(target_pointer_width = "32")]
+fn alloc_guard(alloc_size: usize) {
+    assert!(alloc_size <= ::core::isize::MAX as usize, "capacity overflow");
+}
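
Pulling the doc example for double() together into one piece: a minimal push built on RawVec. RawVec is an unstable liballoc internal, so treat this as a sketch of the intended usage rather than code that compiles against a public API:

    use core::ptr;

    struct MyVec<T> {
        buf: RawVec<T>,
        len: usize,
    }

    impl<T> MyVec<T> {
        fn new() -> Self {
            MyVec { buf: RawVec::new(), len: 0 }
        }

        fn push(&mut self, elem: T) {
            // double() panics on capacity overflow and aborts on OOM, so the
            // unchecked write below never targets an undersized buffer.
            if self.len == self.buf.cap() {
                self.buf.double();
            }
            unsafe {
                ptr::write(self.buf.ptr().offset(self.len as isize), elem);
            }
            self.len += 1;
        }
    }
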
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index d5b6c86ef35..d461eeea0b7 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -162,7 +162,7 @@ use core::fmt;
 use core::hash::{Hasher, Hash};
 use core::intrinsics::{assume, drop_in_place};
 use core::marker::{self, Unsize};
-use core::mem::{self, min_align_of, size_of, min_align_of_val, size_of_val, forget};
+use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
 use core::nonzero::NonZero;
 use core::ops::{CoerceUnsized, Deref};
 use core::ptr;
@@ -246,7 +246,7 @@ impl<T> Rc<T> {
                 // destruct the box and skip our Drop
                 // we can ignore the refcounts because we know we're unique
                 deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
-                            min_align_of::<RcBox<T>>());
+                            align_of::<RcBox<T>>());
                 forget(rc);
                 Ok(val)
             }
@@ -496,7 +496,7 @@ impl<T: ?Sized> Drop for Rc<T> {
                     if self.weak() == 0 {
                         deallocate(ptr as *mut u8,
                                    size_of_val(&*ptr),
-                                   min_align_of_val(&*ptr))
+                                   align_of_val(&*ptr))
                     }
                 }
             }
@@ -734,6 +734,8 @@ pub struct Weak<T: ?Sized> {
 impl<T: ?Sized> !marker::Send for Weak<T> {}
 impl<T: ?Sized> !marker::Sync for Weak<T> {}
 
+impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
 #[unstable(feature = "rc_weak",
            reason = "Weak pointers may not belong in this module.")]
 impl<T: ?Sized> Weak<T> {
@@ -805,7 +807,7 @@ impl<T: ?Sized> Drop for Weak<T> {
                 // the strong pointers have disappeared.
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of_val(&*ptr),
-                               min_align_of_val(&*ptr))
+                               align_of_val(&*ptr))
                 }
             }
         }
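
The CoerceUnsized impls added for Weak in both arc.rs and rc.rs let weak pointers take part in unsizing coercions just like their strong counterparts; a small sketch with today's stable std::rc:

    use std::rc::{Rc, Weak};

    fn main() {
        let strong: Rc<[i32; 3]> = Rc::new([1, 2, 3]);
        // Weak<[i32; 3]> coerces to Weak<[i32]> at the annotated binding.
        let weak: Weak<[i32]> = Rc::downgrade(&strong);
        assert_eq!(weak.upgrade().unwrap().len(), 3);
    }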