author    bors <bors@rust-lang.org>    2017-06-20 05:02:19 +0000
committer bors <bors@rust-lang.org>    2017-06-20 05:02:19 +0000
commit    1143eb26a2e405cdccbf6789c89d4581ad969868 (patch)
tree      81cb3feed88063287f8e090ff0b58f01cb891726 /src/liballoc
parent    e00c0401b8fd2c33dc25d2589fd3523e8defa93e (diff)
parent    55a629d496f9393dff5c3a8d4511bf2686bf365b (diff)
Auto merge of #42313 - pnkfelix:allocator-integration, r=alexcrichton
Allocator integration

Let's start getting some feedback on `trait Alloc`.

Here is (see the usage sketch after this list):
 *  the `trait Alloc` itself,
 * the `struct Layout` and `enum AllocErr` that its API relies on
 * a `struct HeapAlloc` that exposes the system allocator as an instance of `Alloc`
 * an integration of `Alloc` with `RawVec`
 * ~~an integration of `Alloc` with `Vec`~~
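As a rough client-side sketch of how these pieces fit together (hypothetical code, not part of this PR; the feature gates and module paths are approximations while all of this is unstable):

```rust
#![feature(alloc, allocator_api)]
extern crate alloc;

use alloc::allocator::{Alloc, Layout};
use alloc::heap::HeapAlloc;

fn main() {
    let mut a = HeapAlloc;
    // Layout for a [u64; 16]: 128 bytes, 8-byte aligned.
    let layout = Layout::array::<u64>(16).unwrap();
    unsafe {
        let ptr = match a.alloc(layout.clone()) {
            Ok(p) => p,
            Err(e) => a.oom(e),
        };
        // ... use the 128-byte block at `ptr` ...
        a.dealloc(ptr, layout);
    }
}
```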

 TODO
 * [x] split `fn realloc_in_place` into `grow` and `shrink` variants
 * [x] add `# Unsafety` and `# Errors` sections to documentation for all relevant methods
 * [x] remove `Vec` integration with `Allocator`
 * [x] add `allocate_zeroed` impl to `HeapAllocator`
 * [x] remove typedefs e.g. `type Size = usize;`
 * [x] impl `trait Error` for all error types in PR
 * [x] make `Layout::from_size_align` public
 * [x] clarify docs of `fn padding_needed_for`.
 * [x] revise `Layout` constructors to ensure that [size+align combination is valid](https://github.com/rust-lang/rust/pull/42313#issuecomment-306845446)
 * [x] resolve mismatch re requirements of align on dealloc. See [comment](https://github.com/rust-lang/rust/pull/42313#issuecomment-306202489).
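On the implementor side, only `alloc` and `dealloc` must be provided; the other methods have default implementations. A minimal sketch (again hypothetical, not part of this PR) of a counting allocator that forwards to `HeapAlloc`, driven through the `alloc_one`/`dealloc_one` convenience methods:

```rust
#![feature(alloc, allocator_api)]
extern crate alloc;

use alloc::allocator::{Alloc, AllocErr, Layout};
use alloc::heap::HeapAlloc;

/// Toy allocator: counts live allocations and forwards to the heap.
struct CountingAlloc {
    live: usize,
}

unsafe impl Alloc for CountingAlloc {
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
        self.live += 1;
        HeapAlloc.alloc(layout)
    }

    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        self.live -= 1;
        HeapAlloc.dealloc(ptr, layout)
    }
}

fn main() {
    let mut a = CountingAlloc { live: 0 };
    unsafe {
        let one = match a.alloc_one::<u64>() {
            Ok(p) => p,
            Err(e) => a.oom(e),
        };
        assert_eq!(a.live, 1);
        a.dealloc_one(one);
        assert_eq!(a.live, 0);
    }
}
```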
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/allocator.rs | 1037
-rw-r--r--  src/liballoc/heap.rs      |   79
-rw-r--r--  src/liballoc/lib.rs       |    4
-rw-r--r--  src/liballoc/raw_vec.rs   |  337
4 files changed, 1339 insertions(+), 118 deletions(-)
diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs
new file mode 100644
index 00000000000..9bddce29957
--- /dev/null
+++ b/src/liballoc/allocator.rs
@@ -0,0 +1,1037 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![unstable(feature = "allocator_api",
+            reason = "the precise API and guarantees it provides may be tweaked \
+                      slightly, especially to possibly take into account the \
+                      types being stored to make room for a future \
+                      tracing garbage collector",
+            issue = "27700")]
+
+use core::cmp;
+use core::fmt;
+use core::mem;
+use core::usize;
+use core::ptr::{self, Unique};
+
+/// Represents the combination of a starting address and
+/// a total capacity of the returned block.
+#[derive(Debug)]
+pub struct Excess(pub *mut u8, pub usize);
+
+fn size_align<T>() -> (usize, usize) {
+    (mem::size_of::<T>(), mem::align_of::<T>())
+}
+
+/// Layout of a block of memory.
+///
+/// An instance of `Layout` describes a particular layout of memory.
+/// You build a `Layout` up as an input to give to an allocator.
+///
+/// All layouts have an associated non-negative size and a
+/// power-of-two alignment.
+///
+/// (Note however that layouts are *not* required to have positive
+/// size, even though many allocators require that all memory
+/// requests have positive size. A caller to the `Alloc::alloc`
+/// method must either ensure that conditions like this are met, or
+/// use specific allocators with looser requirements.)
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Layout {
+    // size of the requested block of memory, measured in bytes.
+    size: usize,
+
+    // alignment of the requested block of memory, measured in bytes.
+    // we ensure that this is always a power-of-two, because APIs
+    // like `posix_memalign` require it and it is a reasonable
+    // constraint to impose on Layout constructors.
+    //
+    // (However, we do not analogously require `align >= sizeof(void*)`,
+    //  even though that is *also* a requirement of `posix_memalign`.)
+    align: usize,
+}
+
+
+// FIXME: audit default implementations for overflow errors,
+// (potentially switching to overflowing_add and
+//  overflowing_mul as necessary).
+
+impl Layout {
+    /// Constructs a `Layout` from a given `size` and `align`,
+    /// or returns `None` if either of the following conditions
+    /// are not met:
+    ///
+    /// * `align` must be a power of two,
+    ///
+    /// * `size`, when rounded up to the nearest multiple of `align`,
+    ///    must not overflow (i.e. the rounded value must be no greater
+    ///    than `usize::MAX`).
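+    ///
+    /// # Examples
+    ///
+    /// (Illustrative sketch only; this API is unstable and may change.)
+    ///
+    /// ```ignore
+    /// let layout = Layout::from_size_align(9, 4).unwrap();
+    /// assert_eq!(layout.size(), 9);
+    /// assert_eq!(layout.align(), 4);
+    ///
+    /// // 3 is not a power of two, so no layout can be built around it.
+    /// assert!(Layout::from_size_align(9, 3).is_none());
+    /// ```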
+    pub fn from_size_align(size: usize, align: usize) -> Option<Layout> {
+        if !align.is_power_of_two() {
+            return None;
+        }
+
+        // (power-of-two implies align != 0.)
+
+        // Rounded up size is:
+        //   size_rounded_up = (size + align - 1) & !(align - 1);
+        //
+        // We know from above that align != 0. If adding (align - 1)
+        // does not overflow, then rounding up will be fine.
+        //
+        // Conversely, &-masking with !(align - 1) will subtract off
+        // only low-order-bits. Thus if overflow occurs with the sum,
+        // the &-mask cannot subtract enough to undo that overflow.
+        //
+        // Above implies that checking for summation overflow is both
+        // necessary and sufficient.
+        if size > usize::MAX - (align - 1) {
+            return None;
+        }
+
+        Some(Layout { size: size, align: align })
+    }
+
+    /// The minimum size in bytes for a memory block of this layout.
+    pub fn size(&self) -> usize { self.size }
+
+    /// The minimum byte alignment for a memory block of this layout.
+    pub fn align(&self) -> usize { self.align }
+
+    /// Constructs a `Layout` suitable for holding a value of type `T`.
+    pub fn new<T>() -> Self {
+        let (size, align) = size_align::<T>();
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Produces layout describing a record that could be used to
+    /// allocate backing structure for `T` (which could be a trait
+    /// or other unsized type like a slice).
+    pub fn for_value<T: ?Sized>(t: &T) -> Self {
+        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Creates a layout describing the record that can hold a value
+    /// of the same layout as `self`, but that also is aligned to
+    /// alignment `align` (measured in bytes).
+    ///
+    /// If `self` already meets the prescribed alignment, then returns
+    /// `self`.
+    ///
+    /// Note that this method does not add any padding to the overall
+    /// size, regardless of whether the returned layout has a different
+    /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+    /// will *still* have size 16.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the combination of `self.size` and the given `align`
+    /// violates the conditions listed in `from_size_align`.
+    pub fn align_to(&self, align: usize) -> Self {
+        Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap()
+    }
+
+    /// Returns the amount of padding we must insert after `self`
+    /// to ensure that the following address will satisfy `align`
+    /// (measured in bytes).
+    ///
+    /// E.g. if `self.size` is 9, then `self.padding_needed_for(4)`
+    /// returns 3, because that is the minimum number of bytes of
+    /// padding required to get a 4-aligned address (assuming that the
+    /// corresponding memory block starts at a 4-aligned address).
+    ///
+    /// The return value of this function has no meaning if `align` is
+    /// not a power-of-two.
+    ///
+    /// Note that the utility of the returned value requires `align`
+    /// to be less than or equal to the alignment of the starting
+    /// address for the whole allocated block of memory. One way to
+    /// satisfy this constraint is to ensure `align <= self.align`.
+    pub fn padding_needed_for(&self, align: usize) -> usize {
+        let len = self.size();
+
+        // Rounded up value is:
+        //   len_rounded_up = (len + align - 1) & !(align - 1);
+        // and then we return the padding difference: `len_rounded_up - len`.
+        //
+        // We use modular arithmetic throughout:
+        //
+        // 1. align is guaranteed to be > 0, so align - 1 is always
+        //    valid.
+        //
+        // 2. `len + align - 1` can overflow by at most `align - 1`,
+        //    so the &-mask with `!(align - 1)` will ensure that in the
+        //    case of overflow, `len_rounded_up` will itself be 0.
+        //    Thus the returned padding, when added to `len`, yields 0,
+        //    which trivially satisfies the alignment `align`.
+        //
+        // (Of course, attempts to allocate blocks of memory whose
+        // size and padding overflow in the above manner should cause
+        // the allocator to yield an error anyway.)
+
+        let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+        return len_rounded_up.wrapping_sub(len);
+    }
+
+    /// Creates a layout describing the record for `n` instances of
+    /// `self`, with a suitable amount of padding between each to
+    /// ensure that each instance is given its requested size and
+    /// alignment. On success, returns `(k, offs)` where `k` is the
+    /// layout of the array and `offs` is the distance between the start
+    /// of each element in the array.
+    ///
+    /// On arithmetic overflow, returns `None`.
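+    ///
+    /// # Examples
+    ///
+    /// (Illustrative sketch only; this API is unstable and may change.)
+    ///
+    /// ```ignore
+    /// // Three `u32`s, i.e. the layout of `[u32; 3]`.
+    /// let (k, offs) = Layout::new::<u32>().repeat(3).unwrap();
+    /// assert_eq!((k.size(), k.align()), (12, 4));
+    /// assert_eq!(offs, 4); // elements start 4 bytes apart
+    /// ```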
+    pub fn repeat(&self, n: usize) -> Option<(Self, usize)> {
+        let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) {
+            None => return None,
+            Some(padded_size) => padded_size,
+        };
+        let alloc_size = match padded_size.checked_mul(n) {
+            None => return None,
+            Some(alloc_size) => alloc_size,
+        };
+
+        // We can assume that `self.align` is a power-of-two.
+        // Furthermore, `alloc_size` has already been rounded up
+        // to a multiple of `self.align`; therefore, the call
+        // to `Layout::from_size_align` below should never panic.
+        Some((Layout::from_size_align(alloc_size, self.align).unwrap(), padded_size))
+    }
+
+    /// Creates a layout describing the record for `self` followed by
+    /// `next`, including any necessary padding to ensure that `next`
+    /// will be properly aligned. Note that the result layout will
+    /// satisfy the alignment properties of both `self` and `next`.
+    ///
+    /// Returns `Some((k, offset))`, where `k` is the layout of the
+    /// concatenated record and `offset` is the relative location, in
+    /// bytes, of the start of `next` embedded within the concatenated record
+    /// (assuming that the record itself starts at offset 0).
+    ///
+    /// On arithmetic overflow, returns `None`.
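+    ///
+    /// # Examples
+    ///
+    /// (Illustrative sketch only; this API is unstable and may change.)
+    ///
+    /// ```ignore
+    /// // A record holding a `u32` followed by a `u8`, like a hand-rolled
+    /// // `struct Rec { a: u32, b: u8 }`.
+    /// let (rec, offset) = Layout::new::<u32>()
+    ///     .extend(Layout::new::<u8>())
+    ///     .unwrap();
+    /// assert_eq!(offset, 4); // the `u8` starts at byte 4
+    /// assert_eq!((rec.size(), rec.align()), (5, 4));
+    /// // Rounding the size up to the alignment gives the familiar
+    /// // `size_of::<Rec>()` of 8.
+    /// assert_eq!(rec.size() + rec.padding_needed_for(rec.align()), 8);
+    /// ```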
+    pub fn extend(&self, next: Self) -> Option<(Self, usize)> {
+        let new_align = cmp::max(self.align, next.align);
+        let realigned = match Layout::from_size_align(self.size, new_align) {
+            None => return None,
+            Some(l) => l,
+        };
+
+        let pad = realigned.padding_needed_for(next.align);
+
+        let offset = match self.size.checked_add(pad) {
+            None => return None,
+            Some(offset) => offset,
+        };
+        let new_size = match offset.checked_add(next.size) {
+            None => return None,
+            Some(new_size) => new_size,
+        };
+
+        let layout = match Layout::from_size_align(new_size, new_align) {
+            None => return None,
+            Some(l) => l,
+        };
+        Some((layout, offset))
+    }
+
+    /// Creates a layout describing the record for `n` instances of
+    /// `self`, with no padding between each instance.
+    ///
+    /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+    /// that the repeated instances of `self` will be properly
+    /// aligned, even if a given instance of `self` is properly
+    /// aligned. In other words, if the layout returned by
+    /// `repeat_packed` is used to allocate an array, it is not
+    /// guaranteed that all elements in the array will be properly
+    /// aligned.
+    ///
+    /// On arithmetic overflow, returns `None`.
+    pub fn repeat_packed(&self, n: usize) -> Option<Self> {
+        let size = match self.size().checked_mul(n) {
+            None => return None,
+            Some(scaled) => scaled,
+        };
+
+        Layout::from_size_align(size, self.align)
+    }
+
+    /// Creates a layout describing the record for `self` followed by
+    /// `next` with no additional padding between the two. Since no
+    /// padding is inserted, the alignment of `next` is irrelevant,
+    /// and is not incorporated *at all* into the resulting layout.
+    ///
+    /// Returns `(k, offset)`, where `k` is the layout of the
+    /// concatenated record and `offset` is the relative location, in
+    /// bytes, of the start of `next` embedded within the concatenated record
+    /// (assuming that the record itself starts at offset 0).
+    ///
+    /// (The `offset` is always the same as `self.size()`; we use this
+    ///  signature out of convenience in matching the signature of
+    ///  `extend`.)
+    ///
+    /// On arithmetic overflow, returns `None`.
+    pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> {
+        let new_size = match self.size().checked_add(next.size()) {
+            None => return None,
+            Some(new_size) => new_size,
+        };
+        let layout = match Layout::from_size_align(new_size, self.align) {
+            None => return None,
+            Some(l) => l,
+        };
+        Some((layout, self.size()))
+    }
+
+    /// Creates a layout describing the record for a `[T; n]`.
+    ///
+    /// On arithmetic overflow, returns `None`.
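+    ///
+    /// # Examples
+    ///
+    /// (Illustrative sketch only; this API is unstable and may change.)
+    ///
+    /// ```ignore
+    /// let k = Layout::array::<u16>(3).unwrap();
+    /// assert_eq!((k.size(), k.align()), (6, 2));
+    /// ```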
+    pub fn array<T>(n: usize) -> Option<Self> {
+        Layout::new::<T>()
+            .repeat(n)
+            .map(|(k, offs)| {
+                debug_assert!(offs == mem::size_of::<T>());
+                k
+            })
+    }
+}
+
+/// The `AllocErr` error specifies whether an allocation failure is
+/// specifically due to resource exhaustion or to something wrong when
+/// combining the given input arguments with this allocator.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum AllocErr {
+    /// Error due to hitting some resource limit or otherwise running
+    /// out of memory. This condition strongly implies that *some*
+    /// series of deallocations would allow a subsequent reissuing of
+    /// the original allocation request to succeed.
+    Exhausted { request: Layout },
+
+    /// Error due to allocator being fundamentally incapable of
+    /// satisfying the original request. This condition implies that
+    /// such an allocation request will never succeed on the given
+    /// allocator, regardless of environment, memory pressure, or
+    /// other contextual conditions.
+    ///
+    /// For example, an allocator that does not support requests for
+    /// large memory blocks might return this error variant.
+    Unsupported { details: &'static str },
+}
+
+impl AllocErr {
+    pub fn invalid_input(details: &'static str) -> Self {
+        AllocErr::Unsupported { details: details }
+    }
+    pub fn is_memory_exhausted(&self) -> bool {
+        if let AllocErr::Exhausted { .. } = *self { true } else { false }
+    }
+    pub fn is_request_unsupported(&self) -> bool {
+        if let AllocErr::Unsupported { .. } = *self { true } else { false }
+    }
+    pub fn description(&self) -> &str {
+        match *self {
+            AllocErr::Exhausted { .. } => "allocator memory exhausted",
+            AllocErr::Unsupported { .. } => "unsupported allocator request",
+        }
+    }
+}
+
+// (we need this for downstream impl of trait Error)
+impl fmt::Display for AllocErr {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+
+/// The `CannotReallocInPlace` error is returned when `grow_in_place` or
+/// `shrink_in_place` is unable to reuse the given memory block for
+/// a requested layout.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct CannotReallocInPlace;
+
+impl CannotReallocInPlace {
+    pub fn description(&self) -> &str {
+        "cannot reallocate allocator's memory in place"
+    }
+}
+
+// (we need this for downstream impl of trait Error)
+impl fmt::Display for CannotReallocInPlace {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+
+/// An implementation of `Alloc` can allocate, reallocate, and
+/// deallocate arbitrary blocks of data described via `Layout`.
+///
+/// Some of the methods require that a memory block be *currently
+/// allocated* via an allocator. This means that:
+///
+/// * the starting address for that memory block was previously
+///   returned by a previous call to an allocation method (`alloc`,
+///   `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or
+///   reallocation method (`realloc`, `realloc_excess`, or
+///   `realloc_array`), and
+///
+/// * the memory block has not been subsequently deallocated, where
+///   blocks are deallocated either by being passed to a deallocation
+///   method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being
+///   passed to a reallocation method (see above) that returns `Ok`.
+///
+/// A note regarding zero-sized types and zero-sized layouts: many
+/// methods in the `Alloc` trait state that allocation requests
+/// must have non-zero size, or else undefined behavior can result.
+///
+/// * However, some higher-level allocation methods (`alloc_one`,
+///   `alloc_array`) are well-defined on zero-sized types and can
+///   optionally support them: it is left up to the implementor
+///   whether to return `Err`, or to return `Ok` with some pointer.
+///
+/// * If an `Alloc` implementation chooses to return `Ok` in this
+///   case (i.e. the pointer denotes a zero-sized inaccessible block)
+///   then that returned pointer must be considered "currently
+///   allocated". On such an allocator, *all* methods that take
+///   currently-allocated pointers as inputs must accept these
+///   zero-sized pointers, *without* causing undefined behavior.
+///
+/// * In other words, if a zero-sized pointer can flow out of an
+///   allocator, then that allocator must likewise accept that pointer
+///   flowing back into its deallocation and reallocation methods.
+///
+/// Some of the methods require that a layout *fit* a memory block.
+/// What it means for a layout to "fit" a memory block (or
+/// equivalently, for a memory block to "fit" a layout) is that the
+/// following two conditions must hold:
+///
+/// 1. The block's starting address must be aligned to `layout.align()`.
+///
+/// 2. The block's size must fall in the range `[use_min, use_max]`, where:
+///
+///    * `use_min` is `self.usable_size(layout).0`, and
+///
+///    * `use_max` is the capacity that was (or would have been)
+///      returned when (if) the block was allocated via a call to
+///      `alloc_excess` or `realloc_excess`.
+///
+/// Note that:
+///
+///  * the size of the layout most recently used to allocate the block
+///    is guaranteed to be in the range `[use_min, use_max]`, and
+///
+///  * a lower-bound on `use_max` can be safely approximated by a call to
+///    `usable_size`, and
+///
+///  * if a layout `k` fits a memory block (denoted by `ptr`)
+///    currently allocated via an allocator `a`, then it is legal to
+///    use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`.
+pub unsafe trait Alloc {
+
+    // (Note: existing allocators have unspecified but well-defined
+    // behavior in response to a zero-size allocation request;
+    // e.g. in C, `malloc` of 0 will either return a null pointer or a
+    // unique pointer, but will not have arbitrary undefined
+    // behavior. Rust should consider revising the alloc::heap crate
+    // to reflect this reality.)
+
+    /// Returns a pointer meeting the size and alignment guarantees of
+    /// `layout`.
+    ///
+    /// If this method returns an `Ok(addr)`, then the `addr` returned
+    /// will be a non-null address pointing to a block of storage
+    /// suitable for holding an instance of `layout`.
+    ///
+    /// The returned block of storage may or may not have its contents
+    /// initialized. (Extension subtraits might restrict this
+    /// behavior, e.g. to ensure initialization to particular sets of
+    /// bit patterns.)
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure that `layout` has non-zero size.
+    ///
+    /// (Extension subtraits might provide more specific bounds on
+    /// behavior, e.g. guarantee a sentinel address or a null pointer
+    /// in response to a zero-size allocation request.)
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet the allocator's size or alignment
+    /// constraints.
+    ///
+    /// Implementations are encouraged to return `Err` on memory
+    /// exhaustion rather than panicking or aborting, but this is not
+    /// a strict requirement. (Specifically: it is *legal* to
+    /// implement this trait atop an underlying native allocation
+    /// library that aborts on memory exhaustion.)
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>;
+
+    /// Deallocate the memory referenced by `ptr`.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via
+    ///   this allocator,
+    ///
+    /// * `layout` must *fit* that block of memory,
+    ///
+    /// * In addition to `layout` fitting that block of memory, the
+    ///   alignment of `layout` must match the alignment used
+    ///   to allocate that block of memory.
+    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout);
+
+    /// Allocator-specific method for signalling an out-of-memory
+    /// condition.
+    ///
+    /// `oom` aborts the thread or process, optionally performing
+    /// cleanup or logging diagnostic information before panicking or
+    /// aborting.
+    ///
+    /// `oom` is meant to be used by clients that are unable to cope with
+    /// an unsatisfied allocation request (signaled by an error such as
+    /// `AllocErr::Exhausted`) and that wish to abandon computation rather
+    /// than attempt to recover locally. Such clients should pass the
+    /// signalling error value back into `oom`, where the allocator
+    /// may incorporate that error value into its diagnostic report
+    /// before aborting.
+    ///
+    /// Implementations of the `oom` method are discouraged from
+    /// infinitely recursing in nested calls to `oom`. In
+    /// practice this means implementors should eschew allocating,
+    /// especially from `self` (directly or indirectly).
+    ///
+    /// Implementations of the allocation and reallocation methods
+    /// (e.g. `alloc`, `alloc_one`, `realloc`) are discouraged from
+    /// panicking (or aborting) in the event of memory exhaustion;
+    /// instead they should return an appropriate error from the
+    /// invoked method, and let the client decide whether to invoke
+    /// this `oom` method in response.
+    fn oom(&mut self, _: AllocErr) -> ! {
+        unsafe { ::core::intrinsics::abort() }
+    }
+
+    // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS ==
+    // usable_size
+
+    /// Returns bounds on the guaranteed usable size of a successful
+    /// allocation created with the specified `layout`.
+    ///
+    /// In particular, if one has a memory block allocated via a given
+    /// allocator `a` and layout `k` where `a.usable_size(k)` returns
+    /// `(l, u)`, then one can pass that block to `a.dealloc()` with a
+    /// layout in the size range `[l, u]`.
+    ///
+    /// (All implementors of `usable_size` must ensure that
+    /// `l <= k.size() <= u`)
+    ///
+    /// Both the lower- and upper-bounds (`l` and `u` respectively)
+    /// are provided, because an allocator based on size classes could
+    /// misbehave if one attempts to deallocate a block without
+    /// providing a correct value for its size (i.e., one within the
+    /// range `[l, u]`).
+    ///
+    /// Clients who wish to make use of excess capacity are encouraged
+    /// to use `alloc_excess` and `realloc_excess` instead, as
+    /// this method is constrained to report conservative values that
+    /// serve as valid bounds for *all possible* allocation method
+    /// calls.
+    ///
+    /// However, for clients that do not wish to track the capacity
+    /// returned by `alloc_excess` locally, this method is likely to
+    /// produce useful results.
+    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+        (layout.size(), layout.size())
+    }
+
+    // == METHODS FOR MEMORY REUSE ==
+    // realloc. alloc_excess, realloc_excess
+
+    /// Returns a pointer suitable for holding data described by
+    /// `new_layout`, meeting its size and alignment guarantees. To
+    /// accomplish this, this may extend or shrink the allocation
+    /// referenced by `ptr` to fit `new_layout`.
+    ///
+    /// If this returns `Ok`, then ownership of the memory block
+    /// referenced by `ptr` has been transferred to this
+    /// allocator. The memory may or may not have been freed, and
+    /// should be considered unusable (unless of course it was
+    /// transferred back to the caller again via the return value of
+    /// this method).
+    ///
+    /// If this method returns `Err`, then ownership of the memory
+    /// block has not been transferred to this allocator, and the
+    /// contents of the memory block are unaltered.
+    ///
+    /// For best results, `new_layout` should not impose a different
+    /// alignment constraint than `layout`. (In other words,
+    /// `new_layout.align()` should equal `layout.align()`.) However,
+    /// behavior is well-defined (though underspecified) when this
+    /// constraint is violated; further discussion below.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above). (The `new_layout`
+    ///   argument need not fit it.)
+    ///
+    /// * `new_layout` must have size greater than zero.
+    ///
+    /// * the alignment of `new_layout` must be non-zero.
+    ///
+    /// (Extension subtraits might provide more specific bounds on
+    /// behavior, e.g. guarantee a sentinel address or a null pointer
+    /// in response to a zero-size allocation request.)
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` only if `new_layout` does not match the
+    /// alignment of `layout`, or does not meet the allocator's size
+    /// and alignment constraints, or if reallocation otherwise fails.
+    ///
+    /// (Note the previous sentence did not say "if and only if" -- in
+    /// particular, an implementation of this method *can* return `Ok`
+    /// if `new_layout.align() != old_layout.align()`; or it can
+    /// return `Err` in that scenario, depending on whether this
+    /// allocator can dynamically adjust the alignment constraint for
+    /// the block.)
+    ///
+    /// Implementations are encouraged to return `Err` on memory
+    /// exhaustion rather than panicking or aborting, but this is not
+    /// a strict requirement. (Specifically: it is *legal* to
+    /// implement this trait atop an underlying native allocation
+    /// library that aborts on memory exhaustion.)
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
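+    ///
+    /// # Examples
+    ///
+    /// (Illustrative sketch using the `HeapAlloc` added in this PR; the
+    /// API is unstable and may change.)
+    ///
+    /// ```ignore
+    /// let mut a = HeapAlloc;
+    /// let small = Layout::array::<u8>(16).unwrap();
+    /// let large = Layout::array::<u8>(64).unwrap();
+    /// unsafe {
+    ///     let p = a.alloc(small.clone()).unwrap_or_else(|e| a.oom(e));
+    ///     // On success the block must afterwards be deallocated (or
+    ///     // reallocated) with `large`, not `small`.
+    ///     let p = a.realloc(p, small, large.clone()).unwrap_or_else(|e| a.oom(e));
+    ///     a.dealloc(p, large);
+    /// }
+    /// ```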
+    unsafe fn realloc(&mut self,
+                      ptr: *mut u8,
+                      layout: Layout,
+                      new_layout: Layout) -> Result<*mut u8, AllocErr> {
+        let new_size = new_layout.size();
+        let old_size = layout.size();
+        let aligns_match = layout.align == new_layout.align;
+
+        if new_size >= old_size && aligns_match {
+            if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_layout.clone()) {
+                return Ok(ptr);
+            }
+        } else if new_size < old_size && aligns_match {
+            if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_layout.clone()) {
+                return Ok(ptr);
+            }
+        }
+
+        // otherwise, fall back on alloc + copy + dealloc.
+        let result = self.alloc(new_layout);
+        if let Ok(new_ptr) = result {
+            ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
+            self.dealloc(ptr, layout);
+        }
+        result
+    }
+
+    /// Behaves like `alloc`, but also ensures that the contents
+    /// are set to zero before being returned.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe for the same reasons that `alloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet the allocator's size or alignment
+    /// constraints, just as in `alloc`.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+        let size = layout.size();
+        let p = self.alloc(layout);
+        if let Ok(p) = p {
+            ptr::write_bytes(p, 0, size);
+        }
+        p
+    }
+
+    /// Behaves like `alloc`, but also returns the whole size of
+    /// the returned block. For some `layout` inputs, like arrays, this
+    /// may include extra storage usable for additional data.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe for the same reasons that `alloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet the allocator's size or alignment
+    /// constraints, just as in `alloc`.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
+        let usable_size = self.usable_size(&layout);
+        self.alloc(layout).map(|p| Excess(p, usable_size.1))
+    }
+
+    /// Behaves like `realloc`, but also returns the whole size of
+    /// the returned block. For some `layout` inputs, like arrays, this
+    /// may include extra storage usable for additional data.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe for the same reasons that `realloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet the allocator's size or alignment
+    /// constraints, just as in `realloc`.
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    unsafe fn realloc_excess(&mut self,
+                             ptr: *mut u8,
+                             layout: Layout,
+                             new_layout: Layout) -> Result<Excess, AllocErr> {
+        let usable_size = self.usable_size(&new_layout);
+        self.realloc(ptr, layout, new_layout)
+            .map(|p| Excess(p, usable_size.1))
+    }
+
+    /// Attempts to extend the allocation referenced by `ptr` to fit `new_layout`.
+    ///
+    /// If this returns `Ok`, then the allocator has asserted that the
+    /// memory block referenced by `ptr` now fits `new_layout`, and thus can
+    /// be used to carry data of that layout. (The allocator is allowed to
+    /// expend effort to accomplish this, such as extending the memory block to
+    /// include successor blocks, or virtual memory tricks.)
+    ///
+    /// Regardless of what this method returns, ownership of the
+    /// memory block referenced by `ptr` has not been transferred, and
+    /// the contents of the memory block are unaltered.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above); note the
+    ///   `new_layout` argument need not fit it,
+    ///
+    /// * `new_layout.size()` must not be less than `layout.size()`,
+    ///
+    /// * `new_layout.align()` must equal `layout.align()`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err(CannotReallocInPlace)` when the allocator is
+    /// unable to assert that the memory block referenced by `ptr`
+    /// could fit `new_layout`.
+    ///
+    /// Note that one cannot pass `CannotReallocInPlace` to the `oom`
+    /// method; clients are expected either to be able to recover from
+    /// `grow_in_place` failures without aborting, or to fall back on
+    /// another reallocation method before resorting to an abort.
+    unsafe fn grow_in_place(&mut self,
+                            ptr: *mut u8,
+                            layout: Layout,
+                            new_layout: Layout) -> Result<(), CannotReallocInPlace> {
+        let _ = ptr; // this default implementation doesn't care about the actual address.
+        debug_assert!(new_layout.size >= layout.size);
+        debug_assert!(new_layout.align == layout.align);
+        let (_l, u) = self.usable_size(&layout);
+        // _l <= layout.size()                       [guaranteed by usable_size()]
+        //       layout.size() <= new_layout.size()  [required by this method]
+        if new_layout.size <= u {
+            return Ok(());
+        } else {
+            return Err(CannotReallocInPlace);
+        }
+    }
+
+    /// Attempts to shrink the allocation referenced by `ptr` to fit `new_layout`.
+    ///
+    /// If this returns `Ok`, then the allocator has asserted that the
+    /// memory block referenced by `ptr` now fits `new_layout`, and
+    /// thus can only be used to carry data of that smaller
+    /// layout. (The allocator is allowed to take advantage of this,
+    /// carving off portions of the block for reuse elsewhere.) The
+    /// truncated contents of the block within the smaller layout are
+    /// unaltered, and ownership of block has not been transferred.
+    ///
+    /// If this returns `Err`, then the memory block is considered to
+    /// still represent the original (larger) `layout`. None of the
+    /// block has been carved off for reuse elsewhere, ownership of
+    /// the memory block has not been transferred, and the contents of
+    /// the memory block are unaltered.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above); note the
+    ///   `new_layout` argument need not fit it,
+    ///
+    /// * `new_layout.size()` must not be greater than `layout.size()`
+    ///   (and must be greater than zero),
+    ///
+    /// * `new_layout.align()` must equal `layout.align()`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err(CannotReallocInPlace)` when the allocator is
+    /// unable to assert that the memory block referenced by `ptr`
+    /// could fit `new_layout`.
+    ///
+    /// Note that one cannot pass `CannotReallocInPlace` to the `oom`
+    /// method; clients are expected either to be able to recover from
+    /// `shrink_in_place` failures without aborting, or to fall back
+    /// on another reallocation method before resorting to an abort.
+    unsafe fn shrink_in_place(&mut self,
+                              ptr: *mut u8,
+                              layout: Layout,
+                              new_layout: Layout) -> Result<(), CannotReallocInPlace> {
+        let _ = ptr; // this default implementation doesn't care about the actual address.
+        debug_assert!(new_layout.size <= layout.size);
+        debug_assert!(new_layout.align == layout.align);
+        let (l, _u) = self.usable_size(&layout);
+        //                      layout.size() <= _u  [guaranteed by usable_size()]
+        // new_layout.size() <= layout.size()        [required by this method]
+        if l <= new_layout.size {
+            return Ok(());
+        } else {
+            return Err(CannotReallocInPlace);
+        }
+    }
+
+
+    // == COMMON USAGE PATTERNS ==
+    // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array
+
+    /// Allocates a block suitable for holding an instance of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+    /// must be considered "currently allocated" and must be
+    /// acceptable input to methods such as `realloc` or `dealloc`,
+    /// *even if* `T` is a zero-sized type. In other words, if your
+    /// `Alloc` implementation overrides this method in a manner
+    /// that can return a zero-sized `ptr`, then all reallocation and
+    /// deallocation methods need to be similarly overridden to accept
+    /// such values as input.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `T` does not meet the allocator's size or alignment constraints.
+    ///
+    /// For zero-sized `T`, may return either of `Ok` or `Err`, but
+    /// will *not* yield undefined behavior.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    fn alloc_one<T>(&mut self) -> Result<Unique<T>, AllocErr>
+        where Self: Sized
+    {
+        let k = Layout::new::<T>();
+        if k.size() > 0 {
+            unsafe { self.alloc(k).map(|p| Unique::new(p as *mut T)) }
+        } else {
+            Err(AllocErr::invalid_input("zero-sized type invalid for alloc_one"))
+        }
+    }
+
+    /// Deallocates a block suitable for holding an instance of `T`.
+    ///
+    /// The given block must have been produced by this allocator,
+    /// and must be suitable for storing a `T` (in terms of alignment
+    /// as well as minimum and maximum size); otherwise yields
+    /// undefined behavior.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure both:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via this allocator
+    ///
+    /// * the layout of `T` must *fit* that block of memory.
+    unsafe fn dealloc_one<T>(&mut self, ptr: Unique<T>)
+        where Self: Sized
+    {
+        let raw_ptr = ptr.as_ptr() as *mut u8;
+        let k = Layout::new::<T>();
+        if k.size() > 0 {
+            self.dealloc(raw_ptr, k);
+        }
+    }
+
+    /// Allocates a block suitable for holding `n` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+    /// must be considered "currently allocated" and must be
+    /// acceptable input to methods such as `realloc` or `dealloc`,
+    /// *even if* `T` is a zero-sized type. In other words, if your
+    /// `Alloc` implementation overrides this method in a manner
+    /// that can return a zero-sized `ptr`, then all reallocation and
+    /// deallocation methods need to be similarly overridden to accept
+    /// such values as input.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `[T; n]` does not meet the allocator's size or alignment
+    /// constraints.
+    ///
+    /// For zero-sized `T` or `n == 0`, may return either of `Ok` or
+    /// `Err`, but will *not* yield undefined behavior.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    fn alloc_array<T>(&mut self, n: usize) -> Result<Unique<T>, AllocErr>
+        where Self: Sized
+    {
+        match Layout::array::<T>(n) {
+            Some(ref layout) if layout.size() > 0 => {
+                unsafe {
+                    self.alloc(layout.clone())
+                        .map(|p| {
+                            Unique::new(p as *mut T)
+                        })
+                }
+            }
+            _ => Err(AllocErr::invalid_input("invalid layout for alloc_array")),
+        }
+    }
+
+    /// Reallocates a block previously suitable for holding `n_old`
+    /// instances of `T`, returning a block suitable for holding
+    /// `n_new` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * the layout of `[T; n_old]` must *fit* that block of memory.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `[T; n_new]` does not meet the allocator's size or alignment
+    /// constraints.
+    ///
+    /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or
+    /// `Err`, but will *not* yield undefined behavior.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the allocator's `oom`
+    /// method, rather than directly invoking `panic!` or similar.
+    unsafe fn realloc_array<T>(&mut self,
+                               ptr: Unique<T>,
+                               n_old: usize,
+                               n_new: usize) -> Result<Unique<T>, AllocErr>
+        where Self: Sized
+    {
+        match (Layout::array::<T>(n_old), Layout::array::<T>(n_new), ptr.as_ptr()) {
+            (Some(ref k_old), Some(ref k_new), ptr) if k_old.size() > 0 && k_new.size() > 0 => {
+                self.realloc(ptr as *mut u8, k_old.clone(), k_new.clone())
+                    .map(|p|Unique::new(p as *mut T))
+            }
+            _ => {
+                Err(AllocErr::invalid_input("invalid layout for realloc_array"))
+            }
+        }
+    }
+
+    /// Deallocates a block suitable for holding `n` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure both:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via this allocator
+    ///
+    /// * the layout of `[T; n]` must *fit* that block of memory.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either `[T; n]` or the given
+    /// memory block does not meet the allocator's size or alignment
+    /// constraints.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    unsafe fn dealloc_array<T>(&mut self, ptr: Unique<T>, n: usize) -> Result<(), AllocErr>
+        where Self: Sized
+    {
+        let raw_ptr = ptr.as_ptr() as *mut u8;
+        match Layout::array::<T>(n) {
+            Some(ref k) if k.size() > 0 => {
+                Ok(self.dealloc(raw_ptr, k.clone()))
+            }
+            _ => {
+                Err(AllocErr::invalid_input("invalid layout for dealloc_array"))
+            }
+        }
+    }
+}
diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs
index 5ff21c86483..d46c6a83ff3 100644
--- a/src/liballoc/heap.rs
+++ b/src/liballoc/heap.rs
@@ -15,7 +15,8 @@
                       tracing garbage collector",
             issue = "27700")]
 
-use core::{isize, usize};
+use allocator::{Alloc, AllocErr, CannotReallocInPlace, Layout};
+use core::{isize, usize, cmp, ptr};
 use core::intrinsics::{min_align_of_val, size_of_val};
 
 #[allow(improper_ctypes)]
@@ -44,6 +45,82 @@ fn check_size_and_alignment(size: usize, align: usize) {
                   align);
 }
 
+#[derive(Copy, Clone, Default, Debug)]
+pub struct HeapAlloc;
+
+unsafe impl Alloc for HeapAlloc {
+    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+        let addr = allocate(layout.size(), layout.align());
+        if addr.is_null() {
+            Err(AllocErr::Exhausted { request: layout })
+        } else {
+            Ok(addr)
+        }
+    }
+
+    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+        let addr = allocate_zeroed(layout.size(), layout.align());
+        if addr.is_null() {
+            Err(AllocErr::Exhausted { request: layout })
+        } else {
+            Ok(addr)
+        }
+    }
+
+    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+        deallocate(ptr, layout.size(), layout.align());
+    }
+
+    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+        (layout.size(), usable_size(layout.size(), layout.align()))
+    }
+
+    unsafe fn realloc(&mut self,
+                      ptr: *mut u8,
+                      layout: Layout,
+                      new_layout: Layout)
+                      -> Result<*mut u8, AllocErr>
+    {
+        let old_size = layout.size();
+        let new_size = new_layout.size();
+        if layout.align() == new_layout.align() {
+            let new_ptr = reallocate(ptr, old_size, new_size, layout.align());
+            if new_ptr.is_null() {
+                // We assume `reallocate` already tried alloc + copy +
+                // dealloc fallback; thus pointless to repeat effort
+                Err(AllocErr::Exhausted { request: new_layout })
+            } else {
+                Ok(new_ptr)
+            }
+        } else {
+            // if alignments don't match, fall back on alloc + copy + dealloc
+            let result = self.alloc(new_layout);
+            if let Ok(new_ptr) = result {
+                ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
+                self.dealloc(ptr, layout);
+            }
+            result
+        }
+    }
+
+    unsafe fn grow_in_place(&mut self,
+                            ptr: *mut u8,
+                            layout: Layout,
+                            new_layout: Layout)
+                            -> Result<(), CannotReallocInPlace>
+    {
+        // grow_in_place spec requires this, and the spec for reallocate_inplace
+        // makes it hard to detect failure if it does not hold.
+        debug_assert!(new_layout.size() >= layout.size());
+
+        if layout.align() != new_layout.align() { // reallocate_inplace requires this.
+            return Err(CannotReallocInPlace);
+        }
+        let usable = reallocate_inplace(ptr, layout.size(), new_layout.size(), layout.align());
+        if usable >= new_layout.size() { Ok(()) } else { Err(CannotReallocInPlace) }
+    }
+}
+
 // FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
 
 /// Return a pointer to `size` bytes of memory aligned to `align`.
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 5252dabc127..ca52943ea97 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -143,6 +143,10 @@ extern crate std_unicode;
 #[macro_use]
 mod macros;
 
+// Allocator trait and helper struct definitions
+
+pub mod allocator;
+
 // Heaps provided for low-level allocation strategies
 
 pub mod heap;
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 34ab0a19d4e..7117c446821 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -8,11 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use core::ptr::Unique;
+use allocator::{Alloc, Layout};
+use core::ptr::{self, Unique};
 use core::mem;
 use core::slice;
-use heap;
-use super::oom;
+use heap::{HeapAlloc};
 use super::boxed::Box;
 use core::ops::Drop;
 use core::cmp;
@@ -45,17 +45,16 @@ use core::cmp;
 /// field. This allows zero-sized types to not be special-cased by consumers of
 /// this type.
 #[allow(missing_debug_implementations)]
-pub struct RawVec<T> {
+pub struct RawVec<T, A: Alloc = HeapAlloc> {
     ptr: Unique<T>,
     cap: usize,
+    a: A,
 }
 
-impl<T> RawVec<T> {
-    /// Creates the biggest possible RawVec without allocating. If T has positive
-    /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
-    /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing
-    /// delayed allocation.
-    pub fn new() -> Self {
+impl<T, A: Alloc> RawVec<T, A> {
+    /// Like `new` but parameterized over the choice of allocator for
+    /// the returned RawVec.
+    pub fn new_in(a: A) -> Self {
         // !0 is usize::MAX. This branch should be stripped at compile time.
         let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
 
@@ -63,35 +62,25 @@ impl<T> RawVec<T> {
         RawVec {
             ptr: Unique::empty(),
             cap: cap,
+            a: a,
         }
     }
 
-    /// Creates a RawVec with exactly the capacity and alignment requirements
-    /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
-    /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
-    /// get a RawVec with the requested capacity!
-    ///
-    /// # Panics
-    ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM
+    /// Like `with_capacity` but parameterized over the choice of
+    /// allocator for the returned RawVec.
     #[inline]
-    pub fn with_capacity(cap: usize) -> Self {
-        RawVec::allocate(cap, false)
+    pub fn with_capacity_in(cap: usize, a: A) -> Self {
+        RawVec::allocate_in(cap, false, a)
     }
 
-    /// Like `with_capacity` but guarantees the buffer is zeroed.
+    /// Like `with_capacity_zeroed` but parameterized over the choice
+    /// of allocator for the returned RawVec.
     #[inline]
-    pub fn with_capacity_zeroed(cap: usize) -> Self {
-        RawVec::allocate(cap, true)
+    pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
+        RawVec::allocate_in(cap, true, a)
     }
 
-    fn allocate(cap: usize, zeroed: bool) -> Self {
+    fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
         unsafe {
             let elem_size = mem::size_of::<T>();
 
@@ -103,35 +92,93 @@ impl<T> RawVec<T> {
                 mem::align_of::<T>() as *mut u8
             } else {
                 let align = mem::align_of::<T>();
-                let ptr = if zeroed {
-                    heap::allocate_zeroed(alloc_size, align)
+                let result = if zeroed {
+                    a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
                 } else {
-                    heap::allocate(alloc_size, align)
+                    a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
                 };
-                if ptr.is_null() {
-                    oom()
+                match result {
+                    Ok(ptr) => ptr,
+                    Err(err) => a.oom(err),
                 }
-                ptr
             };
 
             RawVec {
                 ptr: Unique::new(ptr as *mut _),
                 cap: cap,
+                a: a,
             }
         }
     }
+}
+
+impl<T> RawVec<T, HeapAlloc> {
+    /// Creates the biggest possible RawVec (on the system heap)
+    /// without allocating. If T has positive size, then this makes a
+    /// RawVec with capacity 0. If T has 0 size, then it makes a
+    /// RawVec with capacity `usize::MAX`. Useful for implementing
+    /// delayed allocation.
+    pub fn new() -> Self {
+        Self::new_in(HeapAlloc)
+    }
+
+    /// Creates a RawVec (on the system heap) with exactly the
+    /// capacity and alignment requirements for a `[T; cap]`. This is
+    /// equivalent to calling RawVec::new when `cap` is 0 or T is
+    /// zero-sized. Note that if `T` is zero-sized this means you will
+    /// *not* get a RawVec with the requested capacity!
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    #[inline]
+    pub fn with_capacity(cap: usize) -> Self {
+        RawVec::allocate_in(cap, false, HeapAlloc)
+    }
+
+    /// Like `with_capacity` but guarantees the buffer is zeroed.
+    #[inline]
+    pub fn with_capacity_zeroed(cap: usize) -> Self {
+        RawVec::allocate_in(cap, true, HeapAlloc)
+    }
+}
+
+impl<T, A: Alloc> RawVec<T, A> {
+    /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
+    ///
+    /// # Undefined Behavior
+    ///
+    /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
+    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+    /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
+    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
+        RawVec {
+            ptr: Unique::new(ptr),
+            cap: cap,
+            a: a,
+        }
+    }
+}
 
-    /// Reconstitutes a RawVec from a pointer and capacity.
+impl<T> RawVec<T, HeapAlloc> {
+    /// Reconstitutes a RawVec from a pointer and capacity.
     ///
     /// # Undefined Behavior
     ///
-    /// The ptr must be allocated, and with the given capacity. The
+    /// The ptr must be allocated (on the system heap), and with the given capacity. The
     /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
     /// If the ptr and capacity come from a RawVec, then this is guaranteed.
     pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
         RawVec {
             ptr: Unique::new(ptr),
             cap: cap,
+            a: HeapAlloc,
         }
     }
 
@@ -145,7 +192,7 @@ impl<T> RawVec<T> {
     }
 }
 
-impl<T> RawVec<T> {
+impl<T, A: Alloc> RawVec<T, A> {
     /// Gets a raw pointer to the start of the allocation. Note that this is
     /// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
     /// be careful.
@@ -165,6 +212,16 @@ impl<T> RawVec<T> {
         }
     }
 
+    /// Returns a shared reference to the allocator backing this RawVec.
+    pub fn alloc(&self) -> &A {
+        &self.a
+    }
+
+    /// Returns a mutable reference to the allocator backing this RawVec.
+    pub fn alloc_mut(&mut self) -> &mut A {
+        &mut self.a
+    }
+
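// Illustration only (not part of this patch), reusing the `BoundedAlloc` test
// allocator defined at the bottom of this file: the allocator travels with the
// RawVec and can be reached again through these accessors.
let v: RawVec<u8, _> = RawVec::with_capacity_in(50, BoundedAlloc { fuel: 500 });
assert_eq!(v.alloc().fuel, 450);   // the 50-byte allocation consumed 50 units of fuel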
     /// Doubles the size of the type's backing allocation. This is common enough
     /// to want to do that it's easiest to just have a dedicated method. Slightly
     /// more efficient logic can be provided for this than the general case.
@@ -215,32 +272,28 @@ impl<T> RawVec<T> {
             // 0, getting to here necessarily means the RawVec is overfull.
             assert!(elem_size != 0, "capacity overflow");
 
-            let align = mem::align_of::<T>();
-
-            let (new_cap, ptr) = if self.cap == 0 {
+            let (new_cap, ptr_res) = if self.cap == 0 {
                 // skip to 4 because tiny Vecs are dumb; but not if that would cause overflow
                 let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
-                let ptr = heap::allocate(new_cap * elem_size, align);
-                (new_cap, ptr)
+                let ptr_res = self.a.alloc_array::<T>(new_cap);
+                (new_cap, ptr_res)
             } else {
                 // Since we guarantee that we never allocate more than isize::MAX bytes,
                 // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
                 let new_cap = 2 * self.cap;
                 let new_alloc_size = new_cap * elem_size;
                 alloc_guard(new_alloc_size);
-                let ptr = heap::reallocate(self.ptr() as *mut _,
-                                           self.cap * elem_size,
-                                           new_alloc_size,
-                                           align);
-                (new_cap, ptr)
+                let ptr_res = self.a.realloc_array(self.ptr, self.cap, new_cap);
+                (new_cap, ptr_res)
             };
 
             // If allocation or reallocation fails, hand the error to the allocator's oom handler
-            if ptr.is_null() {
-                oom()
-            }
+            let uniq = match ptr_res {
+                Err(err) => self.a.oom(err),
+                Ok(uniq) => uniq,
+            };
 
-            self.ptr = Unique::new(ptr as *mut _);
+            self.ptr = uniq;
             self.cap = new_cap;
         }
     }
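// Editorial sketch (not part of this patch) of the growth strategy above, for a
// non-zero-sized element type:
let mut v: RawVec<u32> = RawVec::new();
v.double();                // an empty RawVec jumps straight to 4 elements
assert_eq!(v.cap(), 4);
v.double();                // after that, capacity doubles on each call
assert_eq!(v.cap(), 8);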
@@ -262,7 +315,6 @@ impl<T> RawVec<T> {
     pub fn double_in_place(&mut self) -> bool {
         unsafe {
             let elem_size = mem::size_of::<T>();
-            let align = mem::align_of::<T>();
 
             // since we set the capacity to usize::MAX when elem_size is
             // 0, getting to here necessarily means the RawVec is overfull.
@@ -274,15 +326,20 @@ impl<T> RawVec<T> {
             let new_alloc_size = new_cap * elem_size;
 
             alloc_guard(new_alloc_size);
-            let size = heap::reallocate_inplace(self.ptr() as *mut _,
-                                                self.cap * elem_size,
-                                                new_alloc_size,
-                                                align);
-            if size >= new_alloc_size {
-                // We can't directly divide `size`.
-                self.cap = new_cap;
+
+            let ptr = self.ptr() as *mut _;
+            let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
+            match self.a.grow_in_place(ptr, old_layout, new_layout) {
+                Ok(_) => {
+                    // The allocator grew the block in place, so the new capacity is now valid.
+                    self.cap = new_cap;
+                    true
+                }
+                Err(_) => {
+                    false
+                }
             }
-            size >= new_alloc_size
         }
     }
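// A sketch for illustration only (not part of this patch) of the Layout arithmetic
// used above: `repeat(n)` yields the layout of `n` contiguous values plus the
// stride between them, which is why only the `.0` component is passed on.
let (array, stride) = Layout::new::<u64>().repeat(4).unwrap();
assert_eq!(stride, mem::size_of::<u64>());
assert_eq!(array.size(), 4 * mem::size_of::<u64>());
assert_eq!(array.align(), mem::align_of::<u64>());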
 
@@ -309,7 +366,6 @@ impl<T> RawVec<T> {
     pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
         unsafe {
             let elem_size = mem::size_of::<T>();
-            let align = mem::align_of::<T>();
 
             // NOTE: we don't early branch on ZSTs here because we want this
             // to actually catch "asking for more than usize::MAX" in that case.
@@ -327,21 +383,19 @@ impl<T> RawVec<T> {
             let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
             alloc_guard(new_alloc_size);
 
-            let ptr = if self.cap == 0 {
-                heap::allocate(new_alloc_size, align)
+            let result = if self.cap == 0 {
+                self.a.alloc_array::<T>(new_cap)
             } else {
-                heap::reallocate(self.ptr() as *mut _,
-                                 self.cap * elem_size,
-                                 new_alloc_size,
-                                 align)
+                self.a.realloc_array(self.ptr, self.cap, new_cap)
             };
 
             // If allocation or reallocation fails, hand the error to the allocator's oom handler
-            if ptr.is_null() {
-                oom()
-            }
+            let uniq = match result {
+                Err(err) => self.a.oom(err),
+                Ok(uniq) => uniq,
+            };
 
-            self.ptr = Unique::new(ptr as *mut _);
+            self.ptr = uniq;
             self.cap = new_cap;
         }
     }
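// Illustration only (not part of this patch): `reserve_exact` grows to exactly
// `used_cap + needed_extra_cap` elements, with no amortized doubling.
let mut v: RawVec<u8> = RawVec::new();
v.reserve_exact(0, 10);
assert_eq!(v.cap(), 10);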
@@ -408,9 +462,6 @@ impl<T> RawVec<T> {
     /// ```
     pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
         unsafe {
-            let elem_size = mem::size_of::<T>();
-            let align = mem::align_of::<T>();
-
             // NOTE: we don't early branch on ZSTs here because we want this
             // to actually catch "asking for more than usize::MAX" in that case.
             // If we make it past the first branch then we are guaranteed to
@@ -426,21 +477,18 @@ impl<T> RawVec<T> {
             // FIXME: may crash and burn on over-reserve
             alloc_guard(new_alloc_size);
 
-            let ptr = if self.cap == 0 {
-                heap::allocate(new_alloc_size, align)
+            let result = if self.cap == 0 {
+                self.a.alloc_array::<T>(new_cap)
             } else {
-                heap::reallocate(self.ptr() as *mut _,
-                                 self.cap * elem_size,
-                                 new_alloc_size,
-                                 align)
+                self.a.realloc_array(self.ptr, self.cap, new_cap)
             };
 
-            // If allocate or reallocate fail, we'll get `null` back
-            if ptr.is_null() {
-                oom()
-            }
+            let uniq = match result {
+                Err(err) => self.a.oom(err),
+                Ok(uniq) => uniq,
+            };
 
-            self.ptr = Unique::new(ptr as *mut _);
+            self.ptr = uniq;
             self.cap = new_cap;
         }
     }
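// Editorial sketch (not part of this patch) of the amortized policy: the new
// capacity is the larger of double the current capacity and the exact requirement
// (compare `reserve_does_not_overallocate` in the tests below).
let mut v: RawVec<u8> = RawVec::with_capacity(8);
v.reserve(8, 1);           // needs 9, but doubling to 16 wins
assert_eq!(v.cap(), 16);
v.reserve(16, 100);        // needs 116, which exceeds doubling
assert_eq!(v.cap(), 116);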
@@ -464,9 +512,6 @@ impl<T> RawVec<T> {
     ///   `isize::MAX` bytes.
     pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
         unsafe {
-            let elem_size = mem::size_of::<T>();
-            let align = mem::align_of::<T>();
-
             // NOTE: we don't early branch on ZSTs here because we want this
             // to actually catch "asking for more than usize::MAX" in that case.
             // If we make it past the first branch then we are guaranteed to
@@ -479,18 +524,26 @@ impl<T> RawVec<T> {
                 return false;
             }
 
-            let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
+            let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
             // FIXME: may crash and burn on over-reserve
             alloc_guard(new_alloc_size);
 
-            let size = heap::reallocate_inplace(self.ptr() as *mut _,
-                                                self.cap * elem_size,
-                                                new_alloc_size,
-                                                align);
-            if size >= new_alloc_size {
-                self.cap = new_alloc_size / elem_size;
+            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
+            // (regardless of whether `self.cap - used_cap` wrapped).
+            // Therefore we can safely call grow_in_place.
+
+            let ptr = self.ptr() as *mut _;
+            let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
+            match self.a.grow_in_place(ptr, old_layout, new_layout) {
+                Ok(_) => {
+                    self.cap = new_cap;
+                    true
+                }
+                Err(_) => {
+                    false
+                }
             }
-            size >= new_alloc_size
         }
     }
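// A sketch for illustration only (not part of this patch): the in-place variant
// never moves the buffer; it reports whether the allocator could extend the block,
// so the caller can fall back to a moving `reserve` when it could not.
let mut v: RawVec<u8> = RawVec::with_capacity(8);
if !v.reserve_in_place(8, 100) {
    v.reserve(8, 100);
}
assert!(v.cap() >= 108);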
 
@@ -506,7 +559,6 @@ impl<T> RawVec<T> {
     /// Aborts on OOM.
     pub fn shrink_to_fit(&mut self, amount: usize) {
         let elem_size = mem::size_of::<T>();
-        let align = mem::align_of::<T>();
 
         // Set the `cap` because they might be about to promote to a `Box<[T]>`
         if elem_size == 0 {
@@ -518,24 +570,30 @@ impl<T> RawVec<T> {
         assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
 
         if amount == 0 {
-            mem::replace(self, RawVec::new());
+            // We want to create a new zero-length vector within the
+            // same allocator.  We use ptr::write to avoid an
+            // erroneous attempt to drop the contents, and we use
+            // ptr::read to sidestep the restriction on moving fields
+            // out of types that implement Drop.
+
+            unsafe {
+                let a = ptr::read(&self.a as *const A);
+                self.dealloc_buffer();
+                ptr::write(self, RawVec::new_in(a));
+            }
         } else if self.cap != amount {
             unsafe {
-                // Overflow check is unnecessary as the vector is already at
-                // least this large.
-                let ptr = heap::reallocate(self.ptr() as *mut _,
-                                           self.cap * elem_size,
-                                           amount * elem_size,
-                                           align);
-                if ptr.is_null() {
-                    oom()
+                match self.a.realloc_array(self.ptr, self.cap, amount) {
+                    Err(err) => self.a.oom(err),
+                    Ok(uniq) => self.ptr = uniq,
                 }
-                self.ptr = Unique::new(ptr as *mut _);
             }
             self.cap = amount;
         }
     }
+}
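// Illustration only (not part of this patch): shrinking reallocates down to
// exactly the requested capacity, and shrinking to zero hands the buffer back
// to the allocator entirely.
let mut v: RawVec<u8> = RawVec::with_capacity(64);
v.shrink_to_fit(16);
assert_eq!(v.cap(), 16);
v.shrink_to_fit(0);
assert_eq!(v.cap(), 0);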
 
+impl<T> RawVec<T, HeapAlloc> {
     /// Converts the entire buffer into `Box<[T]>`.
     ///
     /// While it is not *strictly* Undefined Behavior to call
@@ -553,21 +611,25 @@ impl<T> RawVec<T> {
     }
 }
 
-unsafe impl<#[may_dangle] T> Drop for RawVec<T> {
+impl<T, A: Alloc> RawVec<T, A> {
     /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
-    fn drop(&mut self) {
+    pub unsafe fn dealloc_buffer(&mut self) {
         let elem_size = mem::size_of::<T>();
         if elem_size != 0 && self.cap != 0 {
-            let align = mem::align_of::<T>();
-
-            let num_bytes = elem_size * self.cap;
-            unsafe {
-                heap::deallocate(self.ptr() as *mut u8, num_bytes, align);
-            }
+            let ptr = self.ptr() as *mut u8;
+            let layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+            self.a.dealloc(ptr, layout);
         }
     }
 }
 
+unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
+    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
+    fn drop(&mut self) {
+        unsafe { self.dealloc_buffer(); }
+    }
+}
+
 
 
 // We need to guarantee the following:
@@ -593,6 +655,46 @@ mod tests {
     use super::*;
 
     #[test]
+    fn allocator_param() {
+        use allocator::{Alloc, AllocErr};
+
+        // Writing a test of integration between third-party
+        // allocators and RawVec is a little tricky because the RawVec
+        // API does not expose fallible allocation methods, so we
+        // cannot check what happens when allocator is exhausted
+        // (beyond detecting a panic).
+        //
+        // Instead, this just checks that the RawVec methods do at
+        // least go through the Allocator API when they reserve
+        // storage.
+
+        // A dumb allocator that consumes a fixed amount of fuel
+        // before allocation attempts start failing.
+        struct BoundedAlloc { fuel: usize }
+        unsafe impl Alloc for BoundedAlloc {
+            unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+                let size = layout.size();
+                if size > self.fuel {
+                    return Err(AllocErr::Unsupported { details: "fuel exhausted" });
+                }
+                match HeapAlloc.alloc(layout) {
+                    ok @ Ok(_) => { self.fuel -= size; ok }
+                    err @ Err(_) => err,
+                }
+            }
+            unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+                HeapAlloc.dealloc(ptr, layout)
+            }
+        }
+
+        let a = BoundedAlloc { fuel: 500 };
+        let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
+        assert_eq!(v.a.fuel, 450);
+        v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
+        assert_eq!(v.a.fuel, 250);
+    }
+
+    #[test]
     fn reserve_does_not_overallocate() {
         {
             let mut v: RawVec<u32> = RawVec::new();
@@ -624,4 +726,5 @@ mod tests {
         }
     }
 
+
 }