about summary refs log tree commit diff
path: root/src/libstd
diff options
context:
space:
mode:
authorAaron Turon <aturon@mozilla.com>2014-11-23 19:21:17 -0800
committerAaron Turon <aturon@mozilla.com>2014-12-18 23:31:34 -0800
commit2b3477d373603527d23cc578f3737857b7b253d7 (patch)
tree56022ebf11d5d27a6ef15f15d00d014a84a35837 /src/libstd
parent840de072085df360733c48396224e9966e2dc72c (diff)
downloadrust-2b3477d373603527d23cc578f3737857b7b253d7.tar.gz
rust-2b3477d373603527d23cc578f3737857b7b253d7.zip
libs: merge librustrt into libstd
This commit merges the `rustrt` crate into `std`, undoing part of the
facade. This merger continues the paring down of the runtime system.

Code relying on the public API of `rustrt` will break; some of this API
is now available through `std::rt`, but is likely to change and/or be
removed very soon.

[breaking-change]
Diffstat (limited to 'src/libstd')
-rw-r--r--src/libstd/c_str.rs844
-rw-r--r--src/libstd/comm/mod.rs11
-rw-r--r--src/libstd/comm/oneshot.rs4
-rw-r--r--src/libstd/comm/select.rs4
-rw-r--r--src/libstd/comm/shared.rs6
-rw-r--r--src/libstd/comm/stream.rs6
-rw-r--r--src/libstd/comm/sync.rs6
-rw-r--r--src/libstd/failure.rs6
-rw-r--r--src/libstd/io/stdio.rs7
-rw-r--r--src/libstd/lib.rs9
-rw-r--r--src/libstd/os.rs4
-rw-r--r--src/libstd/rt/args.rs167
-rw-r--r--src/libstd/rt/at_exit_imp.rs65
-rw-r--r--src/libstd/rt/backtrace.rs978
-rw-r--r--src/libstd/rt/bookkeeping.rs61
-rw-r--r--src/libstd/rt/exclusive.rs115
-rw-r--r--src/libstd/rt/libunwind.rs128
-rw-r--r--src/libstd/rt/local.rs131
-rw-r--r--src/libstd/rt/local_ptr.rs404
-rw-r--r--src/libstd/rt/macros.rs45
-rw-r--r--src/libstd/rt/mod.rs88
-rw-r--r--src/libstd/rt/mutex.rs406
-rw-r--r--src/libstd/rt/task.rs561
-rw-r--r--src/libstd/rt/thread.rs171
-rw-r--r--src/libstd/rt/thread_local_storage.rs115
-rw-r--r--src/libstd/rt/unwind.rs638
-rw-r--r--src/libstd/rt/util.rs147
-rw-r--r--src/libstd/rtdeps.rs2
-rw-r--r--src/libstd/sys/common/backtrace.rs131
-rw-r--r--src/libstd/sys/common/helper_thread.rs5
-rw-r--r--src/libstd/sys/common/mod.rs3
-rw-r--r--src/libstd/sys/common/stack.rs325
-rw-r--r--src/libstd/sys/common/thread.rs34
-rw-r--r--src/libstd/sys/common/thread_local.rs4
-rw-r--r--src/libstd/sys/unix/backtrace.rs493
-rw-r--r--src/libstd/sys/unix/mod.rs3
-rw-r--r--src/libstd/sys/unix/stack_overflow.rs291
-rw-r--r--src/libstd/sys/unix/thread.rs270
-rw-r--r--src/libstd/sys/windows/backtrace.rs371
-rw-r--r--src/libstd/sys/windows/mod.rs3
-rw-r--r--src/libstd/sys/windows/stack_overflow.rs120
-rw-r--r--src/libstd/sys/windows/thread.rs95
-rw-r--r--src/libstd/sys/windows/thread_local.rs6
-rw-r--r--src/libstd/task.rs12
-rw-r--r--src/libstd/thread_local/mod.rs2
-rw-r--r--src/libstd/thunk.rs52
46 files changed, 6306 insertions, 1043 deletions
diff --git a/src/libstd/c_str.rs b/src/libstd/c_str.rs
new file mode 100644
index 00000000000..27a139835c9
--- /dev/null
+++ b/src/libstd/c_str.rs
@@ -0,0 +1,844 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! C-string manipulation and management
+//!
+//! This module provides the basic methods for creating and manipulating
+//! null-terminated strings for use with FFI calls (back to C). Most C APIs require
+//! that the string being passed to them is null-terminated, and by default rust's
+//! string types are *not* null terminated.
+//!
+//! The other problem with translating Rust strings to C strings is that Rust
+//! strings can validly contain a null-byte in the middle of the string (0 is a
+//! valid Unicode codepoint). This means that not all Rust strings can actually be
+//! translated to C strings.
+//!
+//! # Creation of a C string
+//!
+//! A C string is managed through the `CString` type defined in this module. It
+//! "owns" the internal buffer of characters and will automatically deallocate the
+//! buffer when the string is dropped. The `ToCStr` trait is implemented for `&str`
+//! and `&[u8]`, but the conversions can fail due to some of the limitations
+//! explained above.
+//!
+//! This also means that currently whenever a C string is created, an allocation
+//! must be performed to place the data elsewhere (the lifetime of the C string is
+//! not tied to the lifetime of the original string/data buffer). If C strings are
+//! heavily used in applications, then caching may be advisable to prevent
+//! unnecessary amounts of allocations.
+//!
+//! Be careful to remember that the memory is managed by C allocator API and not
+//! by Rust allocator API.
+//! That means that the CString pointers should be freed with C allocator API
+//! if you intend to do that on your own, as the behaviour is not well defined
+//! if you free them with Rust's allocator API.
+//!
+//! An example of creating and using a C string would be:
+//!
+//! ```rust
+//! extern crate libc;
+//!
+//! extern {
+//!     fn puts(s: *const libc::c_char);
+//! }
+//!
+//! fn main() {
+//!     let my_string = "Hello, world!";
+//!
+//!     // Allocate the C string with an explicit local that owns the string. The
+//!     // `c_buffer` pointer will be deallocated when `my_c_string` goes out of scope.
+//!     let my_c_string = my_string.to_c_str();
+//!     unsafe {
+//!         puts(my_c_string.as_ptr());
+//!     }
+//!
+//!     // Don't save/return the pointer to the C string, the `c_buffer` will be
+//!     // deallocated when this block returns!
+//!     my_string.with_c_str(|c_buffer| {
+//!         unsafe { puts(c_buffer); }
+//!     });
+//! }
+//! ```
+
+use string::String;
+use hash;
+use fmt;
+use kinds::marker;
+use mem;
+use core::prelude::*;
+
+use ptr;
+use raw::Slice;
+use slice;
+use str;
+use libc;
+
+/// The representation of a C String.
+///
+/// This structure wraps a `*libc::c_char`, and will automatically free the
+/// memory it is pointing to when it goes out of scope.
+#[allow(missing_copy_implementations)]
+pub struct CString {
+    buf: *const libc::c_char,
+    owns_buffer_: bool,
+}
+
+impl Clone for CString {
+    /// Clone this CString into a new, uniquely owned CString. For safety
+    /// reasons, this is always a deep clone with the memory allocated
+    /// with C's allocator API, rather than the usual shallow clone.
+    fn clone(&self) -> CString {
+        let len = self.len() + 1;
+        let buf = unsafe { libc::malloc(len as libc::size_t) } as *mut libc::c_char;
+        if buf.is_null() { ::alloc::oom() }
+        unsafe { ptr::copy_nonoverlapping_memory(buf, self.buf, len); }
+        CString { buf: buf as *const libc::c_char, owns_buffer_: true }
+    }
+}
+
+impl PartialEq for CString {
+    fn eq(&self, other: &CString) -> bool {
+        // Check if the two strings share the same buffer
+        if self.buf as uint == other.buf as uint {
+            true
+        } else {
+            unsafe {
+                libc::strcmp(self.buf, other.buf) == 0
+            }
+        }
+    }
+}
+
+impl PartialOrd for CString {
+    #[inline]
+    fn partial_cmp(&self, other: &CString) -> Option<Ordering> {
+        self.as_bytes().partial_cmp(other.as_bytes())
+    }
+}
+
+impl Eq for CString {}
+
+impl<S: hash::Writer> hash::Hash<S> for CString {
+    #[inline]
+    fn hash(&self, state: &mut S) {
+        self.as_bytes().hash(state)
+    }
+}
+
+impl CString {
+    /// Create a C String from a pointer, with memory managed by C's allocator
+    /// API, so avoid calling it with a pointer to memory managed by Rust's
+    /// allocator API, as the behaviour would not be well defined.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `buf` is null
+    pub unsafe fn new(buf: *const libc::c_char, owns_buffer: bool) -> CString {
+        assert!(!buf.is_null());
+        CString { buf: buf, owns_buffer_: owns_buffer }
+    }
+
+    /// Return a pointer to the NUL-terminated string data.
+    ///
+    /// `.as_ptr` returns an internal pointer into the `CString`, and
+    /// may be invalidated when the `CString` falls out of scope (the
+    /// destructor will run, freeing the allocation if there is
+    /// one).
+    ///
+    /// ```rust
+    /// let foo = "some string";
+    ///
+    /// // right
+    /// let x = foo.to_c_str();
+    /// let p = x.as_ptr();
+    ///
+    /// // wrong (the CString will be freed, invalidating `p`)
+    /// let p = foo.to_c_str().as_ptr();
+    /// ```
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// extern crate libc;
+    ///
+    /// fn main() {
+    ///     let c_str = "foo bar".to_c_str();
+    ///     unsafe {
+    ///         libc::puts(c_str.as_ptr());
+    ///     }
+    /// }
+    /// ```
+    pub fn as_ptr(&self) -> *const libc::c_char {
+        self.buf
+    }
+
+    /// Return a mutable pointer to the NUL-terminated string data.
+    ///
+    /// `.as_mut_ptr` returns an internal pointer into the `CString`, and
+    /// may be invalidated when the `CString` falls out of scope (the
+    /// destructor will run, freeing the allocation if there is
+    /// one).
+    ///
+    /// ```rust
+    /// let foo = "some string";
+    ///
+    /// // right
+    /// let mut x = foo.to_c_str();
+    /// let p = x.as_mut_ptr();
+    ///
+    /// // wrong (the CString will be freed, invalidating `p`)
+    /// let p = foo.to_c_str().as_mut_ptr();
+    /// ```
+    pub fn as_mut_ptr(&mut self) -> *mut libc::c_char {
+        self.buf as *mut _
+    }
+
+    /// Returns whether or not the `CString` owns the buffer.
+    pub fn owns_buffer(&self) -> bool {
+        self.owns_buffer_
+    }
+
+    /// Converts the CString into a `&[u8]` without copying.
+    /// Includes the terminating NUL byte.
+    #[inline]
+    pub fn as_bytes<'a>(&'a self) -> &'a [u8] {
+        unsafe {
+            mem::transmute(Slice { data: self.buf, len: self.len() + 1 })
+        }
+    }
+
+    /// Converts the CString into a `&[u8]` without copying.
+    /// Does not include the terminating NUL byte.
+    #[inline]
+    pub fn as_bytes_no_nul<'a>(&'a self) -> &'a [u8] {
+        unsafe {
+            mem::transmute(Slice { data: self.buf, len: self.len() })
+        }
+    }
+
+    /// Converts the CString into a `&str` without copying.
+    /// Returns None if the CString is not UTF-8.
+    #[inline]
+    pub fn as_str<'a>(&'a self) -> Option<&'a str> {
+        let buf = self.as_bytes_no_nul();
+        str::from_utf8(buf)
+    }
+
+    /// Return a CString iterator.
+    pub fn iter<'a>(&'a self) -> CChars<'a> {
+        CChars {
+            ptr: self.buf,
+            marker: marker::ContravariantLifetime,
+        }
+    }
+
+    /// Unwraps the wrapped `*libc::c_char` from the `CString` wrapper.
+    ///
+    /// Any ownership of the buffer by the `CString` wrapper is
+    /// forgotten, meaning that the backing allocation of this
+    /// `CString` is not automatically freed if it owns the
+    /// allocation. In this case, a user of `.unwrap()` should ensure
+    /// the allocation is freed, to avoid leaking memory. You should
+    /// use libc's memory allocator in this case.
+    ///
+    /// Prefer `.as_ptr()` when just retrieving a pointer to the
+    /// string data, as that does not relinquish ownership.
+    pub unsafe fn into_inner(mut self) -> *const libc::c_char {
+        self.owns_buffer_ = false;
+        self.buf
+    }
+
+    /// Deprecated, use into_inner() instead
+    #[deprecated = "renamed to into_inner()"]
+    pub unsafe fn unwrap(self) -> *const libc::c_char { self.into_inner() }
+
+    /// Return the number of bytes in the CString (not including the NUL
+    /// terminator).
+    #[inline]
+    pub fn len(&self) -> uint {
+        unsafe { libc::strlen(self.buf) as uint }
+    }
+
+    /// Returns if there are no bytes in this string
+    #[inline]
+    pub fn is_empty(&self) -> bool { self.len() == 0 }
+}
+
+impl Drop for CString {
+    fn drop(&mut self) {
+        if self.owns_buffer_ {
+            unsafe {
+                libc::free(self.buf as *mut libc::c_void)
+            }
+        }
+    }
+}
+
+impl fmt::Show for CString {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        String::from_utf8_lossy(self.as_bytes_no_nul()).fmt(f)
+    }
+}
+
+/// A generic trait for converting a value to a CString.
+pub trait ToCStr for Sized? {
+    /// Copy the receiver into a CString.
+    ///
+    /// # Panics
+    ///
+    /// Panics the task if the receiver has an interior null.
+    fn to_c_str(&self) -> CString;
+
+    /// Unsafe variant of `to_c_str()` that doesn't check for nulls.
+    unsafe fn to_c_str_unchecked(&self) -> CString;
+
+    /// Work with a temporary CString constructed from the receiver.
+    /// The provided `*libc::c_char` will be freed immediately upon return.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// extern crate libc;
+    ///
+    /// fn main() {
+    ///     let s = "PATH".with_c_str(|path| unsafe {
+    ///         libc::getenv(path)
+    ///     });
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics the task if the receiver has an interior null.
+    #[inline]
+    fn with_c_str<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        let c_str = self.to_c_str();
+        f(c_str.as_ptr())
+    }
+
+    /// Unsafe variant of `with_c_str()` that doesn't check for nulls.
+    #[inline]
+    unsafe fn with_c_str_unchecked<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        let c_str = self.to_c_str_unchecked();
+        f(c_str.as_ptr())
+    }
+}
+
+impl ToCStr for str {
+    #[inline]
+    fn to_c_str(&self) -> CString {
+        self.as_bytes().to_c_str()
+    }
+
+    #[inline]
+    unsafe fn to_c_str_unchecked(&self) -> CString {
+        self.as_bytes().to_c_str_unchecked()
+    }
+
+    #[inline]
+    fn with_c_str<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        self.as_bytes().with_c_str(f)
+    }
+
+    #[inline]
+    unsafe fn with_c_str_unchecked<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        self.as_bytes().with_c_str_unchecked(f)
+    }
+}
+
+impl ToCStr for String {
+    #[inline]
+    fn to_c_str(&self) -> CString {
+        self.as_bytes().to_c_str()
+    }
+
+    #[inline]
+    unsafe fn to_c_str_unchecked(&self) -> CString {
+        self.as_bytes().to_c_str_unchecked()
+    }
+
+    #[inline]
+    fn with_c_str<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        self.as_bytes().with_c_str(f)
+    }
+
+    #[inline]
+    unsafe fn with_c_str_unchecked<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        self.as_bytes().with_c_str_unchecked(f)
+    }
+}
+
+// The length of the stack allocated buffer for `vec.with_c_str()`
+const BUF_LEN: uint = 128;
+
+impl ToCStr for [u8] {
+    fn to_c_str(&self) -> CString {
+        let mut cs = unsafe { self.to_c_str_unchecked() };
+        check_for_null(self, cs.as_mut_ptr());
+        cs
+    }
+
+    unsafe fn to_c_str_unchecked(&self) -> CString {
+        let self_len = self.len();
+        let buf = libc::malloc(self_len as libc::size_t + 1) as *mut u8;
+        if buf.is_null() { ::alloc::oom() }
+
+        ptr::copy_memory(buf, self.as_ptr(), self_len);
+        *buf.offset(self_len as int) = 0;
+
+        CString::new(buf as *const libc::c_char, true)
+    }
+
+    fn with_c_str<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        unsafe { with_c_str(self, true, f) }
+    }
+
+    unsafe fn with_c_str_unchecked<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        with_c_str(self, false, f)
+    }
+}
+
+impl<'a, Sized? T: ToCStr> ToCStr for &'a T {
+    #[inline]
+    fn to_c_str(&self) -> CString {
+        (**self).to_c_str()
+    }
+
+    #[inline]
+    unsafe fn to_c_str_unchecked(&self) -> CString {
+        (**self).to_c_str_unchecked()
+    }
+
+    #[inline]
+    fn with_c_str<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        (**self).with_c_str(f)
+    }
+
+    #[inline]
+    unsafe fn with_c_str_unchecked<T, F>(&self, f: F) -> T where
+        F: FnOnce(*const libc::c_char) -> T,
+    {
+        (**self).with_c_str_unchecked(f)
+    }
+}
+
+// Unsafe function that handles possibly copying the &[u8] into a stack array.
+unsafe fn with_c_str<T, F>(v: &[u8], checked: bool, f: F) -> T where
+    F: FnOnce(*const libc::c_char) -> T,
+{
+    let c_str = if v.len() < BUF_LEN {
+        let mut buf: [u8, .. BUF_LEN] = mem::uninitialized();
+        slice::bytes::copy_memory(&mut buf, v);
+        buf[v.len()] = 0;
+
+        let buf = buf.as_mut_ptr();
+        if checked {
+            check_for_null(v, buf as *mut libc::c_char);
+        }
+
+        return f(buf as *const libc::c_char)
+    } else if checked {
+        v.to_c_str()
+    } else {
+        v.to_c_str_unchecked()
+    };
+
+    f(c_str.as_ptr())
+}
+
+#[inline]
+fn check_for_null(v: &[u8], buf: *mut libc::c_char) {
+    for i in range(0, v.len()) {
+        unsafe {
+            let p = buf.offset(i as int);
+            assert!(*p != 0);
+        }
+    }
+}
+
+/// External iterator for a CString's bytes.
+///
+/// Use with the `std::iter` module.
+pub struct CChars<'a> {
+    ptr: *const libc::c_char,
+    marker: marker::ContravariantLifetime<'a>,
+}
+
+impl<'a> Iterator<libc::c_char> for CChars<'a> {
+    fn next(&mut self) -> Option<libc::c_char> {
+        let ch = unsafe { *self.ptr };
+        if ch == 0 {
+            None
+        } else {
+            self.ptr = unsafe { self.ptr.offset(1) };
+            Some(ch)
+        }
+    }
+}
+
+/// Parses a C "multistring", e.g. Windows env values or
+/// the req->ptr result in a uv_fs_readdir() call.
+///
+/// Optionally, a `count` can be passed in, limiting the
+/// parsing to only being done `count`-times.
+///
+/// The specified closure is invoked with each string that
+/// is found, and the number of strings found is returned.
+pub unsafe fn from_c_multistring<F>(buf: *const libc::c_char,
+                                    count: Option<uint>,
+                                    mut f: F)
+                                    -> uint where
+    F: FnMut(&CString),
+{
+
+    let mut curr_ptr: uint = buf as uint;
+    let mut ctr = 0;
+    let (limited_count, limit) = match count {
+        Some(limit) => (true, limit),
+        None => (false, 0)
+    };
+    while ((limited_count && ctr < limit) || !limited_count)
+          && *(curr_ptr as *const libc::c_char) != 0 as libc::c_char {
+        let cstr = CString::new(curr_ptr as *const libc::c_char, false);
+        f(&cstr);
+        curr_ptr += cstr.len() + 1;
+        ctr += 1;
+    }
+    return ctr;
+}
+
+#[cfg(test)]
+mod tests {
+    use prelude::*;
+    use ptr;
+    use task;
+    use libc;
+
+    use super::*;
+
+    #[test]
+    fn test_str_multistring_parsing() {
+        unsafe {
+            let input = b"zero\0one\0\0";
+            let ptr = input.as_ptr();
+            let expected = ["zero", "one"];
+            let mut it = expected.iter();
+            let result = from_c_multistring(ptr as *const libc::c_char, None, |c| {
+                let cbytes = c.as_bytes_no_nul();
+                assert_eq!(cbytes, it.next().unwrap().as_bytes());
+            });
+            assert_eq!(result, 2);
+            assert!(it.next().is_none());
+        }
+    }
+
+    #[test]
+    fn test_str_to_c_str() {
+        let c_str = "".to_c_str();
+        unsafe {
+            assert_eq!(*c_str.as_ptr().offset(0), 0);
+        }
+
+        let c_str = "hello".to_c_str();
+        let buf = c_str.as_ptr();
+        unsafe {
+            assert_eq!(*buf.offset(0), 'h' as libc::c_char);
+            assert_eq!(*buf.offset(1), 'e' as libc::c_char);
+            assert_eq!(*buf.offset(2), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(3), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(4), 'o' as libc::c_char);
+            assert_eq!(*buf.offset(5), 0);
+        }
+    }
+
+    #[test]
+    fn test_vec_to_c_str() {
+        let b: &[u8] = &[];
+        let c_str = b.to_c_str();
+        unsafe {
+            assert_eq!(*c_str.as_ptr().offset(0), 0);
+        }
+
+        let c_str = b"hello".to_c_str();
+        let buf = c_str.as_ptr();
+        unsafe {
+            assert_eq!(*buf.offset(0), 'h' as libc::c_char);
+            assert_eq!(*buf.offset(1), 'e' as libc::c_char);
+            assert_eq!(*buf.offset(2), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(3), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(4), 'o' as libc::c_char);
+            assert_eq!(*buf.offset(5), 0);
+        }
+
+        let c_str = b"foo\xFF".to_c_str();
+        let buf = c_str.as_ptr();
+        unsafe {
+            assert_eq!(*buf.offset(0), 'f' as libc::c_char);
+            assert_eq!(*buf.offset(1), 'o' as libc::c_char);
+            assert_eq!(*buf.offset(2), 'o' as libc::c_char);
+            assert_eq!(*buf.offset(3), 0xffu8 as libc::c_char);
+            assert_eq!(*buf.offset(4), 0);
+        }
+    }
+
+    #[test]
+    fn test_unwrap() {
+        let c_str = "hello".to_c_str();
+        unsafe { libc::free(c_str.unwrap() as *mut libc::c_void) }
+    }
+
+    #[test]
+    fn test_as_ptr() {
+        let c_str = "hello".to_c_str();
+        let len = unsafe { libc::strlen(c_str.as_ptr()) };
+        assert_eq!(len, 5);
+    }
+
+    #[test]
+    fn test_iterator() {
+        let c_str = "".to_c_str();
+        let mut iter = c_str.iter();
+        assert_eq!(iter.next(), None);
+
+        let c_str = "hello".to_c_str();
+        let mut iter = c_str.iter();
+        assert_eq!(iter.next(), Some('h' as libc::c_char));
+        assert_eq!(iter.next(), Some('e' as libc::c_char));
+        assert_eq!(iter.next(), Some('l' as libc::c_char));
+        assert_eq!(iter.next(), Some('l' as libc::c_char));
+        assert_eq!(iter.next(), Some('o' as libc::c_char));
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_to_c_str_fail() {
+        assert!(task::try(move|| { "he\x00llo".to_c_str() }).is_err());
+    }
+
+    #[test]
+    fn test_to_c_str_unchecked() {
+        unsafe {
+            let c_string = "he\x00llo".to_c_str_unchecked();
+            let buf = c_string.as_ptr();
+            assert_eq!(*buf.offset(0), 'h' as libc::c_char);
+            assert_eq!(*buf.offset(1), 'e' as libc::c_char);
+            assert_eq!(*buf.offset(2), 0);
+            assert_eq!(*buf.offset(3), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(4), 'l' as libc::c_char);
+            assert_eq!(*buf.offset(5), 'o' as libc::c_char);
+            assert_eq!(*buf.offset(6), 0);
+        }
+    }
+
+    #[test]
+    fn test_as_bytes() {
+        let c_str = "hello".to_c_str();
+        assert_eq!(c_str.as_bytes(), b"hello\0");
+        let c_str = "".to_c_str();
+        assert_eq!(c_str.as_bytes(), b"\0");
+        let c_str = b"foo\xFF".to_c_str();
+        assert_eq!(c_str.as_bytes(), b"foo\xFF\0");
+    }
+
+    #[test]
+    fn test_as_bytes_no_nul() {
+        let c_str = "hello".to_c_str();
+        assert_eq!(c_str.as_bytes_no_nul(), b"hello");
+        let c_str = "".to_c_str();
+        let exp: &[u8] = &[];
+        assert_eq!(c_str.as_bytes_no_nul(), exp);
+        let c_str = b"foo\xFF".to_c_str();
+        assert_eq!(c_str.as_bytes_no_nul(), b"foo\xFF");
+    }
+
+    #[test]
+    fn test_as_str() {
+        let c_str = "hello".to_c_str();
+        assert_eq!(c_str.as_str(), Some("hello"));
+        let c_str = "".to_c_str();
+        assert_eq!(c_str.as_str(), Some(""));
+        let c_str = b"foo\xFF".to_c_str();
+        assert_eq!(c_str.as_str(), None);
+    }
+
+    #[test]
+    #[should_fail]
+    fn test_new_fail() {
+        let _c_str = unsafe { CString::new(ptr::null(), false) };
+    }
+
+    #[test]
+    fn test_clone() {
+        let a = "hello".to_c_str();
+        let b = a.clone();
+        assert!(a == b);
+    }
+
+    #[test]
+    fn test_clone_noleak() {
+        fn foo<F>(f: F) where F: FnOnce(&CString) {
+            let s = "test".to_string();
+            let c = s.to_c_str();
+            // give the closure a non-owned CString
+            let mut c_ = unsafe { CString::new(c.as_ptr(), false) };
+            f(&c_);
+            // muck with the buffer for later printing
+            unsafe { *c_.as_mut_ptr() = 'X' as libc::c_char }
+        }
+
+        let mut c_: Option<CString> = None;
+        foo(|c| {
+            c_ = Some(c.clone());
+            c.clone();
+            // force a copy, reading the memory
+            c.as_bytes().to_vec();
+        });
+        let c_ = c_.unwrap();
+        // force a copy, reading the memory
+        c_.as_bytes().to_vec();
+    }
+}
+
+#[cfg(test)]
+mod bench {
+    extern crate test;
+
+    use self::test::Bencher;
+    use libc;
+    use prelude::*;
+
+    #[inline]
+    fn check(s: &str, c_str: *const libc::c_char) {
+        let s_buf = s.as_ptr();
+        for i in range(0, s.len()) {
+            unsafe {
+                assert_eq!(
+                    *s_buf.offset(i as int) as libc::c_char,
+                    *c_str.offset(i as int));
+            }
+        }
+    }
+
+    static S_SHORT: &'static str = "Mary";
+    static S_MEDIUM: &'static str = "Mary had a little lamb";
+    static S_LONG: &'static str = "\
+        Mary had a little lamb, Little lamb
+        Mary had a little lamb, Little lamb
+        Mary had a little lamb, Little lamb
+        Mary had a little lamb, Little lamb
+        Mary had a little lamb, Little lamb
+        Mary had a little lamb, Little lamb";
+
+    fn bench_to_string(b: &mut Bencher, s: &str) {
+        b.iter(|| {
+            let c_str = s.to_c_str();
+            check(s, c_str.as_ptr());
+        })
+    }
+
+    #[bench]
+    fn bench_to_c_str_short(b: &mut Bencher) {
+        bench_to_string(b, S_SHORT)
+    }
+
+    #[bench]
+    fn bench_to_c_str_medium(b: &mut Bencher) {
+        bench_to_string(b, S_MEDIUM)
+    }
+
+    #[bench]
+    fn bench_to_c_str_long(b: &mut Bencher) {
+        bench_to_string(b, S_LONG)
+    }
+
+    fn bench_to_c_str_unchecked(b: &mut Bencher, s: &str) {
+        b.iter(|| {
+            let c_str = unsafe { s.to_c_str_unchecked() };
+            check(s, c_str.as_ptr())
+        })
+    }
+
+    #[bench]
+    fn bench_to_c_str_unchecked_short(b: &mut Bencher) {
+        bench_to_c_str_unchecked(b, S_SHORT)
+    }
+
+    #[bench]
+    fn bench_to_c_str_unchecked_medium(b: &mut Bencher) {
+        bench_to_c_str_unchecked(b, S_MEDIUM)
+    }
+
+    #[bench]
+    fn bench_to_c_str_unchecked_long(b: &mut Bencher) {
+        bench_to_c_str_unchecked(b, S_LONG)
+    }
+
+    fn bench_with_c_str(b: &mut Bencher, s: &str) {
+        b.iter(|| {
+            s.with_c_str(|c_str_buf| check(s, c_str_buf))
+        })
+    }
+
+    #[bench]
+    fn bench_with_c_str_short(b: &mut Bencher) {
+        bench_with_c_str(b, S_SHORT)
+    }
+
+    #[bench]
+    fn bench_with_c_str_medium(b: &mut Bencher) {
+        bench_with_c_str(b, S_MEDIUM)
+    }
+
+    #[bench]
+    fn bench_with_c_str_long(b: &mut Bencher) {
+        bench_with_c_str(b, S_LONG)
+    }
+
+    fn bench_with_c_str_unchecked(b: &mut Bencher, s: &str) {
+        b.iter(|| {
+            unsafe {
+                s.with_c_str_unchecked(|c_str_buf| check(s, c_str_buf))
+            }
+        })
+    }
+
+    #[bench]
+    fn bench_with_c_str_unchecked_short(b: &mut Bencher) {
+        bench_with_c_str_unchecked(b, S_SHORT)
+    }
+
+    #[bench]
+    fn bench_with_c_str_unchecked_medium(b: &mut Bencher) {
+        bench_with_c_str_unchecked(b, S_MEDIUM)
+    }
+
+    #[bench]
+    fn bench_with_c_str_unchecked_long(b: &mut Bencher) {
+        bench_with_c_str_unchecked(b, S_LONG)
+    }
+}
diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs
index 29a7b0dd0cc..dfbb09d26b5 100644
--- a/src/libstd/comm/mod.rs
+++ b/src/libstd/comm/mod.rs
@@ -327,7 +327,7 @@ use alloc::arc::Arc;
 use core::kinds::marker;
 use core::mem;
 use core::cell::UnsafeCell;
-use rustrt::task::BlockedTask;
+use rt::task::BlockedTask;
 
 pub use comm::select::{Select, Handle};
 
@@ -336,9 +336,8 @@ macro_rules! test {
         mod $name {
             #![allow(unused_imports)]
 
-            extern crate rustrt;
-
             use prelude::*;
+            use rt;
 
             use comm::*;
             use super::*;
@@ -1519,7 +1518,7 @@ mod test {
     } }
 
     test! { fn sends_off_the_runtime() {
-        use rustrt::thread::Thread;
+        use rt::thread::Thread;
 
         let (tx, rx) = channel();
         let t = Thread::start(move|| {
@@ -1534,7 +1533,7 @@ mod test {
     } }
 
     test! { fn try_recvs_off_the_runtime() {
-        use rustrt::thread::Thread;
+        use rt::thread::Thread;
 
         let (tx, rx) = channel();
         let (cdone, pdone) = channel();
@@ -1984,7 +1983,7 @@ mod sync_tests {
     } }
 
     test! { fn try_recvs_off_the_runtime() {
-        use rustrt::thread::Thread;
+        use rt::thread::Thread;
 
         let (tx, rx) = sync_channel::<()>(0);
         let (cdone, pdone) = channel();
diff --git a/src/libstd/comm/oneshot.rs b/src/libstd/comm/oneshot.rs
index bc34c3e8c52..2c5248c0897 100644
--- a/src/libstd/comm/oneshot.rs
+++ b/src/libstd/comm/oneshot.rs
@@ -41,8 +41,8 @@ use core::prelude::*;
 
 use alloc::boxed::Box;
 use core::mem;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
+use rt::local::Local;
+use rt::task::{Task, BlockedTask};
 
 use sync::atomic;
 use comm::Receiver;
diff --git a/src/libstd/comm/select.rs b/src/libstd/comm/select.rs
index de2b84b083c..4da9b4cfa36 100644
--- a/src/libstd/comm/select.rs
+++ b/src/libstd/comm/select.rs
@@ -59,8 +59,8 @@ use core::cell::Cell;
 use core::kinds::marker;
 use core::mem;
 use core::uint;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
+use rt::local::Local;
+use rt::task::{Task, BlockedTask};
 
 use comm::Receiver;
 
diff --git a/src/libstd/comm/shared.rs b/src/libstd/comm/shared.rs
index 13b5e10fcd3..b3856e588e2 100644
--- a/src/libstd/comm/shared.rs
+++ b/src/libstd/comm/shared.rs
@@ -25,9 +25,9 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use core::cmp;
 use core::int;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
-use rustrt::thread::Thread;
+use rt::local::Local;
+use rt::task::{Task, BlockedTask};
+use rt::thread::Thread;
 
 use sync::{atomic, Mutex, MutexGuard};
 use comm::mpsc_queue as mpsc;
diff --git a/src/libstd/comm/stream.rs b/src/libstd/comm/stream.rs
index 06ab4f4427a..827b1d51ac4 100644
--- a/src/libstd/comm/stream.rs
+++ b/src/libstd/comm/stream.rs
@@ -27,9 +27,9 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use core::cmp;
 use core::int;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
-use rustrt::thread::Thread;
+use rt::local::Local;
+use rt::task::{Task, BlockedTask};
+use rt::thread::Thread;
 
 use sync::atomic;
 use comm::spsc_queue as spsc;
diff --git a/src/libstd/comm/sync.rs b/src/libstd/comm/sync.rs
index a2e839e134c..933cd43c662 100644
--- a/src/libstd/comm/sync.rs
+++ b/src/libstd/comm/sync.rs
@@ -42,9 +42,9 @@ use alloc::boxed::Box;
 use vec::Vec;
 use core::mem;
 use core::cell::UnsafeCell;
-use rustrt::local::Local;
-use rustrt::mutex::{NativeMutex, LockGuard};
-use rustrt::task::{Task, BlockedTask};
+use rt::local::Local;
+use rt::mutex::{NativeMutex, LockGuard};
+use rt::task::{Task, BlockedTask};
 
 use sync::atomic;
 
diff --git a/src/libstd/failure.rs b/src/libstd/failure.rs
index 89bccb8b99f..5438f1920d6 100644
--- a/src/libstd/failure.rs
+++ b/src/libstd/failure.rs
@@ -20,9 +20,9 @@ use option::Option;
 use option::Option::{Some, None};
 use result::Result::Ok;
 use rt::backtrace;
-use rustrt::{Stderr, Stdio};
-use rustrt::local::Local;
-use rustrt::task::Task;
+use rt::util::{Stderr, Stdio};
+use rt::local::Local;
+use rt::task::Task;
 use str::Str;
 use string::String;
 
diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index 73be389bb91..7b5cbf7d58f 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -42,9 +42,8 @@ use option::Option::{Some, None};
 use ops::{Deref, DerefMut, FnOnce};
 use result::Result::{Ok, Err};
 use rt;
-use rustrt;
-use rustrt::local::Local;
-use rustrt::task::Task;
+use rt::local::Local;
+use rt::task::Task;
 use slice::SliceExt;
 use str::StrPrelude;
 use string::String;
@@ -345,7 +344,7 @@ fn with_task_stdout<F>(f: F) where
         });
         result
     } else {
-        let mut io = rustrt::Stdout;
+        let mut io = rt::util::Stdout;
         f(&mut io as &mut Writer)
     };
     match result {
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index e99aba9b673..d7f331b6c23 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -104,7 +104,7 @@
        html_playground_url = "http://play.rust-lang.org/")]
 
 #![allow(unknown_features)]
-#![feature(macro_rules, globs, linkage)]
+#![feature(macro_rules, globs, linkage, thread_local, asm)]
 #![feature(default_type_params, phase, lang_items, unsafe_destructor)]
 #![feature(import_shadowing, slicing_syntax)]
 #![feature(unboxed_closures)]
@@ -124,7 +124,6 @@ extern crate core;
 extern crate "collections" as core_collections;
 extern crate "rand" as core_rand;
 extern crate libc;
-extern crate rustrt;
 
 // Make std testable by not duplicating lang items. See #2912
 #[cfg(test)] extern crate "std" as realstd;
@@ -167,12 +166,8 @@ pub use core_collections::str;
 pub use core_collections::string;
 pub use core_collections::vec;
 
-pub use rustrt::c_str;
-
 pub use unicode::char;
 
-pub use rustrt::thunk;
-
 /* Exported macros */
 
 pub mod macros;
@@ -207,6 +202,7 @@ pub mod prelude;
 #[path = "num/f64.rs"]   pub mod f64;
 
 pub mod ascii;
+pub mod thunk;
 
 /* Common traits */
 
@@ -216,6 +212,7 @@ pub mod num;
 /* Runtime and platform support */
 
 pub mod thread_local;
+pub mod c_str;
 pub mod c_vec;
 pub mod dynamic_lib;
 pub mod fmt;
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 860f9d2670a..6e02c03602f 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -1042,9 +1042,9 @@ fn real_args_as_bytes() -> Vec<Vec<u8>> {
           target_os = "freebsd",
           target_os = "dragonfly"))]
 fn real_args_as_bytes() -> Vec<Vec<u8>> {
-    use rustrt;
+    use rt;
 
-    match rustrt::args::clone() {
+    match rt::args::clone() {
         Some(args) => args,
         None => panic!("process arguments not initialized")
     }
diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs
new file mode 100644
index 00000000000..8b9dbf73c53
--- /dev/null
+++ b/src/libstd/rt/args.rs
@@ -0,0 +1,167 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Global storage for command line arguments
+//!
+//! The current incarnation of the Rust runtime expects for
+//! the processes `argc` and `argv` arguments to be stored
+//! in a globally-accessible location for use by the `os` module.
+//!
+//! Only valid to call on Linux. Mac and Windows use syscalls to
+//! discover the command line arguments.
+//!
+//! FIXME #7756: Would be nice for this to not exist.
+
+use core::prelude::*;
+use vec::Vec;
+
+/// One-time global initialization.
+pub unsafe fn init(argc: int, argv: *const *const u8) { imp::init(argc, argv) }
+
+/// One-time global cleanup.
+pub unsafe fn cleanup() { imp::cleanup() }
+
+/// Take the global arguments from global storage.
+pub fn take() -> Option<Vec<Vec<u8>>> { imp::take() }
+
+/// Give the global arguments to global storage.
+///
+/// It is an error if the arguments already exist.
+pub fn put(args: Vec<Vec<u8>>) { imp::put(args) }
+
+/// Make a clone of the global arguments.
+pub fn clone() -> Option<Vec<Vec<u8>>> { imp::clone() }
+
+#[cfg(any(target_os = "linux",
+          target_os = "android",
+          target_os = "freebsd",
+          target_os = "dragonfly"))]
+mod imp {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use vec::Vec;
+    use string::String;
+    use mem;
+
+    use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+    static mut GLOBAL_ARGS_PTR: uint = 0;
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+
+    pub unsafe fn init(argc: int, argv: *const *const u8) {
+        let args = load_argc_and_argv(argc, argv);
+        put(args);
+    }
+
+    pub unsafe fn cleanup() {
+        rtassert!(take().is_some());
+        LOCK.destroy();
+    }
+
+    pub fn take() -> Option<Vec<Vec<u8>>> {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            let val = mem::replace(&mut *ptr, None);
+            val.as_ref().map(|s: &Box<Vec<Vec<u8>>>| (**s).clone())
+        })
+    }
+
+    pub fn put(args: Vec<Vec<u8>>) {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            rtassert!((*ptr).is_none());
+            (*ptr) = Some(box args.clone());
+        })
+    }
+
+    pub fn clone() -> Option<Vec<Vec<u8>>> {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            (*ptr).as_ref().map(|s: &Box<Vec<Vec<u8>>>| (**s).clone())
+        })
+    }
+
+    fn with_lock<T, F>(f: F) -> T where F: FnOnce() -> T {
+        unsafe {
+            let _guard = LOCK.lock();
+            f()
+        }
+    }
+
+    fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
+        unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
+    }
+
+    unsafe fn load_argc_and_argv(argc: int, argv: *const *const u8) -> Vec<Vec<u8>> {
+        Vec::from_fn(argc as uint, |i| {
+            String::from_raw_buf(*argv.offset(i as int)).into_bytes()
+        })
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use std::prelude::*;
+        use std::finally::Finally;
+
+        use super::*;
+
+        #[test]
+        fn smoke_test() {
+            // Preserve the actual global state.
+            let saved_value = take();
+
+            let expected = vec![
+                b"happy".to_vec(),
+                b"today?".to_vec(),
+            ];
+
+            put(expected.clone());
+            assert!(clone() == Some(expected.clone()));
+            assert!(take() == Some(expected.clone()));
+            assert!(take() == None);
+
+            (|&mut:| {
+            }).finally(|| {
+                // Restore the actual global state.
+                match saved_value {
+                    Some(ref args) => put(args.clone()),
+                    None => ()
+                }
+            })
+        }
+    }
+}
+
+#[cfg(any(target_os = "macos",
+          target_os = "ios",
+          target_os = "windows"))]
+mod imp {
+    use core::prelude::*;
+    use vec::Vec;
+
+    pub unsafe fn init(_argc: int, _argv: *const *const u8) {
+    }
+
+    pub fn cleanup() {
+    }
+
+    pub fn take() -> Option<Vec<Vec<u8>>> {
+        panic!()
+    }
+
+    pub fn put(_args: Vec<Vec<u8>>) {
+        panic!()
+    }
+
+    pub fn clone() -> Option<Vec<Vec<u8>>> {
+        panic!()
+    }
+}
diff --git a/src/libstd/rt/at_exit_imp.rs b/src/libstd/rt/at_exit_imp.rs
new file mode 100644
index 00000000000..086079c312a
--- /dev/null
+++ b/src/libstd/rt/at_exit_imp.rs
@@ -0,0 +1,65 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of running at_exit routines
+//!
+//! Documentation can be found on the `rt::at_exit` function.
+
+use core::prelude::*;
+
+use boxed::Box;
+use vec::Vec;
+use sync::atomic;
+use mem;
+use thunk::Thunk;
+
+use rt::exclusive::Exclusive;
+
+type Queue = Exclusive<Vec<Thunk>>;
+
+static QUEUE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static RUNNING: atomic::AtomicBool = atomic::INIT_ATOMIC_BOOL;
+
+pub fn init() {
+    let state: Box<Queue> = box Exclusive::new(Vec::new());
+    unsafe {
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        assert!(QUEUE.swap(mem::transmute(state), atomic::SeqCst) == 0);
+    }
+}
+
+pub fn push(f: Thunk) {
+    unsafe {
+        // Note that the check against 0 for the queue pointer is not atomic at
+        // all with respect to `run`, meaning that this could theoretically be a
+        // use-after-free. There's not much we can do to protect against that,
+        // however. Let's just assume a well-behaved runtime and go from there!
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.load(atomic::SeqCst);
+        rtassert!(queue != 0);
+        (*(queue as *const Queue)).lock().push(f);
+    }
+}
+
+pub fn run() {
+    let cur = unsafe {
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.swap(0, atomic::SeqCst);
+        rtassert!(queue != 0);
+
+        let queue: Box<Queue> = mem::transmute(queue);
+        let v = mem::replace(&mut *queue.lock(), Vec::new());
+        v
+    };
+
+    for to_run in cur.into_iter() {
+        to_run.invoke(());
+    }
+}
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index d815a5ea4f7..40885823a05 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -19,9 +19,8 @@ use os;
 use result::Result::{Ok, Err};
 use str::{StrPrelude, from_str};
 use sync::atomic;
-use unicode::char::UnicodeChar;
 
-pub use self::imp::write;
+pub use sys::backtrace::write;
 
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
@@ -41,984 +40,13 @@ pub fn log_enabled() -> bool {
     val == 2
 }
 
-#[cfg(target_word_size = "64")] const HEX_WIDTH: uint = 18;
-#[cfg(target_word_size = "32")] const HEX_WIDTH: uint = 10;
-
-// All rust symbols are in theory lists of "::"-separated identifiers. Some
-// assemblers, however, can't handle these characters in symbol names. To get
-// around this, we use C++-style mangling. The mangling method is:
-//
-// 1. Prefix the symbol with "_ZN"
-// 2. For each element of the path, emit the length plus the element
-// 3. End the path with "E"
-//
-// For example, "_ZN4testE" => "test" and "_ZN3foo3bar" => "foo::bar".
-//
-// We're the ones printing our backtraces, so we can't rely on anything else to
-// demangle our symbols. It's *much* nicer to look at demangled symbols, so
-// this function is implemented to give us nice pretty output.
-//
-// Note that this demangler isn't quite as fancy as it could be. We have lots
-// of other information in our symbols like hashes, version, type information,
-// etc. Additionally, this doesn't handle glue symbols at all.
-fn demangle(writer: &mut Writer, s: &str) -> IoResult<()> {
-    // First validate the symbol. If it doesn't look like anything we're
-    // expecting, we just print it literally. Note that we must handle non-rust
-    // symbols because we could have any function in the backtrace.
-    let mut valid = true;
-    let mut inner = s;
-    if s.len() > 4 && s.starts_with("_ZN") && s.ends_with("E") {
-        inner = s.slice(3, s.len() - 1);
-    // On Windows, dbghelp strips leading underscores, so we accept "ZN...E" form too.
-    } else if s.len() > 3 && s.starts_with("ZN") && s.ends_with("E") {
-        inner = s.slice(2, s.len() - 1);
-    } else {
-        valid = false;
-    }
-
-    if valid {
-        let mut chars = inner.chars();
-        while valid {
-            let mut i = 0;
-            for c in chars {
-                if c.is_numeric() {
-                    i = i * 10 + c as uint - '0' as uint;
-                } else {
-                    break
-                }
-            }
-            if i == 0 {
-                valid = chars.next().is_none();
-                break
-            } else if chars.by_ref().take(i - 1).count() != i - 1 {
-                valid = false;
-            }
-        }
-    }
-
-    // Alright, let's do this.
-    if !valid {
-        try!(writer.write_str(s));
-    } else {
-        let mut first = true;
-        while inner.len() > 0 {
-            if !first {
-                try!(writer.write_str("::"));
-            } else {
-                first = false;
-            }
-            let mut rest = inner;
-            while rest.char_at(0).is_numeric() {
-                rest = rest.slice_from(1);
-            }
-            let i: uint = from_str(inner.slice_to(inner.len() - rest.len())).unwrap();
-            inner = rest.slice_from(i);
-            rest = rest.slice_to(i);
-            while rest.len() > 0 {
-                if rest.starts_with("$") {
-                    macro_rules! demangle {
-                        ($($pat:expr => $demangled:expr),*) => ({
-                            $(if rest.starts_with($pat) {
-                                try!(writer.write_str($demangled));
-                                rest = rest.slice_from($pat.len());
-                              } else)*
-                            {
-                                try!(writer.write_str(rest));
-                                break;
-                            }
-
-                        })
-                    }
-
-                    // see src/librustc/back/link.rs for these mappings
-                    demangle! (
-                        "$SP$" => "@",
-                        "$UP$" => "Box",
-                        "$RP$" => "*",
-                        "$BP$" => "&",
-                        "$LT$" => "<",
-                        "$GT$" => ">",
-                        "$LP$" => "(",
-                        "$RP$" => ")",
-                        "$C$"  => ",",
-
-                        // in theory we can demangle any Unicode code point, but
-                        // for simplicity we just catch the common ones.
-                        "$x20" => " ",
-                        "$x27" => "'",
-                        "$x5b" => "[",
-                        "$x5d" => "]"
-                    )
-                } else {
-                    let idx = match rest.find('$') {
-                        None => rest.len(),
-                        Some(i) => i,
-                    };
-                    try!(writer.write_str(rest.slice_to(idx)));
-                    rest = rest.slice_from(idx);
-                }
-            }
-        }
-    }
-
-    Ok(())
-}
-
-/// Backtrace support built on libgcc with some extra OS-specific support
-///
-/// Some methods of getting a backtrace:
-///
-/// * The backtrace() functions on unix. It turns out this doesn't work very
-///   well for green threads on OSX, and the address to symbol portion of it
-///   suffers problems that are described below.
-///
-/// * Using libunwind. This is more difficult than it sounds because libunwind
-///   isn't installed everywhere by default. It's also a bit of a hefty library,
-///   so possibly not the best option. When testing, libunwind was excellent at
-///   getting both accurate backtraces and accurate symbols across platforms.
-///   This route was not chosen in favor of the next option, however.
-///
-/// * We're already using libgcc_s for exceptions in rust (triggering task
-///   unwinding and running destructors on the stack), and it turns out that it
-///   conveniently comes with a function that also gives us a backtrace. All of
-///   these functions look like _Unwind_*, but it's not quite the full
-///   repertoire of the libunwind API. Due to it already being in use, this was
-///   the chosen route of getting a backtrace.
-///
-/// After choosing libgcc_s for backtraces, the sad part is that it will only
-/// give us a stack trace of instruction pointers. Thankfully these instruction
-/// pointers are accurate (they work for green and native threads), but it's
-/// then up to us again to figure out how to translate these addresses to
-/// symbols. As with before, we have a few options. Before, that, a little bit
-/// of an interlude about symbols. This is my very limited knowledge about
-/// symbol tables, and this information is likely slightly wrong, but the
-/// general idea should be correct.
-///
-/// When talking about symbols, it's helpful to know a few things about where
-/// symbols are located. Some symbols are located in the dynamic symbol table
-/// of the executable which in theory means that they're available for dynamic
-/// linking and lookup. Other symbols end up only in the local symbol table of
-/// the file. This loosely corresponds to pub and priv functions in Rust.
-///
-/// Armed with this knowledge, we know that our solution for address to symbol
-/// translation will need to consult both the local and dynamic symbol tables.
-/// With that in mind, here's our options of translating an address to
-/// a symbol.
-///
-/// * Use dladdr(). The original backtrace()-based idea actually uses dladdr()
-///   behind the scenes to translate, and this is why backtrace() was not used.
-///   Conveniently, this method works fantastically on OSX. It appears dladdr()
-///   uses magic to consult the local symbol table, or we're putting everything
-///   in the dynamic symbol table anyway. Regardless, for OSX, this is the
-///   method used for translation. It's provided by the system and easy to do.o
-///
-///   Sadly, all other systems have a dladdr() implementation that does not
-///   consult the local symbol table. This means that most functions are blank
-///   because they don't have symbols. This means that we need another solution.
-///
-/// * Use unw_get_proc_name(). This is part of the libunwind api (not the
-///   libgcc_s version of the libunwind api), but involves taking a dependency
-///   to libunwind. We may pursue this route in the future if we bundle
-///   libunwind, but libunwind was unwieldy enough that it was not chosen at
-///   this time to provide this functionality.
-///
-/// * Shell out to a utility like `readelf`. Crazy though it may sound, it's a
-///   semi-reasonable solution. The stdlib already knows how to spawn processes,
-///   so in theory it could invoke readelf, parse the output, and consult the
-///   local/dynamic symbol tables from there. This ended up not getting chosen
-///   due to the craziness of the idea plus the advent of the next option.
-///
-/// * Use `libbacktrace`. It turns out that this is a small library bundled in
-///   the gcc repository which provides backtrace and symbol translation
-///   functionality. All we really need from it is the backtrace functionality,
-///   and we only really need this on everything that's not OSX, so this is the
-///   chosen route for now.
-///
-/// In summary, the current situation uses libgcc_s to get a trace of stack
-/// pointers, and we use dladdr() or libbacktrace to translate these addresses
-/// to symbols. This is a bit of a hokey implementation as-is, but it works for
-/// all unix platforms we support right now, so it at least gets the job done.
-#[cfg(unix)]
-mod imp {
-    use prelude::*;
-
-    use c_str::CString;
-    use io::IoResult;
-    use libc;
-    use mem;
-    use sync::{StaticMutex, MUTEX_INIT};
-
-    /// As always - iOS on arm uses SjLj exceptions and
-    /// _Unwind_Backtrace is even not available there. Still,
-    /// backtraces could be extracted using a backtrace function,
-    /// which thanks god is public
-    ///
-    /// As mentioned in a huge comment block above, backtrace doesn't
-    /// play well with green threads, so while it is extremely nice
-    /// and simple to use it should be used only on iOS devices as the
-    /// only viable option.
-    #[cfg(all(target_os = "ios", target_arch = "arm"))]
-    #[inline(never)]
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        use result;
-
-        extern {
-            fn backtrace(buf: *mut *mut libc::c_void,
-                         sz: libc::c_int) -> libc::c_int;
-        }
-
-        // while it doesn't requires lock for work as everything is
-        // local, it still displays much nicer backtraces when a
-        // couple of tasks panic simultaneously
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        try!(writeln!(w, "stack backtrace:"));
-        // 100 lines should be enough
-        const SIZE: uint = 100;
-        let mut buf: [*mut libc::c_void, ..SIZE] = unsafe {mem::zeroed()};
-        let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as uint};
-
-        // skipping the first one as it is write itself
-        let iter = range(1, cnt).map(|i| {
-            print(w, i as int, buf[i])
-        });
-        result::fold(iter, (), |_, _| ())
-    }
-
-    #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
-    #[inline(never)] // if we know this is a function call, we can skip it when
-                     // tracing
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        use io::IoError;
-
-        struct Context<'a> {
-            idx: int,
-            writer: &'a mut (Writer+'a),
-            last_error: Option<IoError>,
-        }
-
-        // When using libbacktrace, we use some necessary global state, so we
-        // need to prevent more than one thread from entering this block. This
-        // is semi-reasonable in terms of printing anyway, and we know that all
-        // I/O done here is blocking I/O, not green I/O, so we don't have to
-        // worry about this being a native vs green mutex.
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        try!(writeln!(w, "stack backtrace:"));
-
-        let mut cx = Context { writer: w, last_error: None, idx: 0 };
-        return match unsafe {
-            uw::_Unwind_Backtrace(trace_fn,
-                                  &mut cx as *mut Context as *mut libc::c_void)
-        } {
-            uw::_URC_NO_REASON => {
-                match cx.last_error {
-                    Some(err) => Err(err),
-                    None => Ok(())
-                }
-            }
-            _ => Ok(()),
-        };
-
-        extern fn trace_fn(ctx: *mut uw::_Unwind_Context,
-                           arg: *mut libc::c_void) -> uw::_Unwind_Reason_Code {
-            let cx: &mut Context = unsafe { mem::transmute(arg) };
-            let ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut libc::c_void };
-            // dladdr() on osx gets whiny when we use FindEnclosingFunction, and
-            // it appears to work fine without it, so we only use
-            // FindEnclosingFunction on non-osx platforms. In doing so, we get a
-            // slightly more accurate stack trace in the process.
-            //
-            // This is often because panic involves the last instruction of a
-            // function being "call std::rt::begin_unwind", with no ret
-            // instructions after it. This means that the return instruction
-            // pointer points *outside* of the calling function, and by
-            // unwinding it we go back to the original function.
-            let ip = if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
-                ip
-            } else {
-                unsafe { uw::_Unwind_FindEnclosingFunction(ip) }
-            };
-
-            // Don't print out the first few frames (they're not user frames)
-            cx.idx += 1;
-            if cx.idx <= 0 { return uw::_URC_NO_REASON }
-            // Don't print ginormous backtraces
-            if cx.idx > 100 {
-                match write!(cx.writer, " ... <frames omitted>\n") {
-                    Ok(()) => {}
-                    Err(e) => { cx.last_error = Some(e); }
-                }
-                return uw::_URC_FAILURE
-            }
-
-            // Once we hit an error, stop trying to print more frames
-            if cx.last_error.is_some() { return uw::_URC_FAILURE }
-
-            match print(cx.writer, cx.idx, ip) {
-                Ok(()) => {}
-                Err(e) => { cx.last_error = Some(e); }
-            }
-
-            // keep going
-            return uw::_URC_NO_REASON
-        }
-    }
-
-    #[cfg(any(target_os = "macos", target_os = "ios"))]
-    fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
-        use intrinsics;
-        #[repr(C)]
-        struct Dl_info {
-            dli_fname: *const libc::c_char,
-            dli_fbase: *mut libc::c_void,
-            dli_sname: *const libc::c_char,
-            dli_saddr: *mut libc::c_void,
-        }
-        extern {
-            fn dladdr(addr: *const libc::c_void,
-                      info: *mut Dl_info) -> libc::c_int;
-        }
-
-        let mut info: Dl_info = unsafe { intrinsics::init() };
-        if unsafe { dladdr(addr as *const libc::c_void, &mut info) == 0 } {
-            output(w, idx,addr, None)
-        } else {
-            output(w, idx, addr, Some(unsafe {
-                CString::new(info.dli_sname, false)
-            }))
-        }
-    }
-
-    #[cfg(not(any(target_os = "macos", target_os = "ios")))]
-    fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
-        use os;
-        use ptr;
-
-        ////////////////////////////////////////////////////////////////////////
-        // libbacktrace.h API
-        ////////////////////////////////////////////////////////////////////////
-        type backtrace_syminfo_callback =
-            extern "C" fn(data: *mut libc::c_void,
-                          pc: libc::uintptr_t,
-                          symname: *const libc::c_char,
-                          symval: libc::uintptr_t,
-                          symsize: libc::uintptr_t);
-        type backtrace_error_callback =
-            extern "C" fn(data: *mut libc::c_void,
-                          msg: *const libc::c_char,
-                          errnum: libc::c_int);
-        enum backtrace_state {}
-        #[link(name = "backtrace", kind = "static")]
-        #[cfg(not(test))]
-        extern {}
-
-        extern {
-            fn backtrace_create_state(filename: *const libc::c_char,
-                                      threaded: libc::c_int,
-                                      error: backtrace_error_callback,
-                                      data: *mut libc::c_void)
-                                            -> *mut backtrace_state;
-            fn backtrace_syminfo(state: *mut backtrace_state,
-                                 addr: libc::uintptr_t,
-                                 cb: backtrace_syminfo_callback,
-                                 error: backtrace_error_callback,
-                                 data: *mut libc::c_void) -> libc::c_int;
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // helper callbacks
-        ////////////////////////////////////////////////////////////////////////
-
-        extern fn error_cb(_data: *mut libc::c_void, _msg: *const libc::c_char,
-                           _errnum: libc::c_int) {
-            // do nothing for now
-        }
-        extern fn syminfo_cb(data: *mut libc::c_void,
-                             _pc: libc::uintptr_t,
-                             symname: *const libc::c_char,
-                             _symval: libc::uintptr_t,
-                             _symsize: libc::uintptr_t) {
-            let slot = data as *mut *const libc::c_char;
-            unsafe { *slot = symname; }
-        }
-
-        // The libbacktrace API supports creating a state, but it does not
-        // support destroying a state. I personally take this to mean that a
-        // state is meant to be created and then live forever.
-        //
-        // I would love to register an at_exit() handler which cleans up this
-        // state, but libbacktrace provides no way to do so.
-        //
-        // With these constraints, this function has a statically cached state
-        // that is calculated the first time this is requested. Remember that
-        // backtracing all happens serially (one global lock).
-        //
-        // An additionally oddity in this function is that we initialize the
-        // filename via self_exe_name() to pass to libbacktrace. It turns out
-        // that on Linux libbacktrace seamlessly gets the filename of the
-        // current executable, but this fails on freebsd. by always providing
-        // it, we make sure that libbacktrace never has a reason to not look up
-        // the symbols. The libbacktrace API also states that the filename must
-        // be in "permanent memory", so we copy it to a static and then use the
-        // static as the pointer.
-        //
-        // FIXME: We also call self_exe_name() on DragonFly BSD. I haven't
-        //        tested if this is required or not.
-        unsafe fn init_state() -> *mut backtrace_state {
-            static mut STATE: *mut backtrace_state = 0 as *mut backtrace_state;
-            static mut LAST_FILENAME: [libc::c_char, ..256] = [0, ..256];
-            if !STATE.is_null() { return STATE }
-            let selfname = if cfg!(target_os = "freebsd") ||
-                              cfg!(target_os = "dragonfly") {
-                os::self_exe_name()
-            } else {
-                None
-            };
-            let filename = match selfname {
-                Some(path) => {
-                    let bytes = path.as_vec();
-                    if bytes.len() < LAST_FILENAME.len() {
-                        let i = bytes.iter();
-                        for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
-                            *slot = *val as libc::c_char;
-                        }
-                        LAST_FILENAME.as_ptr()
-                    } else {
-                        ptr::null()
-                    }
-                }
-                None => ptr::null(),
-            };
-            STATE = backtrace_create_state(filename, 0, error_cb,
-                                           ptr::null_mut());
-            return STATE
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // translation
-        ////////////////////////////////////////////////////////////////////////
-
-        // backtrace errors are currently swept under the rug, only I/O
-        // errors are reported
-        let state = unsafe { init_state() };
-        if state.is_null() {
-            return output(w, idx, addr, None)
-        }
-        let mut data = 0 as *const libc::c_char;
-        let data_addr = &mut data as *mut *const libc::c_char;
-        let ret = unsafe {
-            backtrace_syminfo(state, addr as libc::uintptr_t,
-                              syminfo_cb, error_cb,
-                              data_addr as *mut libc::c_void)
-        };
-        if ret == 0 || data.is_null() {
-            output(w, idx, addr, None)
-        } else {
-            output(w, idx, addr, Some(unsafe { CString::new(data, false) }))
-        }
-    }
-
-    // Finally, after all that work above, we can emit a symbol.
-    fn output(w: &mut Writer, idx: int, addr: *mut libc::c_void,
-              s: Option<CString>) -> IoResult<()> {
-        try!(write!(w, "  {:2}: {:2$} - ", idx, addr, super::HEX_WIDTH));
-        match s.as_ref().and_then(|c| c.as_str()) {
-            Some(string) => try!(super::demangle(w, string)),
-            None => try!(write!(w, "<unknown>")),
-        }
-        w.write(&['\n' as u8])
-    }
-
-    /// Unwind library interface used for backtraces
-    ///
-    /// Note that the native libraries come from librustrt, not this
-    /// module.
-    /// Note that dead code is allowed as here are just bindings
-    /// iOS doesn't use all of them it but adding more
-    /// platform-specific configs pollutes the code too much
-    #[allow(non_camel_case_types)]
-    #[allow(non_snake_case)]
-    #[allow(dead_code)]
-    mod uw {
-        pub use self::_Unwind_Reason_Code::*;
-
-        use libc;
-
-        #[repr(C)]
-        pub enum _Unwind_Reason_Code {
-            _URC_NO_REASON = 0,
-            _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
-            _URC_FATAL_PHASE2_ERROR = 2,
-            _URC_FATAL_PHASE1_ERROR = 3,
-            _URC_NORMAL_STOP = 4,
-            _URC_END_OF_STACK = 5,
-            _URC_HANDLER_FOUND = 6,
-            _URC_INSTALL_CONTEXT = 7,
-            _URC_CONTINUE_UNWIND = 8,
-            _URC_FAILURE = 9, // used only by ARM EABI
-        }
-
-        pub enum _Unwind_Context {}
-
-        pub type _Unwind_Trace_Fn =
-                extern fn(ctx: *mut _Unwind_Context,
-                          arg: *mut libc::c_void) -> _Unwind_Reason_Code;
-
-        extern {
-            // No native _Unwind_Backtrace on iOS
-            #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
-            pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
-                                     trace_argument: *mut libc::c_void)
-                        -> _Unwind_Reason_Code;
-
-            #[cfg(all(not(target_os = "android"),
-                      not(all(target_os = "linux", target_arch = "arm"))))]
-            pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
-
-            #[cfg(all(not(target_os = "android"),
-                      not(all(target_os = "linux", target_arch = "arm"))))]
-            pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
-                -> *mut libc::c_void;
-        }
-
-        // On android, the function _Unwind_GetIP is a macro, and this is the
-        // expansion of the macro. This is all copy/pasted directly from the
-        // header file with the definition of _Unwind_GetIP.
-        #[cfg(any(target_os = "android",
-                  all(target_os = "linux", target_arch = "arm")))]
-        pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
-            #[repr(C)]
-            enum _Unwind_VRS_Result {
-                _UVRSR_OK = 0,
-                _UVRSR_NOT_IMPLEMENTED = 1,
-                _UVRSR_FAILED = 2,
-            }
-            #[repr(C)]
-            enum _Unwind_VRS_RegClass {
-                _UVRSC_CORE = 0,
-                _UVRSC_VFP = 1,
-                _UVRSC_FPA = 2,
-                _UVRSC_WMMXD = 3,
-                _UVRSC_WMMXC = 4,
-            }
-            #[repr(C)]
-            enum _Unwind_VRS_DataRepresentation {
-                _UVRSD_UINT32 = 0,
-                _UVRSD_VFPX = 1,
-                _UVRSD_FPAX = 2,
-                _UVRSD_UINT64 = 3,
-                _UVRSD_FLOAT = 4,
-                _UVRSD_DOUBLE = 5,
-            }
-
-            type _Unwind_Word = libc::c_uint;
-            extern {
-                fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
-                                   klass: _Unwind_VRS_RegClass,
-                                   word: _Unwind_Word,
-                                   repr: _Unwind_VRS_DataRepresentation,
-                                   data: *mut libc::c_void)
-                    -> _Unwind_VRS_Result;
-            }
-
-            let mut val: _Unwind_Word = 0;
-            let ptr = &mut val as *mut _Unwind_Word;
-            let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
-                                    _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
-                                    ptr as *mut libc::c_void);
-            (val & !1) as libc::uintptr_t
-        }
-
-        // This function also doesn't exist on Android or ARM/Linux, so make it
-        // a no-op
-        #[cfg(any(target_os = "android",
-                  all(target_os = "linux", target_arch = "arm")))]
-        pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
-            -> *mut libc::c_void
-        {
-            pc
-        }
-    }
-}
-
-/// As always, windows has something very different than unix, we mainly want
-/// to avoid having to depend too much on libunwind for windows.
-///
-/// If you google around, you'll find a fair bit of references to built-in
-/// functions to get backtraces on windows. It turns out that most of these are
-/// in an external library called dbghelp. I was unable to find this library
-/// via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
-/// of it.
-///
-/// You'll also find that there's a function called CaptureStackBackTrace
-/// mentioned frequently (which is also easy to use), but sadly I didn't have a
-/// copy of that function in my mingw install (maybe it was broken?). Instead,
-/// this takes the route of using StackWalk64 in order to walk the stack.
-#[cfg(windows)]
-#[allow(dead_code, non_snake_case)]
-mod imp {
-    use c_str::CString;
-    use intrinsics;
-    use io::{IoResult, Writer};
-    use libc;
-    use mem;
-    use ops::Drop;
-    use option::Option::{Some, None};
-    use path::Path;
-    use result::Result::{Ok, Err};
-    use sync::{StaticMutex, MUTEX_INIT};
-    use slice::SliceExt;
-    use str::StrPrelude;
-    use dynamic_lib::DynamicLibrary;
-
-    #[allow(non_snake_case)]
-    extern "system" {
-        fn GetCurrentProcess() -> libc::HANDLE;
-        fn GetCurrentThread() -> libc::HANDLE;
-        fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
-    }
-
-    type SymFromAddrFn =
-        extern "system" fn(libc::HANDLE, u64, *mut u64,
-                           *mut SYMBOL_INFO) -> libc::BOOL;
-    type SymInitializeFn =
-        extern "system" fn(libc::HANDLE, *mut libc::c_void,
-                           libc::BOOL) -> libc::BOOL;
-    type SymCleanupFn =
-        extern "system" fn(libc::HANDLE) -> libc::BOOL;
-
-    type StackWalk64Fn =
-        extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
-                           *mut STACKFRAME64, *mut arch::CONTEXT,
-                           *mut libc::c_void, *mut libc::c_void,
-                           *mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
-
-    const MAX_SYM_NAME: uint = 2000;
-    const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
-    const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
-    const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
-
-    #[repr(C)]
-    struct SYMBOL_INFO {
-        SizeOfStruct: libc::c_ulong,
-        TypeIndex: libc::c_ulong,
-        Reserved: [u64, ..2],
-        Index: libc::c_ulong,
-        Size: libc::c_ulong,
-        ModBase: u64,
-        Flags: libc::c_ulong,
-        Value: u64,
-        Address: u64,
-        Register: libc::c_ulong,
-        Scope: libc::c_ulong,
-        Tag: libc::c_ulong,
-        NameLen: libc::c_ulong,
-        MaxNameLen: libc::c_ulong,
-        // note that windows has this as 1, but it basically just means that
-        // the name is inline at the end of the struct. For us, we just bump
-        // the struct size up to MAX_SYM_NAME.
-        Name: [libc::c_char, ..MAX_SYM_NAME],
-    }
-
-
-    #[repr(C)]
-    enum ADDRESS_MODE {
-        AddrMode1616,
-        AddrMode1632,
-        AddrModeReal,
-        AddrModeFlat,
-    }
-
-    struct ADDRESS64 {
-        Offset: u64,
-        Segment: u16,
-        Mode: ADDRESS_MODE,
-    }
-
-    struct STACKFRAME64 {
-        AddrPC: ADDRESS64,
-        AddrReturn: ADDRESS64,
-        AddrFrame: ADDRESS64,
-        AddrStack: ADDRESS64,
-        AddrBStore: ADDRESS64,
-        FuncTableEntry: *mut libc::c_void,
-        Params: [u64, ..4],
-        Far: libc::BOOL,
-        Virtual: libc::BOOL,
-        Reserved: [u64, ..3],
-        KdHelp: KDHELP64,
-    }
-
-    struct KDHELP64 {
-        Thread: u64,
-        ThCallbackStack: libc::DWORD,
-        ThCallbackBStore: libc::DWORD,
-        NextCallback: libc::DWORD,
-        FramePointer: libc::DWORD,
-        KiCallUserMode: u64,
-        KeUserCallbackDispatcher: u64,
-        SystemRangeStart: u64,
-        KiUserExceptionDispatcher: u64,
-        StackBase: u64,
-        StackLimit: u64,
-        Reserved: [u64, ..5],
-    }
-
-    #[cfg(target_arch = "x86")]
-    mod arch {
-        use libc;
-
-        const MAXIMUM_SUPPORTED_EXTENSION: uint = 512;
-
-        #[repr(C)]
-        pub struct CONTEXT {
-            ContextFlags: libc::DWORD,
-            Dr0: libc::DWORD,
-            Dr1: libc::DWORD,
-            Dr2: libc::DWORD,
-            Dr3: libc::DWORD,
-            Dr6: libc::DWORD,
-            Dr7: libc::DWORD,
-            FloatSave: FLOATING_SAVE_AREA,
-            SegGs: libc::DWORD,
-            SegFs: libc::DWORD,
-            SegEs: libc::DWORD,
-            SegDs: libc::DWORD,
-            Edi: libc::DWORD,
-            Esi: libc::DWORD,
-            Ebx: libc::DWORD,
-            Edx: libc::DWORD,
-            Ecx: libc::DWORD,
-            Eax: libc::DWORD,
-            Ebp: libc::DWORD,
-            Eip: libc::DWORD,
-            SegCs: libc::DWORD,
-            EFlags: libc::DWORD,
-            Esp: libc::DWORD,
-            SegSs: libc::DWORD,
-            ExtendedRegisters: [u8, ..MAXIMUM_SUPPORTED_EXTENSION],
-        }
-
-        #[repr(C)]
-        pub struct FLOATING_SAVE_AREA {
-            ControlWord: libc::DWORD,
-            StatusWord: libc::DWORD,
-            TagWord: libc::DWORD,
-            ErrorOffset: libc::DWORD,
-            ErrorSelector: libc::DWORD,
-            DataOffset: libc::DWORD,
-            DataSelector: libc::DWORD,
-            RegisterArea: [u8, ..80],
-            Cr0NpxState: libc::DWORD,
-        }
-
-        pub fn init_frame(frame: &mut super::STACKFRAME64,
-                          ctx: &CONTEXT) -> libc::DWORD {
-            frame.AddrPC.Offset = ctx.Eip as u64;
-            frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrStack.Offset = ctx.Esp as u64;
-            frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrFrame.Offset = ctx.Ebp as u64;
-            frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            super::IMAGE_FILE_MACHINE_I386
-        }
-    }
-
-    #[cfg(target_arch = "x86_64")]
-    mod arch {
-        use libc::{c_longlong, c_ulonglong};
-        use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
-        use simd;
-
-        #[repr(C)]
-        pub struct CONTEXT {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            P1Home: DWORDLONG,
-            P2Home: DWORDLONG,
-            P3Home: DWORDLONG,
-            P4Home: DWORDLONG,
-            P5Home: DWORDLONG,
-            P6Home: DWORDLONG,
-
-            ContextFlags: DWORD,
-            MxCsr: DWORD,
-
-            SegCs: WORD,
-            SegDs: WORD,
-            SegEs: WORD,
-            SegFs: WORD,
-            SegGs: WORD,
-            SegSs: WORD,
-            EFlags: DWORD,
-
-            Dr0: DWORDLONG,
-            Dr1: DWORDLONG,
-            Dr2: DWORDLONG,
-            Dr3: DWORDLONG,
-            Dr6: DWORDLONG,
-            Dr7: DWORDLONG,
-
-            Rax: DWORDLONG,
-            Rcx: DWORDLONG,
-            Rdx: DWORDLONG,
-            Rbx: DWORDLONG,
-            Rsp: DWORDLONG,
-            Rbp: DWORDLONG,
-            Rsi: DWORDLONG,
-            Rdi: DWORDLONG,
-            R8:  DWORDLONG,
-            R9:  DWORDLONG,
-            R10: DWORDLONG,
-            R11: DWORDLONG,
-            R12: DWORDLONG,
-            R13: DWORDLONG,
-            R14: DWORDLONG,
-            R15: DWORDLONG,
-
-            Rip: DWORDLONG,
-
-            FltSave: FLOATING_SAVE_AREA,
-
-            VectorRegister: [M128A, .. 26],
-            VectorControl: DWORDLONG,
-
-            DebugControl: DWORDLONG,
-            LastBranchToRip: DWORDLONG,
-            LastBranchFromRip: DWORDLONG,
-            LastExceptionToRip: DWORDLONG,
-            LastExceptionFromRip: DWORDLONG,
-        }
-
-        #[repr(C)]
-        pub struct M128A {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            Low:  c_ulonglong,
-            High: c_longlong
-        }
-
-        #[repr(C)]
-        pub struct FLOATING_SAVE_AREA {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            _Dummy: [u8, ..512] // FIXME: Fill this out
-        }
-
-        pub fn init_frame(frame: &mut super::STACKFRAME64,
-                          ctx: &CONTEXT) -> DWORD {
-            frame.AddrPC.Offset = ctx.Rip as u64;
-            frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrStack.Offset = ctx.Rsp as u64;
-            frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrFrame.Offset = ctx.Rbp as u64;
-            frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            super::IMAGE_FILE_MACHINE_AMD64
-        }
-    }
-
-    #[repr(C)]
-    struct Cleanup {
-        handle: libc::HANDLE,
-        SymCleanup: SymCleanupFn,
-    }
-
-    impl Drop for Cleanup {
-        fn drop(&mut self) { (self.SymCleanup)(self.handle); }
-    }
-
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        // According to windows documentation, all dbghelp functions are
-        // single-threaded.
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        // Open up dbghelp.dll, we don't link to it explicitly because it can't
-        // always be found. Additionally, it's nice having fewer dependencies.
-        let path = Path::new("dbghelp.dll");
-        let lib = match DynamicLibrary::open(Some(&path)) {
-            Ok(lib) => lib,
-            Err(..) => return Ok(()),
-        };
-
-        macro_rules! sym { ($e:expr, $t:ident) => (unsafe {
-            match lib.symbol($e) {
-                Ok(f) => mem::transmute::<*mut u8, $t>(f),
-                Err(..) => return Ok(())
-            }
-        }) }
-
-        // Fetch the symbols necessary from dbghelp.dll
-        let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
-        let SymInitialize = sym!("SymInitialize", SymInitializeFn);
-        let SymCleanup = sym!("SymCleanup", SymCleanupFn);
-        let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
-
-        // Allocate necessary structures for doing the stack walk
-        let process = unsafe { GetCurrentProcess() };
-        let thread = unsafe { GetCurrentThread() };
-        let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
-        unsafe { RtlCaptureContext(&mut context); }
-        let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
-        let image = arch::init_frame(&mut frame, &context);
-
-        // Initialize this process's symbols
-        let ret = SymInitialize(process, 0 as *mut libc::c_void, libc::TRUE);
-        if ret != libc::TRUE { return Ok(()) }
-        let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
-
-        // And now that we're done with all the setup, do the stack walking!
-        let mut i = 0i;
-        try!(write!(w, "stack backtrace:\n"));
-        while StackWalk64(image, process, thread, &mut frame, &mut context,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void) == libc::TRUE{
-            let addr = frame.AddrPC.Offset;
-            if addr == frame.AddrReturn.Offset || addr == 0 ||
-               frame.AddrReturn.Offset == 0 { break }
-
-            i += 1;
-            try!(write!(w, "  {:2}: {:#2$x}", i, addr, super::HEX_WIDTH));
-            let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
-            info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
-            // the struct size in C.  the value is different to
-            // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
-            // due to struct alignment.
-            info.SizeOfStruct = 88;
-
-            let mut displacement = 0u64;
-            let ret = SymFromAddr(process, addr as u64, &mut displacement,
-                                  &mut info);
-
-            if ret == libc::TRUE {
-                try!(write!(w, " - "));
-                let cstr = unsafe { CString::new(info.Name.as_ptr(), false) };
-                let bytes = cstr.as_bytes();
-                match cstr.as_str() {
-                    Some(s) => try!(super::demangle(w, s)),
-                    None => try!(w.write(bytes[..bytes.len()-1])),
-                }
-                if displacement != 0 {
-                    try!(write!(w, "+{:#x}", displacement));
-                }
-            }
-            try!(w.write(&['\n' as u8]));
-        }
-
-        Ok(())
-    }
-}
-
 #[cfg(test)]
 mod test {
     use prelude::*;
-    use io::MemWriter;
-
+    use sys_common;
     macro_rules! t { ($a:expr, $b:expr) => ({
         let mut m = Vec::new();
-        super::demangle(&mut m, $a).unwrap();
+        sys_common::backtrace::demangle(&mut m, $a).unwrap();
         assert_eq!(String::from_utf8(m).unwrap(), $b);
     }) }
 
diff --git a/src/libstd/rt/bookkeeping.rs b/src/libstd/rt/bookkeeping.rs
new file mode 100644
index 00000000000..aca520fc088
--- /dev/null
+++ b/src/libstd/rt/bookkeeping.rs
@@ -0,0 +1,61 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Task bookkeeping
+//!
+//! This module keeps track of the number of running tasks so that entry points
+//! with libnative know when it's possible to exit the program (once all tasks
+//! have exited).
+//!
+//! The green counterpart for this is bookkeeping on sched pools, and it's up to
+//! each respective runtime to make sure that they call increment() and
+//! decrement() manually.
+
+use sync::atomic;
+use ops::Drop;
+
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+static TASK_COUNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static TASK_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+
+#[allow(missing_copy_implementations)]
+pub struct Token { _private: () }
+
+impl Drop for Token {
+    fn drop(&mut self) { decrement() }
+}
+
+/// Increment the number of live tasks, returning a token which will decrement
+/// the count when dropped.
+pub fn increment() -> Token {
+    let _ = TASK_COUNT.fetch_add(1, atomic::SeqCst);
+    Token { _private: () }
+}
+
+pub fn decrement() {
+    unsafe {
+        if TASK_COUNT.fetch_sub(1, atomic::SeqCst) == 1 {
+            let guard = TASK_LOCK.lock();
+            guard.signal();
+        }
+    }
+}
+
+/// Waits for all other native tasks in the system to exit. This is only used by
+/// the entry points of native programs
+pub fn wait_for_other_tasks() {
+    unsafe {
+        let guard = TASK_LOCK.lock();
+        while TASK_COUNT.load(atomic::SeqCst) > 0 {
+            guard.wait();
+        }
+    }
+}
diff --git a/src/libstd/rt/exclusive.rs b/src/libstd/rt/exclusive.rs
new file mode 100644
index 00000000000..1d3082d1b4c
--- /dev/null
+++ b/src/libstd/rt/exclusive.rs
@@ -0,0 +1,115 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use cell::UnsafeCell;
+use rt::mutex;
+
+/// An OS mutex over some data.
+///
+/// This is not a safe primitive to use, it is unaware of the libgreen
+/// scheduler, as well as being easily susceptible to misuse due to the usage of
+/// the inner NativeMutex.
+///
+/// > **Note**: This type is not recommended for general use. The mutex provided
+/// >           as part of `libsync` should almost always be favored.
+pub struct Exclusive<T> {
+    lock: mutex::NativeMutex,
+    data: UnsafeCell<T>,
+}
+
+/// An RAII guard returned via `lock`
+pub struct ExclusiveGuard<'a, T:'a> {
+    // FIXME #12808: strange name to try to avoid interfering with
+    // field accesses of the contained type via Deref
+    _data: &'a mut T,
+    _guard: mutex::LockGuard<'a>,
+}
+
+impl<T: Send> Exclusive<T> {
+    /// Creates a new `Exclusive` which will protect the data provided.
+    pub fn new(user_data: T) -> Exclusive<T> {
+        Exclusive {
+            lock: unsafe { mutex::NativeMutex::new() },
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    /// Acquires this lock, returning a guard which the data is accessed through
+    /// and from which that lock will be unlocked.
+    ///
+    /// This method is unsafe due to many of the same reasons that the
+    /// NativeMutex itself is unsafe.
+    pub unsafe fn lock<'a>(&'a self) -> ExclusiveGuard<'a, T> {
+        let guard = self.lock.lock();
+        let data = &mut *self.data.get();
+
+        ExclusiveGuard {
+            _data: data,
+            _guard: guard,
+        }
+    }
+}
+
+impl<'a, T: Send> ExclusiveGuard<'a, T> {
+    // The unsafety here should be ok because our loan guarantees that the lock
+    // itself is not moving
+    pub fn signal(&self) {
+        unsafe { self._guard.signal() }
+    }
+    pub fn wait(&self) {
+        unsafe { self._guard.wait() }
+    }
+}
+
+impl<'a, T: Send> Deref<T> for ExclusiveGuard<'a, T> {
+    fn deref(&self) -> &T { &*self._data }
+}
+impl<'a, T: Send> DerefMut<T> for ExclusiveGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T { &mut *self._data }
+}
+
+#[cfg(test)]
+mod tests {
+    use prelude::*;
+    use sync::Arc;
+    use super::Exclusive;
+    use task;
+
+    #[test]
+    fn exclusive_new_arc() {
+        unsafe {
+            let mut futures = Vec::new();
+
+            let num_tasks = 10;
+            let count = 10;
+
+            let total = Arc::new(Exclusive::new(box 0));
+
+            for _ in range(0u, num_tasks) {
+                let total = total.clone();
+                let (tx, rx) = channel();
+                futures.push(rx);
+
+                task::spawn(move || {
+                    for _ in range(0u, count) {
+                        **total.lock() += 1;
+                    }
+                    tx.send(());
+                });
+            };
+
+            for f in futures.iter_mut() { f.recv() }
+
+            assert_eq!(**total.lock(), num_tasks * count);
+        }
+    }
+}
diff --git a/src/libstd/rt/libunwind.rs b/src/libstd/rt/libunwind.rs
new file mode 100644
index 00000000000..2feea7fa0a4
--- /dev/null
+++ b/src/libstd/rt/libunwind.rs
@@ -0,0 +1,128 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unwind library interface
+
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+#![allow(dead_code)] // these are just bindings
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+pub use self::_Unwind_Action::*;
+#[cfg(target_arch = "arm")]
+pub use self::_Unwind_State::*;
+pub use self::_Unwind_Reason_Code::*;
+
+use libc;
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+#[repr(C)]
+#[deriving(Copy)]
+pub enum _Unwind_Action {
+    _UA_SEARCH_PHASE = 1,
+    _UA_CLEANUP_PHASE = 2,
+    _UA_HANDLER_FRAME = 4,
+    _UA_FORCE_UNWIND = 8,
+    _UA_END_OF_STACK = 16,
+}
+
+#[cfg(target_arch = "arm")]
+#[repr(C)]
+pub enum _Unwind_State {
+    _US_VIRTUAL_UNWIND_FRAME = 0,
+    _US_UNWIND_FRAME_STARTING = 1,
+    _US_UNWIND_FRAME_RESUME = 2,
+    _US_ACTION_MASK = 3,
+    _US_FORCE_UNWIND = 8,
+    _US_END_OF_STACK = 16
+}
+
+#[repr(C)]
+pub enum _Unwind_Reason_Code {
+    _URC_NO_REASON = 0,
+    _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+    _URC_FATAL_PHASE2_ERROR = 2,
+    _URC_FATAL_PHASE1_ERROR = 3,
+    _URC_NORMAL_STOP = 4,
+    _URC_END_OF_STACK = 5,
+    _URC_HANDLER_FOUND = 6,
+    _URC_INSTALL_CONTEXT = 7,
+    _URC_CONTINUE_UNWIND = 8,
+    _URC_FAILURE = 9, // used only by ARM EABI
+}
+
+pub type _Unwind_Exception_Class = u64;
+
+pub type _Unwind_Word = libc::uintptr_t;
+
+#[cfg(target_arch = "x86")]
+pub const unwinder_private_data_size: uint = 5;
+
+#[cfg(target_arch = "x86_64")]
+pub const unwinder_private_data_size: uint = 6;
+
+#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
+pub const unwinder_private_data_size: uint = 20;
+
+#[cfg(all(target_arch = "arm", target_os = "ios"))]
+pub const unwinder_private_data_size: uint = 5;
+
+#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+pub const unwinder_private_data_size: uint = 2;
+
+#[repr(C)]
+pub struct _Unwind_Exception {
+    pub exception_class: _Unwind_Exception_Class,
+    pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
+    pub private: [_Unwind_Word, ..unwinder_private_data_size],
+}
+
+pub enum _Unwind_Context {}
+
+pub type _Unwind_Exception_Cleanup_Fn =
+        extern "C" fn(unwind_code: _Unwind_Reason_Code,
+                      exception: *mut _Unwind_Exception);
+
+#[cfg(any(target_os = "linux", target_os = "freebsd"))]
+#[link(name = "gcc_s")]
+extern {}
+
+#[cfg(target_os = "android")]
+#[link(name = "gcc")]
+extern {}
+
+#[cfg(target_os = "dragonfly")]
+#[link(name = "gcc_pic")]
+extern {}
+
+extern "C" {
+    // iOS on armv7 uses SjLj exceptions and requires to link
+    // against corresponding routine (..._SjLj_...)
+    #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+    pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception)
+                                  -> _Unwind_Reason_Code;
+
+    #[cfg(all(target_os = "ios", target_arch = "arm"))]
+    fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception)
+                                   -> _Unwind_Reason_Code;
+
+    pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
+}
+
+// ... and now we just providing access to SjLj counterspart
+// through a standard name to hide those details from others
+// (see also comment above regarding _Unwind_RaiseException)
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+#[inline(always)]
+pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception)
+                                     -> _Unwind_Reason_Code {
+    _Unwind_SjLj_RaiseException(exc)
+}
diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs
new file mode 100644
index 00000000000..089960a6bc8
--- /dev/null
+++ b/src/libstd/rt/local.rs
@@ -0,0 +1,131 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use rt::local_ptr;
+use rt::task::Task;
+
+/// Encapsulates some task-local data.
+pub trait Local<Borrowed> {
+    fn put(value: Box<Self>);
+    fn take() -> Box<Self>;
+    fn try_take() -> Option<Box<Self>>;
+    fn exists(unused_value: Option<Self>) -> bool;
+    fn borrow(unused_value: Option<Self>) -> Borrowed;
+    unsafe fn unsafe_take() -> Box<Self>;
+    unsafe fn unsafe_borrow() -> *mut Self;
+    unsafe fn try_unsafe_borrow() -> Option<*mut Self>;
+}
+
+impl Local<local_ptr::Borrowed<Task>> for Task {
+    #[inline]
+    fn put(value: Box<Task>) { unsafe { local_ptr::put(value) } }
+    #[inline]
+    fn take() -> Box<Task> { unsafe { local_ptr::take() } }
+    #[inline]
+    fn try_take() -> Option<Box<Task>> { unsafe { local_ptr::try_take() } }
+    fn exists(_: Option<Task>) -> bool { local_ptr::exists() }
+    #[inline]
+    fn borrow(_: Option<Task>) -> local_ptr::Borrowed<Task> {
+        unsafe {
+            local_ptr::borrow::<Task>()
+        }
+    }
+    #[inline]
+    unsafe fn unsafe_take() -> Box<Task> { local_ptr::unsafe_take() }
+    #[inline]
+    unsafe fn unsafe_borrow() -> *mut Task { local_ptr::unsafe_borrow() }
+    #[inline]
+    unsafe fn try_unsafe_borrow() -> Option<*mut Task> {
+        local_ptr::try_unsafe_borrow()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+    use super::*;
+    use super::super::thread::Thread;
+    use super::super::task::Task;
+
+    #[test]
+    fn thread_local_task_smoke_test() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn thread_local_task_two_instances() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn borrow_smoke_test() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            unsafe {
+                let _task: *mut Task = Local::unsafe_borrow();
+            }
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn borrow_with_return() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            {
+                let _ = Local::borrow(None::<Task>);
+            }
+
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn try_take() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            let t: Box<Task> = Local::try_take().unwrap();
+            let u: Option<Box<Task>> = Local::try_take();
+            assert!(u.is_none());
+
+            cleanup_task(t);
+        }).join();
+    }
+
+    fn cleanup_task(t: Box<Task>) {
+        t.drop();
+    }
+
+}
diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs
new file mode 100644
index 00000000000..a87bc3d2766
--- /dev/null
+++ b/src/libstd/rt/local_ptr.rs
@@ -0,0 +1,404 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Access to a single thread-local pointer.
+//!
+//! The runtime will use this for storing Box<Task>.
+//!
+//! FIXME: Add runtime checks for usage of inconsistent pointer types
+//! and for overwriting an existing pointer.
+
+#![allow(dead_code)]
+
+use core::prelude::*;
+
+use mem;
+use boxed::Box;
+
+#[cfg(any(windows, // mingw-w32 doesn't like thread_local things
+          target_os = "android", // see #10686
+          target_os = "ios"))]
+pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
+                       unsafe_borrow, try_unsafe_borrow};
+
+#[cfg(not(any(windows, target_os = "android", target_os = "ios")))]
+pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
+                         unsafe_borrow, try_unsafe_borrow};
+
+/// Encapsulates a borrowed value. When this value goes out of scope, the
+/// pointer is returned.
+pub struct Borrowed<T> {
+    val: *const (),
+}
+
+#[unsafe_destructor]
+impl<T> Drop for Borrowed<T> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.val.is_null() {
+                rtabort!("Aiee, returning null borrowed object!");
+            }
+            let val: Box<T> = mem::transmute(self.val);
+            put::<T>(val);
+            rtassert!(exists());
+        }
+    }
+}
+
+impl<T> Deref<T> for Borrowed<T> {
+    fn deref<'a>(&'a self) -> &'a T {
+        unsafe { &*(self.val as *const T) }
+    }
+}
+
+impl<T> DerefMut<T> for Borrowed<T> {
+    fn deref_mut<'a>(&'a mut self) -> &'a mut T {
+        unsafe { &mut *(self.val as *mut T) }
+    }
+}
+
+/// Borrow the thread-local value from thread-local storage.
+/// While the value is borrowed it is not available in TLS.
+///
+/// # Safety note
+///
+/// Does not validate the pointer type.
+#[inline]
+pub unsafe fn borrow<T>() -> Borrowed<T> {
+    let val: *const () = mem::transmute(take::<T>());
+    Borrowed {
+        val: val,
+    }
+}
+
+/// Compiled implementation of accessing the runtime local pointer. This is
+/// implemented using LLVM's thread_local attribute which isn't necessarily
+/// working on all platforms. This implementation is faster, however, so we use
+/// it wherever possible.
+#[cfg(not(any(windows, target_os = "android", target_os = "ios")))]
+pub mod compiled {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use mem;
+
+    #[cfg(test)]
+    pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
+
+    #[cfg(not(test))]
+    #[thread_local]
+    pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
+
+    pub fn init() {}
+
+    pub unsafe fn cleanup() {}
+
+    // Rationale for all of these functions being inline(never)
+    //
+    // The #[thread_local] annotation gets propagated all the way through to
+    // LLVM, meaning the global is specially treated by LLVM to lower it to an
+    // efficient sequence of instructions. This also involves dealing with fun
+    // stuff in object files and whatnot. Regardless, it turns out this causes
+    // trouble with green threads and lots of optimizations turned on. The
+    // following case study was done on Linux x86_64, but I would imagine that
+    // other platforms are similar.
+    //
+    // On Linux, the instruction sequence for loading the tls pointer global
+    // looks like:
+    //
+    //      mov %fs:0x0, %rax
+    //      mov -0x8(%rax), %rbx
+    //
+    // This code leads me to believe that (%fs:0x0) is a table, and then the
+    // table contains the TLS values for the process. Hence, the slot at offset
+    // -0x8 is the task TLS pointer. This leads us to the conclusion that this
+    // table is the actual thread local part of each thread. The kernel sets up
+    // the fs segment selector to point at the right region of memory for each
+    // thread.
+    //
+    // Optimizations lead me to believe that this code is lowered to these
+    // instructions in the LLVM codegen passes, because you'll see code like
+    // this when everything is optimized:
+    //
+    //      mov %fs:0x0, %r14
+    //      mov -0x8(%r14), %rbx
+    //      // do something with %rbx, the rust Task pointer
+    //
+    //      ... // <- do more things
+    //
+    //      mov -0x8(%r14), %rbx
+    //      // do something else with %rbx
+    //
+    // Note that the optimization done here is that the first load is not
+    // duplicated during the lower instructions. This means that the %fs:0x0
+    // memory location is only dereferenced once.
+    //
+    // Normally, this is actually a good thing! With green threads, however,
+    // it's very possible for the code labeled "do more things" to context
+    // switch to another thread. If this happens, then we *must* re-load %fs:0x0
+    // because it's changed (we're on a different thread). If we don't re-load
+    // the table location, then we'll be reading the original thread's TLS
+    // values, not our thread's TLS values.
+    //
+    // Hence, we never inline these functions. By never inlining, we're
+    // guaranteed that loading the table is a local decision which is forced to
+    // *always* happen.
+
+    /// Give a pointer to thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn put<T>(sched: Box<T>) {
+        RT_TLS_PTR = mem::transmute(sched)
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn take<T>() -> Box<T> {
+        let ptr = RT_TLS_PTR;
+        rtassert!(!ptr.is_null());
+        let ptr: Box<T> = mem::transmute(ptr);
+        // can't use `as`, due to type not matching with `cfg(test)`
+        RT_TLS_PTR = mem::transmute(0u);
+        ptr
+    }
+
+    /// Optionally take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn try_take<T>() -> Option<Box<T>> {
+        let ptr = RT_TLS_PTR;
+        if ptr.is_null() {
+            None
+        } else {
+            let ptr: Box<T> = mem::transmute(ptr);
+            // can't use `as`, due to type not matching with `cfg(test)`
+            RT_TLS_PTR = mem::transmute(0u);
+            Some(ptr)
+        }
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    /// Leaves the old pointer in TLS for speed.
+    #[inline(never)] // see comments above
+    pub unsafe fn unsafe_take<T>() -> Box<T> {
+        mem::transmute(RT_TLS_PTR)
+    }
+
+    /// Check whether there is a thread-local pointer installed.
+    #[inline(never)] // see comments above
+    pub fn exists() -> bool {
+        unsafe {
+            RT_TLS_PTR.is_not_null()
+        }
+    }
+
+    #[inline(never)] // see comments above
+    pub unsafe fn unsafe_borrow<T>() -> *mut T {
+        if RT_TLS_PTR.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        RT_TLS_PTR as *mut T
+    }
+
+    #[inline(never)] // see comments above
+    pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
+        if RT_TLS_PTR.is_null() {
+            None
+        } else {
+            Some(RT_TLS_PTR as *mut T)
+        }
+    }
+}
+
+/// Native implementation of having the runtime thread-local pointer. This
+/// implementation uses the `thread_local_storage` module to provide a
+/// thread-local value.
+pub mod native {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use mem;
+    use ptr;
+    use rt::thread_local_storage as tls;
+
+    static mut RT_TLS_KEY: tls::Key = -1;
+
+    /// Initialize the TLS key. Other ops will fail if this isn't executed
+    /// first.
+    pub fn init() {
+        unsafe {
+            tls::create(&mut RT_TLS_KEY);
+        }
+    }
+
+    pub unsafe fn cleanup() {
+        rtassert!(RT_TLS_KEY != -1);
+        tls::destroy(RT_TLS_KEY);
+    }
+
+    /// Give a pointer to thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn put<T>(sched: Box<T>) {
+        let key = tls_key();
+        let void_ptr: *mut u8 = mem::transmute(sched);
+        tls::set(key, void_ptr);
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn take<T>() -> Box<T> {
+        let key = tls_key();
+        let void_ptr: *mut u8 = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        let ptr: Box<T> = mem::transmute(void_ptr);
+        tls::set(key, ptr::null_mut());
+        return ptr;
+    }
+
+    /// Optionally take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn try_take<T>() -> Option<Box<T>> {
+        match maybe_tls_key() {
+            Some(key) => {
+                let void_ptr: *mut u8 = tls::get(key);
+                if void_ptr.is_null() {
+                    None
+                } else {
+                    let ptr: Box<T> = mem::transmute(void_ptr);
+                    tls::set(key, ptr::null_mut());
+                    Some(ptr)
+                }
+            }
+            None => None
+        }
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    /// Leaves the old pointer in TLS for speed.
+    #[inline]
+    pub unsafe fn unsafe_take<T>() -> Box<T> {
+        let key = tls_key();
+        let void_ptr: *mut u8 = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        let ptr: Box<T> = mem::transmute(void_ptr);
+        return ptr;
+    }
+
+    /// Check whether there is a thread-local pointer installed.
+    pub fn exists() -> bool {
+        unsafe {
+            match maybe_tls_key() {
+                Some(key) => tls::get(key).is_not_null(),
+                None => false
+            }
+        }
+    }
+
+    /// Borrow a mutable reference to the thread-local value
+    ///
+    /// # Safety Note
+    ///
+    /// Because this leaves the value in thread-local storage it is possible
+    /// for the Scheduler pointer to be aliased
+    pub unsafe fn unsafe_borrow<T>() -> *mut T {
+        let key = tls_key();
+        let void_ptr = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        void_ptr as *mut T
+    }
+
+    pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
+        match maybe_tls_key() {
+            Some(key) => {
+                let void_ptr = tls::get(key);
+                if void_ptr.is_null() {
+                    None
+                } else {
+                    Some(void_ptr as *mut T)
+                }
+            }
+            None => None
+        }
+    }
+
+    #[inline]
+    fn tls_key() -> tls::Key {
+        match maybe_tls_key() {
+            Some(key) => key,
+            None => rtabort!("runtime tls key not initialized")
+        }
+    }
+
+    #[inline]
+    #[cfg(not(test))]
+    pub fn maybe_tls_key() -> Option<tls::Key> {
+        unsafe {
+            // NB: This is a little racy because, while the key is
+            // initialized under a mutex and it's assumed to be initialized
+            // in the Scheduler ctor by any thread that needs to use it,
+            // we are not accessing the key under a mutex.  Threads that
+            // are not using the new Scheduler but still *want to check*
+            // whether they are running under a new Scheduler may see a 0
+            // value here that is in the process of being initialized in
+            // another thread. I think this is fine since the only action
+            // they could take if it was initialized would be to check the
+            // thread-local value and see that it's not set.
+            if RT_TLS_KEY != -1 {
+                return Some(RT_TLS_KEY);
+            } else {
+                return None;
+            }
+        }
+    }
+
+    #[inline] #[cfg(test)]
+    pub fn maybe_tls_key() -> Option<tls::Key> {
+        use rt;
+        unsafe {
+            mem::transmute(::realstd::rt::shouldnt_be_public::maybe_tls_key())
+        }
+    }
+}
diff --git a/src/libstd/rt/macros.rs b/src/libstd/rt/macros.rs
new file mode 100644
index 00000000000..bee8b5b82f4
--- /dev/null
+++ b/src/libstd/rt/macros.rs
@@ -0,0 +1,45 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Macros used by the runtime.
+//!
+//! These macros call functions which are only accessible in the `rt` module, so
+//! they aren't defined anywhere outside of the `rt` module.
+
+#![macro_escape]
+
+macro_rules! rterrln {
+    ($fmt:expr $($arg:tt)*) => ( {
+        format_args!(::rt::util::dumb_print, concat!($fmt, "\n") $($arg)*)
+    } )
+}
+
+// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
+macro_rules! rtdebug {
+    ($($arg:tt)*) => ( {
+        if cfg!(rtdebug) {
+            rterrln!($($arg)*)
+        }
+    })
+}
+
+macro_rules! rtassert {
+    ( $arg:expr ) => ( {
+        if ::rt::util::ENFORCE_SANITY {
+            if !$arg {
+                rtabort!(" assertion failed: {}", stringify!($arg));
+            }
+        }
+    } )
+}
+
+macro_rules! rtabort {
+    ($($arg:tt)*) => (format_args!(::rt::util::abort, $($arg)*))
+}
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index eb517047ddc..21c8197ef05 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -50,23 +50,43 @@
 
 use borrow::IntoCow;
 use failure;
-use rustrt;
 use os;
 use thunk::Thunk;
+use kinds::Send;
+use sys_common;
 
 // Reexport some of our utilities which are expected by other crates.
 pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
+pub use self::unwind::{begin_unwind, begin_unwind_fmt};
 
-// Reexport functionality from librustrt and other crates underneath the
-// standard library which work together to create the entire runtime.
+// Reexport some functionality from liballoc.
 pub use alloc::heap;
-pub use rustrt::{begin_unwind, begin_unwind_fmt, at_exit};
 
 // Simple backtrace functionality (to print on panic)
 pub mod backtrace;
 
-// Just stuff
-mod util;
+// Internals
+mod macros;
+
+// These should be refactored/moved/made private over time
+pub mod mutex;
+pub mod thread;
+pub mod exclusive;
+pub mod util;
+pub mod bookkeeping;
+pub mod local;
+pub mod task;
+pub mod unwind;
+
+mod args;
+mod at_exit_imp;
+mod libunwind;
+mod local_ptr;
+mod thread_local_storage;
+
+/// The default error code of the rust runtime if the main task panics instead
+/// of exiting cleanly.
+pub const DEFAULT_ERROR_CODE: int = 101;
 
 /// One-time runtime initialization.
 ///
@@ -75,8 +95,15 @@ mod util;
 /// metadata, and storing the process arguments.
 #[allow(experimental)]
 pub fn init(argc: int, argv: *const *const u8) {
-    rustrt::init(argc, argv);
-    unsafe { rustrt::unwind::register(failure::on_fail); }
+    // FIXME: Derefing these pointers is not safe.
+    // Need to propagate the unsafety to `start`.
+    unsafe {
+        args::init(argc, argv);
+        local_ptr::init();
+        at_exit_imp::init();
+        thread::init();
+        unwind::register(failure::on_fail);
+    }
 }
 
 #[cfg(any(windows, android))]
@@ -106,7 +133,8 @@ fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
 pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
     use prelude::*;
     use rt;
-    use rustrt::task::Task;
+    use rt::task::Task;
+    use str;
 
     let something_around_the_top_of_the_stack = 1;
     let addr = &something_around_the_top_of_the_stack as *const int;
@@ -139,18 +167,35 @@ pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
     let mut exit_code = None;
     let mut main = Some(main);
     let mut task = box Task::new(Some((my_stack_bottom, my_stack_top)),
-                                 Some(rustrt::thread::main_guard_page()));
-    task.name = Some("<main>".into_cow());
+                                 Some(rt::thread::main_guard_page()));
+    task.name = Some(str::Slice("<main>"));
     drop(task.run(|| {
         unsafe {
-            rustrt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
+            sys_common::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
         }
         (main.take().unwrap()).invoke(());
         exit_code = Some(os::get_exit_status());
     }).destroy());
-    unsafe { rt::cleanup(); }
+    unsafe { cleanup(); }
     // If the exit code wasn't set, then the task block must have panicked.
-    return exit_code.unwrap_or(rustrt::DEFAULT_ERROR_CODE);
+    return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE);
+}
+
+/// Enqueues a procedure to run when the runtime is cleaned up
+///
+/// The procedure passed to this function will be executed as part of the
+/// runtime cleanup phase. For normal rust programs, this means that it will run
+/// after all other tasks have exited.
+///
+/// The procedure is *not* executed with a local `Task` available to it, so
+/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
+/// This is meant for "bare bones" usage to clean up runtime details, this is
+/// not meant as a general-purpose "let's clean everything up" function.
+///
+/// It is forbidden for procedures to register more `at_exit` handlers when they
+/// are running, and doing so will lead to a process abort.
+pub fn at_exit(f: proc():Send) {
+    at_exit_imp::push(f);
 }
 
 /// One-time runtime cleanup.
@@ -163,5 +208,18 @@ pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
 /// Invoking cleanup while portions of the runtime are still in use may cause
 /// undefined behavior.
 pub unsafe fn cleanup() {
-    rustrt::cleanup();
+    bookkeeping::wait_for_other_tasks();
+    args::cleanup();
+    thread::cleanup();
+    local_ptr::cleanup();
+    at_exit_imp::run();
+}
+
+// FIXME: these probably shouldn't be public...
+#[doc(hidden)]
+pub mod shouldnt_be_public {
+    #[cfg(not(test))]
+    pub use super::local_ptr::native::maybe_tls_key;
+    #[cfg(all(not(windows), not(target_os = "android"), not(target_os = "ios")))]
+    pub use super::local_ptr::compiled::RT_TLS_PTR;
 }
diff --git a/src/libstd/rt/mutex.rs b/src/libstd/rt/mutex.rs
new file mode 100644
index 00000000000..381f14570df
--- /dev/null
+++ b/src/libstd/rt/mutex.rs
@@ -0,0 +1,406 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A native mutex and condition variable type.
+//!
+//! This module contains bindings to the platform's native mutex/condition
+//! variable primitives. It provides two types: `StaticNativeMutex`, which can
+//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
+//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
+//! objects serve as both mutexes and condition variables simultaneously.
+//!
+//! The static lock is lazily initialized, but it can only be unsafely
+//! destroyed. A statically initialized lock doesn't necessarily have a time at
+//! which it can get deallocated. For this reason, there is no `Drop`
+//! implementation of the static mutex, but rather the `destroy()` method must
+//! be invoked manually if destruction of the mutex is desired.
+//!
+//! The non-static `NativeMutex` type does have a destructor, but cannot be
+//! statically initialized.
+//!
+//! It is not recommended to use this type for idiomatic rust use. These types
+//! are appropriate where no other options are available, but other rust
+//! concurrency primitives should be used before them: the `sync` crate defines
+//! `StaticMutex` and `Mutex` types.
+//!
+//! # Example
+//!
+//! ```rust
+//! use rt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
+//!
+//! // Use a statically initialized mutex
+//! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+//!
+//! unsafe {
+//!     let _guard = LOCK.lock();
+//! } // automatically unlocked here
+//!
+//! // Use a normally initialized mutex
+//! unsafe {
+//!     let mut lock = NativeMutex::new();
+//!
+//!     {
+//!         let _guard = lock.lock();
+//!     } // unlocked here
+//!
+//!     // sometimes the RAII guard isn't appropriate
+//!     lock.lock_noguard();
+//!     lock.unlock_noguard();
+//! } // `lock` is deallocated here
+//! ```
+
+#![allow(non_camel_case_types)]
+
+use core::prelude::*;
+
+use sys::mutex as imp;
+
+/// A native mutex suitable for storing in statics (that is, it has
+/// the `destroy` method rather than a destructor).
+///
+/// Prefer the `NativeMutex` type where possible, since that does not
+/// require manual deallocation.
+pub struct StaticNativeMutex {
+    inner: imp::Mutex,
+}
+
+/// A native mutex with a destructor for clean-up.
+///
+/// See `StaticNativeMutex` for a version that is suitable for storing in
+/// statics.
+pub struct NativeMutex {
+    inner: StaticNativeMutex
+}
+
+/// Automatically unlocks the mutex that it was created from on
+/// destruction.
+///
+/// Using this makes lock-based code resilient to unwinding/task
+/// panic, because the lock will be automatically unlocked even
+/// then.
+#[must_use]
+pub struct LockGuard<'a> {
+    lock: &'a StaticNativeMutex
+}
+
+pub const NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
+    inner: imp::MUTEX_INIT,
+};
+
+impl StaticNativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// Note that a mutex created in this way needs to be explicitly
+    /// freed with a call to `destroy` or it will leak.
+    /// Also it is important to avoid locking until the mutex has stopped moving
+    pub unsafe fn new() -> StaticNativeMutex {
+        StaticNativeMutex { inner: imp::Mutex::new() }
+    }
+
+    /// Acquires this lock. This assumes that the current thread does not
+    /// already hold the lock.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    /// unsafe {
+    ///     let _guard = LOCK.lock();
+    ///     // critical section...
+    /// } // automatically unlocked in `_guard`'s destructor
+    /// ```
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe because it will not function correctly if this
+    /// mutex has been *moved* since it was last used. The mutex can move an
+    /// arbitrary number of times before its first usage, but once a mutex has
+    /// been used once it is no longer allowed to move (or otherwise it invokes
+    /// undefined behavior).
+    ///
+    /// Additionally, this type does not take into account any form of
+    /// scheduling model. This will unconditionally block the *os thread* which
+    /// is not always desired.
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
+        self.inner.lock();
+
+        LockGuard { lock: self }
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`.
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
+        if self.inner.trylock() {
+            Some(LockGuard { lock: self })
+        } else {
+            None
+        }
+    }
+
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// does not guarantee that the mutex will ever be unlocked, and it is
+    /// undefined to drop an already-locked mutex.
+    pub unsafe fn lock_noguard(&self) { self.inner.lock() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock_noguard`.
+    pub unsafe fn trylock_noguard(&self) -> bool {
+        self.inner.trylock()
+    }
+
+    /// Unlocks the lock. This assumes that the current thread already holds the
+    /// lock.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, it
+    /// is not guaranteed that this is unlocking a previously locked mutex. It
+    /// is undefined to unlock an unlocked mutex.
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
+
+    /// Block on the internal condition variable.
+    ///
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// is unsafe because the mutex may not be currently locked.
+    pub unsafe fn wait_noguard(&self) { self.inner.wait() }
+
+    /// Signals a thread in `wait` to wake up
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// is unsafe because the mutex may not be currently locked.
+    pub unsafe fn signal_noguard(&self) { self.inner.signal() }
+
+    /// This function is especially unsafe because there are no guarantees made
+    /// that no other thread is currently holding the lock or waiting on the
+    /// condition variable contained inside.
+    pub unsafe fn destroy(&self) { self.inner.destroy() }
+}
+
+impl NativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// The user must be careful to ensure the mutex is not locked when it is
+    /// being destroyed.
+    /// Also it is important to avoid locking until the mutex has stopped moving
+    pub unsafe fn new() -> NativeMutex {
+        NativeMutex { inner: StaticNativeMutex::new() }
+    }
+
+    /// Acquires this lock. This assumes that the current thread does not
+    /// already hold the lock.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use rt::mutex::NativeMutex;
+    /// unsafe {
+    ///     let mut lock = NativeMutex::new();
+    ///
+    ///     {
+    ///         let _guard = lock.lock();
+    ///         // critical section...
+    ///     } // automatically unlocked in `_guard`'s destructor
+    /// }
+    /// ```
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::lock`.
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
+        self.inner.lock()
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::trylock`.
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
+        self.inner.trylock()
+    }
+
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::lock_noguard`.
+    pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::trylock_noguard`.
+    pub unsafe fn trylock_noguard(&self) -> bool {
+        self.inner.trylock_noguard()
+    }
+
+    /// Unlocks the lock. This assumes that the current thread already holds the
+    /// lock.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::unlock_noguard`.
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
+
+    /// Block on the internal condition variable.
+    ///
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::wait_noguard`.
+    pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }
+
+    /// Signals a thread in `wait` to wake up
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::signal_noguard`.
+    pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
+}
+
+impl Drop for NativeMutex {
+    fn drop(&mut self) {
+        unsafe {self.inner.destroy()}
+    }
+}
+
+impl<'a> LockGuard<'a> {
+    /// Block on the internal condition variable.
+    pub unsafe fn wait(&self) {
+        self.lock.wait_noguard()
+    }
+
+    /// Signals a thread in `wait` to wake up.
+    pub unsafe fn signal(&self) {
+        self.lock.signal_noguard()
+    }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for LockGuard<'a> {
+    fn drop(&mut self) {
+        unsafe {self.lock.unlock_noguard()}
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+
+    use mem::drop;
+    use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+    use rt::thread::Thread;
+
+    #[test]
+    fn smoke_lock() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            let _guard = LK.lock();
+        }
+    }
+
+    #[test]
+    fn smoke_cond() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            let guard = LK.lock();
+            let t = Thread::start(move|| {
+                let guard = LK.lock();
+                guard.signal();
+            });
+            guard.wait();
+            drop(guard);
+
+            t.join();
+        }
+    }
+
+    #[test]
+    fn smoke_lock_noguard() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            LK.lock_noguard();
+            LK.unlock_noguard();
+        }
+    }
+
+    #[test]
+    fn smoke_cond_noguard() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            LK.lock_noguard();
+            let t = Thread::start(move|| {
+                LK.lock_noguard();
+                LK.signal_noguard();
+                LK.unlock_noguard();
+            });
+            LK.wait_noguard();
+            LK.unlock_noguard();
+
+            t.join();
+        }
+    }
+
+    #[test]
+    fn destroy_immediately() {
+        unsafe {
+            let m = StaticNativeMutex::new();
+            m.destroy();
+        }
+    }
+}
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
new file mode 100644
index 00000000000..babd111b3c2
--- /dev/null
+++ b/src/libstd/rt/task.rs
@@ -0,0 +1,561 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Language-level runtime services that should reasonably be expected
+//! to be available 'everywhere'. Unwinding, local storage, and logging.
+//! Even a 'freestanding' Rust would likely want to implement this.
+
+pub use self::BlockedTask::*;
+use self::TaskState::*;
+
+use any::Any;
+use boxed::Box;
+use sync::Arc;
+use sync::atomic::{AtomicUint, SeqCst};
+use iter::{IteratorExt, Take};
+use kinds::marker;
+use mem;
+use ops::FnMut;
+use core::prelude::{Clone, Drop, Err, Iterator, None, Ok, Option, Send, Some};
+use core::prelude::{drop};
+use str::SendStr;
+use thunk::Thunk;
+
+use rt;
+use rt::bookkeeping;
+use rt::mutex::NativeMutex;
+use rt::local::Local;
+use rt::thread::{mod, Thread};
+use sys_common::stack;
+use rt::unwind;
+use rt::unwind::Unwinder;
+
+/// State associated with Rust tasks.
+///
+/// This structure is currently undergoing major changes, and is
+/// likely to be moved or merged with a `Thread` structure.
+pub struct Task {
+    pub unwinder: Unwinder,
+    pub death: Death,
+    pub name: Option<SendStr>,
+
+    state: TaskState,
+    lock: NativeMutex,       // native synchronization
+    awoken: bool,            // used to prevent spurious wakeups
+
+    // This field holds the known bounds of the stack in (lo, hi) form. Not all
+    // native tasks necessarily know their precise bounds, hence this is
+    // optional.
+    stack_bounds: (uint, uint),
+
+    stack_guard: uint
+}
+
+// Once a task has entered the `Armed` state it must be destroyed via `drop`,
+// and no other method. This state is used to track this transition.
+#[deriving(PartialEq)]
+enum TaskState {
+    New,
+    Armed,
+    Destroyed,
+}
+
+pub struct TaskOpts {
+    /// Invoke this procedure with the result of the task when it finishes.
+    pub on_exit: Option<Thunk<Result>>,
+    /// A name for the task-to-be, for identification in panic messages
+    pub name: Option<SendStr>,
+    /// The size of the stack for the spawned task
+    pub stack_size: Option<uint>,
+}
+
+/// Indicates the manner in which a task exited.
+///
+/// A task that completes without panicking is considered to exit successfully.
+///
+/// If you wish for this result's delivery to block until all
+/// child tasks complete, consider using a result future.
+pub type Result = ::core::result::Result<(), Box<Any + Send>>;
+
+/// A handle to a blocked task. Usually this means owning the Box<Task>
+/// pointer, but if the task is killable, a killer can steal it
+/// at any time.
+pub enum BlockedTask {
+    Owned(Box<Task>),
+    Shared(Arc<AtomicUint>),
+}
+
+/// Per-task state related to task death, killing, panic, etc.
+pub struct Death {
+    pub on_exit: Option<Thunk<Result>>,
+}
+
+pub struct BlockedTasks {
+    inner: Arc<AtomicUint>,
+}
+
+impl Task {
+    /// Creates a new uninitialized task.
+    pub fn new(stack_bounds: Option<(uint, uint)>, stack_guard: Option<uint>) -> Task {
+        Task {
+            unwinder: Unwinder::new(),
+            death: Death::new(),
+            state: New,
+            name: None,
+            lock: unsafe { NativeMutex::new() },
+            awoken: false,
+            // these *should* get overwritten
+            stack_bounds: stack_bounds.unwrap_or((0, 0)),
+            stack_guard: stack_guard.unwrap_or(0)
+        }
+    }
+
+    pub fn spawn<F>(opts: TaskOpts, f: F)
+        where F : FnOnce(), F : Send
+    {
+        Task::spawn_thunk(opts, Thunk::new(f))
+    }
+
+    fn spawn_thunk(opts: TaskOpts, f: Thunk) {
+        let TaskOpts { name, stack_size, on_exit } = opts;
+
+        let mut task = box Task::new(None, None);
+        task.name = name;
+        task.death.on_exit = on_exit;
+
+        let stack = stack_size.unwrap_or(rt::min_stack());
+
+        // Note that this increment must happen *before* the spawn in order to
+        // guarantee that if this task exits it will always end up waiting for
+        // the spawned task to exit.
+        let token = bookkeeping::increment();
+
+        // Spawning a new OS thread guarantees that __morestack will never get
+        // triggered, but we must manually set up the actual stack bounds once
+        // this function starts executing. This raises the lower limit by a bit
+        // because by the time that this function is executing we've already
+        // consumed at least a little bit of stack (we don't know the exact byte
+        // address at which our stack started).
+        Thread::spawn_stack(stack, move|| {
+            let something_around_the_top_of_the_stack = 1;
+            let addr = &something_around_the_top_of_the_stack as *const int;
+            let my_stack = addr as uint;
+            unsafe {
+                stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
+                                                      my_stack);
+            }
+            task.stack_guard = thread::current_guard_page();
+            task.stack_bounds = (my_stack - stack + 1024, my_stack);
+
+            let mut f = Some(f);
+            drop(task.run(|| { f.take().unwrap().invoke(()) }).destroy());
+            drop(token);
+        })
+    }
+
+    /// Consumes ownership of a task, runs some code, and returns the task back.
+    ///
+    /// This function can be used as an emulated "try/catch" to interoperate
+    /// with the rust runtime at the outermost boundary. It is not possible to
+    /// use this function in a nested fashion (a try/catch inside of another
+    /// try/catch). Invoking this function is quite cheap.
+    ///
+    /// If the closure `f` succeeds, then the returned task can be used again
+    /// for another invocation of `run`. If the closure `f` panics then `self`
+    /// will be internally destroyed along with all of the other associated
+    /// resources of this task. The `on_exit` callback is invoked with the
+    /// cause of panic (not returned here). This can be discovered by querying
+    /// `is_destroyed()`.
+    ///
+    /// Note that it is possible to view partial execution of the closure `f`
+    /// because it is not guaranteed to run to completion, but this function is
+    /// guaranteed to return if it panics. Care should be taken to ensure that
+    /// stack references made by `f` are handled appropriately.
+    ///
+    /// It is invalid to call this function with a task that has been previously
+    /// destroyed via a failed call to `run`.
+    pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
+        assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
+
+        // First, make sure that no one else is in TLS. This does not allow
+        // recursive invocations of run(). If there's no one else, then
+        // relinquish ownership of ourselves back into TLS.
+        if Local::exists(None::<Task>) {
+            panic!("cannot run a task recursively inside another");
+        }
+        self.state = Armed;
+        Local::put(self);
+
+        // There are two primary reasons that general try/catch is unsafe. The
+        // first is that we do not support nested try/catch. The above check for
+        // an existing task in TLS is sufficient for this invariant to be
+        // upheld. The second is that unwinding while unwinding is not defined.
+        // We take care of that by having an 'unwinding' flag in the task
+        // itself. For these reasons, this unsafety should be ok.
+        let result = unsafe { unwind::try(f) };
+
+        // After running the closure given return the task back out if it ran
+        // successfully, or clean up the task if it panicked.
+        let task: Box<Task> = Local::take();
+        match result {
+            Ok(()) => task,
+            Err(cause) => { task.cleanup(Err(cause)) }
+        }
+    }
+
+    /// Destroy all associated resources of this task.
+    ///
+    /// This function will perform any necessary clean up to prepare the task
+    /// for destruction. It is required that this is called before a `Task`
+    /// falls out of scope.
+    ///
+    /// The returned task cannot be used for running any more code, but it may
+    /// be used to extract the runtime as necessary.
+    pub fn destroy(self: Box<Task>) -> Box<Task> {
+        if self.is_destroyed() {
+            self
+        } else {
+            self.cleanup(Ok(()))
+        }
+    }
+
+    /// Cleans up a task, processing the result of the task as appropriate.
+    ///
+    /// This function consumes ownership of the task, deallocating it once it's
+    /// done being processed. It is assumed that TLD and the local heap have
+    /// already been destroyed and/or annihilated.
+    fn cleanup(mut self: Box<Task>, result: Result) -> Box<Task> {
+        // After taking care of the data above, we need to transmit the result
+        // of this task.
+        let what_to_do = self.death.on_exit.take();
+        Local::put(self);
+
+        // FIXME: this is running in a seriously constrained context. If this
+        //        allocates TLD then it will likely abort the runtime. Similarly,
+        //        if this panics, this will also likely abort the runtime.
+        //
+        //        This closure is currently limited to a channel send via the
+        //        standard library's task interface, but this needs
+        //        reconsideration to whether it's a reasonable thing to let a
+        //        task to do or not.
+        match what_to_do {
+            Some(f) => { f.invoke(result) }
+            None => { drop(result) }
+        }
+
+        // Now that we're done, we remove the task from TLS and flag it for
+        // destruction.
+        let mut task: Box<Task> = Local::take();
+        task.state = Destroyed;
+        return task;
+    }
+
+    /// Queries whether this can be destroyed or not.
+    pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
+
+    /// Deschedules the current task, invoking `f` `amt` times. It is not
+    /// recommended to use this function directly, but rather communication
+    /// primitives in `std::comm` should be used.
+    //
+    // This function gets a little interesting. There are a few safety and
+    // ownership violations going on here, but this is all done in the name of
+    // shared state. Additionally, all of the violations are protected with a
+    // mutex, so in theory there are no races.
+    //
+    // The first thing we need to do is to get a pointer to the task's internal
+    // mutex. This address will not be changing (because the task is allocated
+    // on the heap). We must have this handle separately because the task will
+    // have its ownership transferred to the given closure. We're guaranteed,
+    // however, that this memory will remain valid because *this* is the current
+    // task's execution thread.
+    //
+    // The next weird part is where ownership of the task actually goes. We
+    // relinquish it to the `f` blocking function, but upon returning this
+    // function needs to replace the task back in TLS. There is no communication
+    // from the wakeup thread back to this thread about the task pointer, and
+    // there's really no need to. In order to get around this, we cast the task
+    // to a `uint` which is then used at the end of this function to cast back
+    // to a `Box<Task>` object. Naturally, this looks like it violates
+    // ownership semantics in that there may be two `Box<Task>` objects.
+    //
+    // The fun part is that the wakeup half of this implementation knows to
+    // "forget" the task on the other end. This means that the awakening half of
+    // things silently relinquishes ownership back to this thread, but not in a
+    // way that the compiler can understand. The task's memory is always valid
+    // for both tasks because these operations are all done inside of a mutex.
+    //
+    // You'll also find that if blocking fails (the `f` function hands the
+    // BlockedTask back to us), we will `mem::forget` the handles. The
+    // reasoning for this is the same logic as above in that the task silently
+    // transfers ownership via the `uint`, not through normal compiler
+    // semantics.
+    //
+    // On a mildly unrelated note, it should also be pointed out that OS
+    // condition variables are susceptible to spurious wakeups, which we need to
+    // be ready for. In order to accommodate this fact, we have an extra
+    // `awoken` field which indicates whether we were actually woken up via some
+    // invocation of `reawaken`. This flag is only ever accessed inside the
+    // lock, so there's no need to make it atomic.
+    pub fn deschedule<F>(mut self: Box<Task>, times: uint, mut f: F) where
+        F: FnMut(BlockedTask) -> ::core::result::Result<(), BlockedTask>,
+    {
+        unsafe {
+            let me = &mut *self as *mut Task;
+            let task = BlockedTask::block(self);
+
+            if times == 1 {
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+                match f(task) {
+                    Ok(()) => {
+                        while !(*me).awoken {
+                            guard.wait();
+                        }
+                    }
+                    Err(task) => { mem::forget(task.wake()); }
+                }
+            } else {
+                let iter = task.make_selectable(times);
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+
+                // Apply the given closure to all of the "selectable tasks",
+                // bailing on the first one that produces an error. Note that
+                // care must be taken such that when an error occurs, we
+                // may not own the task, so we may still have to wait for the
+                // task to become available. In other words, if task.wake()
+                // returns `None`, then someone else has ownership and we must
+                // wait for their signal.
+                match iter.map(f).filter_map(|a| a.err()).next() {
+                    None => {}
+                    Some(task) => {
+                        match task.wake() {
+                            Some(task) => {
+                                mem::forget(task);
+                                (*me).awoken = true;
+                            }
+                            None => {}
+                        }
+                    }
+                }
+                while !(*me).awoken {
+                    guard.wait();
+                }
+            }
+            // put the task back in TLS, and everything is as it once was.
+            Local::put(mem::transmute(me));
+        }
+    }
+
+    /// Wakes up a previously blocked task. This function can only be
+    /// called on tasks that were previously blocked in `deschedule`.
+    //
+    // See the comments on `deschedule` for why the task is forgotten here, and
+    // why it's valid to do so.
+    pub fn reawaken(mut self: Box<Task>) {
+        unsafe {
+            let me = &mut *self as *mut Task;
+            mem::forget(self);
+            let guard = (*me).lock.lock();
+            (*me).awoken = true;
+            guard.signal();
+        }
+    }
+
+    /// Yields control of this task to another task. This function will
+    /// eventually return, but possibly not immediately. This is used as an
+    /// opportunity to allow other tasks a chance to run.
+    pub fn yield_now() {
+        Thread::yield_now();
+    }
+
+    /// Returns the stack bounds for this task in (lo, hi) format. The stack
+    /// bounds may not be known for all tasks, so the return value may be
+    /// `None`.
+    pub fn stack_bounds(&self) -> (uint, uint) {
+        self.stack_bounds
+    }
+
+    /// Returns the stack guard for this task, if known.
+    pub fn stack_guard(&self) -> Option<uint> {
+        if self.stack_guard != 0 {
+            Some(self.stack_guard)
+        } else {
+            None
+        }
+    }
+
+    /// Consume this task, flagging it as a candidate for destruction.
+    ///
+    /// This function is required to be invoked to destroy a task. A task
+    /// destroyed through a normal drop will abort.
+    pub fn drop(mut self) {
+        self.state = Destroyed;
+    }
+}
+
+impl Drop for Task {
+    fn drop(&mut self) {
+        rtdebug!("called drop for a task: {}", self as *mut Task as uint);
+        rtassert!(self.state != Armed);
+    }
+}
+
+impl TaskOpts {
+    pub fn new() -> TaskOpts {
+        TaskOpts { on_exit: None, name: None, stack_size: None }
+    }
+}
+
+impl Iterator<BlockedTask> for BlockedTasks {
+    fn next(&mut self) -> Option<BlockedTask> {
+        Some(Shared(self.inner.clone()))
+    }
+}
+
+impl BlockedTask {
+    /// Returns Some if the task was successfully woken; None if already killed.
+    pub fn wake(self) -> Option<Box<Task>> {
+        match self {
+            Owned(task) => Some(task),
+            Shared(arc) => {
+                match arc.swap(0, SeqCst) {
+                    0 => None,
+                    n => Some(unsafe { mem::transmute(n) }),
+                }
+            }
+        }
+    }
+
+    /// Reawakens this task if ownership is acquired. If finer-grained control
+    /// is desired, use `wake` instead.
+    pub fn reawaken(self) {
+        self.wake().map(|t| t.reawaken());
+    }
+
+    // This assertion has two flavours because the wake involves an atomic op.
+    // In the faster version, destructors will panic dramatically instead.
+    #[cfg(not(test))] pub fn trash(self) { }
+    #[cfg(test)]      pub fn trash(self) { assert!(self.wake().is_none()); }
+
+    /// Create a blocked task, unless the task was already killed.
+    pub fn block(task: Box<Task>) -> BlockedTask {
+        Owned(task)
+    }
+
+    /// Converts one blocked task handle to a list of many handles to the same.
+    pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
+        let arc = match self {
+            Owned(task) => {
+                let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
+                Arc::new(flag)
+            }
+            Shared(arc) => arc.clone(),
+        };
+        BlockedTasks{ inner: arc }.take(num_handles)
+    }
+
+    /// Convert to an unsafe uint value. Useful for storing in a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_to_uint(self) -> uint {
+        match self {
+            Owned(task) => {
+                let blocked_task_ptr: uint = mem::transmute(task);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = mem::transmute(box arc);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr | 0x1
+            }
+        }
+    }
+
+    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
+        if blocked_task_ptr & 0x1 == 0 {
+            Owned(mem::transmute(blocked_task_ptr))
+        } else {
+            let ptr: Box<Arc<AtomicUint>> =
+                mem::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
+        }
+    }
+}
+
+impl Death {
+    pub fn new() -> Death {
+        Death { on_exit: None }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use prelude::*;
+    use task;
+    use rt::unwind;
+
+    #[test]
+    fn unwind() {
+        let result = task::try(move|| ());
+        rtdebug!("trying first assert");
+        assert!(result.is_ok());
+        let result = task::try(move|| -> () panic!());
+        rtdebug!("trying second assert");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn rng() {
+        use rand::{StdRng, Rng};
+        let mut r = StdRng::new().ok().unwrap();
+        let _ = r.next_u32();
+    }
+
+    #[test]
+    fn comm_stream() {
+        let (tx, rx) = channel();
+        tx.send(10i);
+        assert!(rx.recv() == 10);
+    }
+
+    #[test]
+    fn comm_shared_chan() {
+        let (tx, rx) = channel();
+        tx.send(10i);
+        assert!(rx.recv() == 10);
+    }
+
+    #[test]
+    #[should_fail]
+    fn test_begin_unwind() {
+        use rt::unwind::begin_unwind;
+        begin_unwind("cause", &(file!(), line!()))
+    }
+
+    #[test]
+    fn drop_new_task_ok() {
+        drop(Task::new(None, None));
+    }
+
+    // Task blocking tests
+
+    #[test]
+    fn block_and_wake() {
+        let task = box Task::new(None, None);
+        let task = BlockedTask::block(task).wake().unwrap();
+        task.drop();
+    }
+}
diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs
new file mode 100644
index 00000000000..c10338b1bce
--- /dev/null
+++ b/src/libstd/rt/thread.rs
@@ -0,0 +1,171 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Native os-thread management
+//!
+//! This module contains bindings necessary for managing OS-level threads.
+//! These functions operate outside of the rust runtime, creating threads
+//! which are not used for scheduling in any way.
+
+#![allow(non_camel_case_types)]
+
+use core::prelude::*;
+
+use boxed::Box;
+use mem;
+use sys::stack_overflow;
+use sys::thread as imp;
+
+pub unsafe fn init() {
+    imp::guard::init();
+    stack_overflow::init();
+}
+
+pub unsafe fn cleanup() {
+    stack_overflow::cleanup();
+}
+
+/// This struct represents a native thread's state. This is used to join on an
+/// existing thread created in the join-able state.
+pub struct Thread<T> {
+    native: imp::rust_thread,
+    joined: bool,
+    packet: Box<Option<T>>,
+}
+
+static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
+
+/// Returns the last writable byte of the main thread's stack next to the guard
+/// page. Must be called from the main thread.
+pub fn main_guard_page() -> uint {
+    unsafe {
+        imp::guard::main()
+    }
+}
+
+/// Returns the last writable byte of the current thread's stack next to the
+/// guard page. Must not be called from the main thread.
+pub fn current_guard_page() -> uint {
+    unsafe {
+        imp::guard::current()
+    }
+}
+
+// There are two impl blocks b/c if T were specified at the top then it's just a
+// pain to specify a type parameter on Thread::spawn (which doesn't need the
+// type parameter).
+impl Thread<()> {
+
+    /// Starts execution of a new OS thread.
+    ///
+    /// This function will not wait for the thread to join, but a handle to the
+    /// thread will be returned.
+    ///
+    /// Note that the handle returned is used to acquire the return value of the
+    /// procedure `main`. The `join` function will wait for the thread to finish
+    /// and return the value that `main` generated.
+    ///
+    /// Also note that the `Thread` returned will *always* wait for the thread
+    /// to finish executing. This means that even if `join` is not explicitly
+    /// called, when the `Thread` falls out of scope its destructor will block
+    /// waiting for the OS thread.
+    pub fn start<T: Send>(main: proc():Send -> T) -> Thread<T> {
+        Thread::start_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `start`, but specifies an explicit
+    /// stack size for the new thread.
+    pub fn start_stack<T: Send>(stack: uint, main: proc():Send -> T) -> Thread<T> {
+
+        // We need the address of the packet to fill in to be stable so when
+        // `main` fills it in it's still valid, so allocate an extra box to do
+        // so.
+        let packet = box None;
+        let packet2: *mut Option<T> = unsafe {
+            *mem::transmute::<&Box<Option<T>>, *const *mut Option<T>>(&packet)
+        };
+        let main = proc() unsafe { *packet2 = Some(main()); };
+        let native = unsafe { imp::create(stack, box main) };
+
+        Thread {
+            native: native,
+            joined: false,
+            packet: packet,
+        }
+    }
+
+    /// This will spawn a new thread, but it will not wait for the thread to
+    /// finish, nor is it possible to wait for the thread to finish.
+    ///
+    /// This corresponds to creating threads in the 'detached' state on unix
+    /// systems. Note that platforms may not keep the main program alive even if
+    /// there are detached threads still running around.
+    pub fn spawn(main: proc():Send) {
+        Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `spawn`, but explicitly specifies a
+    /// stack size for the new thread.
+    pub fn spawn_stack(stack: uint, main: proc():Send) {
+        unsafe {
+            let handle = imp::create(stack, box main);
+            imp::detach(handle);
+        }
+    }
+
+    /// Relinquishes the CPU slot that this OS-thread is currently using,
+    /// allowing another thread to run for a while.
+    pub fn yield_now() {
+        unsafe { imp::yield_now(); }
+    }
+}
+
+impl<T: Send> Thread<T> {
+    /// Wait for this thread to finish, returning the result of the thread's
+    /// calculation.
+    pub fn join(mut self) -> T {
+        assert!(!self.joined);
+        unsafe { imp::join(self.native) };
+        self.joined = true;
+        assert!(self.packet.is_some());
+        self.packet.take().unwrap()
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Thread<T> {
+    fn drop(&mut self) {
+        // This is required for correctness. If this is not done then the thread
+        // would fill in a return box which no longer exists.
+        if !self.joined {
+            unsafe { imp::join(self.native) };
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Thread;
+
+    #[test]
+    fn smoke() { Thread::start(proc (){}).join(); }
+
+    #[test]
+    fn data() { assert_eq!(Thread::start(proc () { 1i }).join(), 1); }
+
+    #[test]
+    fn detached() { Thread::spawn(proc () {}) }
+
+    #[test]
+    fn small_stacks() {
+        assert_eq!(42i, Thread::start_stack(0, proc () 42i).join());
+        assert_eq!(42i, Thread::start_stack(1, proc () 42i).join());
+    }
+}
diff --git a/src/libstd/rt/thread_local_storage.rs b/src/libstd/rt/thread_local_storage.rs
new file mode 100644
index 00000000000..ee6ad8a4e08
--- /dev/null
+++ b/src/libstd/rt/thread_local_storage.rs
@@ -0,0 +1,115 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+#[cfg(unix)] use libc::c_int;
+#[cfg(unix)] use ptr::null;
+#[cfg(windows)] use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
+
+#[cfg(unix)]
+pub type Key = pthread_key_t;
+
+#[cfg(unix)]
+pub unsafe fn create(key: &mut Key) {
+    assert!(pthread_key_create(key, null()) == 0);
+}
+
+#[cfg(unix)]
+pub unsafe fn set(key: Key, value: *mut u8) {
+    assert!(pthread_setspecific(key, value) == 0);
+}
+
+#[cfg(unix)]
+pub unsafe fn get(key: Key) -> *mut u8 {
+    pthread_getspecific(key)
+}
+
+#[cfg(unix)]
+pub unsafe fn destroy(key: Key) {
+    assert!(pthread_key_delete(key) == 0);
+}
+
+#[cfg(target_os = "macos")]
+#[allow(non_camel_case_types)] // foreign type
+type pthread_key_t = ::libc::c_ulong;
+
+#[cfg(any(target_os="linux",
+          target_os="freebsd",
+          target_os="dragonfly",
+          target_os="android",
+          target_os = "ios"))]
+#[allow(non_camel_case_types)] // foreign type
+type pthread_key_t = ::libc::c_uint;
+
+#[cfg(unix)]
+extern {
+    fn pthread_key_create(key: *mut pthread_key_t, dtor: *const u8) -> c_int;
+    fn pthread_key_delete(key: pthread_key_t) -> c_int;
+    fn pthread_getspecific(key: pthread_key_t) -> *mut u8;
+    fn pthread_setspecific(key: pthread_key_t, value: *mut u8) -> c_int;
+}
+
+#[cfg(windows)]
+pub type Key = DWORD;
+
+#[cfg(windows)]
+pub unsafe fn create(key: &mut Key) {
+    static TLS_OUT_OF_INDEXES: DWORD = 0xFFFFFFFF;
+    *key = TlsAlloc();
+    assert!(*key != TLS_OUT_OF_INDEXES);
+}
+
+#[cfg(windows)]
+pub unsafe fn set(key: Key, value: *mut u8) {
+    assert!(0 != TlsSetValue(key, value as *mut ::libc::c_void))
+}
+
+#[cfg(windows)]
+pub unsafe fn get(key: Key) -> *mut u8 {
+    TlsGetValue(key) as *mut u8
+}
+
+#[cfg(windows)]
+pub unsafe fn destroy(key: Key) {
+    assert!(TlsFree(key) != 0);
+}
+
+#[cfg(windows)]
+#[allow(non_snake_case)]
+extern "system" {
+    fn TlsAlloc() -> DWORD;
+    fn TlsFree(dwTlsIndex: DWORD) -> BOOL;
+    fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
+    fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+    use super::*;
+
+    #[test]
+    fn tls_smoke_test() {
+        use mem::transmute;
+        unsafe {
+            let mut key = 0;
+            let value = box 20i;
+            create(&mut key);
+            set(key, transmute(value));
+            let value: Box<int> = transmute(get(key));
+            assert_eq!(value, box 20i);
+            let value = box 30i;
+            set(key, transmute(value));
+            let value: Box<int> = transmute(get(key));
+            assert_eq!(value, box 30i);
+        }
+    }
+}
diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs
new file mode 100644
index 00000000000..1ac06270851
--- /dev/null
+++ b/src/libstd/rt/unwind.rs
@@ -0,0 +1,638 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of Rust stack unwinding
+//!
+//! For background on exception handling and stack unwinding please see
+//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
+//! documents linked from it.
+//! These are also good reads:
+//!     http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
+//!     http://monoinfinito.wordpress.com/series/exception-handling-in-c/
+//!     http://www.airs.com/blog/index.php?s=exception+frames
+//!
+//! ## A brief summary
+//!
+//! Exception handling happens in two phases: a search phase and a cleanup phase.
+//!
+//! In both phases the unwinder walks stack frames from top to bottom using
+//! information from the stack frame unwind sections of the current process's
+//! modules ("module" here refers to an OS module, i.e. an executable or a
+//! dynamic library).
+//!
+//! For each stack frame, it invokes the associated "personality routine", whose
+//! address is also stored in the unwind info section.
+//!
+//! In the search phase, the job of a personality routine is to examine exception
+//! object being thrown, and to decide whether it should be caught at that stack
+//! frame.  Once the handler frame has been identified, cleanup phase begins.
+//!
+//! In the cleanup phase, personality routines invoke cleanup code associated
+//! with their stack frames (i.e. destructors).  Once stack has been unwound down
+//! to the handler frame level, unwinding stops and the last personality routine
+//! transfers control to its catch block.
+//!
+//! ## Frame unwind info registration
+//!
+//! Each module has its own frame unwind info section (usually ".eh_frame"), and
+//! unwinder needs to know about all of them in order for unwinding to be able to
+//! cross module boundaries.
+//!
+//! On some platforms, like Linux, this is achieved by dynamically enumerating
+//! currently loaded modules via the dl_iterate_phdr() API and finding all
+//! .eh_frame sections.
+//!
+//! Others, like Windows, require modules to actively register their unwind info
+//! sections by calling __register_frame_info() API at startup.  In the latter
+//! case it is essential that there is only one copy of the unwinder runtime in
+//! the process.  This is usually achieved by linking to the dynamic version of
+//! the unwind runtime.
+//!
+//! Currently Rust uses unwind runtime provided by libgcc.
+
+use core::prelude::*;
+
+use boxed::Box;
+use string::String;
+use str::StrAllocating;
+use vec::Vec;
+use any::Any;
+use sync::atomic;
+use cmp;
+use fmt;
+use intrinsics;
+use mem;
+use raw::Closure;
+use libc::c_void;
+
+use rt::local::Local;
+use rt::task::Task;
+
+use rt::libunwind as uw;
+
+#[allow(missing_copy_implementations)]
+pub struct Unwinder {
+    unwinding: bool,
+}
+
+struct Exception {
+    uwe: uw::_Unwind_Exception,
+    cause: Option<Box<Any + Send>>,
+}
+
+pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
+
+// Variables used for invoking callbacks when a task starts to unwind.
+//
+// For more information, see below.
+const MAX_CALLBACKS: uint = 16;
+static CALLBACKS: [atomic::AtomicUint, ..MAX_CALLBACKS] =
+        [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
+static CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+
+impl Unwinder {
+    pub fn new() -> Unwinder {
+        Unwinder {
+            unwinding: false,
+        }
+    }
+
+    pub fn unwinding(&self) -> bool {
+        self.unwinding
+    }
+}
+
+/// Invoke a closure, capturing the cause of panic if one occurs.
+///
+/// This function will return `None` if the closure did not panic, and will
+/// return `Some(cause)` if the closure panics. The `cause` returned is the
+/// object with which panic was originally invoked.
+///
+/// This function also is unsafe for a variety of reasons:
+///
+/// * This is not safe to call in a nested fashion. The unwinding
+///   interface for Rust is designed to have at most one try/catch block per
+///   task, not multiple. No runtime checking is currently performed to uphold
+///   this invariant, so this function is not safe. A nested try/catch block
+///   may result in corruption of the outer try/catch block's state, especially
+///   if this is used within a task itself.
+///
+/// * It is not sound to trigger unwinding while already unwinding. Rust tasks
+///   have runtime checks in place to ensure this invariant, but it is not
+///   guaranteed that a rust task is in place when invoking this function.
+///   Unwinding twice can lead to resource leaks where some destructors are not
+///   run.
+pub unsafe fn try(f: ||) -> ::core::result::Result<(), Box<Any + Send>> {
+    let closure: Closure = mem::transmute(f);
+    let ep = rust_try(try_fn, closure.code as *mut c_void,
+                      closure.env as *mut c_void);
+    return if ep.is_null() {
+        Ok(())
+    } else {
+        let my_ep = ep as *mut Exception;
+        rtdebug!("caught {}", (*my_ep).uwe.exception_class);
+        let cause = (*my_ep).cause.take();
+        uw::_Unwind_DeleteException(ep);
+        Err(cause.unwrap())
+    };
+
+    extern fn try_fn(code: *mut c_void, env: *mut c_void) {
+        unsafe {
+            let closure: || = mem::transmute(Closure {
+                code: code as *mut (),
+                env: env as *mut (),
+            });
+            closure();
+        }
+    }
+
+    #[link(name = "rustrt_native", kind = "static")]
+    #[cfg(not(test))]
+    extern {}
+
+    extern {
+        // Rust's try-catch
+        // When f(...) returns normally, the return value is null.
+        // When f(...) throws, the return value is a pointer to the caught
+        // exception object.
+        fn rust_try(f: extern "C" fn(*mut c_void, *mut c_void),
+                    code: *mut c_void,
+                    data: *mut c_void) -> *mut uw::_Unwind_Exception;
+    }
+}
+
+// An uninlined, unmangled function upon which to slap yer breakpoints
+#[inline(never)]
+#[no_mangle]
+fn rust_panic(cause: Box<Any + Send>) -> ! {
+    rtdebug!("begin_unwind()");
+
+    unsafe {
+        let exception = box Exception {
+            uwe: uw::_Unwind_Exception {
+                exception_class: rust_exception_class(),
+                exception_cleanup: exception_cleanup,
+                private: [0, ..uw::unwinder_private_data_size],
+            },
+            cause: Some(cause),
+        };
+        let error = uw::_Unwind_RaiseException(mem::transmute(exception));
+        rtabort!("Could not unwind stack, error = {}", error as int)
+    }
+
+    extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
+                                exception: *mut uw::_Unwind_Exception) {
+        rtdebug!("exception_cleanup()");
+        unsafe {
+            let _: Box<Exception> = mem::transmute(exception);
+        }
+    }
+}
+
+// Rust's exception class identifier.  This is used by personality routines to
+// determine whether the exception was thrown by their own runtime.
+fn rust_exception_class() -> uw::_Unwind_Exception_Class {
+    // M O Z \0  R U S T -- vendor, language
+    0x4d4f5a_00_52555354
+}
+
+// We could implement our personality routine in pure Rust, however exception
+// info decoding is tedious.  More importantly, personality routines have to
+// handle various platform quirks, which are not fun to maintain.  For this
+// reason, we attempt to reuse personality routine of the C language:
+// __gcc_personality_v0.
+//
+// Since C does not support exception catching, __gcc_personality_v0 simply
+// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
+// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
+//
+// This is pretty close to Rust's exception handling approach, except that Rust
+// does have a single "catch-all" handler at the bottom of each task's stack.
+// So we have two versions of the personality routine:
+// - rust_eh_personality, used by all cleanup landing pads, which never catches,
+//   so the behavior of __gcc_personality_v0 is perfectly adequate there, and
+// - rust_eh_personality_catch, used only by rust_try(), which always catches.
+//
+// Note, however, that for implementation simplicity, rust_eh_personality_catch
+// lacks code to install a landing pad, so in order to obtain exception object
+// pointer (which it needs to return upstream), rust_try() employs another trick:
+// it calls into the nested rust_try_inner(), whose landing pad does not resume
+// unwinds.  Instead, it extracts the exception pointer and performs a "normal"
+// return.
+//
+// See also: rt/rust_try.ll
+
+#[cfg(all(not(target_arch = "arm"),
+          not(all(windows, target_arch = "x86_64")),
+          not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_v0(version: c_int,
+                                actions: uw::_Unwind_Action,
+                                exception_class: uw::_Unwind_Exception_Class,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern fn rust_eh_personality(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_v0(version, actions, exception_class, ue_header,
+                                 context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        _version: c_int,
+        actions: uw::_Unwind_Action,
+        _exception_class: uw::_Unwind_Exception_Class,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+
+        if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            uw::_URC_INSTALL_CONTEXT
+        }
+    }
+}
+
+// iOS on armv7 is using SjLj exceptions and therefore requires to use
+// a specialized personality routine: __gcc_personality_sj0
+
+#[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_sj0(version: c_int,
+                                actions: uw::_Unwind_Action,
+                                exception_class: uw::_Unwind_Exception_Class,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_sj0(version, actions, exception_class, ue_header,
+                                  context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        _version: c_int,
+        actions: uw::_Unwind_Action,
+        _exception_class: uw::_Unwind_Exception_Class,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            unsafe {
+                __gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
+                                      _context)
+            }
+        }
+    }
+}
+
+
+// ARM EHABI uses a slightly different personality routine signature,
+// but otherwise works the same.
+#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_v0(state: uw::_Unwind_State,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern "C" fn rust_eh_personality(
+        state: uw::_Unwind_State,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_v0(state, ue_header, context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        state: uw::_Unwind_State,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        if (state as c_int & uw::_US_ACTION_MASK as c_int)
+                           == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            uw::_URC_INSTALL_CONTEXT
+        }
+    }
+}
+
+// Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
+//
+// This looks a bit convoluted because rather than implementing a native SEH handler,
+// GCC reuses the same personality routine as for the other architectures by wrapping it
+// with an "API translator" layer (_GCC_specific_handler).
+
+#[cfg(all(windows, target_arch = "x86_64", not(test)))]
+#[doc(hidden)]
+#[allow(non_camel_case_types, non_snake_case)]
+pub mod eabi {
+    pub use self::EXCEPTION_DISPOSITION::*;
+    use rt::libunwind as uw;
+    use libc::{c_void, c_int};
+
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct EXCEPTION_RECORD;
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct CONTEXT;
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct DISPATCHER_CONTEXT;
+
+    #[repr(C)]
+    pub enum EXCEPTION_DISPOSITION {
+        ExceptionContinueExecution,
+        ExceptionContinueSearch,
+        ExceptionNestedException,
+        ExceptionCollidedUnwind
+    }
+
+    impl Copy for EXCEPTION_DISPOSITION {}
+
+    type _Unwind_Personality_Fn =
+        extern "C" fn(
+            version: c_int,
+            actions: uw::_Unwind_Action,
+            exception_class: uw::_Unwind_Exception_Class,
+            ue_header: *mut uw::_Unwind_Exception,
+            context: *mut uw::_Unwind_Context
+        ) -> uw::_Unwind_Reason_Code;
+
+    extern "C" {
+        fn __gcc_personality_seh0(
+            exceptionRecord: *mut EXCEPTION_RECORD,
+            establisherFrame: *mut c_void,
+            contextRecord: *mut CONTEXT,
+            dispatcherContext: *mut DISPATCHER_CONTEXT
+        ) -> EXCEPTION_DISPOSITION;
+
+        fn _GCC_specific_handler(
+            exceptionRecord: *mut EXCEPTION_RECORD,
+            establisherFrame: *mut c_void,
+            contextRecord: *mut CONTEXT,
+            dispatcherContext: *mut DISPATCHER_CONTEXT,
+            personality: _Unwind_Personality_Fn
+        ) -> EXCEPTION_DISPOSITION;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern "C" fn rust_eh_personality(
+        exceptionRecord: *mut EXCEPTION_RECORD,
+        establisherFrame: *mut c_void,
+        contextRecord: *mut CONTEXT,
+        dispatcherContext: *mut DISPATCHER_CONTEXT
+    ) -> EXCEPTION_DISPOSITION
+    {
+        unsafe {
+            __gcc_personality_seh0(exceptionRecord, establisherFrame,
+                                   contextRecord, dispatcherContext)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        exceptionRecord: *mut EXCEPTION_RECORD,
+        establisherFrame: *mut c_void,
+        contextRecord: *mut CONTEXT,
+        dispatcherContext: *mut DISPATCHER_CONTEXT
+    ) -> EXCEPTION_DISPOSITION
+    {
+        extern "C" fn inner(
+                _version: c_int,
+                actions: uw::_Unwind_Action,
+                _exception_class: uw::_Unwind_Exception_Class,
+                _ue_header: *mut uw::_Unwind_Exception,
+                _context: *mut uw::_Unwind_Context
+            ) -> uw::_Unwind_Reason_Code
+        {
+            if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+                uw::_URC_HANDLER_FOUND // catch!
+            }
+            else { // cleanup phase
+                uw::_URC_INSTALL_CONTEXT
+            }
+        }
+
+        unsafe {
+            _GCC_specific_handler(exceptionRecord, establisherFrame,
+                                  contextRecord, dispatcherContext,
+                                  inner)
+        }
+    }
+}
+
+// Entry point of panic from the libcore crate
+#[cfg(not(test))]
+#[lang = "panic_fmt"]
+pub extern fn rust_begin_unwind(msg: &fmt::Arguments,
+                                file: &'static str, line: uint) -> ! {
+    begin_unwind_fmt(msg, &(file, line))
+}
+
+/// The entry point for unwinding with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as low an impact
+/// on (e.g.) the inlining of other functions as possible), by moving
+/// the actual formatting into this shared place.
+#[inline(never)] #[cold]
+pub fn begin_unwind_fmt(msg: &fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
+    use fmt::FormatWriter;
+
+    // We do two allocations here, unfortunately. But (a) they're
+    // required with the current scheme, and (b) we don't handle
+    // panic + OOM properly anyway (see comment in begin_unwind
+    // below).
+
+    struct VecWriter<'a> { v: &'a mut Vec<u8> }
+
+    impl<'a> fmt::FormatWriter for VecWriter<'a> {
+        fn write(&mut self, buf: &[u8]) -> fmt::Result {
+            self.v.push_all(buf);
+            Ok(())
+        }
+    }
+
+    let mut v = Vec::new();
+    let _ = write!(&mut VecWriter { v: &mut v }, "{}", msg);
+
+    let msg = box String::from_utf8_lossy(v.as_slice()).into_string();
+    begin_unwind_inner(msg, file_line)
+}
+
+/// This is the entry point of unwinding for panic!() and assert!().
+#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
+pub fn begin_unwind<M: Any + Send>(msg: M, file_line: &(&'static str, uint)) -> ! {
+    // Note that this should be the only allocation performed in this code path.
+    // Currently this means that panic!() on OOM will invoke this code path,
+    // but then again we're not really ready for panic on OOM anyway. If
+    // we do start doing this, then we should propagate this allocation to
+    // be performed in the parent of this task instead of the task that's
+    // panicking.
+
+    // see below for why we do the `Any` coercion here.
+    begin_unwind_inner(box msg, file_line)
+}
+
+/// The core of the unwinding.
+///
+/// This is non-generic to avoid instantiation bloat in other crates
+/// (which makes compilation of small crates noticeably slower). (Note:
+/// we need the `Any` object anyway, we're not just creating it to
+/// avoid being generic.)
+///
+/// Do this split took the LLVM IR line counts of `fn main() { panic!()
+/// }` from ~1900/3700 (-O/no opts) to 180/590.
+#[inline(never)] #[cold] // this is the slow path, please never inline this
+fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) -> ! {
+    // First, invoke call the user-defined callbacks triggered on task panic.
+    //
+    // By the time that we see a callback has been registered (by reading
+    // MAX_CALLBACKS), the actual callback itself may have not been stored yet,
+    // so we just chalk it up to a race condition and move on to the next
+    // callback. Additionally, CALLBACK_CNT may briefly be higher than
+    // MAX_CALLBACKS, so we're sure to clamp it as necessary.
+    let callbacks = {
+        let amt = CALLBACK_CNT.load(atomic::SeqCst);
+        CALLBACKS[..cmp::min(amt, MAX_CALLBACKS)]
+    };
+    for cb in callbacks.iter() {
+        match cb.load(atomic::SeqCst) {
+            0 => {}
+            n => {
+                let f: Callback = unsafe { mem::transmute(n) };
+                let (file, line) = *file_line;
+                f(&*msg, file, line);
+            }
+        }
+    };
+
+    // Now that we've run all the necessary unwind callbacks, we actually
+    // perform the unwinding. If we don't have a task, then it's time to die
+    // (hopefully someone printed something about this).
+    let mut task: Box<Task> = match Local::try_take() {
+        Some(task) => task,
+        None => rust_panic(msg),
+    };
+
+    if task.unwinder.unwinding {
+        // If a task panics while it's already unwinding then we
+        // have limited options. Currently our preference is to
+        // just abort. In the future we may consider resuming
+        // unwinding or otherwise exiting the task cleanly.
+        rterrln!("task failed during unwinding. aborting.");
+        unsafe { intrinsics::abort() }
+    }
+    task.unwinder.unwinding = true;
+
+    // Put the task back in TLS because the unwinding process may run code which
+    // requires the task. We need a handle to its unwinder, however, so after
+    // this we unsafely extract it and continue along.
+    Local::put(task);
+    rust_panic(msg);
+}
+
+/// Register a callback to be invoked when a task unwinds.
+///
+/// This is an unsafe and experimental API which allows for an arbitrary
+/// callback to be invoked when a task panics. This callback is invoked on both
+/// the initial unwinding and a double unwinding if one occurs. Additionally,
+/// the local `Task` will be in place for the duration of the callback, and
+/// the callback must ensure that it remains in place once the callback returns.
+///
+/// Only a limited number of callbacks can be registered, and this function
+/// returns whether the callback was successfully registered or not. It is not
+/// currently possible to unregister a callback once it has been registered.
+#[experimental]
+pub unsafe fn register(f: Callback) -> bool {
+    match CALLBACK_CNT.fetch_add(1, atomic::SeqCst) {
+        // The invocation code has knowledge of this window where the count has
+        // been incremented, but the callback has not been stored. We're
+        // guaranteed that the slot we're storing into is 0.
+        n if n < MAX_CALLBACKS => {
+            let prev = CALLBACKS[n].swap(mem::transmute(f), atomic::SeqCst);
+            rtassert!(prev == 0);
+            true
+        }
+        // If we accidentally bumped the count too high, pull it back.
+        _ => {
+            CALLBACK_CNT.store(MAX_CALLBACKS, atomic::SeqCst);
+            false
+        }
+    }
+}
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index ce359c7b0e0..d3cfccab9d0 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -7,10 +7,18 @@
 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
+//
+// ignore-lexer-test FIXME #15677
+
+use core::prelude::*;
 
-use libc::uintptr_t;
-use option::Option;
-use option::Option::{Some, None};
+use core::cmp;
+use core::fmt;
+use core::intrinsics;
+use core::slice;
+use core::str;
+
+use libc::{mod, uintptr_t};
 use os;
 use str::{FromStr, from_str, Str};
 use sync::atomic;
@@ -73,3 +81,136 @@ pub fn default_sched_threads() -> uint {
         }
     }
 }
+
+// Indicates whether we should perform expensive sanity checks, including rtassert!
+//
+// FIXME: Once the runtime matures remove the `true` below to turn off rtassert,
+//        etc.
+pub const ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) ||
+                                  cfg!(rtassert);
+
+pub struct Stdio(libc::c_int);
+
+#[allow(non_upper_case_globals)]
+pub const Stdout: Stdio = Stdio(libc::STDOUT_FILENO);
+#[allow(non_upper_case_globals)]
+pub const Stderr: Stdio = Stdio(libc::STDERR_FILENO);
+
+impl fmt::FormatWriter for Stdio {
+    fn write(&mut self, data: &[u8]) -> fmt::Result {
+        #[cfg(unix)]
+        type WriteLen = libc::size_t;
+        #[cfg(windows)]
+        type WriteLen = libc::c_uint;
+        unsafe {
+            let Stdio(fd) = *self;
+            libc::write(fd,
+                        data.as_ptr() as *const libc::c_void,
+                        data.len() as WriteLen);
+        }
+        Ok(()) // yes, we're lying
+    }
+}
+
+pub fn dumb_print(args: &fmt::Arguments) {
+    use fmt::FormatWriter;
+    let mut w = Stderr;
+    let _ = w.write_fmt(args);
+}
+
+pub fn abort(args: &fmt::Arguments) -> ! {
+    use fmt::FormatWriter;
+
+    struct BufWriter<'a> {
+        buf: &'a mut [u8],
+        pos: uint,
+    }
+    impl<'a> FormatWriter for BufWriter<'a> {
+        fn write(&mut self, bytes: &[u8]) -> fmt::Result {
+            let left = self.buf[mut self.pos..];
+            let to_write = bytes[..cmp::min(bytes.len(), left.len())];
+            slice::bytes::copy_memory(left, to_write);
+            self.pos += to_write.len();
+            Ok(())
+        }
+    }
+
+    // Convert the arguments into a stack-allocated string
+    let mut msg = [0u8, ..512];
+    let mut w = BufWriter { buf: &mut msg, pos: 0 };
+    let _ = write!(&mut w, "{}", args);
+    let msg = str::from_utf8(w.buf[mut ..w.pos]).unwrap_or("aborted");
+    let msg = if msg.is_empty() {"aborted"} else {msg};
+
+    // Give some context to the message
+    let hash = msg.bytes().fold(0, |accum, val| accum + (val as uint) );
+    let quote = match hash % 10 {
+        0 => "
+It was from the artists and poets that the pertinent answers came, and I
+know that panic would have broken loose had they been able to compare notes.
+As it was, lacking their original letters, I half suspected the compiler of
+having asked leading questions, or of having edited the correspondence in
+corroboration of what he had latently resolved to see.",
+        1 => "
+There are not many persons who know what wonders are opened to them in the
+stories and visions of their youth; for when as children we listen and dream,
+we think but half-formed thoughts, and when as men we try to remember, we are
+dulled and prosaic with the poison of life. But some of us awake in the night
+with strange phantasms of enchanted hills and gardens, of fountains that sing
+in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
+down to sleeping cities of bronze and stone, and of shadowy companies of heroes
+that ride caparisoned white horses along the edges of thick forests; and then
+we know that we have looked back through the ivory gates into that world of
+wonder which was ours before we were wise and unhappy.",
+        2 => "
+Instead of the poems I had hoped for, there came only a shuddering blackness
+and ineffable loneliness; and I saw at last a fearful truth which no one had
+ever dared to breathe before — the unwhisperable secret of secrets — The fact
+that this city of stone and stridor is not a sentient perpetuation of Old New
+York as London is of Old London and Paris of Old Paris, but that it is in fact
+quite dead, its sprawling body imperfectly embalmed and infested with queer
+animate things which have nothing to do with it as it was in life.",
+        3 => "
+The ocean ate the last of the land and poured into the smoking gulf, thereby
+giving up all it had ever conquered. From the new-flooded lands it flowed
+again, uncovering death and decay; and from its ancient and immemorial bed it
+trickled loathsomely, uncovering nighted secrets of the years when Time was
+young and the gods unborn. Above the waves rose weedy remembered spires. The
+moon laid pale lilies of light on dead London, and Paris stood up from its damp
+grave to be sanctified with star-dust. Then rose spires and monoliths that were
+weedy but not remembered; terrible spires and monoliths of lands that men never
+knew were lands...",
+        4 => "
+There was a night when winds from unknown spaces whirled us irresistibly into
+limitless vacuum beyond all thought and entity. Perceptions of the most
+maddeningly untransmissible sort thronged upon us; perceptions of infinity
+which at the time convulsed us with joy, yet which are now partly lost to my
+memory and partly incapable of presentation to others.",
+        _ => "You've met with a terrible fate, haven't you?"
+    };
+    rterrln!("{}", "");
+    rterrln!("{}", quote);
+    rterrln!("{}", "");
+    rterrln!("fatal runtime error: {}", msg);
+    unsafe { intrinsics::abort(); }
+}
+
+pub unsafe fn report_overflow() {
+    use rt::task::Task;
+    use rt::local::Local;
+
+    // See the message below for why this is not emitted to the
+    // ^ Where did the message below go?
+    // task's logger. This has the additional conundrum of the
+    // logger may not be initialized just yet, meaning that an FFI
+    // call would happen to initialized it (calling out to libuv),
+    // and the FFI call needs 2MB of stack when we just ran out.
+
+    let task: Option<*mut Task> = Local::try_unsafe_borrow();
+
+    let name = task.and_then(|task| {
+        (*task).name.as_ref().map(|n| n.as_slice())
+    });
+
+    rterrln!("\ntask '{}' has overflowed its stack", name.unwrap_or("<unknown>"));
+}
diff --git a/src/libstd/rtdeps.rs b/src/libstd/rtdeps.rs
index 35a87137115..862808a9e3d 100644
--- a/src/libstd/rtdeps.rs
+++ b/src/libstd/rtdeps.rs
@@ -22,7 +22,7 @@ extern {}
 // LLVM implements the `frem` instruction as a call to `fmod`, which lives in
 // libm. Hence, we must explicitly link to it.
 //
-// On Linux, librt and libdl are indirect dependencies via rustrt,
+// On Linux, librt and libdl are indirect dependencies via std,
 // and binutils 2.22+ won't add them automatically
 #[cfg(target_os = "linux")]
 #[link(name = "dl")]
diff --git a/src/libstd/sys/common/backtrace.rs b/src/libstd/sys/common/backtrace.rs
new file mode 100644
index 00000000000..0c03060b314
--- /dev/null
+++ b/src/libstd/sys/common/backtrace.rs
@@ -0,0 +1,131 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use io::{IoResult, Writer};
+use iter::Iterator;
+use option::{Some, None};
+use result::{Ok, Err};
+use str::{StrPrelude, from_str};
+use unicode::char::UnicodeChar;
+
+#[cfg(target_word_size = "64")] pub const HEX_WIDTH: uint = 18;
+#[cfg(target_word_size = "32")] pub const HEX_WIDTH: uint = 10;
+
+// All rust symbols are in theory lists of "::"-separated identifiers. Some
+// assemblers, however, can't handle these characters in symbol names. To get
+// around this, we use C++-style mangling. The mangling method is:
+//
+// 1. Prefix the symbol with "_ZN"
+// 2. For each element of the path, emit the length plus the element
+// 3. End the path with "E"
+//
+// For example, "_ZN4testE" => "test" and "_ZN3foo3barE" => "foo::bar".
+//
+// We're the ones printing our backtraces, so we can't rely on anything else to
+// demangle our symbols. It's *much* nicer to look at demangled symbols, so
+// this function is implemented to give us nice pretty output.
+//
+// Note that this demangler isn't quite as fancy as it could be. We have lots
+// of other information in our symbols like hashes, version, type information,
+// etc. Additionally, this doesn't handle glue symbols at all.
+pub fn demangle(writer: &mut Writer, s: &str) -> IoResult<()> {
+    // First validate the symbol. If it doesn't look like anything we're
+    // expecting, we just print it literally. Note that we must handle non-rust
+    // symbols because we could have any function in the backtrace.
+    let mut valid = true;
+    if s.len() > 4 && s.starts_with("_ZN") && s.ends_with("E") {
+        let mut chars = s.slice(3, s.len() - 1).chars();
+        while valid {
+            let mut i = 0;
+            for c in chars {
+                if c.is_numeric() {
+                    i = i * 10 + c as uint - '0' as uint;
+                } else {
+                    break
+                }
+            }
+            if i == 0 {
+                valid = chars.next().is_none();
+                break
+            } else if chars.by_ref().take(i - 1).count() != i - 1 {
+                valid = false;
+            }
+        }
+    } else {
+        valid = false;
+    }
+
+    // Alright, let's do this.
+    if !valid {
+        try!(writer.write_str(s));
+    } else {
+        let mut s = s.slice_from(3);
+        let mut first = true;
+        while s.len() > 1 {
+            if !first {
+                try!(writer.write_str("::"));
+            } else {
+                first = false;
+            }
+            let mut rest = s;
+            while rest.char_at(0).is_numeric() {
+                rest = rest.slice_from(1);
+            }
+            let i: uint = from_str(s.slice_to(s.len() - rest.len())).unwrap();
+            s = rest.slice_from(i);
+            rest = rest.slice_to(i);
+            while rest.len() > 0 {
+                if rest.starts_with("$") {
+                    macro_rules! demangle(
+                        ($($pat:expr => $demangled:expr),*) => ({
+                            $(if rest.starts_with($pat) {
+                                try!(writer.write_str($demangled));
+                                rest = rest.slice_from($pat.len());
+                              } else)*
+                            {
+                                try!(writer.write_str(rest));
+                                break;
+                            }
+
+                        })
+                    )
+                    // see src/librustc/back/link.rs for these mappings
+                    // NOTE(review): "$RP$" appears twice below; the second
+                    // arm ("$RP$" => ")") is unreachable — confirm intent
+                    demangle! (
+                        "$SP$" => "@",
+                        "$UP$" => "Box",
+                        "$RP$" => "*",
+                        "$BP$" => "&",
+                        "$LT$" => "<",
+                        "$GT$" => ">",
+                        "$LP$" => "(",
+                        "$RP$" => ")",
+                        "$C$"  => ",",
+
+                        // in theory we can demangle any Unicode code point, but
+                        // for simplicity we just catch the common ones.
+                        "$x20" => " ",
+                        "$x27" => "'",
+                        "$x5b" => "[",
+                        "$x5d" => "]"
+                    )
+                } else {
+                    let idx = match rest.find('$') {
+                        None => rest.len(),
+                        Some(i) => i,
+                    };
+                    try!(writer.write_str(rest.slice_to(idx)));
+                    rest = rest.slice_from(idx);
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs
index 96b4accd4bd..ffb053e852e 100644
--- a/src/libstd/sys/common/helper_thread.rs
+++ b/src/libstd/sys/common/helper_thread.rs
@@ -24,9 +24,8 @@ use prelude::*;
 
 use cell::UnsafeCell;
 use mem;
-use rustrt::bookkeeping;
-use rustrt;
 use sync::{StaticMutex, StaticCondvar};
+use rt::{mod, bookkeeping};
 use sys::helper_signal;
 
 use task;
@@ -91,7 +90,7 @@ impl<M: Send> Helper<M> {
                     self.cond.notify_one()
                 });
 
-                rustrt::at_exit(move|:| { self.shutdown() });
+                rt::at_exit(move|:| { self.shutdown() });
                 *self.initialized.get() = true;
             }
         }
diff --git a/src/libstd/sys/common/mod.rs b/src/libstd/sys/common/mod.rs
index 73e1c7bd9e5..aeee4cf01cd 100644
--- a/src/libstd/sys/common/mod.rs
+++ b/src/libstd/sys/common/mod.rs
@@ -19,11 +19,14 @@ use num::Int;
 use path::BytesContainer;
 use collections;
 
+pub mod backtrace;
 pub mod condvar;
 pub mod helper_thread;
 pub mod mutex;
 pub mod net;
 pub mod rwlock;
+pub mod stack;
+pub mod thread;
 pub mod thread_local;
 
 // common error constructors
diff --git a/src/libstd/sys/common/stack.rs b/src/libstd/sys/common/stack.rs
new file mode 100644
index 00000000000..2a88e20c8fa
--- /dev/null
+++ b/src/libstd/sys/common/stack.rs
@@ -0,0 +1,325 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rust stack-limit management
+//!
+//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
+//! overflow for rust tasks. In this scheme, the prologue of every function is
+//! prefixed with a check to see whether the current stack limits are being
+//! exceeded.
+//!
+//! This module provides the functionality necessary in order to manage these
+//! stack limits (which are stored in platform-specific locations). The
+//! functions here are used at the borders of the task lifetime in order to
+//! manage these limits.
+//!
+//! This is an unstable module because this scheme for stack overflow
+//! detection is not guaranteed to continue in the future. Usage of this module
+//! is discouraged unless absolutely necessary.
+
+// iOS related notes
+//
+// It is possible to implement it using idea from
+// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
+//
+// In short: _pthread_{get,set}_specific_direct allows extremely fast
+// access, exactly what is required for segmented stack
+// There is a pool of reserved slots for Apple internal use (0..119)
+// First dynamic allocated pthread key starts with 257 (on iOS7)
+// So using slot 149 should be pretty safe ASSUMING space is reserved
+// for every key < first dynamic key
+//
+// There is also an opportunity to steal keys reserved for Garbage Collection
+// ranges 80..89 and 110..119, especially considering the fact that Garbage
+// Collection was never supposed to work on iOS. But as everybody knows, there is a chance
+// that those slots will be re-used, like it happened with key 95 (moved from
+// JavaScriptCore to CoreText)
+//
+// Unfortunately Apple rejected a patch to LLVM which generated the
+// corresponding prologue, so the decision was taken to disable segmented
+// stack support on iOS.
+
+pub const RED_ZONE: uint = 20 * 1024;
+
+/// This function is invoked from rust's current __morestack function. Segmented
+/// stacks are currently not enabled as segmented stacks, but rather one giant
+/// stack segment. This means that whenever we run out of stack, we want to
+/// truly consider it to be stack overflow rather than allocating a new stack.
+#[cfg(not(test))] // in testing, use the original libstd's version
+#[lang = "stack_exhausted"]
+extern fn stack_exhausted() {
+    use intrinsics;
+
+    unsafe {
+        // We're calling this function because the stack just ran out. We need
+        // to call some other rust functions, but if we invoke the functions
+        // right now it'll just trigger this handler being called again. In
+        // order to alleviate this, we move the stack limit to be inside of the
+        // red zone that was allocated for exactly this reason.
+        let limit = get_sp_limit();
+        record_sp_limit(limit - RED_ZONE / 2);
+
+        // This probably isn't the best course of action. Ideally one would want
+        // to unwind the stack here instead of just aborting the entire process.
+        // This is a tricky problem, however. There's a few things which need to
+        // be considered:
+        //
+        //  1. We're here because of a stack overflow, yet unwinding will run
+        //     destructors and hence arbitrary code. What if that code overflows
+        //     the stack? One possibility is to use the above allocation of an
+        //     extra 10k to hope that we don't hit the limit, and if we do then
+        //     abort the whole program. Not the best, but kind of hard to deal
+        //     with unless we want to switch stacks.
+        //
+        //  2. LLVM will optimize functions based on whether they can unwind or
+        //     not. It will flag functions with 'nounwind' if it believes that
+        //     the function cannot trigger unwinding, but if we do unwind on
+        //     stack overflow then it means that we could unwind in any function
+        //     anywhere. We would have to make sure that LLVM only places the
+        //     nounwind flag on functions which don't call any other functions.
+        //
+        //  3. The function that overflowed may have owned arguments. These
+        //     arguments need to have their destructors run, but we haven't even
+        //     begun executing the function yet, so unwinding will not run
+        //     any landing pads for these functions. If this is ignored, then
+        //     the arguments will just be leaked.
+        //
+        // Exactly what to do here is a very delicate topic, and is possibly
+        // still up in the air for what exactly to do. Some relevant issues:
+        //
+        //  #3555 - out-of-stack failure leaks arguments
+        //  #3695 - should there be a stack limit?
+        //  #9855 - possible strategies which could be taken
+        //  #9854 - unwinding on windows through __morestack has never worked
+        //  #2361 - possible implementation of not using landing pads
+
+        ::rt::util::report_overflow();
+
+        intrinsics::abort();
+    }
+}
+
+// Windows maintains a record of upper and lower stack bounds in the Thread Information
+// Block (TIB), and some syscalls do check that addresses which are supposed to be in
+// the stack, indeed lie between these two values.
+// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
+//
+// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
+// For OS-managed stacks (libnative), we let the OS manage them for us.
+//
+// On all other platforms both variants behave identically.
+
+#[inline(always)]
+pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
+    record_sp_limit(stack_lo + RED_ZONE);
+}
+
+#[inline(always)]
+pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
+    // When the old runtime had segmented stacks, it used a calculation that was
+    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
+    // symbol resolution, llvm function calls, etc. In theory this red zone
+    // value is 0, but it matters far less when we have gigantic stacks because
+    // we don't need to be so exact about our stack budget. The "fudge factor"
+    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
+    // size. Again though, we have giant stacks, so we round all these
+    // calculations up to the nice round number of 20k.
+    record_sp_limit(stack_lo + RED_ZONE);
+
+    return target_record_stack_bounds(stack_lo, stack_hi);
+
+    #[cfg(not(windows))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
+
+    #[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+        // stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
+        asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
+        asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
+    }
+    #[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
+        asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
+        asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
+    }
+}
+
+/// Records the current limit of the stack as specified by `end`.
+///
+/// This is stored in an OS-dependent location, likely inside of the thread
+/// local storage. The location that the limit is stored is a pre-ordained
+/// location because it's where LLVM has emitted code to check.
+///
+/// Note that this cannot be called under normal circumstances. This function is
+/// changing the stack limit, so upon returning any further function calls will
+/// possibly be triggering the morestack logic if you're not careful.
+///
+/// Also note that this and all of the inside functions are all flagged as
+/// "inline(always)" because they're messing around with the stack limits.  This
+/// would be unfortunate for the functions themselves to trigger a morestack
+/// invocation (if they were an actual function call).
+#[inline(always)]
+pub unsafe fn record_sp_limit(limit: uint) {
+    return target_record_sp_limit(limit);
+
+    // x86-64
+    #[cfg(all(target_arch = "x86_64",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $$0x60+90*8, %rsi
+              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
+    }
+
+    // x86
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $$0x48+90*4, %eax
+              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
+    }
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "linux", target_os = "freebsd")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(any(target_arch = "mips",
+              target_arch = "mipsel",
+              all(target_arch = "arm", not(target_os = "ios"))))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        use libc::c_void;
+        return record_sp_limit(limit as *const c_void);
+        extern {
+            fn record_sp_limit(limit: *const c_void);
+        }
+    }
+
+    // iOS segmented stack is disabled for now, see related notes
+    #[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+}
+
+/// The counterpart of the function above, this function will fetch the current
+/// stack limit stored in TLS.
+///
+/// Note that all of these functions are meant to be exact counterparts of their
+/// brethren above, except that the operands are reversed.
+///
+/// As with the setter, this function does not have a __morestack header and can
+/// therefore be called in a "we're out of stack" situation.
+#[inline(always)]
+pub unsafe fn get_sp_limit() -> uint {
+    return target_get_sp_limit();
+
+    // x86-64
+    #[cfg(all(target_arch = "x86_64",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq $$0x60+90*8, %rsi
+              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        return 1024;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+
+
+    // x86
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl $$0x48+90*4, %eax
+              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "linux", target_os = "freebsd")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        return 1024;
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(any(target_arch = "mips",
+              target_arch = "mipsel",
+              all(target_arch = "arm", not(target_os = "ios"))))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        use libc::c_void;
+        return get_sp_limit() as uint;
+        extern {
+            fn get_sp_limit() -> *const c_void;
+        }
+    }
+
+    // iOS doesn't support segmented stacks yet. This function might
+    // be called by the runtime though, so it is not safe to mark it as
+    // unreachable; let's return a fixed constant.
+    #[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        1024
+    }
+}
diff --git a/src/libstd/sys/common/thread.rs b/src/libstd/sys/common/thread.rs
new file mode 100644
index 00000000000..5e1adfb8714
--- /dev/null
+++ b/src/libstd/sys/common/thread.rs
@@ -0,0 +1,34 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use mem;
+use uint;
+use libc;
+use sys_common::stack;
+use sys::{thread, stack_overflow};
+
+// This is the starting point of rust os threads. The first thing we do
+// is make sure that we don't trigger __morestack (also why this has a
+// no_stack_check annotation), and then we extract the main function
+// and invoke it.
+#[no_stack_check]
+pub fn start_thread(main: *mut libc::c_void) -> thread::rust_thread_return {
+    unsafe {
+        stack::record_os_managed_stack_bounds(0, uint::MAX);
+        let handler = stack_overflow::Handler::new();
+        let f: Box<proc()> = mem::transmute(main);
+        (*f)();
+        drop(handler);
+        mem::transmute(0 as thread::rust_thread_return)
+    }
+}
diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs
index cf56a71d67a..a8bc6bf9d0d 100644
--- a/src/libstd/sys/common/thread_local.rs
+++ b/src/libstd/sys/common/thread_local.rs
@@ -58,7 +58,8 @@
 
 use prelude::*;
 
-use rustrt::exclusive::Exclusive;
+use rt::exclusive::Exclusive;
+use rt;
 use sync::atomic::{mod, AtomicUint};
 use sync::{Once, ONCE_INIT};
 
@@ -283,4 +284,3 @@ mod tests {
         }
     }
 }
-
diff --git a/src/libstd/sys/unix/backtrace.rs b/src/libstd/sys/unix/backtrace.rs
new file mode 100644
index 00000000000..c139dba2c46
--- /dev/null
+++ b/src/libstd/sys/unix/backtrace.rs
@@ -0,0 +1,493 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Backtrace support built on libgcc with some extra OS-specific support
+///
+/// Some methods of getting a backtrace:
+///
+/// * The backtrace() functions on unix. It turns out this doesn't work very
+///   well for green threads on OSX, and the address to symbol portion of it
+///   suffers problems that are described below.
+///
+/// * Using libunwind. This is more difficult than it sounds because libunwind
+///   isn't installed everywhere by default. It's also a bit of a hefty library,
+///   so possibly not the best option. When testing, libunwind was excellent at
+///   getting both accurate backtraces and accurate symbols across platforms.
+///   This route was not chosen in favor of the next option, however.
+///
+/// * We're already using libgcc_s for exceptions in rust (triggering task
+///   unwinding and running destructors on the stack), and it turns out that it
+///   conveniently comes with a function that also gives us a backtrace. All of
+///   these functions look like _Unwind_*, but it's not quite the full
+///   repertoire of the libunwind API. Due to it already being in use, this was
+///   the chosen route of getting a backtrace.
+///
+/// After choosing libgcc_s for backtraces, the sad part is that it will only
+/// give us a stack trace of instruction pointers. Thankfully these instruction
+/// pointers are accurate (they work for green and native threads), but it's
+/// then up to us again to figure out how to translate these addresses to
+/// symbols. As with before, we have a few options. Before that, a little bit
+/// of an interlude about symbols. This is my very limited knowledge about
+/// symbol tables, and this information is likely slightly wrong, but the
+/// general idea should be correct.
+///
+/// When talking about symbols, it's helpful to know a few things about where
+/// symbols are located. Some symbols are located in the dynamic symbol table
+/// of the executable which in theory means that they're available for dynamic
+/// linking and lookup. Other symbols end up only in the local symbol table of
+/// the file. This loosely corresponds to pub and priv functions in Rust.
+///
+/// Armed with this knowledge, we know that our solution for address to symbol
+/// translation will need to consult both the local and dynamic symbol tables.
+/// With that in mind, here's our options of translating an address to
+/// a symbol.
+///
+/// * Use dladdr(). The original backtrace()-based idea actually uses dladdr()
+///   behind the scenes to translate, and this is why backtrace() was not used.
+///   Conveniently, this method works fantastically on OSX. It appears dladdr()
+///   uses magic to consult the local symbol table, or we're putting everything
+///   in the dynamic symbol table anyway. Regardless, for OSX, this is the
+///   method used for translation. It's provided by the system and easy to do.
+///
+///   Sadly, all other systems have a dladdr() implementation that does not
+///   consult the local symbol table. This means that most functions are blank
+///   because they don't have symbols. This means that we need another solution.
+///
+/// * Use unw_get_proc_name(). This is part of the libunwind api (not the
+///   libgcc_s version of the libunwind api), but involves taking a dependency
+///   to libunwind. We may pursue this route in the future if we bundle
+///   libunwind, but libunwind was unwieldy enough that it was not chosen at
+///   this time to provide this functionality.
+///
+/// * Shell out to a utility like `readelf`. Crazy though it may sound, it's a
+///   semi-reasonable solution. The stdlib already knows how to spawn processes,
+///   so in theory it could invoke readelf, parse the output, and consult the
+///   local/dynamic symbol tables from there. This ended up not getting chosen
+///   due to the craziness of the idea plus the advent of the next option.
+///
+/// * Use `libbacktrace`. It turns out that this is a small library bundled in
+///   the gcc repository which provides backtrace and symbol translation
+///   functionality. All we really need from it is the backtrace functionality,
+///   and we only really need this on everything that's not OSX, so this is the
+///   chosen route for now.
+///
+/// In summary, the current situation uses libgcc_s to get a trace of stack
+/// pointers, and we use dladdr() or libbacktrace to translate these addresses
+/// to symbols. This is a bit of a hokey implementation as-is, but it works for
+/// all unix platforms we support right now, so it at least gets the job done.
+
+use c_str::CString;
+use io::{IoResult, Writer};
+use libc;
+use mem;
+use option::{Some, None, Option};
+use result::{Ok, Err};
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+use sys_common::backtrace::*;
+
+/// As always - iOS on arm uses SjLj exceptions and
+/// _Unwind_Backtrace is not even available there. Still,
+/// backtraces could be extracted using a backtrace function,
+/// which, thankfully, is public
+///
+/// As mentioned in a huge comment block above, backtrace doesn't
+/// play well with green threads, so while it is extremely nice
+/// and simple to use it should be used only on iOS devices as the
+/// only viable option.
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+#[inline(never)]
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    use iter::{Iterator, range};
+    use result;
+    use slice::SliceExt;
+
+    extern {
+        fn backtrace(buf: *mut *mut libc::c_void,
+                     sz: libc::c_int) -> libc::c_int;
+    }
+
+    // while it doesn't require a lock to work, as everything is
+    // local, it still displays much nicer backtraces when a
+    // couple of tasks panic simultaneously
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    try!(writeln!(w, "stack backtrace:"));
+    // 100 lines should be enough
+    const SIZE: uint = 100;
+    let mut buf: [*mut libc::c_void, ..SIZE] = unsafe {mem::zeroed()};
+    let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as uint};
+
+    // skipping the first one as it is write itself
+    let iter = range(1, cnt).map(|i| {
+        print(w, i as int, buf[i])
+    });
+    result::fold(iter, (), |_, _| ())
+}
+
+#[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+#[inline(never)] // if we know this is a function call, we can skip it when
+                 // tracing
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    use io::IoError;
+
+    struct Context<'a> {
+        idx: int,
+        writer: &'a mut Writer+'a,
+        last_error: Option<IoError>,
+    }
+
+    // When using libbacktrace, we use some necessary global state, so we
+    // need to prevent more than one thread from entering this block. This
+    // is semi-reasonable in terms of printing anyway, and we know that all
+    // I/O done here is blocking I/O, not green I/O, so we don't have to
+    // worry about this being a native vs green mutex.
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    try!(writeln!(w, "stack backtrace:"));
+
+    let mut cx = Context { writer: w, last_error: None, idx: 0 };
+    return match unsafe {
+        uw::_Unwind_Backtrace(trace_fn,
+                              &mut cx as *mut Context as *mut libc::c_void)
+    } {
+        uw::_URC_NO_REASON => {
+            match cx.last_error {
+                Some(err) => Err(err),
+                None => Ok(())
+            }
+        }
+        _ => Ok(()),
+    };
+
+    extern fn trace_fn(ctx: *mut uw::_Unwind_Context,
+                       arg: *mut libc::c_void) -> uw::_Unwind_Reason_Code {
+        let cx: &mut Context = unsafe { mem::transmute(arg) };
+        let ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut libc::c_void };
+        // dladdr() on osx gets whiny when we use FindEnclosingFunction, and
+        // it appears to work fine without it, so we only use
+        // FindEnclosingFunction on non-osx platforms. In doing so, we get a
+        // slightly more accurate stack trace in the process.
+        //
+        // This is often because panic involves the last instruction of a
+        // function being "call std::rt::begin_unwind", with no ret
+        // instructions after it. This means that the return instruction
+        // pointer points *outside* of the calling function, and by
+        // unwinding it we go back to the original function.
+        let ip = if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
+            ip
+        } else {
+            unsafe { uw::_Unwind_FindEnclosingFunction(ip) }
+        };
+
+        // Don't print out the first few frames (they're not user frames)
+        cx.idx += 1;
+        if cx.idx <= 0 { return uw::_URC_NO_REASON }
+        // Don't print ginormous backtraces
+        if cx.idx > 100 {
+            match write!(cx.writer, " ... <frames omitted>\n") {
+                Ok(()) => {}
+                Err(e) => { cx.last_error = Some(e); }
+            }
+            return uw::_URC_FAILURE
+        }
+
+        // Once we hit an error, stop trying to print more frames
+        if cx.last_error.is_some() { return uw::_URC_FAILURE }
+
+        match print(cx.writer, cx.idx, ip) {
+            Ok(()) => {}
+            Err(e) => { cx.last_error = Some(e); }
+        }
+
+        // keep going
+        return uw::_URC_NO_REASON
+    }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
+    use intrinsics;
+    #[repr(C)]
+    struct Dl_info {
+        dli_fname: *const libc::c_char,
+        dli_fbase: *mut libc::c_void,
+        dli_sname: *const libc::c_char,
+        dli_saddr: *mut libc::c_void,
+    }
+    extern {
+        fn dladdr(addr: *const libc::c_void,
+                  info: *mut Dl_info) -> libc::c_int;
+    }
+
+    let mut info: Dl_info = unsafe { intrinsics::init() };
+    if unsafe { dladdr(addr as *const libc::c_void, &mut info) == 0 } {
+        output(w, idx,addr, None)
+    } else {
+        output(w, idx, addr, Some(unsafe {
+            CString::new(info.dli_sname, false)
+        }))
+    }
+}
+
+#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
+    use iter::Iterator;
+    use os;
+    use path::GenericPath;
+    use ptr::RawPtr;
+    use ptr;
+    use slice::SliceExt;
+
+    ////////////////////////////////////////////////////////////////////////
+    // libbacktrace.h API
+    ////////////////////////////////////////////////////////////////////////
+    type backtrace_syminfo_callback =
+        extern "C" fn(data: *mut libc::c_void,
+                      pc: libc::uintptr_t,
+                      symname: *const libc::c_char,
+                      symval: libc::uintptr_t,
+                      symsize: libc::uintptr_t);
+    type backtrace_error_callback =
+        extern "C" fn(data: *mut libc::c_void,
+                      msg: *const libc::c_char,
+                      errnum: libc::c_int);
+    enum backtrace_state {}
+    #[link(name = "backtrace", kind = "static")]
+    #[cfg(not(test))]
+    extern {}
+
+    extern {
+        fn backtrace_create_state(filename: *const libc::c_char,
+                                  threaded: libc::c_int,
+                                  error: backtrace_error_callback,
+                                  data: *mut libc::c_void)
+                                        -> *mut backtrace_state;
+        fn backtrace_syminfo(state: *mut backtrace_state,
+                             addr: libc::uintptr_t,
+                             cb: backtrace_syminfo_callback,
+                             error: backtrace_error_callback,
+                             data: *mut libc::c_void) -> libc::c_int;
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    // helper callbacks
+    ////////////////////////////////////////////////////////////////////////
+
+    extern fn error_cb(_data: *mut libc::c_void, _msg: *const libc::c_char,
+                       _errnum: libc::c_int) {
+        // do nothing for now
+    }
+    extern fn syminfo_cb(data: *mut libc::c_void,
+                         _pc: libc::uintptr_t,
+                         symname: *const libc::c_char,
+                         _symval: libc::uintptr_t,
+                         _symsize: libc::uintptr_t) {
+        let slot = data as *mut *const libc::c_char;
+        unsafe { *slot = symname; }
+    }
+
+    // The libbacktrace API supports creating a state, but it does not
+    // support destroying a state. I personally take this to mean that a
+    // state is meant to be created and then live forever.
+    //
+    // I would love to register an at_exit() handler which cleans up this
+    // state, but libbacktrace provides no way to do so.
+    //
+    // With these constraints, this function has a statically cached state
+    // that is calculated the first time this is requested. Remember that
+    // backtracing all happens serially (one global lock).
+    //
+    // An additional oddity in this function is that we initialize the
+    // filename via self_exe_name() to pass to libbacktrace. It turns out
+    // that on Linux libbacktrace seamlessly gets the filename of the
+    // current executable, but this fails on FreeBSD. By always providing
+    // it, we make sure that libbacktrace never has a reason to not look up
+    // the symbols. The libbacktrace API also states that the filename must
+    // be in "permanent memory", so we copy it to a static and then use the
+    // static as the pointer.
+    //
+    // FIXME: We also call self_exe_name() on DragonFly BSD. I haven't
+    //        tested if this is required or not.
+    unsafe fn init_state() -> *mut backtrace_state {
+        static mut STATE: *mut backtrace_state = 0 as *mut backtrace_state;
+        static mut LAST_FILENAME: [libc::c_char, ..256] = [0, ..256];
+        if !STATE.is_null() { return STATE }
+        let selfname = if cfg!(target_os = "freebsd") ||
+                          cfg!(target_os = "dragonfly") {
+            os::self_exe_name()
+        } else {
+            None
+        };
+        let filename = match selfname {
+            Some(path) => {
+                let bytes = path.as_vec();
+                if bytes.len() < LAST_FILENAME.len() {
+                    let i = bytes.iter();
+                    for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
+                        *slot = *val as libc::c_char;
+                    }
+                    LAST_FILENAME.as_ptr()
+                } else {
+                    ptr::null()
+                }
+            }
+            None => ptr::null(),
+        };
+        STATE = backtrace_create_state(filename, 0, error_cb,
+                                       ptr::null_mut());
+        return STATE
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    // translation
+    ////////////////////////////////////////////////////////////////////////
+
+    // backtrace errors are currently swept under the rug, only I/O
+    // errors are reported
+    let state = unsafe { init_state() };
+    if state.is_null() {
+        return output(w, idx, addr, None)
+    }
+    let mut data = 0 as *const libc::c_char;
+    let data_addr = &mut data as *mut *const libc::c_char;
+    let ret = unsafe {
+        backtrace_syminfo(state, addr as libc::uintptr_t,
+                          syminfo_cb, error_cb,
+                          data_addr as *mut libc::c_void)
+    };
+    if ret == 0 || data.is_null() {
+        output(w, idx, addr, None)
+    } else {
+        output(w, idx, addr, Some(unsafe { CString::new(data, false) }))
+    }
+}
+
+// Finally, after all that work above, we can emit a symbol.
+fn output(w: &mut Writer, idx: int, addr: *mut libc::c_void,
+          s: Option<CString>) -> IoResult<()> {
+    try!(write!(w, "  {:2}: {:2$} - ", idx, addr, HEX_WIDTH));
+    match s.as_ref().and_then(|c| c.as_str()) {
+        Some(string) => try!(demangle(w, string)),
+        None => try!(write!(w, "<unknown>")),
+    }
+    w.write(&['\n' as u8])
+}
+
+/// Unwind library interface used for backtraces
+///
+/// Note that dead code is allowed, as these are just bindings;
+/// iOS doesn't use all of them, but adding more
+/// platform-specific configs pollutes the code too much
+#[allow(non_camel_case_types)]
+#[allow(non_snake_case)]
+#[allow(dead_code)]
+mod uw {
+    pub use self::_Unwind_Reason_Code::*;
+
+    use libc;
+
+    #[repr(C)]
+    pub enum _Unwind_Reason_Code {
+        _URC_NO_REASON = 0,
+        _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+        _URC_FATAL_PHASE2_ERROR = 2,
+        _URC_FATAL_PHASE1_ERROR = 3,
+        _URC_NORMAL_STOP = 4,
+        _URC_END_OF_STACK = 5,
+        _URC_HANDLER_FOUND = 6,
+        _URC_INSTALL_CONTEXT = 7,
+        _URC_CONTINUE_UNWIND = 8,
+        _URC_FAILURE = 9, // used only by ARM EABI
+    }
+
+    pub enum _Unwind_Context {}
+
+    pub type _Unwind_Trace_Fn =
+            extern fn(ctx: *mut _Unwind_Context,
+                      arg: *mut libc::c_void) -> _Unwind_Reason_Code;
+
+    extern {
+        // No native _Unwind_Backtrace on iOS
+        #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+        pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
+                                 trace_argument: *mut libc::c_void)
+                    -> _Unwind_Reason_Code;
+
+        #[cfg(all(not(target_os = "android"),
+                  not(all(target_os = "linux", target_arch = "arm"))))]
+        pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
+
+        #[cfg(all(not(target_os = "android"),
+                  not(all(target_os = "linux", target_arch = "arm"))))]
+        pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+            -> *mut libc::c_void;
+    }
+
+    // On android, the function _Unwind_GetIP is a macro, and this is the
+    // expansion of the macro. This is all copy/pasted directly from the
+    // header file with the definition of _Unwind_GetIP.
+    #[cfg(any(target_os = "android",
+              all(target_os = "linux", target_arch = "arm")))]
+    pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
+        #[repr(C)]
+        enum _Unwind_VRS_Result {
+            _UVRSR_OK = 0,
+            _UVRSR_NOT_IMPLEMENTED = 1,
+            _UVRSR_FAILED = 2,
+        }
+        #[repr(C)]
+        enum _Unwind_VRS_RegClass {
+            _UVRSC_CORE = 0,
+            _UVRSC_VFP = 1,
+            _UVRSC_FPA = 2,
+            _UVRSC_WMMXD = 3,
+            _UVRSC_WMMXC = 4,
+        }
+        #[repr(C)]
+        enum _Unwind_VRS_DataRepresentation {
+            _UVRSD_UINT32 = 0,
+            _UVRSD_VFPX = 1,
+            _UVRSD_FPAX = 2,
+            _UVRSD_UINT64 = 3,
+            _UVRSD_FLOAT = 4,
+            _UVRSD_DOUBLE = 5,
+        }
+
+        type _Unwind_Word = libc::c_uint;
+        extern {
+            fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
+                               klass: _Unwind_VRS_RegClass,
+                               word: _Unwind_Word,
+                               repr: _Unwind_VRS_DataRepresentation,
+                               data: *mut libc::c_void)
+                -> _Unwind_VRS_Result;
+        }
+
+        let mut val: _Unwind_Word = 0;
+        let ptr = &mut val as *mut _Unwind_Word;
+        let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
+                                _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
+                                ptr as *mut libc::c_void);
+        (val & !1) as libc::uintptr_t
+    }
+
+    // This function also doesn't exist on Android or ARM/Linux, so make it
+    // a no-op
+    #[cfg(any(target_os = "android",
+              all(target_os = "linux", target_arch = "arm")))]
+    pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+        -> *mut libc::c_void
+    {
+        pc
+    }
+}
diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs
index acbf2096326..f3babca3287 100644
--- a/src/libstd/sys/unix/mod.rs
+++ b/src/libstd/sys/unix/mod.rs
@@ -34,6 +34,7 @@ macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
     };
 ) }
 
+pub mod backtrace;
 pub mod c;
 pub mod ext;
 pub mod condvar;
@@ -44,8 +45,10 @@ pub mod os;
 pub mod pipe;
 pub mod process;
 pub mod rwlock;
+pub mod stack_overflow;
 pub mod sync;
 pub mod tcp;
+pub mod thread;
 pub mod thread_local;
 pub mod timer;
 pub mod tty;
diff --git a/src/libstd/sys/unix/stack_overflow.rs b/src/libstd/sys/unix/stack_overflow.rs
new file mode 100644
index 00000000000..73b98f762b4
--- /dev/null
+++ b/src/libstd/sys/unix/stack_overflow.rs
@@ -0,0 +1,291 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc;
+use core::prelude::*;
+use self::imp::{make_handler, drop_handler};
+
+pub use self::imp::{init, cleanup};
+
+pub struct Handler {
+    _data: *mut libc::c_void
+}
+
+impl Handler {
+    pub unsafe fn new() -> Handler {
+        make_handler()
+    }
+}
+
+impl Drop for Handler {
+    fn drop(&mut self) {
+        unsafe {
+            drop_handler(self);
+        }
+    }
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+mod imp {
+    use core::prelude::*;
+    use sys_common::stack;
+
+    use super::Handler;
+    use rt::util::report_overflow;
+    use mem;
+    use ptr;
+    use intrinsics;
+    use self::signal::{siginfo, sigaction, SIGBUS, SIG_DFL,
+                       SA_SIGINFO, SA_ONSTACK, sigaltstack,
+                       SIGSTKSZ};
+    use rt::local::Local;
+    use rt::task::Task;
+    use libc;
+    use libc::funcs::posix88::mman::{mmap, munmap};
+    use libc::consts::os::posix88::{SIGSEGV,
+                                    PROT_READ,
+                                    PROT_WRITE,
+                                    MAP_PRIVATE,
+                                    MAP_ANON,
+                                    MAP_FAILED};
+
+
+    // This is initialized in init() and only read from after
+    static mut PAGE_SIZE: uint = 0;
+
+    // get_task_info is called from an exception / signal handler.
+    // It returns the guard page of the current task or 0 if that
+    // guard page doesn't exist. None is returned if there's currently
+    // no local task.
+    unsafe fn get_task_guard_page() -> Option<uint> {
+        let task: Option<*mut Task> = Local::try_unsafe_borrow();
+        task.map(|task| (&*task).stack_guard().unwrap_or(0))
+    }
+
+
+    #[no_stack_check]
+    unsafe extern fn signal_handler(signum: libc::c_int,
+                                     info: *mut siginfo,
+                                     _data: *mut libc::c_void) {
+
+        // We can not return from a SIGSEGV or SIGBUS signal.
+        // See: https://www.gnu.org/software/libc/manual/html_node/Handler-Returns.html
+
+        unsafe fn term(signum: libc::c_int) -> ! {
+            use core::mem::transmute;
+
+            signal(signum, transmute(SIG_DFL));
+            raise(signum);
+            intrinsics::abort();
+        }
+
+        // We're calling into functions with stack checks
+        stack::record_sp_limit(0);
+
+        match get_task_guard_page() {
+            Some(guard) => {
+                let addr = (*info).si_addr as uint;
+
+                if guard == 0 || addr < guard - PAGE_SIZE || addr >= guard {
+                    term(signum);
+                }
+
+                report_overflow();
+
+                intrinsics::abort()
+            }
+            None => term(signum)
+        }
+    }
+
+    static mut MAIN_ALTSTACK: *mut libc::c_void = 0 as *mut libc::c_void;
+
+    pub unsafe fn init() {
+        let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
+        if psize == -1 {
+            panic!("failed to get page size");
+        }
+
+        PAGE_SIZE = psize as uint;
+
+        let mut action: sigaction = mem::zeroed();
+        action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        action.sa_sigaction = signal_handler as sighandler_t;
+        sigaction(SIGSEGV, &action, ptr::null_mut());
+        sigaction(SIGBUS, &action, ptr::null_mut());
+
+        let handler = make_handler();
+        MAIN_ALTSTACK = handler._data;
+        mem::forget(handler);
+    }
+
+    pub unsafe fn cleanup() {
+        Handler { _data: MAIN_ALTSTACK };
+    }
+
+    pub unsafe fn make_handler() -> Handler {
+        let alt_stack = mmap(ptr::null_mut(),
+                             signal::SIGSTKSZ,
+                             PROT_READ | PROT_WRITE,
+                             MAP_PRIVATE | MAP_ANON,
+                             -1,
+                             0);
+        if alt_stack == MAP_FAILED {
+            panic!("failed to allocate an alternative stack");
+        }
+
+        let mut stack: sigaltstack = mem::zeroed();
+
+        stack.ss_sp = alt_stack;
+        stack.ss_flags = 0;
+        stack.ss_size = SIGSTKSZ;
+
+        sigaltstack(&stack, ptr::null_mut());
+
+        Handler { _data: alt_stack }
+    }
+
+    pub unsafe fn drop_handler(handler: &mut Handler) {
+        munmap(handler._data, SIGSTKSZ);
+    }
+
+    type sighandler_t = *mut libc::c_void;
+
+    #[cfg(any(all(target_os = "linux", target_arch = "x86"), // may not match
+              all(target_os = "linux", target_arch = "x86_64"),
+              all(target_os = "linux", target_arch = "arm"), // may not match
+              all(target_os = "linux", target_arch = "mips"), // may not match
+              all(target_os = "linux", target_arch = "mipsel"), // may not match
+              target_os = "android"))] // may not match
+    mod signal {
+        use libc;
+        use super::sighandler_t;
+
+        pub static SA_ONSTACK: libc::c_int = 0x08000000;
+        pub static SA_SIGINFO: libc::c_int = 0x00000004;
+        pub static SIGBUS: libc::c_int = 7;
+
+        pub static SIGSTKSZ: libc::size_t = 8192;
+
+        pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
+
+        // This definition is not as accurate as it could be, {si_addr} is
+        // actually a giant union. Currently we're only interested in that field,
+        // however.
+        #[repr(C)]
+        pub struct siginfo {
+            si_signo: libc::c_int,
+            si_errno: libc::c_int,
+            si_code: libc::c_int,
+            pub si_addr: *mut libc::c_void
+        }
+
+        #[repr(C)]
+        pub struct sigaction {
+            pub sa_sigaction: sighandler_t,
+            pub sa_mask: sigset_t,
+            pub sa_flags: libc::c_int,
+            sa_restorer: *mut libc::c_void,
+        }
+
+        #[cfg(target_word_size = "32")]
+        #[repr(C)]
+        pub struct sigset_t {
+            __val: [libc::c_ulong, ..32],
+        }
+        #[cfg(target_word_size = "64")]
+        #[repr(C)]
+        pub struct sigset_t {
+            __val: [libc::c_ulong, ..16],
+        }
+
+        #[repr(C)]
+        pub struct sigaltstack {
+            pub ss_sp: *mut libc::c_void,
+            pub ss_flags: libc::c_int,
+            pub ss_size: libc::size_t
+        }
+
+    }
+
+    #[cfg(target_os = "macos")]
+    mod signal {
+        use libc;
+        use super::sighandler_t;
+
+        pub const SA_ONSTACK: libc::c_int = 0x0001;
+        pub const SA_SIGINFO: libc::c_int = 0x0040;
+        pub const SIGBUS: libc::c_int = 10;
+
+        pub const SIGSTKSZ: libc::size_t = 131072;
+
+        pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
+
+        pub type sigset_t = u32;
+
+        // This structure has more fields, but we're not all that interested in
+        // them.
+        #[repr(C)]
+        pub struct siginfo {
+            pub si_signo: libc::c_int,
+            pub si_errno: libc::c_int,
+            pub si_code: libc::c_int,
+            pub pid: libc::pid_t,
+            pub uid: libc::uid_t,
+            pub status: libc::c_int,
+            pub si_addr: *mut libc::c_void
+        }
+
+        #[repr(C)]
+        pub struct sigaltstack {
+            pub ss_sp: *mut libc::c_void,
+            pub ss_size: libc::size_t,
+            pub ss_flags: libc::c_int
+        }
+
+        #[repr(C)]
+        pub struct sigaction {
+            pub sa_sigaction: sighandler_t,
+            pub sa_mask: sigset_t,
+            pub sa_flags: libc::c_int,
+        }
+    }
+
+    extern {
+        pub fn signal(signum: libc::c_int, handler: sighandler_t) -> sighandler_t;
+        pub fn raise(signum: libc::c_int) -> libc::c_int;
+
+        pub fn sigaction(signum: libc::c_int,
+                         act: *const sigaction,
+                         oldact: *mut sigaction) -> libc::c_int;
+
+        pub fn sigaltstack(ss: *const sigaltstack,
+                           oss: *mut sigaltstack) -> libc::c_int;
+    }
+}
+
+#[cfg(not(any(target_os = "linux",
+              target_os = "macos")))]
+mod imp {
+    use libc;
+
+    pub unsafe fn init() {
+    }
+
+    pub unsafe fn cleanup() {
+    }
+
+    pub unsafe fn make_handler() -> super::Handler {
+        super::Handler { _data: 0i as *mut libc::c_void }
+    }
+
+    pub unsafe fn drop_handler(_handler: &mut super::Handler) {
+    }
+}
diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs
new file mode 100644
index 00000000000..02da3a19818
--- /dev/null
+++ b/src/libstd/sys/unix/thread.rs
@@ -0,0 +1,270 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use cmp;
+use mem;
+use ptr;
+use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
+use libc;
+
+use sys_common::stack::RED_ZONE;
+use sys_common::thread::*;
+
+pub type rust_thread = libc::pthread_t;
+pub type rust_thread_return = *mut u8;
+pub type StartFn = extern "C" fn(*mut libc::c_void) -> rust_thread_return;
+
+#[no_stack_check]
+pub extern fn thread_start(main: *mut libc::c_void) -> rust_thread_return {
+    return start_thread(main);
+}
+
+#[cfg(all(not(target_os = "linux"), not(target_os = "macos")))]
+pub mod guard {
+    pub unsafe fn current() -> uint {
+        0
+    }
+
+    pub unsafe fn main() -> uint {
+        0
+    }
+
+    pub unsafe fn init() {
+    }
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+pub mod guard {
+    use super::*;
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    use mem;
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    use ptr;
+    use libc;
+    use libc::funcs::posix88::mman::{mmap};
+    use libc::consts::os::posix88::{PROT_NONE,
+                                    MAP_PRIVATE,
+                                    MAP_ANON,
+                                    MAP_FAILED,
+                                    MAP_FIXED};
+
+    // These are initialized in init() and are only read afterwards.
+    static mut PAGE_SIZE: uint = 0;
+    static mut GUARD_PAGE: uint = 0;
+
+    #[cfg(target_os = "macos")]
+    unsafe fn get_stack_start() -> *mut libc::c_void {
+        current() as *mut libc::c_void
+    }
+
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    unsafe fn get_stack_start() -> *mut libc::c_void {
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
+            panic!("failed to get thread attributes");
+        }
+        let mut stackaddr = ptr::null_mut();
+        let mut stacksize = 0;
+        if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
+            panic!("failed to get stack information");
+        }
+        if pthread_attr_destroy(&mut attr) != 0 {
+            panic!("failed to destroy thread attributes");
+        }
+        stackaddr
+    }
+
+    pub unsafe fn init() {
+        let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
+        if psize == -1 {
+            panic!("failed to get page size");
+        }
+
+        PAGE_SIZE = psize as uint;
+
+        let stackaddr = get_stack_start();
+
+        // Remap the last page of the stack as PROT_NONE.
+        // This ensures SIGBUS will be raised on
+        // stack overflow.
+        let result = mmap(stackaddr,
+                          PAGE_SIZE as libc::size_t,
+                          PROT_NONE,
+                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                          -1,
+                          0);
+
+        if result != stackaddr || result == MAP_FAILED {
+            panic!("failed to allocate a guard page");
+        }
+
+        let offset = if cfg!(target_os = "linux") {
+            2
+        } else {
+            1
+        };
+
+        GUARD_PAGE = stackaddr as uint + offset * PAGE_SIZE;
+    }
+
+    pub unsafe fn main() -> uint {
+        GUARD_PAGE
+    }
+
+    #[cfg(target_os = "macos")]
+    pub unsafe fn current() -> uint {
+        (pthread_get_stackaddr_np(pthread_self()) as libc::size_t -
+         pthread_get_stacksize_np(pthread_self())) as uint
+    }
+
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    pub unsafe fn current() -> uint {
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
+            panic!("failed to get thread attributes");
+        }
+        let mut guardsize = 0;
+        if pthread_attr_getguardsize(&attr, &mut guardsize) != 0 {
+            panic!("failed to get stack guard page");
+        }
+        if guardsize == 0 {
+            panic!("there is no guard page");
+        }
+        let mut stackaddr = ptr::null_mut();
+        let mut stacksize = 0;
+        if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
+            panic!("failed to get stack information");
+        }
+        if pthread_attr_destroy(&mut attr) != 0 {
+            panic!("failed to destroy thread attributes");
+        }
+
+        stackaddr as uint + guardsize as uint
+    }
+}
+
+pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+    let mut native: libc::pthread_t = mem::zeroed();
+    let mut attr: libc::pthread_attr_t = mem::zeroed();
+    assert_eq!(pthread_attr_init(&mut attr), 0);
+    assert_eq!(pthread_attr_setdetachstate(&mut attr,
+                                           PTHREAD_CREATE_JOINABLE), 0);
+
+    // Reserve room for the red zone, the runtime's stack of last resort.
+    let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as uint);
+    match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
+        0 => {
+        },
+        libc::EINVAL => {
+            // EINVAL means |stack_size| is either too small or not a
+            // multiple of the system page size.  Because it's definitely
+            // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+            // Round up to the nearest page and try again.
+            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as uint;
+            let stack_size = (stack_size + page_size - 1) &
+                             (-(page_size as int - 1) as uint - 1);
+            assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
+        },
+        errno => {
+            // This cannot really happen.
+            panic!("pthread_attr_setstacksize() error: {}", errno);
+        },
+    };
+
+    let arg: *mut libc::c_void = mem::transmute(p);
+    let ret = pthread_create(&mut native, &attr, thread_start, arg);
+    assert_eq!(pthread_attr_destroy(&mut attr), 0);
+
+    if ret != 0 {
+        // be sure to not leak the closure
+        let _p: Box<proc():Send> = mem::transmute(arg);
+        panic!("failed to spawn native thread: {}", ret);
+    }
+    native
+}
+
+pub unsafe fn join(native: rust_thread) {
+    assert_eq!(pthread_join(native, ptr::null_mut()), 0);
+}
+
+pub unsafe fn detach(native: rust_thread) {
+    assert_eq!(pthread_detach(native), 0);
+}
+
+pub unsafe fn yield_now() { assert_eq!(sched_yield(), 0); }
+// glibc >= 2.15 has a __pthread_get_minstack() function that returns
+// PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
+// storage.  We need that information to avoid blowing up when a small stack
+// is created in an application with big thread-local storage requirements.
+// See #6233 for rationale and details.
+//
+// Link weakly to the symbol for compatibility with older versions of glibc.
+// Assumes that we've been dynamically linked to libpthread but that is
+// currently always the case.  Note that you need to check that the symbol
+// is non-null before calling it!
+#[cfg(target_os = "linux")]
+fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
+    type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
+    extern {
+        #[linkage = "extern_weak"]
+        static __pthread_get_minstack: *const ();
+    }
+    if __pthread_get_minstack.is_null() {
+        PTHREAD_STACK_MIN
+    } else {
+        unsafe { mem::transmute::<*const (), F>(__pthread_get_minstack)(attr) }
+    }
+}
+
+// __pthread_get_minstack() is marked as weak but extern_weak linkage is
+// not supported on OS X, hence this kludge...
+#[cfg(not(target_os = "linux"))]
+fn min_stack_size(_: *const libc::pthread_attr_t) -> libc::size_t {
+    PTHREAD_STACK_MIN
+}
+
+#[cfg(any(target_os = "linux"))]
+extern {
+    pub fn pthread_self() -> libc::pthread_t;
+    pub fn pthread_getattr_np(native: libc::pthread_t,
+                              attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    pub fn pthread_attr_getguardsize(attr: *const libc::pthread_attr_t,
+                                     guardsize: *mut libc::size_t) -> libc::c_int;
+    pub fn pthread_attr_getstack(attr: *const libc::pthread_attr_t,
+                                 stackaddr: *mut *mut libc::c_void,
+                                 stacksize: *mut libc::size_t) -> libc::c_int;
+}
+
+#[cfg(target_os = "macos")]
+extern {
+    pub fn pthread_self() -> libc::pthread_t;
+    pub fn pthread_get_stackaddr_np(thread: libc::pthread_t) -> *mut libc::c_void;
+    pub fn pthread_get_stacksize_np(thread: libc::pthread_t) -> libc::size_t;
+}
+
+extern {
+    fn pthread_create(native: *mut libc::pthread_t,
+                      attr: *const libc::pthread_attr_t,
+                      f: StartFn,
+                      value: *mut libc::c_void) -> libc::c_int;
+    fn pthread_join(native: libc::pthread_t,
+                    value: *mut *mut libc::c_void) -> libc::c_int;
+    fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    pub fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
+                                 stack_size: libc::size_t) -> libc::c_int;
+    fn pthread_attr_setdetachstate(attr: *mut libc::pthread_attr_t,
+                                   state: libc::c_int) -> libc::c_int;
+    fn pthread_detach(thread: libc::pthread_t) -> libc::c_int;
+    fn sched_yield() -> libc::c_int;
+}
diff --git a/src/libstd/sys/windows/backtrace.rs b/src/libstd/sys/windows/backtrace.rs
new file mode 100644
index 00000000000..833b69d6cbe
--- /dev/null
+++ b/src/libstd/sys/windows/backtrace.rs
@@ -0,0 +1,371 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+// As always, windows has something very different than unix, we mainly want
+// to avoid having to depend too much on libunwind for windows.
+//
+// If you google around, you'll find a fair bit of references to built-in
+// functions to get backtraces on windows. It turns out that most of these are
+// in an external library called dbghelp. I was unable to find this library
+// via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
+// of it.
+//
+// You'll also find that there's a function called CaptureStackBackTrace
+// mentioned frequently (which is also easy to use), but sadly I didn't have a
+// copy of that function in my mingw install (maybe it was broken?). Instead,
+// this takes the route of using StackWalk64 in order to walk the stack.
+
+use c_str::CString;
+use intrinsics;
+use io::{IoResult, Writer};
+use libc;
+use mem;
+use ops::Drop;
+use option::{Some, None};
+use path::Path;
+use result::{Ok, Err};
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+use slice::SliceExt;
+use str::StrPrelude;
+use dynamic_lib::DynamicLibrary;
+
+use sys_common::backtrace::*;
+
+#[allow(non_snake_case)]
+extern "system" {
+    fn GetCurrentProcess() -> libc::HANDLE;
+    fn GetCurrentThread() -> libc::HANDLE;
+    fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
+}
+
+type SymFromAddrFn =
+    extern "system" fn(libc::HANDLE, u64, *mut u64,
+                       *mut SYMBOL_INFO) -> libc::BOOL;
+type SymInitializeFn =
+    extern "system" fn(libc::HANDLE, *mut libc::c_void,
+                       libc::BOOL) -> libc::BOOL;
+type SymCleanupFn =
+    extern "system" fn(libc::HANDLE) -> libc::BOOL;
+
+type StackWalk64Fn =
+    extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
+                       *mut STACKFRAME64, *mut arch::CONTEXT,
+                       *mut libc::c_void, *mut libc::c_void,
+                       *mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
+
+const MAX_SYM_NAME: uint = 2000;
+const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
+const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
+const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
+
+#[repr(C)]
+struct SYMBOL_INFO {
+    SizeOfStruct: libc::c_ulong,
+    TypeIndex: libc::c_ulong,
+    Reserved: [u64, ..2],
+    Index: libc::c_ulong,
+    Size: libc::c_ulong,
+    ModBase: u64,
+    Flags: libc::c_ulong,
+    Value: u64,
+    Address: u64,
+    Register: libc::c_ulong,
+    Scope: libc::c_ulong,
+    Tag: libc::c_ulong,
+    NameLen: libc::c_ulong,
+    MaxNameLen: libc::c_ulong,
+    // note that windows has this as 1, but it basically just means that
+    // the name is inline at the end of the struct. For us, we just bump
+    // the struct size up to MAX_SYM_NAME.
+    Name: [libc::c_char, ..MAX_SYM_NAME],
+}
+
+
+#[repr(C)]
+enum ADDRESS_MODE {
+    AddrMode1616,
+    AddrMode1632,
+    AddrModeReal,
+    AddrModeFlat,
+}
+
+struct ADDRESS64 {
+    Offset: u64,
+    Segment: u16,
+    Mode: ADDRESS_MODE,
+}
+
+struct STACKFRAME64 {
+    AddrPC: ADDRESS64,
+    AddrReturn: ADDRESS64,
+    AddrFrame: ADDRESS64,
+    AddrStack: ADDRESS64,
+    AddrBStore: ADDRESS64,
+    FuncTableEntry: *mut libc::c_void,
+    Params: [u64, ..4],
+    Far: libc::BOOL,
+    Virtual: libc::BOOL,
+    Reserved: [u64, ..3],
+    KdHelp: KDHELP64,
+}
+
+struct KDHELP64 {
+    Thread: u64,
+    ThCallbackStack: libc::DWORD,
+    ThCallbackBStore: libc::DWORD,
+    NextCallback: libc::DWORD,
+    FramePointer: libc::DWORD,
+    KiCallUserMode: u64,
+    KeUserCallbackDispatcher: u64,
+    SystemRangeStart: u64,
+    KiUserExceptionDispatcher: u64,
+    StackBase: u64,
+    StackLimit: u64,
+    Reserved: [u64, ..5],
+}
+
+#[cfg(target_arch = "x86")]
+mod arch {
+    use libc;
+
+    const MAXIMUM_SUPPORTED_EXTENSION: uint = 512;
+
+    #[repr(C)]
+    pub struct CONTEXT {
+        ContextFlags: libc::DWORD,
+        Dr0: libc::DWORD,
+        Dr1: libc::DWORD,
+        Dr2: libc::DWORD,
+        Dr3: libc::DWORD,
+        Dr6: libc::DWORD,
+        Dr7: libc::DWORD,
+        FloatSave: FLOATING_SAVE_AREA,
+        SegGs: libc::DWORD,
+        SegFs: libc::DWORD,
+        SegEs: libc::DWORD,
+        SegDs: libc::DWORD,
+        Edi: libc::DWORD,
+        Esi: libc::DWORD,
+        Ebx: libc::DWORD,
+        Edx: libc::DWORD,
+        Ecx: libc::DWORD,
+        Eax: libc::DWORD,
+        Ebp: libc::DWORD,
+        Eip: libc::DWORD,
+        SegCs: libc::DWORD,
+        EFlags: libc::DWORD,
+        Esp: libc::DWORD,
+        SegSs: libc::DWORD,
+        ExtendedRegisters: [u8, ..MAXIMUM_SUPPORTED_EXTENSION],
+    }
+
+    #[repr(C)]
+    pub struct FLOATING_SAVE_AREA {
+        ControlWord: libc::DWORD,
+        StatusWord: libc::DWORD,
+        TagWord: libc::DWORD,
+        ErrorOffset: libc::DWORD,
+        ErrorSelector: libc::DWORD,
+        DataOffset: libc::DWORD,
+        DataSelector: libc::DWORD,
+        RegisterArea: [u8, ..80],
+        Cr0NpxState: libc::DWORD,
+    }
+
+    pub fn init_frame(frame: &mut super::STACKFRAME64,
+                      ctx: &CONTEXT) -> libc::DWORD {
+        frame.AddrPC.Offset = ctx.Eip as u64;
+        frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrStack.Offset = ctx.Esp as u64;
+        frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrFrame.Offset = ctx.Ebp as u64;
+        frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        super::IMAGE_FILE_MACHINE_I386
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+mod arch {
+    use libc::{c_longlong, c_ulonglong};
+    use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
+    use simd;
+
+    #[repr(C)]
+    pub struct CONTEXT {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        P1Home: DWORDLONG,
+        P2Home: DWORDLONG,
+        P3Home: DWORDLONG,
+        P4Home: DWORDLONG,
+        P5Home: DWORDLONG,
+        P6Home: DWORDLONG,
+
+        ContextFlags: DWORD,
+        MxCsr: DWORD,
+
+        SegCs: WORD,
+        SegDs: WORD,
+        SegEs: WORD,
+        SegFs: WORD,
+        SegGs: WORD,
+        SegSs: WORD,
+        EFlags: DWORD,
+
+        Dr0: DWORDLONG,
+        Dr1: DWORDLONG,
+        Dr2: DWORDLONG,
+        Dr3: DWORDLONG,
+        Dr6: DWORDLONG,
+        Dr7: DWORDLONG,
+
+        Rax: DWORDLONG,
+        Rcx: DWORDLONG,
+        Rdx: DWORDLONG,
+        Rbx: DWORDLONG,
+        Rsp: DWORDLONG,
+        Rbp: DWORDLONG,
+        Rsi: DWORDLONG,
+        Rdi: DWORDLONG,
+        R8:  DWORDLONG,
+        R9:  DWORDLONG,
+        R10: DWORDLONG,
+        R11: DWORDLONG,
+        R12: DWORDLONG,
+        R13: DWORDLONG,
+        R14: DWORDLONG,
+        R15: DWORDLONG,
+
+        Rip: DWORDLONG,
+
+        FltSave: FLOATING_SAVE_AREA,
+
+        VectorRegister: [M128A, .. 26],
+        VectorControl: DWORDLONG,
+
+        DebugControl: DWORDLONG,
+        LastBranchToRip: DWORDLONG,
+        LastBranchFromRip: DWORDLONG,
+        LastExceptionToRip: DWORDLONG,
+        LastExceptionFromRip: DWORDLONG,
+    }
+
+    #[repr(C)]
+    pub struct M128A {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        Low:  c_ulonglong,
+        High: c_longlong
+    }
+
+    #[repr(C)]
+    pub struct FLOATING_SAVE_AREA {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        _Dummy: [u8, ..512] // FIXME: Fill this out
+    }
+
+    pub fn init_frame(frame: &mut super::STACKFRAME64,
+                      ctx: &CONTEXT) -> DWORD {
+        frame.AddrPC.Offset = ctx.Rip as u64;
+        frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrStack.Offset = ctx.Rsp as u64;
+        frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrFrame.Offset = ctx.Rbp as u64;
+        frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        super::IMAGE_FILE_MACHINE_AMD64
+    }
+}
+
+#[repr(C)]
+struct Cleanup {
+    handle: libc::HANDLE,
+    SymCleanup: SymCleanupFn,
+}
+
+impl Drop for Cleanup {
+    fn drop(&mut self) { (self.SymCleanup)(self.handle); }
+}
+
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    // According to windows documentation, all dbghelp functions are
+    // single-threaded.
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    // Open up dbghelp.dll, we don't link to it explicitly because it can't
+    // always be found. Additionally, it's nice having fewer dependencies.
+    let path = Path::new("dbghelp.dll");
+    let lib = match DynamicLibrary::open(Some(&path)) {
+        Ok(lib) => lib,
+        Err(..) => return Ok(()),
+    };
+
+    macro_rules! sym( ($e:expr, $t:ident) => (unsafe {
+        match lib.symbol($e) {
+            Ok(f) => mem::transmute::<*mut u8, $t>(f),
+            Err(..) => return Ok(())
+        }
+    }) )
+
+    // Fetch the symbols necessary from dbghelp.dll
+    let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
+    let SymInitialize = sym!("SymInitialize", SymInitializeFn);
+    let SymCleanup = sym!("SymCleanup", SymCleanupFn);
+    let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
+
+    // Allocate necessary structures for doing the stack walk
+    let process = unsafe { GetCurrentProcess() };
+    let thread = unsafe { GetCurrentThread() };
+    let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
+    unsafe { RtlCaptureContext(&mut context); }
+    let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
+    let image = arch::init_frame(&mut frame, &context);
+
+    // Initialize this process's symbols
+    let ret = SymInitialize(process, 0 as *mut libc::c_void, libc::TRUE);
+    if ret != libc::TRUE { return Ok(()) }
+    let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
+
+    // And now that we're done with all the setup, do the stack walking!
+    let mut i = 0i;
+    try!(write!(w, "stack backtrace:\n"));
+    while StackWalk64(image, process, thread, &mut frame, &mut context,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void) == libc::TRUE{
+        let addr = frame.AddrPC.Offset;
+        if addr == frame.AddrReturn.Offset || addr == 0 ||
+           frame.AddrReturn.Offset == 0 { break }
+
+        i += 1;
+        try!(write!(w, "  {:2}: {:#2$x}", i, addr, HEX_WIDTH));
+        let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
+        info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
+        // This is the struct size in C. The value differs from
+        // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
+        // due to struct alignment.
+        info.SizeOfStruct = 88;
+
+        let mut displacement = 0u64;
+        let ret = SymFromAddr(process, addr as u64, &mut displacement,
+                              &mut info);
+
+        if ret == libc::TRUE {
+            try!(write!(w, " - "));
+            let cstr = unsafe { CString::new(info.Name.as_ptr(), false) };
+            let bytes = cstr.as_bytes();
+            match cstr.as_str() {
+                Some(s) => try!(demangle(w, s)),
+                None => try!(w.write(bytes[..bytes.len()-1])),
+            }
+        }
+        try!(w.write(&['\n' as u8]));
+    }
+
+    Ok(())
+}
diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs
index d22d4e0f534..6924687d8c4 100644
--- a/src/libstd/sys/windows/mod.rs
+++ b/src/libstd/sys/windows/mod.rs
@@ -35,6 +35,7 @@ macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
     };
 ) }
 
+pub mod backtrace;
 pub mod c;
 pub mod ext;
 pub mod condvar;
@@ -46,7 +47,9 @@ pub mod pipe;
 pub mod process;
 pub mod rwlock;
 pub mod sync;
+pub mod stack_overflow;
 pub mod tcp;
+pub mod thread;
 pub mod thread_local;
 pub mod timer;
 pub mod tty;
diff --git a/src/libstd/sys/windows/stack_overflow.rs b/src/libstd/sys/windows/stack_overflow.rs
new file mode 100644
index 00000000000..e3d96a054f4
--- /dev/null
+++ b/src/libstd/sys/windows/stack_overflow.rs
@@ -0,0 +1,120 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rt::local::Local;
+use rt::task::Task;
+use rt::util::report_overflow;
+use core::prelude::*;
+use ptr;
+use mem;
+use libc;
+use libc::types::os::arch::extra::{LPVOID, DWORD, LONG, BOOL};
+use sys_common::stack;
+
+pub struct Handler {
+    _data: *mut libc::c_void
+}
+
+impl Handler {
+    pub unsafe fn new() -> Handler {
+        make_handler()
+    }
+}
+
+impl Drop for Handler {
+    fn drop(&mut self) {}
+}
+
+// get_task_guard_page is called from an exception / signal handler.
+// It returns the guard page of the current task or 0 if that
+// guard page doesn't exist. None is returned if there's currently
+// no local task.
+unsafe fn get_task_guard_page() -> Option<uint> {
+    let task: Option<*mut Task> = Local::try_unsafe_borrow();
+    task.map(|task| (&*task).stack_guard().unwrap_or(0))
+}
+
+// This is initialized in init() and is only read afterwards.
+static mut PAGE_SIZE: uint = 0;
+
+#[no_stack_check]
+extern "system" fn vectored_handler(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG {
+    unsafe {
+        let rec = &(*(*ExceptionInfo).ExceptionRecord);
+        let code = rec.ExceptionCode;
+
+        if code != EXCEPTION_STACK_OVERFLOW {
+            return EXCEPTION_CONTINUE_SEARCH;
+        }
+
+        // We're calling into functions with stack checks,
+        // however stack checks by limit should be disabled on Windows
+        stack::record_sp_limit(0);
+
+        if get_task_guard_page().is_some() {
+           report_overflow();
+        }
+
+        EXCEPTION_CONTINUE_SEARCH
+    }
+}
+
+pub unsafe fn init() {
+    let mut info = mem::zeroed();
+    libc::GetSystemInfo(&mut info);
+    PAGE_SIZE = info.dwPageSize as uint;
+
+    if AddVectoredExceptionHandler(0, vectored_handler) == ptr::null_mut() {
+        panic!("failed to install exception handler");
+    }
+
+    mem::forget(make_handler());
+}
+
+pub unsafe fn cleanup() {
+}
+
+pub unsafe fn make_handler() -> Handler {
+    if SetThreadStackGuarantee(&mut 0x5000) == 0 {
+        panic!("failed to reserve stack space for exception handling");
+    }
+
+    Handler { _data: 0i as *mut libc::c_void }
+}
+
+pub struct EXCEPTION_RECORD {
+    pub ExceptionCode: DWORD,
+    pub ExceptionFlags: DWORD,
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ExceptionAddress: LPVOID,
+    pub NumberParameters: DWORD,
+    pub ExceptionInformation: [LPVOID, ..EXCEPTION_MAXIMUM_PARAMETERS]
+}
+
+pub struct EXCEPTION_POINTERS {
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ContextRecord: LPVOID
+}
+
+pub type PVECTORED_EXCEPTION_HANDLER = extern "system"
+        fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
+
+pub type ULONG = libc::c_ulong;
+
+const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
+const EXCEPTION_MAXIMUM_PARAMETERS: uint = 15;
+const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
+
+extern "system" {
+    fn AddVectoredExceptionHandler(FirstHandler: ULONG,
+                                   VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
+                                  -> LPVOID;
+    fn SetThreadStackGuarantee(StackSizeInBytes: *mut ULONG) -> BOOL;
+}
diff --git a/src/libstd/sys/windows/thread.rs b/src/libstd/sys/windows/thread.rs
new file mode 100644
index 00000000000..00f1e9767f5
--- /dev/null
+++ b/src/libstd/sys/windows/thread.rs
@@ -0,0 +1,95 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use cmp;
+use mem;
+use ptr;
+use libc;
+use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
+                                   LPVOID, DWORD, LPDWORD, HANDLE};
+use sys_common::stack::RED_ZONE;
+use sys_common::thread::*;
+
+pub type rust_thread = HANDLE;
+pub type rust_thread_return = DWORD;
+
+pub type StartFn = extern "system" fn(*mut libc::c_void) -> rust_thread_return;
+
+#[no_stack_check]
+pub extern "system" fn thread_start(main: *mut libc::c_void) -> rust_thread_return {
+    return start_thread(main);
+}
+
+pub mod guard {
+    pub unsafe fn main() -> uint {
+        0
+    }
+
+    pub unsafe fn current() -> uint {
+        0
+    }
+
+    pub unsafe fn init() {
+    }
+}
+
+pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+    let arg: *mut libc::c_void = mem::transmute(p);
+    // FIXME On UNIX, we guard against stack sizes that are too small but
+    // that's because pthreads enforces that stacks are at least
+    // PTHREAD_STACK_MIN bytes big.  Windows has no such lower limit, it's
+    // just that below a certain threshold you can't do anything useful.
+    // That threshold is application and architecture-specific, however.
+    // For now, the only requirement is that it's big enough to hold the
+    // red zone.  Round up to the next 64 kB because that's what the NT
+    // kernel does, might as well make it explicit.  With the current
+    // 20 kB red zone, that makes for a 64 kB minimum stack.
+    let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
+    let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
+                           thread_start, arg, 0, ptr::null_mut());
+
+    if ret as uint == 0 {
+        // be sure to not leak the closure
+        let _p: Box<proc():Send> = mem::transmute(arg);
+        panic!("failed to spawn native thread: {}", ret);
+    }
+    return ret;
+}
+
+pub unsafe fn join(native: rust_thread) {
+    use libc::consts::os::extra::INFINITE;
+    WaitForSingleObject(native, INFINITE);
+}
+
+pub unsafe fn detach(native: rust_thread) {
+    assert!(libc::CloseHandle(native) != 0);
+}
+
+pub unsafe fn yield_now() {
+    // This function will return 0 if there are no other threads to execute,
+    // but this also means that the yield was useless so this isn't really a
+    // case that needs to be worried about.
+    SwitchToThread();
+}
+
+#[allow(non_snake_case)]
+extern "system" {
+    fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
+                    dwStackSize: SIZE_T,
+                    lpStartAddress: StartFn,
+                    lpParameter: LPVOID,
+                    dwCreationFlags: DWORD,
+                    lpThreadId: LPDWORD) -> HANDLE;
+    fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
+    fn SwitchToThread() -> BOOL;
+}
diff --git a/src/libstd/sys/windows/thread_local.rs b/src/libstd/sys/windows/thread_local.rs
index 969b322af99..6c8d9639d5c 100644
--- a/src/libstd/sys/windows/thread_local.rs
+++ b/src/libstd/sys/windows/thread_local.rs
@@ -13,8 +13,8 @@ use prelude::*;
 use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
 
 use mem;
-use rustrt;
-use rustrt::exclusive::Exclusive;
+use rt;
+use rt::exclusive::Exclusive;
 use sync::{ONCE_INIT, Once};
 
 pub type Key = DWORD;
@@ -131,7 +131,7 @@ fn init_dtors() {
         DTORS = mem::transmute(dtors);
     }
 
-    rustrt::at_exit(move|| unsafe {
+    rt::at_exit(move|| unsafe {
         mem::transmute::<_, Box<Exclusive<Vec<(Key, Dtor)>>>>(DTORS);
         DTORS = 0 as *mut _;
     });
diff --git a/src/libstd/task.rs b/src/libstd/task.rs
index 324b594209a..127cad186f6 100644
--- a/src/libstd/task.rs
+++ b/src/libstd/task.rs
@@ -53,9 +53,9 @@ use kinds::Send;
 use option::Option;
 use option::Option::{None, Some};
 use result::Result;
-use rustrt::local::Local;
-use rustrt::task::Task;
-use rustrt::task;
+use rt::local::Local;
+use rt::task;
+use rt::task::Task;
 use str::SendStr;
 use string::{String, ToString};
 use thunk::{Thunk};
@@ -252,7 +252,7 @@ pub fn try_future<T,F>(f: F) -> Future<Result<T, Box<Any + Send>>>
 /// Read the name of the current task.
 #[stable]
 pub fn name() -> Option<String> {
-    use rustrt::task::Task;
+    use rt::task::Task;
 
     let task = Local::borrow(None::<Task>);
     match task.name {
@@ -264,7 +264,7 @@ pub fn name() -> Option<String> {
 /// Yield control to the task scheduler.
 #[unstable = "Name will change."]
 pub fn deschedule() {
-    use rustrt::task::Task;
+    use rt::task::Task;
     Task::yield_now();
 }
 
@@ -272,7 +272,7 @@ pub fn deschedule() {
 /// destructor that is run while unwinding the stack after a call to `panic!()`).
 #[unstable = "May move to a different module."]
 pub fn failing() -> bool {
-    use rustrt::task::Task;
+    use rt::task::Task;
     Local::borrow(None::<Task>).unwinder.unwinding()
 }
 
diff --git a/src/libstd/thread_local/mod.rs b/src/libstd/thread_local/mod.rs
index 1268ab8e0cf..b78428d69de 100644
--- a/src/libstd/thread_local/mod.rs
+++ b/src/libstd/thread_local/mod.rs
@@ -446,7 +446,7 @@ mod tests {
     use prelude::*;
 
     use cell::UnsafeCell;
-    use rustrt::thread::Thread;
+    use rt::thread::Thread;
 
     struct Foo(Sender<()>);
 
diff --git a/src/libstd/thunk.rs b/src/libstd/thunk.rs
new file mode 100644
index 00000000000..42e78495990
--- /dev/null
+++ b/src/libstd/thunk.rs
@@ -0,0 +1,52 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use alloc::boxed::Box;
+use core::kinds::Send;
+use core::ops::FnOnce;
+
+pub struct Thunk<A=(),R=()> {
+    invoke: Box<Invoke<A,R>+Send>
+}
+
+impl<R> Thunk<(),R> {
+    pub fn new<F>(func: F) -> Thunk<(),R>
+        where F : FnOnce() -> R, F : Send
+    {
+        Thunk::with_arg(move|: ()| func())
+    }
+}
+
+impl<A,R> Thunk<A,R> {
+    pub fn with_arg<F>(func: F) -> Thunk<A,R>
+        where F : FnOnce(A) -> R, F : Send
+    {
+        Thunk {
+            invoke: box func
+        }
+    }
+
+    pub fn invoke(self, arg: A) -> R {
+        self.invoke.invoke(arg)
+    }
+}
+
+pub trait Invoke<A=(),R=()> {
+    fn invoke(self: Box<Self>, arg: A) -> R;
+}
+
+impl<A,R,F> Invoke<A,R> for F
+    where F : FnOnce(A) -> R
+{
+    fn invoke(self: Box<F>, arg: A) -> R {
+        let f = *self;
+        f(arg)
+    }
+}