about summary refs log tree commit diff
path: root/src/libstd/rt
diff options
context:
space:
mode:
authorAaron Turon <aturon@mozilla.com>2014-11-23 19:21:17 -0800
committerAaron Turon <aturon@mozilla.com>2014-12-18 23:31:34 -0800
commit2b3477d373603527d23cc578f3737857b7b253d7 (patch)
tree56022ebf11d5d27a6ef15f15d00d014a84a35837 /src/libstd/rt
parent840de072085df360733c48396224e9966e2dc72c (diff)
downloadrust-2b3477d373603527d23cc578f3737857b7b253d7.tar.gz
rust-2b3477d373603527d23cc578f3737857b7b253d7.zip
libs: merge librustrt into libstd
This commit merges the `rustrt` crate into `std`, undoing part of the
facade. This merger continues the paring down of the runtime system.

Code relying on the public API of `rustrt` will break; some of this API
is now available through `std::rt`, but is likely to change and/or be
removed very soon.

[breaking-change]
Diffstat (limited to 'src/libstd/rt')
-rw-r--r--src/libstd/rt/args.rs167
-rw-r--r--src/libstd/rt/at_exit_imp.rs65
-rw-r--r--src/libstd/rt/backtrace.rs978
-rw-r--r--src/libstd/rt/bookkeeping.rs61
-rw-r--r--src/libstd/rt/exclusive.rs115
-rw-r--r--src/libstd/rt/libunwind.rs128
-rw-r--r--src/libstd/rt/local.rs131
-rw-r--r--src/libstd/rt/local_ptr.rs404
-rw-r--r--src/libstd/rt/macros.rs45
-rw-r--r--src/libstd/rt/mod.rs88
-rw-r--r--src/libstd/rt/mutex.rs406
-rw-r--r--src/libstd/rt/task.rs561
-rw-r--r--src/libstd/rt/thread.rs171
-rw-r--r--src/libstd/rt/thread_local_storage.rs115
-rw-r--r--src/libstd/rt/unwind.rs638
-rw-r--r--src/libstd/rt/util.rs147
16 files changed, 3227 insertions, 993 deletions
diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs
new file mode 100644
index 00000000000..8b9dbf73c53
--- /dev/null
+++ b/src/libstd/rt/args.rs
@@ -0,0 +1,167 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Global storage for command line arguments
+//!
+//! The current incarnation of the Rust runtime expects for
+//! the processes `argc` and `argv` arguments to be stored
+//! in a globally-accessible location for use by the `os` module.
+//!
+//! Only valid to call on Linux. Mac and Windows use syscalls to
+//! discover the command line arguments.
+//!
+//! FIXME #7756: Would be nice for this to not exist.
+
+use core::prelude::*;
+use vec::Vec;
+
+/// One-time global initialization.
+pub unsafe fn init(argc: int, argv: *const *const u8) { imp::init(argc, argv) }
+
+/// One-time global cleanup.
+pub unsafe fn cleanup() { imp::cleanup() }
+
+/// Take the global arguments from global storage.
+pub fn take() -> Option<Vec<Vec<u8>>> { imp::take() }
+
+/// Give the global arguments to global storage.
+///
+/// It is an error if the arguments already exist.
+pub fn put(args: Vec<Vec<u8>>) { imp::put(args) }
+
+/// Make a clone of the global arguments.
+pub fn clone() -> Option<Vec<Vec<u8>>> { imp::clone() }
+
+#[cfg(any(target_os = "linux",
+          target_os = "android",
+          target_os = "freebsd",
+          target_os = "dragonfly"))]
+mod imp {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use vec::Vec;
+    use string::String;
+    use mem;
+
+    use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+    static mut GLOBAL_ARGS_PTR: uint = 0;
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+
+    pub unsafe fn init(argc: int, argv: *const *const u8) {
+        let args = load_argc_and_argv(argc, argv);
+        put(args);
+    }
+
+    pub unsafe fn cleanup() {
+        rtassert!(take().is_some());
+        LOCK.destroy();
+    }
+
+    pub fn take() -> Option<Vec<Vec<u8>>> {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            let val = mem::replace(&mut *ptr, None);
+            val.as_ref().map(|s: &Box<Vec<Vec<u8>>>| (**s).clone())
+        })
+    }
+
+    pub fn put(args: Vec<Vec<u8>>) {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            rtassert!((*ptr).is_none());
+            (*ptr) = Some(box args.clone());
+        })
+    }
+
+    pub fn clone() -> Option<Vec<Vec<u8>>> {
+        with_lock(|| unsafe {
+            let ptr = get_global_ptr();
+            (*ptr).as_ref().map(|s: &Box<Vec<Vec<u8>>>| (**s).clone())
+        })
+    }
+
+    fn with_lock<T, F>(f: F) -> T where F: FnOnce() -> T {
+        unsafe {
+            let _guard = LOCK.lock();
+            f()
+        }
+    }
+
+    fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
+        unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
+    }
+
+    unsafe fn load_argc_and_argv(argc: int, argv: *const *const u8) -> Vec<Vec<u8>> {
+        Vec::from_fn(argc as uint, |i| {
+            String::from_raw_buf(*argv.offset(i as int)).into_bytes()
+        })
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use std::prelude::*;
+        use std::finally::Finally;
+
+        use super::*;
+
+        #[test]
+        fn smoke_test() {
+            // Preserve the actual global state.
+            let saved_value = take();
+
+            let expected = vec![
+                b"happy".to_vec(),
+                b"today?".to_vec(),
+            ];
+
+            put(expected.clone());
+            assert!(clone() == Some(expected.clone()));
+            assert!(take() == Some(expected.clone()));
+            assert!(take() == None);
+
+            (|&mut:| {
+            }).finally(|| {
+                // Restore the actual global state.
+                match saved_value {
+                    Some(ref args) => put(args.clone()),
+                    None => ()
+                }
+            })
+        }
+    }
+}
+
+#[cfg(any(target_os = "macos",
+          target_os = "ios",
+          target_os = "windows"))]
+mod imp {
+    use core::prelude::*;
+    use vec::Vec;
+
+    pub unsafe fn init(_argc: int, _argv: *const *const u8) {
+    }
+
+    pub fn cleanup() {
+    }
+
+    pub fn take() -> Option<Vec<Vec<u8>>> {
+        panic!()
+    }
+
+    pub fn put(_args: Vec<Vec<u8>>) {
+        panic!()
+    }
+
+    pub fn clone() -> Option<Vec<Vec<u8>>> {
+        panic!()
+    }
+}
diff --git a/src/libstd/rt/at_exit_imp.rs b/src/libstd/rt/at_exit_imp.rs
new file mode 100644
index 00000000000..086079c312a
--- /dev/null
+++ b/src/libstd/rt/at_exit_imp.rs
@@ -0,0 +1,65 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of running at_exit routines
+//!
+//! Documentation can be found on the `rt::at_exit` function.
+
+use core::prelude::*;
+
+use boxed::Box;
+use vec::Vec;
+use sync::atomic;
+use mem;
+use thunk::Thunk;
+
+use rt::exclusive::Exclusive;
+
+type Queue = Exclusive<Vec<Thunk>>;
+
+static QUEUE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static RUNNING: atomic::AtomicBool = atomic::INIT_ATOMIC_BOOL;
+
+pub fn init() {
+    let state: Box<Queue> = box Exclusive::new(Vec::new());
+    unsafe {
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        assert!(QUEUE.swap(mem::transmute(state), atomic::SeqCst) == 0);
+    }
+}
+
+pub fn push(f: Thunk) {
+    unsafe {
+        // Note that the check against 0 for the queue pointer is not atomic at
+        // all with respect to `run`, meaning that this could theoretically be a
+        // use-after-free. There's not much we can do to protect against that,
+        // however. Let's just assume a well-behaved runtime and go from there!
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.load(atomic::SeqCst);
+        rtassert!(queue != 0);
+        (*(queue as *const Queue)).lock().push(f);
+    }
+}
+
+pub fn run() {
+    let cur = unsafe {
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.swap(0, atomic::SeqCst);
+        rtassert!(queue != 0);
+
+        let queue: Box<Queue> = mem::transmute(queue);
+        let v = mem::replace(&mut *queue.lock(), Vec::new());
+        v
+    };
+
+    for to_run in cur.into_iter() {
+        to_run.invoke(());
+    }
+}
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index d815a5ea4f7..40885823a05 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -19,9 +19,8 @@ use os;
 use result::Result::{Ok, Err};
 use str::{StrPrelude, from_str};
 use sync::atomic;
-use unicode::char::UnicodeChar;
 
-pub use self::imp::write;
+pub use sys::backtrace::write;
 
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
@@ -41,984 +40,13 @@ pub fn log_enabled() -> bool {
     val == 2
 }
 
-#[cfg(target_word_size = "64")] const HEX_WIDTH: uint = 18;
-#[cfg(target_word_size = "32")] const HEX_WIDTH: uint = 10;
-
-// All rust symbols are in theory lists of "::"-separated identifiers. Some
-// assemblers, however, can't handle these characters in symbol names. To get
-// around this, we use C++-style mangling. The mangling method is:
-//
-// 1. Prefix the symbol with "_ZN"
-// 2. For each element of the path, emit the length plus the element
-// 3. End the path with "E"
-//
-// For example, "_ZN4testE" => "test" and "_ZN3foo3bar" => "foo::bar".
-//
-// We're the ones printing our backtraces, so we can't rely on anything else to
-// demangle our symbols. It's *much* nicer to look at demangled symbols, so
-// this function is implemented to give us nice pretty output.
-//
-// Note that this demangler isn't quite as fancy as it could be. We have lots
-// of other information in our symbols like hashes, version, type information,
-// etc. Additionally, this doesn't handle glue symbols at all.
-fn demangle(writer: &mut Writer, s: &str) -> IoResult<()> {
-    // First validate the symbol. If it doesn't look like anything we're
-    // expecting, we just print it literally. Note that we must handle non-rust
-    // symbols because we could have any function in the backtrace.
-    let mut valid = true;
-    let mut inner = s;
-    if s.len() > 4 && s.starts_with("_ZN") && s.ends_with("E") {
-        inner = s.slice(3, s.len() - 1);
-    // On Windows, dbghelp strips leading underscores, so we accept "ZN...E" form too.
-    } else if s.len() > 3 && s.starts_with("ZN") && s.ends_with("E") {
-        inner = s.slice(2, s.len() - 1);
-    } else {
-        valid = false;
-    }
-
-    if valid {
-        let mut chars = inner.chars();
-        while valid {
-            let mut i = 0;
-            for c in chars {
-                if c.is_numeric() {
-                    i = i * 10 + c as uint - '0' as uint;
-                } else {
-                    break
-                }
-            }
-            if i == 0 {
-                valid = chars.next().is_none();
-                break
-            } else if chars.by_ref().take(i - 1).count() != i - 1 {
-                valid = false;
-            }
-        }
-    }
-
-    // Alright, let's do this.
-    if !valid {
-        try!(writer.write_str(s));
-    } else {
-        let mut first = true;
-        while inner.len() > 0 {
-            if !first {
-                try!(writer.write_str("::"));
-            } else {
-                first = false;
-            }
-            let mut rest = inner;
-            while rest.char_at(0).is_numeric() {
-                rest = rest.slice_from(1);
-            }
-            let i: uint = from_str(inner.slice_to(inner.len() - rest.len())).unwrap();
-            inner = rest.slice_from(i);
-            rest = rest.slice_to(i);
-            while rest.len() > 0 {
-                if rest.starts_with("$") {
-                    macro_rules! demangle {
-                        ($($pat:expr => $demangled:expr),*) => ({
-                            $(if rest.starts_with($pat) {
-                                try!(writer.write_str($demangled));
-                                rest = rest.slice_from($pat.len());
-                              } else)*
-                            {
-                                try!(writer.write_str(rest));
-                                break;
-                            }
-
-                        })
-                    }
-
-                    // see src/librustc/back/link.rs for these mappings
-                    demangle! (
-                        "$SP$" => "@",
-                        "$UP$" => "Box",
-                        "$RP$" => "*",
-                        "$BP$" => "&",
-                        "$LT$" => "<",
-                        "$GT$" => ">",
-                        "$LP$" => "(",
-                        "$RP$" => ")",
-                        "$C$"  => ",",
-
-                        // in theory we can demangle any Unicode code point, but
-                        // for simplicity we just catch the common ones.
-                        "$x20" => " ",
-                        "$x27" => "'",
-                        "$x5b" => "[",
-                        "$x5d" => "]"
-                    )
-                } else {
-                    let idx = match rest.find('$') {
-                        None => rest.len(),
-                        Some(i) => i,
-                    };
-                    try!(writer.write_str(rest.slice_to(idx)));
-                    rest = rest.slice_from(idx);
-                }
-            }
-        }
-    }
-
-    Ok(())
-}
-
-/// Backtrace support built on libgcc with some extra OS-specific support
-///
-/// Some methods of getting a backtrace:
-///
-/// * The backtrace() functions on unix. It turns out this doesn't work very
-///   well for green threads on OSX, and the address to symbol portion of it
-///   suffers problems that are described below.
-///
-/// * Using libunwind. This is more difficult than it sounds because libunwind
-///   isn't installed everywhere by default. It's also a bit of a hefty library,
-///   so possibly not the best option. When testing, libunwind was excellent at
-///   getting both accurate backtraces and accurate symbols across platforms.
-///   This route was not chosen in favor of the next option, however.
-///
-/// * We're already using libgcc_s for exceptions in rust (triggering task
-///   unwinding and running destructors on the stack), and it turns out that it
-///   conveniently comes with a function that also gives us a backtrace. All of
-///   these functions look like _Unwind_*, but it's not quite the full
-///   repertoire of the libunwind API. Due to it already being in use, this was
-///   the chosen route of getting a backtrace.
-///
-/// After choosing libgcc_s for backtraces, the sad part is that it will only
-/// give us a stack trace of instruction pointers. Thankfully these instruction
-/// pointers are accurate (they work for green and native threads), but it's
-/// then up to us again to figure out how to translate these addresses to
-/// symbols. As with before, we have a few options. Before, that, a little bit
-/// of an interlude about symbols. This is my very limited knowledge about
-/// symbol tables, and this information is likely slightly wrong, but the
-/// general idea should be correct.
-///
-/// When talking about symbols, it's helpful to know a few things about where
-/// symbols are located. Some symbols are located in the dynamic symbol table
-/// of the executable which in theory means that they're available for dynamic
-/// linking and lookup. Other symbols end up only in the local symbol table of
-/// the file. This loosely corresponds to pub and priv functions in Rust.
-///
-/// Armed with this knowledge, we know that our solution for address to symbol
-/// translation will need to consult both the local and dynamic symbol tables.
-/// With that in mind, here's our options of translating an address to
-/// a symbol.
-///
-/// * Use dladdr(). The original backtrace()-based idea actually uses dladdr()
-///   behind the scenes to translate, and this is why backtrace() was not used.
-///   Conveniently, this method works fantastically on OSX. It appears dladdr()
-///   uses magic to consult the local symbol table, or we're putting everything
-///   in the dynamic symbol table anyway. Regardless, for OSX, this is the
-///   method used for translation. It's provided by the system and easy to do.o
-///
-///   Sadly, all other systems have a dladdr() implementation that does not
-///   consult the local symbol table. This means that most functions are blank
-///   because they don't have symbols. This means that we need another solution.
-///
-/// * Use unw_get_proc_name(). This is part of the libunwind api (not the
-///   libgcc_s version of the libunwind api), but involves taking a dependency
-///   to libunwind. We may pursue this route in the future if we bundle
-///   libunwind, but libunwind was unwieldy enough that it was not chosen at
-///   this time to provide this functionality.
-///
-/// * Shell out to a utility like `readelf`. Crazy though it may sound, it's a
-///   semi-reasonable solution. The stdlib already knows how to spawn processes,
-///   so in theory it could invoke readelf, parse the output, and consult the
-///   local/dynamic symbol tables from there. This ended up not getting chosen
-///   due to the craziness of the idea plus the advent of the next option.
-///
-/// * Use `libbacktrace`. It turns out that this is a small library bundled in
-///   the gcc repository which provides backtrace and symbol translation
-///   functionality. All we really need from it is the backtrace functionality,
-///   and we only really need this on everything that's not OSX, so this is the
-///   chosen route for now.
-///
-/// In summary, the current situation uses libgcc_s to get a trace of stack
-/// pointers, and we use dladdr() or libbacktrace to translate these addresses
-/// to symbols. This is a bit of a hokey implementation as-is, but it works for
-/// all unix platforms we support right now, so it at least gets the job done.
-#[cfg(unix)]
-mod imp {
-    use prelude::*;
-
-    use c_str::CString;
-    use io::IoResult;
-    use libc;
-    use mem;
-    use sync::{StaticMutex, MUTEX_INIT};
-
-    /// As always - iOS on arm uses SjLj exceptions and
-    /// _Unwind_Backtrace is even not available there. Still,
-    /// backtraces could be extracted using a backtrace function,
-    /// which thanks god is public
-    ///
-    /// As mentioned in a huge comment block above, backtrace doesn't
-    /// play well with green threads, so while it is extremely nice
-    /// and simple to use it should be used only on iOS devices as the
-    /// only viable option.
-    #[cfg(all(target_os = "ios", target_arch = "arm"))]
-    #[inline(never)]
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        use result;
-
-        extern {
-            fn backtrace(buf: *mut *mut libc::c_void,
-                         sz: libc::c_int) -> libc::c_int;
-        }
-
-        // while it doesn't requires lock for work as everything is
-        // local, it still displays much nicer backtraces when a
-        // couple of tasks panic simultaneously
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        try!(writeln!(w, "stack backtrace:"));
-        // 100 lines should be enough
-        const SIZE: uint = 100;
-        let mut buf: [*mut libc::c_void, ..SIZE] = unsafe {mem::zeroed()};
-        let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as uint};
-
-        // skipping the first one as it is write itself
-        let iter = range(1, cnt).map(|i| {
-            print(w, i as int, buf[i])
-        });
-        result::fold(iter, (), |_, _| ())
-    }
-
-    #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
-    #[inline(never)] // if we know this is a function call, we can skip it when
-                     // tracing
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        use io::IoError;
-
-        struct Context<'a> {
-            idx: int,
-            writer: &'a mut (Writer+'a),
-            last_error: Option<IoError>,
-        }
-
-        // When using libbacktrace, we use some necessary global state, so we
-        // need to prevent more than one thread from entering this block. This
-        // is semi-reasonable in terms of printing anyway, and we know that all
-        // I/O done here is blocking I/O, not green I/O, so we don't have to
-        // worry about this being a native vs green mutex.
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        try!(writeln!(w, "stack backtrace:"));
-
-        let mut cx = Context { writer: w, last_error: None, idx: 0 };
-        return match unsafe {
-            uw::_Unwind_Backtrace(trace_fn,
-                                  &mut cx as *mut Context as *mut libc::c_void)
-        } {
-            uw::_URC_NO_REASON => {
-                match cx.last_error {
-                    Some(err) => Err(err),
-                    None => Ok(())
-                }
-            }
-            _ => Ok(()),
-        };
-
-        extern fn trace_fn(ctx: *mut uw::_Unwind_Context,
-                           arg: *mut libc::c_void) -> uw::_Unwind_Reason_Code {
-            let cx: &mut Context = unsafe { mem::transmute(arg) };
-            let ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut libc::c_void };
-            // dladdr() on osx gets whiny when we use FindEnclosingFunction, and
-            // it appears to work fine without it, so we only use
-            // FindEnclosingFunction on non-osx platforms. In doing so, we get a
-            // slightly more accurate stack trace in the process.
-            //
-            // This is often because panic involves the last instruction of a
-            // function being "call std::rt::begin_unwind", with no ret
-            // instructions after it. This means that the return instruction
-            // pointer points *outside* of the calling function, and by
-            // unwinding it we go back to the original function.
-            let ip = if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
-                ip
-            } else {
-                unsafe { uw::_Unwind_FindEnclosingFunction(ip) }
-            };
-
-            // Don't print out the first few frames (they're not user frames)
-            cx.idx += 1;
-            if cx.idx <= 0 { return uw::_URC_NO_REASON }
-            // Don't print ginormous backtraces
-            if cx.idx > 100 {
-                match write!(cx.writer, " ... <frames omitted>\n") {
-                    Ok(()) => {}
-                    Err(e) => { cx.last_error = Some(e); }
-                }
-                return uw::_URC_FAILURE
-            }
-
-            // Once we hit an error, stop trying to print more frames
-            if cx.last_error.is_some() { return uw::_URC_FAILURE }
-
-            match print(cx.writer, cx.idx, ip) {
-                Ok(()) => {}
-                Err(e) => { cx.last_error = Some(e); }
-            }
-
-            // keep going
-            return uw::_URC_NO_REASON
-        }
-    }
-
-    #[cfg(any(target_os = "macos", target_os = "ios"))]
-    fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
-        use intrinsics;
-        #[repr(C)]
-        struct Dl_info {
-            dli_fname: *const libc::c_char,
-            dli_fbase: *mut libc::c_void,
-            dli_sname: *const libc::c_char,
-            dli_saddr: *mut libc::c_void,
-        }
-        extern {
-            fn dladdr(addr: *const libc::c_void,
-                      info: *mut Dl_info) -> libc::c_int;
-        }
-
-        let mut info: Dl_info = unsafe { intrinsics::init() };
-        if unsafe { dladdr(addr as *const libc::c_void, &mut info) == 0 } {
-            output(w, idx,addr, None)
-        } else {
-            output(w, idx, addr, Some(unsafe {
-                CString::new(info.dli_sname, false)
-            }))
-        }
-    }
-
-    #[cfg(not(any(target_os = "macos", target_os = "ios")))]
-    fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
-        use os;
-        use ptr;
-
-        ////////////////////////////////////////////////////////////////////////
-        // libbacktrace.h API
-        ////////////////////////////////////////////////////////////////////////
-        type backtrace_syminfo_callback =
-            extern "C" fn(data: *mut libc::c_void,
-                          pc: libc::uintptr_t,
-                          symname: *const libc::c_char,
-                          symval: libc::uintptr_t,
-                          symsize: libc::uintptr_t);
-        type backtrace_error_callback =
-            extern "C" fn(data: *mut libc::c_void,
-                          msg: *const libc::c_char,
-                          errnum: libc::c_int);
-        enum backtrace_state {}
-        #[link(name = "backtrace", kind = "static")]
-        #[cfg(not(test))]
-        extern {}
-
-        extern {
-            fn backtrace_create_state(filename: *const libc::c_char,
-                                      threaded: libc::c_int,
-                                      error: backtrace_error_callback,
-                                      data: *mut libc::c_void)
-                                            -> *mut backtrace_state;
-            fn backtrace_syminfo(state: *mut backtrace_state,
-                                 addr: libc::uintptr_t,
-                                 cb: backtrace_syminfo_callback,
-                                 error: backtrace_error_callback,
-                                 data: *mut libc::c_void) -> libc::c_int;
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // helper callbacks
-        ////////////////////////////////////////////////////////////////////////
-
-        extern fn error_cb(_data: *mut libc::c_void, _msg: *const libc::c_char,
-                           _errnum: libc::c_int) {
-            // do nothing for now
-        }
-        extern fn syminfo_cb(data: *mut libc::c_void,
-                             _pc: libc::uintptr_t,
-                             symname: *const libc::c_char,
-                             _symval: libc::uintptr_t,
-                             _symsize: libc::uintptr_t) {
-            let slot = data as *mut *const libc::c_char;
-            unsafe { *slot = symname; }
-        }
-
-        // The libbacktrace API supports creating a state, but it does not
-        // support destroying a state. I personally take this to mean that a
-        // state is meant to be created and then live forever.
-        //
-        // I would love to register an at_exit() handler which cleans up this
-        // state, but libbacktrace provides no way to do so.
-        //
-        // With these constraints, this function has a statically cached state
-        // that is calculated the first time this is requested. Remember that
-        // backtracing all happens serially (one global lock).
-        //
-        // An additionally oddity in this function is that we initialize the
-        // filename via self_exe_name() to pass to libbacktrace. It turns out
-        // that on Linux libbacktrace seamlessly gets the filename of the
-        // current executable, but this fails on freebsd. by always providing
-        // it, we make sure that libbacktrace never has a reason to not look up
-        // the symbols. The libbacktrace API also states that the filename must
-        // be in "permanent memory", so we copy it to a static and then use the
-        // static as the pointer.
-        //
-        // FIXME: We also call self_exe_name() on DragonFly BSD. I haven't
-        //        tested if this is required or not.
-        unsafe fn init_state() -> *mut backtrace_state {
-            static mut STATE: *mut backtrace_state = 0 as *mut backtrace_state;
-            static mut LAST_FILENAME: [libc::c_char, ..256] = [0, ..256];
-            if !STATE.is_null() { return STATE }
-            let selfname = if cfg!(target_os = "freebsd") ||
-                              cfg!(target_os = "dragonfly") {
-                os::self_exe_name()
-            } else {
-                None
-            };
-            let filename = match selfname {
-                Some(path) => {
-                    let bytes = path.as_vec();
-                    if bytes.len() < LAST_FILENAME.len() {
-                        let i = bytes.iter();
-                        for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
-                            *slot = *val as libc::c_char;
-                        }
-                        LAST_FILENAME.as_ptr()
-                    } else {
-                        ptr::null()
-                    }
-                }
-                None => ptr::null(),
-            };
-            STATE = backtrace_create_state(filename, 0, error_cb,
-                                           ptr::null_mut());
-            return STATE
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // translation
-        ////////////////////////////////////////////////////////////////////////
-
-        // backtrace errors are currently swept under the rug, only I/O
-        // errors are reported
-        let state = unsafe { init_state() };
-        if state.is_null() {
-            return output(w, idx, addr, None)
-        }
-        let mut data = 0 as *const libc::c_char;
-        let data_addr = &mut data as *mut *const libc::c_char;
-        let ret = unsafe {
-            backtrace_syminfo(state, addr as libc::uintptr_t,
-                              syminfo_cb, error_cb,
-                              data_addr as *mut libc::c_void)
-        };
-        if ret == 0 || data.is_null() {
-            output(w, idx, addr, None)
-        } else {
-            output(w, idx, addr, Some(unsafe { CString::new(data, false) }))
-        }
-    }
-
-    // Finally, after all that work above, we can emit a symbol.
-    fn output(w: &mut Writer, idx: int, addr: *mut libc::c_void,
-              s: Option<CString>) -> IoResult<()> {
-        try!(write!(w, "  {:2}: {:2$} - ", idx, addr, super::HEX_WIDTH));
-        match s.as_ref().and_then(|c| c.as_str()) {
-            Some(string) => try!(super::demangle(w, string)),
-            None => try!(write!(w, "<unknown>")),
-        }
-        w.write(&['\n' as u8])
-    }
-
-    /// Unwind library interface used for backtraces
-    ///
-    /// Note that the native libraries come from librustrt, not this
-    /// module.
-    /// Note that dead code is allowed as here are just bindings
-    /// iOS doesn't use all of them it but adding more
-    /// platform-specific configs pollutes the code too much
-    #[allow(non_camel_case_types)]
-    #[allow(non_snake_case)]
-    #[allow(dead_code)]
-    mod uw {
-        pub use self::_Unwind_Reason_Code::*;
-
-        use libc;
-
-        #[repr(C)]
-        pub enum _Unwind_Reason_Code {
-            _URC_NO_REASON = 0,
-            _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
-            _URC_FATAL_PHASE2_ERROR = 2,
-            _URC_FATAL_PHASE1_ERROR = 3,
-            _URC_NORMAL_STOP = 4,
-            _URC_END_OF_STACK = 5,
-            _URC_HANDLER_FOUND = 6,
-            _URC_INSTALL_CONTEXT = 7,
-            _URC_CONTINUE_UNWIND = 8,
-            _URC_FAILURE = 9, // used only by ARM EABI
-        }
-
-        pub enum _Unwind_Context {}
-
-        pub type _Unwind_Trace_Fn =
-                extern fn(ctx: *mut _Unwind_Context,
-                          arg: *mut libc::c_void) -> _Unwind_Reason_Code;
-
-        extern {
-            // No native _Unwind_Backtrace on iOS
-            #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
-            pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
-                                     trace_argument: *mut libc::c_void)
-                        -> _Unwind_Reason_Code;
-
-            #[cfg(all(not(target_os = "android"),
-                      not(all(target_os = "linux", target_arch = "arm"))))]
-            pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
-
-            #[cfg(all(not(target_os = "android"),
-                      not(all(target_os = "linux", target_arch = "arm"))))]
-            pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
-                -> *mut libc::c_void;
-        }
-
-        // On android, the function _Unwind_GetIP is a macro, and this is the
-        // expansion of the macro. This is all copy/pasted directly from the
-        // header file with the definition of _Unwind_GetIP.
-        #[cfg(any(target_os = "android",
-                  all(target_os = "linux", target_arch = "arm")))]
-        pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
-            #[repr(C)]
-            enum _Unwind_VRS_Result {
-                _UVRSR_OK = 0,
-                _UVRSR_NOT_IMPLEMENTED = 1,
-                _UVRSR_FAILED = 2,
-            }
-            #[repr(C)]
-            enum _Unwind_VRS_RegClass {
-                _UVRSC_CORE = 0,
-                _UVRSC_VFP = 1,
-                _UVRSC_FPA = 2,
-                _UVRSC_WMMXD = 3,
-                _UVRSC_WMMXC = 4,
-            }
-            #[repr(C)]
-            enum _Unwind_VRS_DataRepresentation {
-                _UVRSD_UINT32 = 0,
-                _UVRSD_VFPX = 1,
-                _UVRSD_FPAX = 2,
-                _UVRSD_UINT64 = 3,
-                _UVRSD_FLOAT = 4,
-                _UVRSD_DOUBLE = 5,
-            }
-
-            type _Unwind_Word = libc::c_uint;
-            extern {
-                fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
-                                   klass: _Unwind_VRS_RegClass,
-                                   word: _Unwind_Word,
-                                   repr: _Unwind_VRS_DataRepresentation,
-                                   data: *mut libc::c_void)
-                    -> _Unwind_VRS_Result;
-            }
-
-            let mut val: _Unwind_Word = 0;
-            let ptr = &mut val as *mut _Unwind_Word;
-            let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
-                                    _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
-                                    ptr as *mut libc::c_void);
-            (val & !1) as libc::uintptr_t
-        }
-
-        // This function also doesn't exist on Android or ARM/Linux, so make it
-        // a no-op
-        #[cfg(any(target_os = "android",
-                  all(target_os = "linux", target_arch = "arm")))]
-        pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
-            -> *mut libc::c_void
-        {
-            pc
-        }
-    }
-}
-
-/// As always, windows has something very different than unix, we mainly want
-/// to avoid having to depend too much on libunwind for windows.
-///
-/// If you google around, you'll find a fair bit of references to built-in
-/// functions to get backtraces on windows. It turns out that most of these are
-/// in an external library called dbghelp. I was unable to find this library
-/// via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
-/// of it.
-///
-/// You'll also find that there's a function called CaptureStackBackTrace
-/// mentioned frequently (which is also easy to use), but sadly I didn't have a
-/// copy of that function in my mingw install (maybe it was broken?). Instead,
-/// this takes the route of using StackWalk64 in order to walk the stack.
-#[cfg(windows)]
-#[allow(dead_code, non_snake_case)]
-mod imp {
-    use c_str::CString;
-    use intrinsics;
-    use io::{IoResult, Writer};
-    use libc;
-    use mem;
-    use ops::Drop;
-    use option::Option::{Some, None};
-    use path::Path;
-    use result::Result::{Ok, Err};
-    use sync::{StaticMutex, MUTEX_INIT};
-    use slice::SliceExt;
-    use str::StrPrelude;
-    use dynamic_lib::DynamicLibrary;
-
-    #[allow(non_snake_case)]
-    extern "system" {
-        fn GetCurrentProcess() -> libc::HANDLE;
-        fn GetCurrentThread() -> libc::HANDLE;
-        fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
-    }
-
-    type SymFromAddrFn =
-        extern "system" fn(libc::HANDLE, u64, *mut u64,
-                           *mut SYMBOL_INFO) -> libc::BOOL;
-    type SymInitializeFn =
-        extern "system" fn(libc::HANDLE, *mut libc::c_void,
-                           libc::BOOL) -> libc::BOOL;
-    type SymCleanupFn =
-        extern "system" fn(libc::HANDLE) -> libc::BOOL;
-
-    type StackWalk64Fn =
-        extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
-                           *mut STACKFRAME64, *mut arch::CONTEXT,
-                           *mut libc::c_void, *mut libc::c_void,
-                           *mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
-
-    const MAX_SYM_NAME: uint = 2000;
-    const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
-    const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
-    const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
-
-    #[repr(C)]
-    struct SYMBOL_INFO {
-        SizeOfStruct: libc::c_ulong,
-        TypeIndex: libc::c_ulong,
-        Reserved: [u64, ..2],
-        Index: libc::c_ulong,
-        Size: libc::c_ulong,
-        ModBase: u64,
-        Flags: libc::c_ulong,
-        Value: u64,
-        Address: u64,
-        Register: libc::c_ulong,
-        Scope: libc::c_ulong,
-        Tag: libc::c_ulong,
-        NameLen: libc::c_ulong,
-        MaxNameLen: libc::c_ulong,
-        // note that windows has this as 1, but it basically just means that
-        // the name is inline at the end of the struct. For us, we just bump
-        // the struct size up to MAX_SYM_NAME.
-        Name: [libc::c_char, ..MAX_SYM_NAME],
-    }
-
-
-    #[repr(C)]
-    enum ADDRESS_MODE {
-        AddrMode1616,
-        AddrMode1632,
-        AddrModeReal,
-        AddrModeFlat,
-    }
-
-    struct ADDRESS64 {
-        Offset: u64,
-        Segment: u16,
-        Mode: ADDRESS_MODE,
-    }
-
-    struct STACKFRAME64 {
-        AddrPC: ADDRESS64,
-        AddrReturn: ADDRESS64,
-        AddrFrame: ADDRESS64,
-        AddrStack: ADDRESS64,
-        AddrBStore: ADDRESS64,
-        FuncTableEntry: *mut libc::c_void,
-        Params: [u64, ..4],
-        Far: libc::BOOL,
-        Virtual: libc::BOOL,
-        Reserved: [u64, ..3],
-        KdHelp: KDHELP64,
-    }
-
-    struct KDHELP64 {
-        Thread: u64,
-        ThCallbackStack: libc::DWORD,
-        ThCallbackBStore: libc::DWORD,
-        NextCallback: libc::DWORD,
-        FramePointer: libc::DWORD,
-        KiCallUserMode: u64,
-        KeUserCallbackDispatcher: u64,
-        SystemRangeStart: u64,
-        KiUserExceptionDispatcher: u64,
-        StackBase: u64,
-        StackLimit: u64,
-        Reserved: [u64, ..5],
-    }
-
-    #[cfg(target_arch = "x86")]
-    mod arch {
-        use libc;
-
-        const MAXIMUM_SUPPORTED_EXTENSION: uint = 512;
-
-        #[repr(C)]
-        pub struct CONTEXT {
-            ContextFlags: libc::DWORD,
-            Dr0: libc::DWORD,
-            Dr1: libc::DWORD,
-            Dr2: libc::DWORD,
-            Dr3: libc::DWORD,
-            Dr6: libc::DWORD,
-            Dr7: libc::DWORD,
-            FloatSave: FLOATING_SAVE_AREA,
-            SegGs: libc::DWORD,
-            SegFs: libc::DWORD,
-            SegEs: libc::DWORD,
-            SegDs: libc::DWORD,
-            Edi: libc::DWORD,
-            Esi: libc::DWORD,
-            Ebx: libc::DWORD,
-            Edx: libc::DWORD,
-            Ecx: libc::DWORD,
-            Eax: libc::DWORD,
-            Ebp: libc::DWORD,
-            Eip: libc::DWORD,
-            SegCs: libc::DWORD,
-            EFlags: libc::DWORD,
-            Esp: libc::DWORD,
-            SegSs: libc::DWORD,
-            ExtendedRegisters: [u8, ..MAXIMUM_SUPPORTED_EXTENSION],
-        }
-
-        #[repr(C)]
-        pub struct FLOATING_SAVE_AREA {
-            ControlWord: libc::DWORD,
-            StatusWord: libc::DWORD,
-            TagWord: libc::DWORD,
-            ErrorOffset: libc::DWORD,
-            ErrorSelector: libc::DWORD,
-            DataOffset: libc::DWORD,
-            DataSelector: libc::DWORD,
-            RegisterArea: [u8, ..80],
-            Cr0NpxState: libc::DWORD,
-        }
-
-        pub fn init_frame(frame: &mut super::STACKFRAME64,
-                          ctx: &CONTEXT) -> libc::DWORD {
-            frame.AddrPC.Offset = ctx.Eip as u64;
-            frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrStack.Offset = ctx.Esp as u64;
-            frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrFrame.Offset = ctx.Ebp as u64;
-            frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            super::IMAGE_FILE_MACHINE_I386
-        }
-    }
-
-    #[cfg(target_arch = "x86_64")]
-    mod arch {
-        use libc::{c_longlong, c_ulonglong};
-        use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
-        use simd;
-
-        #[repr(C)]
-        pub struct CONTEXT {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            P1Home: DWORDLONG,
-            P2Home: DWORDLONG,
-            P3Home: DWORDLONG,
-            P4Home: DWORDLONG,
-            P5Home: DWORDLONG,
-            P6Home: DWORDLONG,
-
-            ContextFlags: DWORD,
-            MxCsr: DWORD,
-
-            SegCs: WORD,
-            SegDs: WORD,
-            SegEs: WORD,
-            SegFs: WORD,
-            SegGs: WORD,
-            SegSs: WORD,
-            EFlags: DWORD,
-
-            Dr0: DWORDLONG,
-            Dr1: DWORDLONG,
-            Dr2: DWORDLONG,
-            Dr3: DWORDLONG,
-            Dr6: DWORDLONG,
-            Dr7: DWORDLONG,
-
-            Rax: DWORDLONG,
-            Rcx: DWORDLONG,
-            Rdx: DWORDLONG,
-            Rbx: DWORDLONG,
-            Rsp: DWORDLONG,
-            Rbp: DWORDLONG,
-            Rsi: DWORDLONG,
-            Rdi: DWORDLONG,
-            R8:  DWORDLONG,
-            R9:  DWORDLONG,
-            R10: DWORDLONG,
-            R11: DWORDLONG,
-            R12: DWORDLONG,
-            R13: DWORDLONG,
-            R14: DWORDLONG,
-            R15: DWORDLONG,
-
-            Rip: DWORDLONG,
-
-            FltSave: FLOATING_SAVE_AREA,
-
-            VectorRegister: [M128A, .. 26],
-            VectorControl: DWORDLONG,
-
-            DebugControl: DWORDLONG,
-            LastBranchToRip: DWORDLONG,
-            LastBranchFromRip: DWORDLONG,
-            LastExceptionToRip: DWORDLONG,
-            LastExceptionFromRip: DWORDLONG,
-        }
-
-        #[repr(C)]
-        pub struct M128A {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            Low:  c_ulonglong,
-            High: c_longlong
-        }
-
-        #[repr(C)]
-        pub struct FLOATING_SAVE_AREA {
-            _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
-            _Dummy: [u8, ..512] // FIXME: Fill this out
-        }
-
-        pub fn init_frame(frame: &mut super::STACKFRAME64,
-                          ctx: &CONTEXT) -> DWORD {
-            frame.AddrPC.Offset = ctx.Rip as u64;
-            frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrStack.Offset = ctx.Rsp as u64;
-            frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            frame.AddrFrame.Offset = ctx.Rbp as u64;
-            frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
-            super::IMAGE_FILE_MACHINE_AMD64
-        }
-    }
-
-    #[repr(C)]
-    struct Cleanup {
-        handle: libc::HANDLE,
-        SymCleanup: SymCleanupFn,
-    }
-
-    impl Drop for Cleanup {
-        fn drop(&mut self) { (self.SymCleanup)(self.handle); }
-    }
-
-    pub fn write(w: &mut Writer) -> IoResult<()> {
-        // According to windows documentation, all dbghelp functions are
-        // single-threaded.
-        static LOCK: StaticMutex = MUTEX_INIT;
-        let _g = LOCK.lock();
-
-        // Open up dbghelp.dll, we don't link to it explicitly because it can't
-        // always be found. Additionally, it's nice having fewer dependencies.
-        let path = Path::new("dbghelp.dll");
-        let lib = match DynamicLibrary::open(Some(&path)) {
-            Ok(lib) => lib,
-            Err(..) => return Ok(()),
-        };
-
-        macro_rules! sym { ($e:expr, $t:ident) => (unsafe {
-            match lib.symbol($e) {
-                Ok(f) => mem::transmute::<*mut u8, $t>(f),
-                Err(..) => return Ok(())
-            }
-        }) }
-
-        // Fetch the symbols necessary from dbghelp.dll
-        let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
-        let SymInitialize = sym!("SymInitialize", SymInitializeFn);
-        let SymCleanup = sym!("SymCleanup", SymCleanupFn);
-        let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
-
-        // Allocate necessary structures for doing the stack walk
-        let process = unsafe { GetCurrentProcess() };
-        let thread = unsafe { GetCurrentThread() };
-        let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
-        unsafe { RtlCaptureContext(&mut context); }
-        let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
-        let image = arch::init_frame(&mut frame, &context);
-
-        // Initialize this process's symbols
-        let ret = SymInitialize(process, 0 as *mut libc::c_void, libc::TRUE);
-        if ret != libc::TRUE { return Ok(()) }
-        let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
-
-        // And now that we're done with all the setup, do the stack walking!
-        let mut i = 0i;
-        try!(write!(w, "stack backtrace:\n"));
-        while StackWalk64(image, process, thread, &mut frame, &mut context,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void,
-                          0 as *mut libc::c_void) == libc::TRUE{
-            let addr = frame.AddrPC.Offset;
-            if addr == frame.AddrReturn.Offset || addr == 0 ||
-               frame.AddrReturn.Offset == 0 { break }
-
-            i += 1;
-            try!(write!(w, "  {:2}: {:#2$x}", i, addr, super::HEX_WIDTH));
-            let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
-            info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
-            // the struct size in C.  the value is different to
-            // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
-            // due to struct alignment.
-            info.SizeOfStruct = 88;
-
-            let mut displacement = 0u64;
-            let ret = SymFromAddr(process, addr as u64, &mut displacement,
-                                  &mut info);
-
-            if ret == libc::TRUE {
-                try!(write!(w, " - "));
-                let cstr = unsafe { CString::new(info.Name.as_ptr(), false) };
-                let bytes = cstr.as_bytes();
-                match cstr.as_str() {
-                    Some(s) => try!(super::demangle(w, s)),
-                    None => try!(w.write(bytes[..bytes.len()-1])),
-                }
-                if displacement != 0 {
-                    try!(write!(w, "+{:#x}", displacement));
-                }
-            }
-            try!(w.write(&['\n' as u8]));
-        }
-
-        Ok(())
-    }
-}
-
 #[cfg(test)]
 mod test {
     use prelude::*;
-    use io::MemWriter;
-
+    use sys_common;
     macro_rules! t { ($a:expr, $b:expr) => ({
         let mut m = Vec::new();
-        super::demangle(&mut m, $a).unwrap();
+        sys_common::backtrace::demangle(&mut m, $a).unwrap();
         assert_eq!(String::from_utf8(m).unwrap(), $b);
     }) }
 
diff --git a/src/libstd/rt/bookkeeping.rs b/src/libstd/rt/bookkeeping.rs
new file mode 100644
index 00000000000..aca520fc088
--- /dev/null
+++ b/src/libstd/rt/bookkeeping.rs
@@ -0,0 +1,61 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Task bookkeeping
+//!
+//! This module keeps track of the number of running tasks so that entry points
+//! with libnative know when it's possible to exit the program (once all tasks
+//! have exited).
+//!
+//! The green counterpart for this is bookkeeping on sched pools, and it's up to
+//! each respective runtime to make sure that they call increment() and
+//! decrement() manually.
+
+use sync::atomic;
+use ops::Drop;
+
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+static TASK_COUNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static TASK_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+
+#[allow(missing_copy_implementations)]
+pub struct Token { _private: () }
+
+impl Drop for Token {
+    fn drop(&mut self) { decrement() }
+}
+
+/// Increment the number of live tasks, returning a token which will decrement
+/// the count when dropped.
+pub fn increment() -> Token {
+    let _ = TASK_COUNT.fetch_add(1, atomic::SeqCst);
+    Token { _private: () }
+}
+
+pub fn decrement() {
+    unsafe {
+        if TASK_COUNT.fetch_sub(1, atomic::SeqCst) == 1 {
+            let guard = TASK_LOCK.lock();
+            guard.signal();
+        }
+    }
+}
+
+/// Waits for all other native tasks in the system to exit. This is only used by
+/// the entry points of native programs
+pub fn wait_for_other_tasks() {
+    unsafe {
+        let guard = TASK_LOCK.lock();
+        while TASK_COUNT.load(atomic::SeqCst) > 0 {
+            guard.wait();
+        }
+    }
+}
diff --git a/src/libstd/rt/exclusive.rs b/src/libstd/rt/exclusive.rs
new file mode 100644
index 00000000000..1d3082d1b4c
--- /dev/null
+++ b/src/libstd/rt/exclusive.rs
@@ -0,0 +1,115 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use cell::UnsafeCell;
+use rt::mutex;
+
+/// An OS mutex over some data.
+///
+/// This is not a safe primitive to use, it is unaware of the libgreen
+/// scheduler, as well as being easily susceptible to misuse due to the usage of
+/// the inner NativeMutex.
+///
+/// > **Note**: This type is not recommended for general use. The mutex provided
+/// >           as part of `libsync` should almost always be favored.
+pub struct Exclusive<T> {
+    lock: mutex::NativeMutex,
+    data: UnsafeCell<T>,
+}
+
+/// An RAII guard returned via `lock`
+pub struct ExclusiveGuard<'a, T:'a> {
+    // FIXME #12808: strange name to try to avoid interfering with
+    // field accesses of the contained type via Deref
+    _data: &'a mut T,
+    _guard: mutex::LockGuard<'a>,
+}
+
+impl<T: Send> Exclusive<T> {
+    /// Creates a new `Exclusive` which will protect the data provided.
+    pub fn new(user_data: T) -> Exclusive<T> {
+        Exclusive {
+            lock: unsafe { mutex::NativeMutex::new() },
+            data: UnsafeCell::new(user_data),
+        }
+    }
+
+    /// Acquires this lock, returning a guard which the data is accessed through
+    /// and from which that lock will be unlocked.
+    ///
+    /// This method is unsafe due to many of the same reasons that the
+    /// NativeMutex itself is unsafe.
+    pub unsafe fn lock<'a>(&'a self) -> ExclusiveGuard<'a, T> {
+        let guard = self.lock.lock();
+        let data = &mut *self.data.get();
+
+        ExclusiveGuard {
+            _data: data,
+            _guard: guard,
+        }
+    }
+}
+
+impl<'a, T: Send> ExclusiveGuard<'a, T> {
+    // The unsafety here should be ok because our loan guarantees that the lock
+    // itself is not moving
+    pub fn signal(&self) {
+        unsafe { self._guard.signal() }
+    }
+    pub fn wait(&self) {
+        unsafe { self._guard.wait() }
+    }
+}
+
+impl<'a, T: Send> Deref<T> for ExclusiveGuard<'a, T> {
+    fn deref(&self) -> &T { &*self._data }
+}
+impl<'a, T: Send> DerefMut<T> for ExclusiveGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T { &mut *self._data }
+}
+
+#[cfg(test)]
+mod tests {
+    use prelude::*;
+    use sync::Arc;
+    use super::Exclusive;
+    use task;
+
+    #[test]
+    fn exclusive_new_arc() {
+        unsafe {
+            let mut futures = Vec::new();
+
+            let num_tasks = 10;
+            let count = 10;
+
+            let total = Arc::new(Exclusive::new(box 0));
+
+            for _ in range(0u, num_tasks) {
+                let total = total.clone();
+                let (tx, rx) = channel();
+                futures.push(rx);
+
+                task::spawn(move || {
+                    for _ in range(0u, count) {
+                        **total.lock() += 1;
+                    }
+                    tx.send(());
+                });
+            };
+
+            for f in futures.iter_mut() { f.recv() }
+
+            assert_eq!(**total.lock(), num_tasks * count);
+        }
+    }
+}
diff --git a/src/libstd/rt/libunwind.rs b/src/libstd/rt/libunwind.rs
new file mode 100644
index 00000000000..2feea7fa0a4
--- /dev/null
+++ b/src/libstd/rt/libunwind.rs
@@ -0,0 +1,128 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unwind library interface
+
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+#![allow(dead_code)] // these are just bindings
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+pub use self::_Unwind_Action::*;
+#[cfg(target_arch = "arm")]
+pub use self::_Unwind_State::*;
+pub use self::_Unwind_Reason_Code::*;
+
+use libc;
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+#[repr(C)]
+#[deriving(Copy)]
+pub enum _Unwind_Action {
+    _UA_SEARCH_PHASE = 1,
+    _UA_CLEANUP_PHASE = 2,
+    _UA_HANDLER_FRAME = 4,
+    _UA_FORCE_UNWIND = 8,
+    _UA_END_OF_STACK = 16,
+}
+
+#[cfg(target_arch = "arm")]
+#[repr(C)]
+pub enum _Unwind_State {
+    _US_VIRTUAL_UNWIND_FRAME = 0,
+    _US_UNWIND_FRAME_STARTING = 1,
+    _US_UNWIND_FRAME_RESUME = 2,
+    _US_ACTION_MASK = 3,
+    _US_FORCE_UNWIND = 8,
+    _US_END_OF_STACK = 16
+}
+
+#[repr(C)]
+pub enum _Unwind_Reason_Code {
+    _URC_NO_REASON = 0,
+    _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+    _URC_FATAL_PHASE2_ERROR = 2,
+    _URC_FATAL_PHASE1_ERROR = 3,
+    _URC_NORMAL_STOP = 4,
+    _URC_END_OF_STACK = 5,
+    _URC_HANDLER_FOUND = 6,
+    _URC_INSTALL_CONTEXT = 7,
+    _URC_CONTINUE_UNWIND = 8,
+    _URC_FAILURE = 9, // used only by ARM EABI
+}
+
+pub type _Unwind_Exception_Class = u64;
+
+pub type _Unwind_Word = libc::uintptr_t;
+
+#[cfg(target_arch = "x86")]
+pub const unwinder_private_data_size: uint = 5;
+
+#[cfg(target_arch = "x86_64")]
+pub const unwinder_private_data_size: uint = 6;
+
+#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
+pub const unwinder_private_data_size: uint = 20;
+
+#[cfg(all(target_arch = "arm", target_os = "ios"))]
+pub const unwinder_private_data_size: uint = 5;
+
+#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+pub const unwinder_private_data_size: uint = 2;
+
+#[repr(C)]
+pub struct _Unwind_Exception {
+    pub exception_class: _Unwind_Exception_Class,
+    pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
+    pub private: [_Unwind_Word, ..unwinder_private_data_size],
+}
+
+pub enum _Unwind_Context {}
+
+pub type _Unwind_Exception_Cleanup_Fn =
+        extern "C" fn(unwind_code: _Unwind_Reason_Code,
+                      exception: *mut _Unwind_Exception);
+
+#[cfg(any(target_os = "linux", target_os = "freebsd"))]
+#[link(name = "gcc_s")]
+extern {}
+
+#[cfg(target_os = "android")]
+#[link(name = "gcc")]
+extern {}
+
+#[cfg(target_os = "dragonfly")]
+#[link(name = "gcc_pic")]
+extern {}
+
+extern "C" {
+    // iOS on armv7 uses SjLj exceptions and requires to link
+    // against corresponding routine (..._SjLj_...)
+    #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+    pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception)
+                                  -> _Unwind_Reason_Code;
+
+    #[cfg(all(target_os = "ios", target_arch = "arm"))]
+    fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception)
+                                   -> _Unwind_Reason_Code;
+
+    pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
+}
+
+// ... and now we just providing access to SjLj counterspart
+// through a standard name to hide those details from others
+// (see also comment above regarding _Unwind_RaiseException)
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+#[inline(always)]
+pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception)
+                                     -> _Unwind_Reason_Code {
+    _Unwind_SjLj_RaiseException(exc)
+}
diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs
new file mode 100644
index 00000000000..089960a6bc8
--- /dev/null
+++ b/src/libstd/rt/local.rs
@@ -0,0 +1,131 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use rt::local_ptr;
+use rt::task::Task;
+
+/// Encapsulates some task-local data.
+pub trait Local<Borrowed> {
+    fn put(value: Box<Self>);
+    fn take() -> Box<Self>;
+    fn try_take() -> Option<Box<Self>>;
+    fn exists(unused_value: Option<Self>) -> bool;
+    fn borrow(unused_value: Option<Self>) -> Borrowed;
+    unsafe fn unsafe_take() -> Box<Self>;
+    unsafe fn unsafe_borrow() -> *mut Self;
+    unsafe fn try_unsafe_borrow() -> Option<*mut Self>;
+}
+
+impl Local<local_ptr::Borrowed<Task>> for Task {
+    #[inline]
+    fn put(value: Box<Task>) { unsafe { local_ptr::put(value) } }
+    #[inline]
+    fn take() -> Box<Task> { unsafe { local_ptr::take() } }
+    #[inline]
+    fn try_take() -> Option<Box<Task>> { unsafe { local_ptr::try_take() } }
+    fn exists(_: Option<Task>) -> bool { local_ptr::exists() }
+    #[inline]
+    fn borrow(_: Option<Task>) -> local_ptr::Borrowed<Task> {
+        unsafe {
+            local_ptr::borrow::<Task>()
+        }
+    }
+    #[inline]
+    unsafe fn unsafe_take() -> Box<Task> { local_ptr::unsafe_take() }
+    #[inline]
+    unsafe fn unsafe_borrow() -> *mut Task { local_ptr::unsafe_borrow() }
+    #[inline]
+    unsafe fn try_unsafe_borrow() -> Option<*mut Task> {
+        local_ptr::try_unsafe_borrow()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+    use super::*;
+    use super::super::thread::Thread;
+    use super::super::task::Task;
+
+    #[test]
+    fn thread_local_task_smoke_test() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn thread_local_task_two_instances() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+            let task = box Task::new(None, None);
+            Local::put(task);
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn borrow_smoke_test() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            unsafe {
+                let _task: *mut Task = Local::unsafe_borrow();
+            }
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn borrow_with_return() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            {
+                let _ = Local::borrow(None::<Task>);
+            }
+
+            let task: Box<Task> = Local::take();
+            cleanup_task(task);
+        }).join();
+    }
+
+    #[test]
+    fn try_take() {
+        Thread::start(move|| {
+            let task = box Task::new(None, None);
+            Local::put(task);
+
+            let t: Box<Task> = Local::try_take().unwrap();
+            let u: Option<Box<Task>> = Local::try_take();
+            assert!(u.is_none());
+
+            cleanup_task(t);
+        }).join();
+    }
+
+    fn cleanup_task(t: Box<Task>) {
+        t.drop();
+    }
+
+}
diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs
new file mode 100644
index 00000000000..a87bc3d2766
--- /dev/null
+++ b/src/libstd/rt/local_ptr.rs
@@ -0,0 +1,404 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Access to a single thread-local pointer.
+//!
+//! The runtime will use this for storing Box<Task>.
+//!
+//! FIXME: Add runtime checks for usage of inconsistent pointer types
+//! and for overwriting an existing pointer.
+
+#![allow(dead_code)]
+
+use core::prelude::*;
+
+use mem;
+use boxed::Box;
+
+#[cfg(any(windows, // mingw-w32 doesn't like thread_local things
+          target_os = "android", // see #10686
+          target_os = "ios"))]
+pub use self::native::{init, cleanup, put, take, try_take, unsafe_take, exists,
+                       unsafe_borrow, try_unsafe_borrow};
+
+#[cfg(not(any(windows, target_os = "android", target_os = "ios")))]
+pub use self::compiled::{init, cleanup, put, take, try_take, unsafe_take, exists,
+                         unsafe_borrow, try_unsafe_borrow};
+
+/// Encapsulates a borrowed value. When this value goes out of scope, the
+/// pointer is returned.
+pub struct Borrowed<T> {
+    val: *const (),
+}
+
+#[unsafe_destructor]
+impl<T> Drop for Borrowed<T> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.val.is_null() {
+                rtabort!("Aiee, returning null borrowed object!");
+            }
+            let val: Box<T> = mem::transmute(self.val);
+            put::<T>(val);
+            rtassert!(exists());
+        }
+    }
+}
+
+impl<T> Deref<T> for Borrowed<T> {
+    fn deref<'a>(&'a self) -> &'a T {
+        unsafe { &*(self.val as *const T) }
+    }
+}
+
+impl<T> DerefMut<T> for Borrowed<T> {
+    fn deref_mut<'a>(&'a mut self) -> &'a mut T {
+        unsafe { &mut *(self.val as *mut T) }
+    }
+}
+
+/// Borrow the thread-local value from thread-local storage.
+/// While the value is borrowed it is not available in TLS.
+///
+/// # Safety note
+///
+/// Does not validate the pointer type.
+#[inline]
+pub unsafe fn borrow<T>() -> Borrowed<T> {
+    let val: *const () = mem::transmute(take::<T>());
+    Borrowed {
+        val: val,
+    }
+}
+
+/// Compiled implementation of accessing the runtime local pointer. This is
+/// implemented using LLVM's thread_local attribute which isn't necessarily
+/// working on all platforms. This implementation is faster, however, so we use
+/// it wherever possible.
+#[cfg(not(any(windows, target_os = "android", target_os = "ios")))]
+pub mod compiled {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use mem;
+
+    #[cfg(test)]
+    pub use realstd::rt::shouldnt_be_public::RT_TLS_PTR;
+
+    #[cfg(not(test))]
+    #[thread_local]
+    pub static mut RT_TLS_PTR: *mut u8 = 0 as *mut u8;
+
+    pub fn init() {}
+
+    pub unsafe fn cleanup() {}
+
+    // Rationale for all of these functions being inline(never)
+    //
+    // The #[thread_local] annotation gets propagated all the way through to
+    // LLVM, meaning the global is specially treated by LLVM to lower it to an
+    // efficient sequence of instructions. This also involves dealing with fun
+    // stuff in object files and whatnot. Regardless, it turns out this causes
+    // trouble with green threads and lots of optimizations turned on. The
+    // following case study was done on Linux x86_64, but I would imagine that
+    // other platforms are similar.
+    //
+    // On Linux, the instruction sequence for loading the tls pointer global
+    // looks like:
+    //
+    //      mov %fs:0x0, %rax
+    //      mov -0x8(%rax), %rbx
+    //
+    // This code leads me to believe that (%fs:0x0) is a table, and then the
+    // table contains the TLS values for the process. Hence, the slot at offset
+    // -0x8 is the task TLS pointer. This leads us to the conclusion that this
+    // table is the actual thread local part of each thread. The kernel sets up
+    // the fs segment selector to point at the right region of memory for each
+    // thread.
+    //
+    // Optimizations lead me to believe that this code is lowered to these
+    // instructions in the LLVM codegen passes, because you'll see code like
+    // this when everything is optimized:
+    //
+    //      mov %fs:0x0, %r14
+    //      mov -0x8(%r14), %rbx
+    //      // do something with %rbx, the rust Task pointer
+    //
+    //      ... // <- do more things
+    //
+    //      mov -0x8(%r14), %rbx
+    //      // do something else with %rbx
+    //
+    // Note that the optimization done here is that the first load is not
+    // duplicated during the lower instructions. This means that the %fs:0x0
+    // memory location is only dereferenced once.
+    //
+    // Normally, this is actually a good thing! With green threads, however,
+    // it's very possible for the code labeled "do more things" to context
+    // switch to another thread. If this happens, then we *must* re-load %fs:0x0
+    // because it's changed (we're on a different thread). If we don't re-load
+    // the table location, then we'll be reading the original thread's TLS
+    // values, not our thread's TLS values.
+    //
+    // Hence, we never inline these functions. By never inlining, we're
+    // guaranteed that loading the table is a local decision which is forced to
+    // *always* happen.
+
+    /// Give a pointer to thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn put<T>(sched: Box<T>) {
+        RT_TLS_PTR = mem::transmute(sched)
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn take<T>() -> Box<T> {
+        let ptr = RT_TLS_PTR;
+        rtassert!(!ptr.is_null());
+        let ptr: Box<T> = mem::transmute(ptr);
+        // can't use `as`, due to type not matching with `cfg(test)`
+        RT_TLS_PTR = mem::transmute(0u);
+        ptr
+    }
+
+    /// Optionally take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline(never)] // see comments above
+    pub unsafe fn try_take<T>() -> Option<Box<T>> {
+        let ptr = RT_TLS_PTR;
+        if ptr.is_null() {
+            None
+        } else {
+            let ptr: Box<T> = mem::transmute(ptr);
+            // can't use `as`, due to type not matching with `cfg(test)`
+            RT_TLS_PTR = mem::transmute(0u);
+            Some(ptr)
+        }
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    /// Leaves the old pointer in TLS for speed.
+    #[inline(never)] // see comments above
+    pub unsafe fn unsafe_take<T>() -> Box<T> {
+        mem::transmute(RT_TLS_PTR)
+    }
+
+    /// Check whether there is a thread-local pointer installed.
+    #[inline(never)] // see comments above
+    pub fn exists() -> bool {
+        unsafe {
+            RT_TLS_PTR.is_not_null()
+        }
+    }
+
+    #[inline(never)] // see comments above
+    pub unsafe fn unsafe_borrow<T>() -> *mut T {
+        if RT_TLS_PTR.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        RT_TLS_PTR as *mut T
+    }
+
+    #[inline(never)] // see comments above
+    pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
+        if RT_TLS_PTR.is_null() {
+            None
+        } else {
+            Some(RT_TLS_PTR as *mut T)
+        }
+    }
+}
+
+/// Native implementation of having the runtime thread-local pointer. This
+/// implementation uses the `thread_local_storage` module to provide a
+/// thread-local value.
+pub mod native {
+    use core::prelude::*;
+
+    use boxed::Box;
+    use mem;
+    use ptr;
+    use rt::thread_local_storage as tls;
+
+    static mut RT_TLS_KEY: tls::Key = -1;
+
+    /// Initialize the TLS key. Other ops will fail if this isn't executed
+    /// first.
+    pub fn init() {
+        unsafe {
+            tls::create(&mut RT_TLS_KEY);
+        }
+    }
+
+    pub unsafe fn cleanup() {
+        rtassert!(RT_TLS_KEY != -1);
+        tls::destroy(RT_TLS_KEY);
+    }
+
+    /// Give a pointer to thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn put<T>(sched: Box<T>) {
+        let key = tls_key();
+        let void_ptr: *mut u8 = mem::transmute(sched);
+        tls::set(key, void_ptr);
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn take<T>() -> Box<T> {
+        let key = tls_key();
+        let void_ptr: *mut u8 = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        let ptr: Box<T> = mem::transmute(void_ptr);
+        tls::set(key, ptr::null_mut());
+        return ptr;
+    }
+
+    /// Optionally take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    #[inline]
+    pub unsafe fn try_take<T>() -> Option<Box<T>> {
+        match maybe_tls_key() {
+            Some(key) => {
+                let void_ptr: *mut u8 = tls::get(key);
+                if void_ptr.is_null() {
+                    None
+                } else {
+                    let ptr: Box<T> = mem::transmute(void_ptr);
+                    tls::set(key, ptr::null_mut());
+                    Some(ptr)
+                }
+            }
+            None => None
+        }
+    }
+
+    /// Take ownership of a pointer from thread-local storage.
+    ///
+    /// # Safety note
+    ///
+    /// Does not validate the pointer type.
+    /// Leaves the old pointer in TLS for speed.
+    #[inline]
+    pub unsafe fn unsafe_take<T>() -> Box<T> {
+        let key = tls_key();
+        let void_ptr: *mut u8 = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        let ptr: Box<T> = mem::transmute(void_ptr);
+        return ptr;
+    }
+
+    /// Check whether there is a thread-local pointer installed.
+    pub fn exists() -> bool {
+        unsafe {
+            match maybe_tls_key() {
+                Some(key) => tls::get(key).is_not_null(),
+                None => false
+            }
+        }
+    }
+
+    /// Borrow a mutable reference to the thread-local value
+    ///
+    /// # Safety Note
+    ///
+    /// Because this leaves the value in thread-local storage, it is possible
+    /// for the Scheduler pointer to be aliased
+    pub unsafe fn unsafe_borrow<T>() -> *mut T {
+        let key = tls_key();
+        let void_ptr = tls::get(key);
+        if void_ptr.is_null() {
+            rtabort!("thread-local pointer is null. bogus!");
+        }
+        void_ptr as *mut T
+    }
+
+    pub unsafe fn try_unsafe_borrow<T>() -> Option<*mut T> {
+        match maybe_tls_key() {
+            Some(key) => {
+                let void_ptr = tls::get(key);
+                if void_ptr.is_null() {
+                    None
+                } else {
+                    Some(void_ptr as *mut T)
+                }
+            }
+            None => None
+        }
+    }
+
+    #[inline]
+    fn tls_key() -> tls::Key {
+        match maybe_tls_key() {
+            Some(key) => key,
+            None => rtabort!("runtime tls key not initialized")
+        }
+    }
+
+    #[inline]
+    #[cfg(not(test))]
+    pub fn maybe_tls_key() -> Option<tls::Key> {
+        unsafe {
+            // NB: This is a little racy because, while the key is
+            // initialized under a mutex and it's assumed to be initialized
+            // in the Scheduler ctor by any thread that needs to use it,
+            // we are not accessing the key under a mutex.  Threads that
+            // are not using the new Scheduler but still *want to check*
+            // whether they are running under a new Scheduler may see a 0
+            // value here that is in the process of being initialized in
+            // another thread. I think this is fine since the only action
+            // they could take if it was initialized would be to check the
+            // thread-local value and see that it's not set.
+            if RT_TLS_KEY != -1 {
+                return Some(RT_TLS_KEY);
+            } else {
+                return None;
+            }
+        }
+    }
+
+    #[inline] #[cfg(test)]
+    pub fn maybe_tls_key() -> Option<tls::Key> {
+        use rt;
+        unsafe {
+            mem::transmute(::realstd::rt::shouldnt_be_public::maybe_tls_key())
+        }
+    }
+}
diff --git a/src/libstd/rt/macros.rs b/src/libstd/rt/macros.rs
new file mode 100644
index 00000000000..bee8b5b82f4
--- /dev/null
+++ b/src/libstd/rt/macros.rs
@@ -0,0 +1,45 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Macros used by the runtime.
+//!
+//! These macros call functions which are only accessible in the `rt` module, so
+//! they aren't defined anywhere outside of the `rt` module.
+
+#![macro_escape]
+
+macro_rules! rterrln {
+    ($fmt:expr $($arg:tt)*) => ( {
+        format_args!(::rt::util::dumb_print, concat!($fmt, "\n") $($arg)*)
+    } )
+}
+
+// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
+macro_rules! rtdebug {
+    ($($arg:tt)*) => ( {
+        if cfg!(rtdebug) {
+            rterrln!($($arg)*)
+        }
+    })
+}
+
+macro_rules! rtassert {
+    ( $arg:expr ) => ( {
+        if ::rt::util::ENFORCE_SANITY {
+            if !$arg {
+                rtabort!(" assertion failed: {}", stringify!($arg));
+            }
+        }
+    } )
+}
+
+macro_rules! rtabort {
+    ($($arg:tt)*) => (format_args!(::rt::util::abort, $($arg)*))
+}
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index eb517047ddc..21c8197ef05 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -50,23 +50,43 @@
 
 use borrow::IntoCow;
 use failure;
-use rustrt;
 use os;
 use thunk::Thunk;
+use kinds::Send;
+use sys_common;
 
 // Reexport some of our utilities which are expected by other crates.
 pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
+pub use self::unwind::{begin_unwind, begin_unwind_fmt};
 
-// Reexport functionality from librustrt and other crates underneath the
-// standard library which work together to create the entire runtime.
+// Reexport some functionality from liballoc.
 pub use alloc::heap;
-pub use rustrt::{begin_unwind, begin_unwind_fmt, at_exit};
 
 // Simple backtrace functionality (to print on panic)
 pub mod backtrace;
 
-// Just stuff
-mod util;
+// Internals
+mod macros;
+
+// These should be refactored/moved/made private over time
+pub mod mutex;
+pub mod thread;
+pub mod exclusive;
+pub mod util;
+pub mod bookkeeping;
+pub mod local;
+pub mod task;
+pub mod unwind;
+
+mod args;
+mod at_exit_imp;
+mod libunwind;
+mod local_ptr;
+mod thread_local_storage;
+
+/// The default error code of the rust runtime if the main task panics instead
+/// of exiting cleanly.
+pub const DEFAULT_ERROR_CODE: int = 101;
 
 /// One-time runtime initialization.
 ///
@@ -75,8 +95,15 @@ mod util;
 /// metadata, and storing the process arguments.
 #[allow(experimental)]
 pub fn init(argc: int, argv: *const *const u8) {
-    rustrt::init(argc, argv);
-    unsafe { rustrt::unwind::register(failure::on_fail); }
+    // FIXME: Derefing these pointers is not safe.
+    // Need to propagate the unsafety to `start`.
+    unsafe {
+        args::init(argc, argv);
+        local_ptr::init();
+        at_exit_imp::init();
+        thread::init();
+        unwind::register(failure::on_fail);
+    }
 }
 
 #[cfg(any(windows, android))]
@@ -106,7 +133,8 @@ fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
 pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
     use prelude::*;
     use rt;
-    use rustrt::task::Task;
+    use rt::task::Task;
+    use str;
 
     let something_around_the_top_of_the_stack = 1;
     let addr = &something_around_the_top_of_the_stack as *const int;
@@ -139,18 +167,35 @@ pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
     let mut exit_code = None;
     let mut main = Some(main);
     let mut task = box Task::new(Some((my_stack_bottom, my_stack_top)),
-                                 Some(rustrt::thread::main_guard_page()));
-    task.name = Some("<main>".into_cow());
+                                 Some(rt::thread::main_guard_page()));
+    task.name = Some(str::Slice("<main>"));
     drop(task.run(|| {
         unsafe {
-            rustrt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
+            sys_common::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
         }
         (main.take().unwrap()).invoke(());
         exit_code = Some(os::get_exit_status());
     }).destroy());
-    unsafe { rt::cleanup(); }
+    unsafe { cleanup(); }
     // If the exit code wasn't set, then the task block must have panicked.
-    return exit_code.unwrap_or(rustrt::DEFAULT_ERROR_CODE);
+    return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE);
+}
+
+/// Enqueues a procedure to run when the runtime is cleaned up
+///
+/// The procedure passed to this function will be executed as part of the
+/// runtime cleanup phase. For normal rust programs, this means that it will run
+/// after all other tasks have exited.
+///
+/// The procedure is *not* executed with a local `Task` available to it, so
+/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
+/// This is meant for "bare bones" usage to clean up runtime details; it is
+/// not meant as a general-purpose "let's clean everything up" function.
+///
+/// It is forbidden for procedures to register more `at_exit` handlers when they
+/// are running, and doing so will lead to a process abort.
+pub fn at_exit(f: proc():Send) {
+    at_exit_imp::push(f);
 }
 
 /// One-time runtime cleanup.
@@ -163,5 +208,18 @@ pub fn start(argc: int, argv: *const *const u8, main: Thunk) -> int {
 /// Invoking cleanup while portions of the runtime are still in use may cause
 /// undefined behavior.
 pub unsafe fn cleanup() {
-    rustrt::cleanup();
+    bookkeeping::wait_for_other_tasks();
+    args::cleanup();
+    thread::cleanup();
+    local_ptr::cleanup();
+    at_exit_imp::run();
+}
+
+// FIXME: these probably shouldn't be public...
+#[doc(hidden)]
+pub mod shouldnt_be_public {
+    #[cfg(not(test))]
+    pub use super::local_ptr::native::maybe_tls_key;
+    #[cfg(all(not(windows), not(target_os = "android"), not(target_os = "ios")))]
+    pub use super::local_ptr::compiled::RT_TLS_PTR;
 }
diff --git a/src/libstd/rt/mutex.rs b/src/libstd/rt/mutex.rs
new file mode 100644
index 00000000000..381f14570df
--- /dev/null
+++ b/src/libstd/rt/mutex.rs
@@ -0,0 +1,406 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A native mutex and condition variable type.
+//!
+//! This module contains bindings to the platform's native mutex/condition
+//! variable primitives. It provides two types: `StaticNativeMutex`, which can
+//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
+//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
+//! objects serve as both mutexes and condition variables simultaneously.
+//!
+//! The static lock is lazily initialized, but it can only be unsafely
+//! destroyed. A statically initialized lock doesn't necessarily have a time at
+//! which it can get deallocated. For this reason, there is no `Drop`
+//! implementation of the static mutex, but rather the `destroy()` method must
+//! be invoked manually if destruction of the mutex is desired.
+//!
+//! The non-static `NativeMutex` type does have a destructor, but cannot be
+//! statically initialized.
+//!
+//! It is not recommended to use these types in idiomatic Rust code. They
+//! are appropriate where no other options are available, but other Rust
+//! concurrency primitives should be used before them: the `sync` crate defines
+//! `StaticMutex` and `Mutex` types.
+//!
+//! # Example
+//!
+//! ```rust
+//! use rt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
+//!
+//! // Use a statically initialized mutex
+//! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+//!
+//! unsafe {
+//!     let _guard = LOCK.lock();
+//! } // automatically unlocked here
+//!
+//! // Use a normally initialized mutex
+//! unsafe {
+//!     let mut lock = NativeMutex::new();
+//!
+//!     {
+//!         let _guard = lock.lock();
+//!     } // unlocked here
+//!
+//!     // sometimes the RAII guard isn't appropriate
+//!     lock.lock_noguard();
+//!     lock.unlock_noguard();
+//! } // `lock` is deallocated here
+//! ```
+
+#![allow(non_camel_case_types)]
+
+use core::prelude::*;
+
+use sys::mutex as imp;
+
+/// A native mutex suitable for storing in statics (that is, it has
+/// the `destroy` method rather than a destructor).
+///
+/// Prefer the `NativeMutex` type where possible, since that does not
+/// require manual deallocation.
+pub struct StaticNativeMutex {
+    inner: imp::Mutex,
+}
+
+/// A native mutex with a destructor for clean-up.
+///
+/// See `StaticNativeMutex` for a version that is suitable for storing in
+/// statics.
+pub struct NativeMutex {
+    inner: StaticNativeMutex
+}
+
+/// Automatically unlocks the mutex that it was created from on
+/// destruction.
+///
+/// Using this makes lock-based code resilient to unwinding/task
+/// panic, because the lock will be automatically unlocked even
+/// then.
+#[must_use]
+pub struct LockGuard<'a> {
+    lock: &'a StaticNativeMutex
+}
+
+pub const NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
+    inner: imp::MUTEX_INIT,
+};
+
+impl StaticNativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// Note that a mutex created in this way needs to be explicitly
+    /// freed with a call to `destroy` or it will leak.
+    /// Also it is important to avoid locking until the mutex has stopped moving
+    pub unsafe fn new() -> StaticNativeMutex {
+        StaticNativeMutex { inner: imp::Mutex::new() }
+    }
+
+    /// Acquires this lock. This assumes that the current thread does not
+    /// already hold the lock.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+    /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    /// unsafe {
+    ///     let _guard = LOCK.lock();
+    ///     // critical section...
+    /// } // automatically unlocked in `_guard`'s destructor
+    /// ```
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe because it will not function correctly if this
+    /// mutex has been *moved* since it was last used. The mutex can move an
+    /// arbitrary number of times before its first usage, but once a mutex has
+    /// been used once it is no longer allowed to move (or otherwise it invokes
+    /// undefined behavior).
+    ///
+    /// Additionally, this type does not take into account any form of
+    /// scheduling model. This will unconditionally block the *os thread* which
+    /// is not always desired.
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
+        self.inner.lock();
+
+        LockGuard { lock: self }
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`.
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
+        if self.inner.trylock() {
+            Some(LockGuard { lock: self })
+        } else {
+            None
+        }
+    }
+
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// does not guarantee that the mutex will ever be unlocked, and it is
+    /// undefined to drop an already-locked mutex.
+    pub unsafe fn lock_noguard(&self) { self.inner.lock() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock_noguard`.
+    pub unsafe fn trylock_noguard(&self) -> bool {
+        self.inner.trylock()
+    }
+
+    /// Unlocks the lock. This assumes that the current thread already holds the
+    /// lock.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, it
+    /// is not guaranteed that this is unlocking a previously locked mutex. It
+    /// is undefined to unlock an unlocked mutex.
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
+
+    /// Block on the internal condition variable.
+    ///
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// is unsafe because the mutex may not be currently locked.
+    pub unsafe fn wait_noguard(&self) { self.inner.wait() }
+
+    /// Signals a thread in `wait` to wake up
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe for the same reasons as `lock`. Additionally, this
+    /// is unsafe because the mutex may not be currently locked.
+    pub unsafe fn signal_noguard(&self) { self.inner.signal() }
+
+    /// This function is especially unsafe because there are no guarantees made
+    /// that no other thread is currently holding the lock or waiting on the
+    /// condition variable contained inside.
+    pub unsafe fn destroy(&self) { self.inner.destroy() }
+}
+
+impl NativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// The user must be careful to ensure the mutex is not locked when it is
+    /// being destroyed.
+    /// Also it is important to avoid locking until the mutex has stopped moving
+    pub unsafe fn new() -> NativeMutex {
+        NativeMutex { inner: StaticNativeMutex::new() }
+    }
+
+    /// Acquires this lock. This assumes that the current thread does not
+    /// already hold the lock.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use rt::mutex::NativeMutex;
+    /// unsafe {
+    ///     let mut lock = NativeMutex::new();
+    ///
+    ///     {
+    ///         let _guard = lock.lock();
+    ///         // critical section...
+    ///     } // automatically unlocked in `_guard`'s destructor
+    /// }
+    /// ```
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::lock`.
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
+        self.inner.lock()
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::trylock`.
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
+        self.inner.trylock()
+    }
+
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::lock_noguard`.
+    pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::trylock_noguard`.
+    pub unsafe fn trylock_noguard(&self) -> bool {
+        self.inner.trylock_noguard()
+    }
+
+    /// Unlocks the lock. This assumes that the current thread already holds the
+    /// lock.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::unlock_noguard`.
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
+
+    /// Block on the internal condition variable.
+    ///
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::wait_noguard`.
+    pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }
+
+    /// Signals a thread in `wait` to wake up
+    ///
+    /// # Unsafety
+    ///
+    /// This method is unsafe due to the same reasons as
+    /// `StaticNativeMutex::signal_noguard`.
+    pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
+}
+
+impl Drop for NativeMutex {
+    fn drop(&mut self) {
+        unsafe {self.inner.destroy()}
+    }
+}
+
+impl<'a> LockGuard<'a> {
+    /// Block on the internal condition variable.
+    pub unsafe fn wait(&self) {
+        self.lock.wait_noguard()
+    }
+
+    /// Signals a thread in `wait` to wake up.
+    pub unsafe fn signal(&self) {
+        self.lock.signal_noguard()
+    }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for LockGuard<'a> {
+    fn drop(&mut self) {
+        unsafe {self.lock.unlock_noguard()}
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+
+    use mem::drop;
+    use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+    use rt::thread::Thread;
+
+    #[test]
+    fn smoke_lock() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            let _guard = LK.lock();
+        }
+    }
+
+    #[test]
+    fn smoke_cond() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            let guard = LK.lock();
+            let t = Thread::start(move|| {
+                let guard = LK.lock();
+                guard.signal();
+            });
+            guard.wait();
+            drop(guard);
+
+            t.join();
+        }
+    }
+
+    #[test]
+    fn smoke_lock_noguard() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            LK.lock_noguard();
+            LK.unlock_noguard();
+        }
+    }
+
+    #[test]
+    fn smoke_cond_noguard() {
+        static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            LK.lock_noguard();
+            let t = Thread::start(move|| {
+                LK.lock_noguard();
+                LK.signal_noguard();
+                LK.unlock_noguard();
+            });
+            LK.wait_noguard();
+            LK.unlock_noguard();
+
+            t.join();
+        }
+    }
+
+    #[test]
+    fn destroy_immediately() {
+        unsafe {
+            let m = StaticNativeMutex::new();
+            m.destroy();
+        }
+    }
+}
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
new file mode 100644
index 00000000000..babd111b3c2
--- /dev/null
+++ b/src/libstd/rt/task.rs
@@ -0,0 +1,561 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Language-level runtime services that should reasonably be expected
+//! to be available 'everywhere'. Unwinding, local storage, and logging.
+//! Even a 'freestanding' Rust would likely want to implement this.
+
+pub use self::BlockedTask::*;
+use self::TaskState::*;
+
+use any::Any;
+use boxed::Box;
+use sync::Arc;
+use sync::atomic::{AtomicUint, SeqCst};
+use iter::{IteratorExt, Take};
+use kinds::marker;
+use mem;
+use ops::FnMut;
+use core::prelude::{Clone, Drop, Err, Iterator, None, Ok, Option, Send, Some};
+use core::prelude::{drop};
+use str::SendStr;
+use thunk::Thunk;
+
+use rt;
+use rt::bookkeeping;
+use rt::mutex::NativeMutex;
+use rt::local::Local;
+use rt::thread::{mod, Thread};
+use sys_common::stack;
+use rt::unwind;
+use rt::unwind::Unwinder;
+
+/// State associated with Rust tasks.
+///
+/// This structure is currently undergoing major changes, and is
+/// likely to be moved to/merged with a `Thread` structure.
+pub struct Task {
+    pub unwinder: Unwinder,
+    pub death: Death,
+    pub name: Option<SendStr>,
+
+    state: TaskState,
+    lock: NativeMutex,       // native synchronization
+    awoken: bool,            // used to prevent spurious wakeups
+
+    // This field holds the known bounds of the stack in (lo, hi) form. Not all
+    // native tasks necessarily know their precise bounds, hence this is
+    // optional.
+    stack_bounds: (uint, uint),
+
+    stack_guard: uint
+}
+
+// Once a task has entered the `Armed` state it must be destroyed via `drop`,
+// and no other method. This state is used to track this transition.
+#[deriving(PartialEq)]
+enum TaskState {
+    New,
+    Armed,
+    Destroyed,
+}
+
+pub struct TaskOpts {
+    /// Invoke this procedure with the result of the task when it finishes.
+    pub on_exit: Option<Thunk<Result>>,
+    /// A name for the task-to-be, for identification in panic messages
+    pub name: Option<SendStr>,
+    /// The size of the stack for the spawned task
+    pub stack_size: Option<uint>,
+}
+
+/// Indicates the manner in which a task exited.
+///
+/// A task that completes without panicking is considered to exit successfully.
+///
+/// If you wish for this result's delivery to block until all
+/// children tasks complete, it is recommended to use a result future.
+pub type Result = ::core::result::Result<(), Box<Any + Send>>;
+
+/// A handle to a blocked task. Usually this means having the Box<Task>
+/// pointer by ownership, but if the task is killable, a killer can steal it
+/// at any time.
+pub enum BlockedTask {
+    Owned(Box<Task>),
+    Shared(Arc<AtomicUint>),
+}
+
+/// Per-task state related to task death, killing, panic, etc.
+pub struct Death {
+    pub on_exit: Option<Thunk<Result>>,
+}
+
+pub struct BlockedTasks {
+    inner: Arc<AtomicUint>,
+}
+
+impl Task {
+    /// Creates a new uninitialized task.
+    pub fn new(stack_bounds: Option<(uint, uint)>, stack_guard: Option<uint>) -> Task {
+        Task {
+            unwinder: Unwinder::new(),
+            death: Death::new(),
+            state: New,
+            name: None,
+            lock: unsafe { NativeMutex::new() },
+            awoken: false,
+            // these *should* get overwritten
+            stack_bounds: stack_bounds.unwrap_or((0, 0)),
+            stack_guard: stack_guard.unwrap_or(0)
+        }
+    }
+
+    pub fn spawn<F>(opts: TaskOpts, f: F)
+        where F : FnOnce(), F : Send
+    {
+        Task::spawn_thunk(opts, Thunk::new(f))
+    }
+
+    fn spawn_thunk(opts: TaskOpts, f: Thunk) {
+        let TaskOpts { name, stack_size, on_exit } = opts;
+
+        let mut task = box Task::new(None, None);
+        task.name = name;
+        task.death.on_exit = on_exit;
+
+        let stack = stack_size.unwrap_or(rt::min_stack());
+
+        // Note that this increment must happen *before* the spawn in order to
+        // guarantee that if this task exits it will always end up waiting for
+        // the spawned task to exit.
+        let token = bookkeeping::increment();
+
+        // Spawning a new OS thread guarantees that __morestack will never get
+        // triggered, but we must manually set up the actual stack bounds once
+        // this function starts executing. This raises the lower limit by a bit
+        // because by the time that this function is executing we've already
+        // consumed at least a little bit of stack (we don't know the exact byte
+        // address at which our stack started).
+        Thread::spawn_stack(stack, move|| {
+            let something_around_the_top_of_the_stack = 1;
+            let addr = &something_around_the_top_of_the_stack as *const int;
+            let my_stack = addr as uint;
+            unsafe {
+                stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
+                                                      my_stack);
+            }
+            task.stack_guard = thread::current_guard_page();
+            task.stack_bounds = (my_stack - stack + 1024, my_stack);
+
+            let mut f = Some(f);
+            drop(task.run(|| { f.take().unwrap().invoke(()) }).destroy());
+            drop(token);
+        })
+    }
+
+    /// Consumes ownership of a task, runs some code, and returns the task back.
+    ///
+    /// This function can be used as an emulated "try/catch" to interoperate
+    /// with the rust runtime at the outermost boundary. It is not possible to
+    /// use this function in a nested fashion (a try/catch inside of another
+    /// try/catch). Invoking this function is quite cheap.
+    ///
+    /// If the closure `f` succeeds, then the returned task can be used again
+    /// for another invocation of `run`. If the closure `f` panics then `self`
+    /// will be internally destroyed along with all of the other associated
+    /// resources of this task. The `on_exit` callback is invoked with the
+    /// cause of panic (not returned here). This can be discovered by querying
+    /// `is_destroyed()`.
+    ///
+    /// Note that it is possible to view partial execution of the closure `f`
+    /// because it is not guaranteed to run to completion, but this function is
+    /// guaranteed to return if it panics. Care should be taken to ensure that
+    /// stack references made by `f` are handled appropriately.
+    ///
+    /// It is invalid to call this function with a task that has been previously
+    /// destroyed via a failed call to `run`.
+    pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
+        assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
+
+        // First, make sure that no one else is in TLS. This does not allow
+        // recursive invocations of run(). If there's no one else, then
+        // relinquish ownership of ourselves back into TLS.
+        if Local::exists(None::<Task>) {
+            panic!("cannot run a task recursively inside another");
+        }
+        self.state = Armed;
+        Local::put(self);
+
+        // There are two primary reasons that general try/catch is unsafe. The
+        // first is that we do not support nested try/catch. The above check for
+        // an existing task in TLS is sufficient for this invariant to be
+        // upheld. The second is that unwinding while unwinding is not defined.
+        // We take care of that by having an 'unwinding' flag in the task
+        // itself. For these reasons, this unsafety should be ok.
+        let result = unsafe { unwind::try(f) };
+
+        // After running the closure given return the task back out if it ran
+        // successfully, or clean up the task if it panicked.
+        let task: Box<Task> = Local::take();
+        match result {
+            Ok(()) => task,
+            Err(cause) => { task.cleanup(Err(cause)) }
+        }
+    }
+
+    /// Destroy all associated resources of this task.
+    ///
+    /// This function will perform any necessary clean up to prepare the task
+    /// for destruction. It is required that this is called before a `Task`
+    /// falls out of scope.
+    ///
+    /// The returned task cannot be used for running any more code, but it may
+    /// be used to extract the runtime as necessary.
+    pub fn destroy(self: Box<Task>) -> Box<Task> {
+        if self.is_destroyed() {
+            self
+        } else {
+            self.cleanup(Ok(()))
+        }
+    }
+
+    /// Cleans up a task, processing the result of the task as appropriate.
+    ///
+    /// This function consumes ownership of the task, deallocating it once it's
+    /// done being processed. It is assumed that TLD and the local heap have
+    /// already been destroyed and/or annihilated.
+    fn cleanup(mut self: Box<Task>, result: Result) -> Box<Task> {
+        // After taking care of the data above, we need to transmit the result
+        // of this task.
+        let what_to_do = self.death.on_exit.take();
+        Local::put(self);
+
+        // FIXME: this is running in a seriously constrained context. If this
+        //        allocates TLD then it will likely abort the runtime. Similarly,
+        //        if this panics, this will also likely abort the runtime.
+        //
+        //        This closure is currently limited to a channel send via the
+        //        standard library's task interface, but this needs
+        //        reconsideration to whether it's a reasonable thing to let a
+        //        task to do or not.
+        match what_to_do {
+            Some(f) => { f.invoke(result) }
+            None => { drop(result) }
+        }
+
+        // Now that we're done, we remove the task from TLS and flag it for
+        // destruction.
+        let mut task: Box<Task> = Local::take();
+        task.state = Destroyed;
+        return task;
+    }
+
+    /// Queries whether this can be destroyed or not.
+    pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
+
+    /// Deschedules the current task, invoking `f` `amt` times. It is not
+    /// recommended to use this function directly, but rather communication
+    /// primitives in `std::comm` should be used.
+    //
+    // This function gets a little interesting. There are a few safety and
+    // ownership violations going on here, but this is all done in the name of
+    // shared state. Additionally, all of the violations are protected with a
+    // mutex, so in theory there are no races.
+    //
+    // The first thing we need to do is to get a pointer to the task's internal
+    // mutex. This address will not be changing (because the task is allocated
+    // on the heap). We must have this handle separately because the task will
+    // have its ownership transferred to the given closure. We're guaranteed,
+    // however, that this memory will remain valid because *this* is the current
+    // task's execution thread.
+    //
+    // The next weird part is where ownership of the task actually goes. We
+    // relinquish it to the `f` blocking function, but upon returning this
+    // function needs to replace the task back in TLS. There is no communication
+    // from the wakeup thread back to this thread about the task pointer, and
+    // there's really no need to. In order to get around this, we cast the task
+    // to a `uint` which is then used at the end of this function to cast back
+    // to a `Box<Task>` object. Naturally, this looks like it violates
+    // ownership semantics in that there may be two `Box<Task>` objects.
+    //
+    // The fun part is that the wakeup half of this implementation knows to
+    // "forget" the task on the other end. This means that the awakening half of
+    // things silently relinquishes ownership back to this thread, but not in a
+    // way that the compiler can understand. The task's memory is always valid
+    // for both tasks because these operations are all done inside of a mutex.
+    //
+    // You'll also find that if blocking fails (the `f` function hands the
+    // BlockedTask back to us), we will `mem::forget` the handles. The
+    // reasoning for this is the same logic as above in that the task silently
+    // transfers ownership via the `uint`, not through normal compiler
+    // semantics.
+    //
+    // On a mildly unrelated note, it should also be pointed out that OS
+    // condition variables are susceptible to spurious wakeups, which we need to
+    // be ready for. In order to accommodate for this fact, we have an extra
+    // `awoken` field which indicates whether we were actually woken up via some
+    // invocation of `reawaken`. This flag is only ever accessed inside the
+    // lock, so there's no need to make it atomic.
+    pub fn deschedule<F>(mut self: Box<Task>, times: uint, mut f: F) where
+        F: FnMut(BlockedTask) -> ::core::result::Result<(), BlockedTask>,
+    {
+        unsafe {
+            let me = &mut *self as *mut Task;
+            let task = BlockedTask::block(self);
+
+            if times == 1 {
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+                match f(task) {
+                    Ok(()) => {
+                        while !(*me).awoken {
+                            guard.wait();
+                        }
+                    }
+                    Err(task) => { mem::forget(task.wake()); }
+                }
+            } else {
+                let iter = task.make_selectable(times);
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+
+                // Apply the given closure to all of the "selectable tasks",
+                // bailing on the first one that produces an error. Note that
+                // care must be taken such that when an error occurs, we
+                // may not own the task, so we may still have to wait for the
+                // task to become available. In other words, if task.wake()
+                // returns `None`, then someone else has ownership and we must
+                // wait for their signal.
+                match iter.map(f).filter_map(|a| a.err()).next() {
+                    None => {}
+                    Some(task) => {
+                        match task.wake() {
+                            Some(task) => {
+                                mem::forget(task);
+                                (*me).awoken = true;
+                            }
+                            None => {}
+                        }
+                    }
+                }
+                while !(*me).awoken {
+                    guard.wait();
+                }
+            }
+            // put the task back in TLS, and everything is as it once was.
+            Local::put(mem::transmute(me));
+        }
+    }
+
+    /// Wakes up a previously blocked task. This function can only be
+    /// called on tasks that were previously blocked in `deschedule`.
+    //
+    // See the comments on `deschedule` for why the task is forgotten here, and
+    // why it's valid to do so.
+    pub fn reawaken(mut self: Box<Task>) {
+        unsafe {
+            let me = &mut *self as *mut Task;
+            mem::forget(self);
+            let guard = (*me).lock.lock();
+            (*me).awoken = true;
+            guard.signal();
+        }
+    }
+
+    /// Yields control of this task to another task. This function will
+    /// eventually return, but possibly not immediately. This is used as an
+    /// opportunity to allow other tasks a chance to run.
+    pub fn yield_now() {
+        Thread::yield_now();
+    }
+
+    /// Returns the stack bounds for this task in (lo, hi) format. The stack
+    /// bounds may not be known for all tasks, so the return value may be
+    /// `None`.
+    pub fn stack_bounds(&self) -> (uint, uint) {
+        self.stack_bounds
+    }
+
+    /// Returns the stack guard for this task, if known.
+    pub fn stack_guard(&self) -> Option<uint> {
+        if self.stack_guard != 0 {
+            Some(self.stack_guard)
+        } else {
+            None
+        }
+    }
+
+    /// Consume this task, flagging it as a candidate for destruction.
+    ///
+    /// This function is required to be invoked to destroy a task. A task
+    /// destroyed through a normal drop will abort.
+    pub fn drop(mut self) {
+        self.state = Destroyed;
+    }
+}
+
+impl Drop for Task {
+    fn drop(&mut self) {
+        rtdebug!("called drop for a task: {}", self as *mut Task as uint);
+        rtassert!(self.state != Armed);
+    }
+}
+
+impl TaskOpts {
+    pub fn new() -> TaskOpts {
+        TaskOpts { on_exit: None, name: None, stack_size: None }
+    }
+}
+
+impl Iterator<BlockedTask> for BlockedTasks {
+    fn next(&mut self) -> Option<BlockedTask> {
+        Some(Shared(self.inner.clone()))
+    }
+}
+
+impl BlockedTask {
+    /// Returns Some if the task was successfully woken; None if already killed.
+    pub fn wake(self) -> Option<Box<Task>> {
+        match self {
+            Owned(task) => Some(task),
+            Shared(arc) => {
+                match arc.swap(0, SeqCst) {
+                    0 => None,
+                    n => Some(unsafe { mem::transmute(n) }),
+                }
+            }
+        }
+    }
+
+    /// Reawakens this task if ownership is acquired. If finer-grained control
+    /// is desired, use `wake` instead.
+    pub fn reawaken(self) {
+        self.wake().map(|t| t.reawaken());
+    }
+
+    // This assertion has two flavours because the wake involves an atomic op.
+    // In the faster version, destructors will panic dramatically instead.
+    #[cfg(not(test))] pub fn trash(self) { }
+    #[cfg(test)]      pub fn trash(self) { assert!(self.wake().is_none()); }
+
+    /// Create a blocked task, unless the task was already killed.
+    pub fn block(task: Box<Task>) -> BlockedTask {
+        Owned(task)
+    }
+
+    /// Converts one blocked task handle to a list of many handles to the same.
+    pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
+        let arc = match self {
+            Owned(task) => {
+                let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
+                Arc::new(flag)
+            }
+            Shared(arc) => arc.clone(),
+        };
+        BlockedTasks{ inner: arc }.take(num_handles)
+    }
+
+    /// Convert to an unsafe uint value. Useful for storing in a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_to_uint(self) -> uint {
+        match self {
+            Owned(task) => {
+                let blocked_task_ptr: uint = mem::transmute(task);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = mem::transmute(box arc);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr | 0x1
+            }
+        }
+    }
+
+    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
+        if blocked_task_ptr & 0x1 == 0 {
+            Owned(mem::transmute(blocked_task_ptr))
+        } else {
+            let ptr: Box<Arc<AtomicUint>> =
+                mem::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
+        }
+    }
+}
+
+impl Death {
+    pub fn new() -> Death {
+        Death { on_exit: None }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use prelude::*;
+    use task;
+    use rt::unwind;
+
+    #[test]
+    fn unwind() {
+        let result = task::try(move|| ());
+        rtdebug!("trying first assert");
+        assert!(result.is_ok());
+        let result = task::try(move|| -> () panic!());
+        rtdebug!("trying second assert");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn rng() {
+        use rand::{StdRng, Rng};
+        let mut r = StdRng::new().ok().unwrap();
+        let _ = r.next_u32();
+    }
+
+    #[test]
+    fn comm_stream() {
+        let (tx, rx) = channel();
+        tx.send(10i);
+        assert!(rx.recv() == 10);
+    }
+
+    #[test]
+    fn comm_shared_chan() {
+        let (tx, rx) = channel();
+        tx.send(10i);
+        assert!(rx.recv() == 10);
+    }
+
+    #[test]
+    #[should_fail]
+    fn test_begin_unwind() {
+        use rt::unwind::begin_unwind;
+        begin_unwind("cause", &(file!(), line!()))
+    }
+
+    #[test]
+    fn drop_new_task_ok() {
+        drop(Task::new(None, None));
+    }
+
+    // Task blocking tests
+
+    #[test]
+    fn block_and_wake() {
+        let task = box Task::new(None, None);
+        let task = BlockedTask::block(task).wake().unwrap();
+        task.drop();
+    }
+}
diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs
new file mode 100644
index 00000000000..c10338b1bce
--- /dev/null
+++ b/src/libstd/rt/thread.rs
@@ -0,0 +1,171 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Native os-thread management
+//!
+//! This module contains bindings necessary for managing OS-level threads.
+//! These functions operate outside of the rust runtime, creating threads
+//! which are not used for scheduling in any way.
+
+#![allow(non_camel_case_types)]
+
+use core::prelude::*;
+
+use boxed::Box;
+use mem;
+use sys::stack_overflow;
+use sys::thread as imp;
+
+pub unsafe fn init() {
+    imp::guard::init();
+    stack_overflow::init();
+}
+
+pub unsafe fn cleanup() {
+    stack_overflow::cleanup();
+}
+
+/// This struct represents a native thread's state. This is used to join on an
+/// existing thread created in the join-able state.
+pub struct Thread<T> {
+    native: imp::rust_thread,
+    joined: bool,
+    packet: Box<Option<T>>,
+}
+
+static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
+
+/// Returns the last writable byte of the main thread's stack next to the guard
+/// page. Must be called from the main thread.
+pub fn main_guard_page() -> uint {
+    unsafe {
+        imp::guard::main()
+    }
+}
+
+/// Returns the last writable byte of the current thread's stack next to the
+/// guard page. Must not be called from the main thread.
+pub fn current_guard_page() -> uint {
+    unsafe {
+        imp::guard::current()
+    }
+}
+
+// There are two impl blocks b/c if T were specified at the top then it's just a
+// pain to specify a type parameter on Thread::spawn (which doesn't need the
+// type parameter).
+impl Thread<()> {
+
+    /// Starts execution of a new OS thread.
+    ///
+    /// This function will not wait for the thread to join, but a handle to the
+    /// thread will be returned.
+    ///
+    /// Note that the handle returned is used to acquire the return value of the
+    /// procedure `main`. The `join` function will wait for the thread to finish
+    /// and return the value that `main` generated.
+    ///
+    /// Also note that the `Thread` returned will *always* wait for the thread
+    /// to finish executing. This means that even if `join` is not explicitly
+    /// called, when the `Thread` falls out of scope its destructor will block
+    /// waiting for the OS thread.
+    pub fn start<T: Send>(main: proc():Send -> T) -> Thread<T> {
+        Thread::start_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `start`, but specifies an explicit
+    /// stack size for the new thread.
+    pub fn start_stack<T: Send>(stack: uint, main: proc():Send -> T) -> Thread<T> {
+
+        // We need the address of the packet to fill in to be stable so when
+        // `main` fills it in it's still valid, so allocate an extra box to do
+        // so.
+        let packet = box None;
+        let packet2: *mut Option<T> = unsafe {
+            *mem::transmute::<&Box<Option<T>>, *const *mut Option<T>>(&packet)
+        };
+        let main = proc() unsafe { *packet2 = Some(main()); };
+        let native = unsafe { imp::create(stack, box main) };
+
+        Thread {
+            native: native,
+            joined: false,
+            packet: packet,
+        }
+    }
+
+    /// This will spawn a new thread, but it will not wait for the thread to
+    /// finish, nor is it possible to wait for the thread to finish.
+    ///
+    /// This corresponds to creating threads in the 'detached' state on unix
+    /// systems. Note that platforms may not keep the main program alive even if
+    /// there are detached thread still running around.
+    pub fn spawn(main: proc():Send) {
+        Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `spawn`, but explicitly specifies a
+    /// stack size for the new thread.
+    pub fn spawn_stack(stack: uint, main: proc():Send) {
+        unsafe {
+            let handle = imp::create(stack, box main);
+            imp::detach(handle);
+        }
+    }
+
+    /// Relinquishes the CPU slot that this OS-thread is currently using,
+    /// allowing another thread to run for awhile.
+    pub fn yield_now() {
+        unsafe { imp::yield_now(); }
+    }
+}
+
+impl<T: Send> Thread<T> {
+    /// Wait for this thread to finish, returning the result of the thread's
+    /// calculation.
+    pub fn join(mut self) -> T {
+        assert!(!self.joined);
+        unsafe { imp::join(self.native) };
+        self.joined = true;
+        assert!(self.packet.is_some());
+        self.packet.take().unwrap()
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Thread<T> {
+    fn drop(&mut self) {
+        // This is required for correctness. If this is not done then the thread
+        // would fill in a return box which no longer exists.
+        if !self.joined {
+            unsafe { imp::join(self.native) };
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Thread;
+
+    #[test]
+    fn smoke() { Thread::start(proc (){}).join(); }
+
+    #[test]
+    fn data() { assert_eq!(Thread::start(proc () { 1i }).join(), 1); }
+
+    #[test]
+    fn detached() { Thread::spawn(proc () {}) }
+
+    #[test]
+    fn small_stacks() {
+        assert_eq!(42i, Thread::start_stack(0, proc () 42i).join());
+        assert_eq!(42i, Thread::start_stack(1, proc () 42i).join());
+    }
+}
diff --git a/src/libstd/rt/thread_local_storage.rs b/src/libstd/rt/thread_local_storage.rs
new file mode 100644
index 00000000000..ee6ad8a4e08
--- /dev/null
+++ b/src/libstd/rt/thread_local_storage.rs
@@ -0,0 +1,115 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+#[cfg(unix)] use libc::c_int;
+#[cfg(unix)] use ptr::null;
+#[cfg(windows)] use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
+
+#[cfg(unix)]
+pub type Key = pthread_key_t;
+
+#[cfg(unix)]
+pub unsafe fn create(key: &mut Key) {
+    assert!(pthread_key_create(key, null()) == 0);
+}
+
+#[cfg(unix)]
+pub unsafe fn set(key: Key, value: *mut u8) {
+    assert!(pthread_setspecific(key, value) == 0);
+}
+
+#[cfg(unix)]
+pub unsafe fn get(key: Key) -> *mut u8 {
+    pthread_getspecific(key)
+}
+
+#[cfg(unix)]
+pub unsafe fn destroy(key: Key) {
+    assert!(pthread_key_delete(key) == 0);
+}
+
+#[cfg(target_os = "macos")]
+#[allow(non_camel_case_types)] // foreign type
+type pthread_key_t = ::libc::c_ulong;
+
+#[cfg(any(target_os="linux",
+          target_os="freebsd",
+          target_os="dragonfly",
+          target_os="android",
+          target_os = "ios"))]
+#[allow(non_camel_case_types)] // foreign type
+type pthread_key_t = ::libc::c_uint;
+
+#[cfg(unix)]
+extern {
+    fn pthread_key_create(key: *mut pthread_key_t, dtor: *const u8) -> c_int;
+    fn pthread_key_delete(key: pthread_key_t) -> c_int;
+    fn pthread_getspecific(key: pthread_key_t) -> *mut u8;
+    fn pthread_setspecific(key: pthread_key_t, value: *mut u8) -> c_int;
+}
+
+#[cfg(windows)]
+pub type Key = DWORD;
+
+#[cfg(windows)]
+pub unsafe fn create(key: &mut Key) {
+    static TLS_OUT_OF_INDEXES: DWORD = 0xFFFFFFFF;
+    *key = TlsAlloc();
+    assert!(*key != TLS_OUT_OF_INDEXES);
+}
+
+#[cfg(windows)]
+pub unsafe fn set(key: Key, value: *mut u8) {
+    assert!(0 != TlsSetValue(key, value as *mut ::libc::c_void))
+}
+
+#[cfg(windows)]
+pub unsafe fn get(key: Key) -> *mut u8 {
+    TlsGetValue(key) as *mut u8
+}
+
+#[cfg(windows)]
+pub unsafe fn destroy(key: Key) {
+    assert!(TlsFree(key) != 0);
+}
+
+#[cfg(windows)]
+#[allow(non_snake_case)]
+extern "system" {
+    fn TlsAlloc() -> DWORD;
+    fn TlsFree(dwTlsIndex: DWORD) -> BOOL;
+    fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
+    fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
+}
+
+#[cfg(test)]
+mod test {
+    use prelude::*;
+    use super::*;
+
+    #[test]
+    fn tls_smoke_test() {
+        use mem::transmute;
+        unsafe {
+            let mut key = 0;
+            let value = box 20i;
+            create(&mut key);
+            set(key, transmute(value));
+            let value: Box<int> = transmute(get(key));
+            assert_eq!(value, box 20i);
+            let value = box 30i;
+            set(key, transmute(value));
+            let value: Box<int> = transmute(get(key));
+            assert_eq!(value, box 30i);
+        }
+    }
+}
diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs
new file mode 100644
index 00000000000..1ac06270851
--- /dev/null
+++ b/src/libstd/rt/unwind.rs
@@ -0,0 +1,638 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of Rust stack unwinding
+//!
+//! For background on exception handling and stack unwinding please see
+//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
+//! documents linked from it.
+//! These are also good reads:
+//!     http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
+//!     http://monoinfinito.wordpress.com/series/exception-handling-in-c/
+//!     http://www.airs.com/blog/index.php?s=exception+frames
+//!
+//! ## A brief summary
+//!
+//! Exception handling happens in two phases: a search phase and a cleanup phase.
+//!
+//! In both phases the unwinder walks stack frames from top to bottom using
+//! information from the stack frame unwind sections of the current process's
+//! modules ("module" here refers to an OS module, i.e. an executable or a
+//! dynamic library).
+//!
+//! For each stack frame, it invokes the associated "personality routine", whose
+//! address is also stored in the unwind info section.
+//!
+//! In the search phase, the job of a personality routine is to examine the exception
+//! object being thrown, and to decide whether it should be caught at that stack
+//! frame.  Once the handler frame has been identified, cleanup phase begins.
+//!
+//! In the cleanup phase, personality routines invoke cleanup code associated
+//! with their stack frames (i.e. destructors).  Once stack has been unwound down
+//! to the handler frame level, unwinding stops and the last personality routine
+//! transfers control to its catch block.
+//!
+//! ## Frame unwind info registration
+//!
+//! Each module has its own frame unwind info section (usually ".eh_frame"), and
+//! unwinder needs to know about all of them in order for unwinding to be able to
+//! cross module boundaries.
+//!
+//! On some platforms, like Linux, this is achieved by dynamically enumerating
+//! currently loaded modules via the dl_iterate_phdr() API and finding all
+//! .eh_frame sections.
+//!
+//! Others, like Windows, require modules to actively register their unwind info
+//! sections by calling __register_frame_info() API at startup.  In the latter
+//! case it is essential that there is only one copy of the unwinder runtime in
+//! the process.  This is usually achieved by linking to the dynamic version of
+//! the unwind runtime.
+//!
+//! Currently Rust uses unwind runtime provided by libgcc.
+
+use core::prelude::*;
+
+use boxed::Box;
+use string::String;
+use str::StrAllocating;
+use vec::Vec;
+use any::Any;
+use sync::atomic;
+use cmp;
+use fmt;
+use intrinsics;
+use mem;
+use raw::Closure;
+use libc::c_void;
+
+use rt::local::Local;
+use rt::task::Task;
+
+use rt::libunwind as uw;
+
+#[allow(missing_copy_implementations)]
+pub struct Unwinder {
+    unwinding: bool,
+}
+
+struct Exception {
+    uwe: uw::_Unwind_Exception,
+    cause: Option<Box<Any + Send>>,
+}
+
+pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
+
+// Variables used for invoking callbacks when a task starts to unwind.
+//
+// For more information, see below.
+const MAX_CALLBACKS: uint = 16;
+static CALLBACKS: [atomic::AtomicUint, ..MAX_CALLBACKS] =
+        [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
+static CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+
+impl Unwinder {
+    pub fn new() -> Unwinder {
+        Unwinder {
+            unwinding: false,
+        }
+    }
+
+    pub fn unwinding(&self) -> bool {
+        self.unwinding
+    }
+}
+
+/// Invoke a closure, capturing the cause of panic if one occurs.
+///
+/// This function will return `None` if the closure did not panic, and will
+/// return `Some(cause)` if the closure panics. The `cause` returned is the
+/// object with which panic was originally invoked.
+///
+/// This function also is unsafe for a variety of reasons:
+///
+/// * This is not safe to call in a nested fashion. The unwinding
+///   interface for Rust is designed to have at most one try/catch block per
+///   task, not multiple. No runtime checking is currently performed to uphold
+///   this invariant, so this function is not safe. A nested try/catch block
+///   may result in corruption of the outer try/catch block's state, especially
+///   if this is used within a task itself.
+///
+/// * It is not sound to trigger unwinding while already unwinding. Rust tasks
+///   have runtime checks in place to ensure this invariant, but it is not
+///   guaranteed that a rust task is in place when invoking this function.
+///   Unwinding twice can lead to resource leaks where some destructors are not
+///   run.
+pub unsafe fn try(f: ||) -> ::core::result::Result<(), Box<Any + Send>> {
+    let closure: Closure = mem::transmute(f);
+    let ep = rust_try(try_fn, closure.code as *mut c_void,
+                      closure.env as *mut c_void);
+    return if ep.is_null() {
+        Ok(())
+    } else {
+        let my_ep = ep as *mut Exception;
+        rtdebug!("caught {}", (*my_ep).uwe.exception_class);
+        let cause = (*my_ep).cause.take();
+        uw::_Unwind_DeleteException(ep);
+        Err(cause.unwrap())
+    };
+
+    extern fn try_fn(code: *mut c_void, env: *mut c_void) {
+        unsafe {
+            let closure: || = mem::transmute(Closure {
+                code: code as *mut (),
+                env: env as *mut (),
+            });
+            closure();
+        }
+    }
+
+    #[link(name = "rustrt_native", kind = "static")]
+    #[cfg(not(test))]
+    extern {}
+
+    extern {
+        // Rust's try-catch
+        // When f(...) returns normally, the return value is null.
+        // When f(...) throws, the return value is a pointer to the caught
+        // exception object.
+        fn rust_try(f: extern "C" fn(*mut c_void, *mut c_void),
+                    code: *mut c_void,
+                    data: *mut c_void) -> *mut uw::_Unwind_Exception;
+    }
+}
+
+// An uninlined, unmangled function upon which to slap yer breakpoints
+#[inline(never)]
+#[no_mangle]
+fn rust_panic(cause: Box<Any + Send>) -> ! {
+    rtdebug!("begin_unwind()");
+
+    unsafe {
+        let exception = box Exception {
+            uwe: uw::_Unwind_Exception {
+                exception_class: rust_exception_class(),
+                exception_cleanup: exception_cleanup,
+                private: [0, ..uw::unwinder_private_data_size],
+            },
+            cause: Some(cause),
+        };
+        let error = uw::_Unwind_RaiseException(mem::transmute(exception));
+        rtabort!("Could not unwind stack, error = {}", error as int)
+    }
+
+    extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
+                                exception: *mut uw::_Unwind_Exception) {
+        rtdebug!("exception_cleanup()");
+        unsafe {
+            let _: Box<Exception> = mem::transmute(exception);
+        }
+    }
+}
+
+// Rust's exception class identifier.  This is used by personality routines to
+// determine whether the exception was thrown by their own runtime.
+fn rust_exception_class() -> uw::_Unwind_Exception_Class {
+    // M O Z \0  R U S T -- vendor, language
+    0x4d4f5a_00_52555354
+}
+
+// We could implement our personality routine in pure Rust, however exception
+// info decoding is tedious.  More importantly, personality routines have to
+// handle various platform quirks, which are not fun to maintain.  For this
+// reason, we attempt to reuse personality routine of the C language:
+// __gcc_personality_v0.
+//
+// Since C does not support exception catching, __gcc_personality_v0 simply
+// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
+// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
+//
+// This is pretty close to Rust's exception handling approach, except that Rust
+// does have a single "catch-all" handler at the bottom of each task's stack.
+// So we have two versions of the personality routine:
+// - rust_eh_personality, used by all cleanup landing pads, which never catches,
+//   so the behavior of __gcc_personality_v0 is perfectly adequate there, and
+// - rust_eh_personality_catch, used only by rust_try(), which always catches.
+//
+// Note, however, that for implementation simplicity, rust_eh_personality_catch
+// lacks code to install a landing pad, so in order to obtain exception object
+// pointer (which it needs to return upstream), rust_try() employs another trick:
+// it calls into the nested rust_try_inner(), whose landing pad does not resume
+// unwinds.  Instead, it extracts the exception pointer and performs a "normal"
+// return.
+//
+// See also: rt/rust_try.ll
+
+#[cfg(all(not(target_arch = "arm"),
+          not(all(windows, target_arch = "x86_64")),
+          not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_v0(version: c_int,
+                                actions: uw::_Unwind_Action,
+                                exception_class: uw::_Unwind_Exception_Class,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern fn rust_eh_personality(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_v0(version, actions, exception_class, ue_header,
+                                 context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        _version: c_int,
+        actions: uw::_Unwind_Action,
+        _exception_class: uw::_Unwind_Exception_Class,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+
+        if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            uw::_URC_INSTALL_CONTEXT
+        }
+    }
+}
+
+// iOS on armv7 is using SjLj exceptions and therefore requires to use
+// a specialized personality routine: __gcc_personality_sj0
+
+#[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_sj0(version: c_int,
+                                actions: uw::_Unwind_Action,
+                                exception_class: uw::_Unwind_Exception_Class,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_sj0(version, actions, exception_class, ue_header,
+                                  context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        _version: c_int,
+        actions: uw::_Unwind_Action,
+        _exception_class: uw::_Unwind_Exception_Class,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            unsafe {
+                __gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
+                                      _context)
+            }
+        }
+    }
+}
+
+
+// ARM EHABI uses a slightly different personality routine signature,
+// but otherwise works the same.
+#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
+#[doc(hidden)]
+pub mod eabi {
+    use rt::libunwind as uw;
+    use libc::c_int;
+
+    extern "C" {
+        fn __gcc_personality_v0(state: uw::_Unwind_State,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+            -> uw::_Unwind_Reason_Code;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern "C" fn rust_eh_personality(
+        state: uw::_Unwind_State,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_v0(state, ue_header, context)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        state: uw::_Unwind_State,
+        _ue_header: *mut uw::_Unwind_Exception,
+        _context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        if (state as c_int & uw::_US_ACTION_MASK as c_int)
+                           == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            uw::_URC_INSTALL_CONTEXT
+        }
+    }
+}
+
+// Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
+//
+// This looks a bit convoluted because rather than implementing a native SEH handler,
+// GCC reuses the same personality routine as for the other architectures by wrapping it
+// with an "API translator" layer (_GCC_specific_handler).
+
+#[cfg(all(windows, target_arch = "x86_64", not(test)))]
+#[doc(hidden)]
+#[allow(non_camel_case_types, non_snake_case)]
+pub mod eabi {
+    pub use self::EXCEPTION_DISPOSITION::*;
+    use rt::libunwind as uw;
+    use libc::{c_void, c_int};
+
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct EXCEPTION_RECORD;
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct CONTEXT;
+    #[repr(C)]
+    #[allow(missing_copy_implementations)]
+    pub struct DISPATCHER_CONTEXT;
+
+    #[repr(C)]
+    pub enum EXCEPTION_DISPOSITION {
+        ExceptionContinueExecution,
+        ExceptionContinueSearch,
+        ExceptionNestedException,
+        ExceptionCollidedUnwind
+    }
+
+    impl Copy for EXCEPTION_DISPOSITION {}
+
+    type _Unwind_Personality_Fn =
+        extern "C" fn(
+            version: c_int,
+            actions: uw::_Unwind_Action,
+            exception_class: uw::_Unwind_Exception_Class,
+            ue_header: *mut uw::_Unwind_Exception,
+            context: *mut uw::_Unwind_Context
+        ) -> uw::_Unwind_Reason_Code;
+
+    extern "C" {
+        fn __gcc_personality_seh0(
+            exceptionRecord: *mut EXCEPTION_RECORD,
+            establisherFrame: *mut c_void,
+            contextRecord: *mut CONTEXT,
+            dispatcherContext: *mut DISPATCHER_CONTEXT
+        ) -> EXCEPTION_DISPOSITION;
+
+        fn _GCC_specific_handler(
+            exceptionRecord: *mut EXCEPTION_RECORD,
+            establisherFrame: *mut c_void,
+            contextRecord: *mut CONTEXT,
+            dispatcherContext: *mut DISPATCHER_CONTEXT,
+            personality: _Unwind_Personality_Fn
+        ) -> EXCEPTION_DISPOSITION;
+    }
+
+    #[lang="eh_personality"]
+    #[no_mangle] // referenced from rust_try.ll
+    extern "C" fn rust_eh_personality(
+        exceptionRecord: *mut EXCEPTION_RECORD,
+        establisherFrame: *mut c_void,
+        contextRecord: *mut CONTEXT,
+        dispatcherContext: *mut DISPATCHER_CONTEXT
+    ) -> EXCEPTION_DISPOSITION
+    {
+        unsafe {
+            __gcc_personality_seh0(exceptionRecord, establisherFrame,
+                                   contextRecord, dispatcherContext)
+        }
+    }
+
+    #[no_mangle] // referenced from rust_try.ll
+    pub extern "C" fn rust_eh_personality_catch(
+        exceptionRecord: *mut EXCEPTION_RECORD,
+        establisherFrame: *mut c_void,
+        contextRecord: *mut CONTEXT,
+        dispatcherContext: *mut DISPATCHER_CONTEXT
+    ) -> EXCEPTION_DISPOSITION
+    {
+        extern "C" fn inner(
+                _version: c_int,
+                actions: uw::_Unwind_Action,
+                _exception_class: uw::_Unwind_Exception_Class,
+                _ue_header: *mut uw::_Unwind_Exception,
+                _context: *mut uw::_Unwind_Context
+            ) -> uw::_Unwind_Reason_Code
+        {
+            if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+                uw::_URC_HANDLER_FOUND // catch!
+            }
+            else { // cleanup phase
+                uw::_URC_INSTALL_CONTEXT
+            }
+        }
+
+        unsafe {
+            _GCC_specific_handler(exceptionRecord, establisherFrame,
+                                  contextRecord, dispatcherContext,
+                                  inner)
+        }
+    }
+}
+
+// Entry point of panic from the libcore crate
+#[cfg(not(test))]
+#[lang = "panic_fmt"]
+pub extern fn rust_begin_unwind(msg: &fmt::Arguments,
+                                file: &'static str, line: uint) -> ! {
+    begin_unwind_fmt(msg, &(file, line))
+}
+
+/// The entry point for unwinding with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as low an impact
+/// on (e.g.) the inlining of other functions as possible), by moving
+/// the actual formatting into this shared place.
+#[inline(never)] #[cold]
+pub fn begin_unwind_fmt(msg: &fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
+    use fmt::FormatWriter;
+
+    // We do two allocations here, unfortunately. But (a) they're
+    // required with the current scheme, and (b) we don't handle
+    // panic + OOM properly anyway (see comment in begin_unwind
+    // below).
+
+    struct VecWriter<'a> { v: &'a mut Vec<u8> }
+
+    impl<'a> fmt::FormatWriter for VecWriter<'a> {
+        fn write(&mut self, buf: &[u8]) -> fmt::Result {
+            self.v.push_all(buf);
+            Ok(())
+        }
+    }
+
+    let mut v = Vec::new();
+    let _ = write!(&mut VecWriter { v: &mut v }, "{}", msg);
+
+    let msg = box String::from_utf8_lossy(v.as_slice()).into_string();
+    begin_unwind_inner(msg, file_line)
+}
+
+/// This is the entry point of unwinding for panic!() and assert!().
+#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
+pub fn begin_unwind<M: Any + Send>(msg: M, file_line: &(&'static str, uint)) -> ! {
+    // Note that this should be the only allocation performed in this code path.
+    // Currently this means that panic!() on OOM will invoke this code path,
+    // but then again we're not really ready for panic on OOM anyway. If
+    // we do start doing this, then we should propagate this allocation to
+    // be performed in the parent of this task instead of the task that's
+    // panicking.
+
+    // see below for why we do the `Any` coercion here.
+    begin_unwind_inner(box msg, file_line)
+}
+
+/// The core of the unwinding.
+///
+/// This is non-generic to avoid instantiation bloat in other crates
+/// (which makes compilation of small crates noticeably slower). (Note:
+/// we need the `Any` object anyway, we're not just creating it to
+/// avoid being generic.)
+///
+/// Doing this split took the LLVM IR line counts of `fn main() { panic!()
+/// }` from ~1900/3700 (-O/no opts) to 180/590.
+#[inline(never)] #[cold] // this is the slow path, please never inline this
+fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) -> ! {
+    // First, invoke the user-defined callbacks triggered on task panic.
+    //
+    // By the time that we see a callback has been registered (by reading
+    // MAX_CALLBACKS), the actual callback itself may have not been stored yet,
+    // so we just chalk it up to a race condition and move on to the next
+    // callback. Additionally, CALLBACK_CNT may briefly be higher than
+    // MAX_CALLBACKS, so we're sure to clamp it as necessary.
+    let callbacks = {
+        let amt = CALLBACK_CNT.load(atomic::SeqCst);
+        CALLBACKS[..cmp::min(amt, MAX_CALLBACKS)]
+    };
+    for cb in callbacks.iter() {
+        match cb.load(atomic::SeqCst) {
+            0 => {}
+            n => {
+                let f: Callback = unsafe { mem::transmute(n) };
+                let (file, line) = *file_line;
+                f(&*msg, file, line);
+            }
+        }
+    };
+
+    // Now that we've run all the necessary unwind callbacks, we actually
+    // perform the unwinding. If we don't have a task, then it's time to die
+    // (hopefully someone printed something about this).
+    let mut task: Box<Task> = match Local::try_take() {
+        Some(task) => task,
+        None => rust_panic(msg),
+    };
+
+    if task.unwinder.unwinding {
+        // If a task panics while it's already unwinding then we
+        // have limited options. Currently our preference is to
+        // just abort. In the future we may consider resuming
+        // unwinding or otherwise exiting the task cleanly.
+        rterrln!("task failed during unwinding. aborting.");
+        unsafe { intrinsics::abort() }
+    }
+    task.unwinder.unwinding = true;
+
+    // Put the task back in TLS because the unwinding process may run code which
+    // requires the task. We need a handle to its unwinder, however, so after
+    // this we unsafely extract it and continue along.
+    Local::put(task);
+    rust_panic(msg);
+}
+
+/// Register a callback to be invoked when a task unwinds.
+///
+/// This is an unsafe and experimental API which allows for an arbitrary
+/// callback to be invoked when a task panics. This callback is invoked on both
+/// the initial unwinding and a double unwinding if one occurs. Additionally,
+/// the local `Task` will be in place for the duration of the callback, and
+/// the callback must ensure that it remains in place once the callback returns.
+///
+/// Only a limited number of callbacks can be registered, and this function
+/// returns whether the callback was successfully registered or not. It is not
+/// currently possible to unregister a callback once it has been registered.
+#[experimental]
+pub unsafe fn register(f: Callback) -> bool {
+    match CALLBACK_CNT.fetch_add(1, atomic::SeqCst) {
+        // The invocation code has knowledge of this window where the count has
+        // been incremented, but the callback has not been stored. We're
+        // guaranteed that the slot we're storing into is 0.
+        n if n < MAX_CALLBACKS => {
+            let prev = CALLBACKS[n].swap(mem::transmute(f), atomic::SeqCst);
+            rtassert!(prev == 0);
+            true
+        }
+        // If we accidentally bumped the count too high, pull it back.
+        _ => {
+            CALLBACK_CNT.store(MAX_CALLBACKS, atomic::SeqCst);
+            false
+        }
+    }
+}
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index ce359c7b0e0..d3cfccab9d0 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -7,10 +7,18 @@
 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
+//
+// ignore-lexer-test FIXME #15677
+
+use core::prelude::*;
 
-use libc::uintptr_t;
-use option::Option;
-use option::Option::{Some, None};
+use core::cmp;
+use core::fmt;
+use core::intrinsics;
+use core::slice;
+use core::str;
+
+use libc::{mod, uintptr_t};
 use os;
 use str::{FromStr, from_str, Str};
 use sync::atomic;
@@ -73,3 +81,136 @@ pub fn default_sched_threads() -> uint {
         }
     }
 }
+
+// Indicates whether we should perform expensive sanity checks, including rtassert!
+//
+// FIXME: Once the runtime matures remove the `true` below to turn off rtassert,
+//        etc.
+pub const ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) ||
+                                  cfg!(rtassert);
+
+pub struct Stdio(libc::c_int);
+
+#[allow(non_upper_case_globals)]
+pub const Stdout: Stdio = Stdio(libc::STDOUT_FILENO);
+#[allow(non_upper_case_globals)]
+pub const Stderr: Stdio = Stdio(libc::STDERR_FILENO);
+
+impl fmt::FormatWriter for Stdio {
+    fn write(&mut self, data: &[u8]) -> fmt::Result {
+        #[cfg(unix)]
+        type WriteLen = libc::size_t;
+        #[cfg(windows)]
+        type WriteLen = libc::c_uint;
+        unsafe {
+            let Stdio(fd) = *self;
+            libc::write(fd,
+                        data.as_ptr() as *const libc::c_void,
+                        data.len() as WriteLen);
+        }
+        Ok(()) // yes, we're lying
+    }
+}
+
+pub fn dumb_print(args: &fmt::Arguments) {
+    use fmt::FormatWriter;
+    let mut w = Stderr;
+    let _ = w.write_fmt(args);
+}
+
+pub fn abort(args: &fmt::Arguments) -> ! {
+    use fmt::FormatWriter;
+
+    struct BufWriter<'a> {
+        buf: &'a mut [u8],
+        pos: uint,
+    }
+    impl<'a> FormatWriter for BufWriter<'a> {
+        fn write(&mut self, bytes: &[u8]) -> fmt::Result {
+            let left = self.buf[mut self.pos..];
+            let to_write = bytes[..cmp::min(bytes.len(), left.len())];
+            slice::bytes::copy_memory(left, to_write);
+            self.pos += to_write.len();
+            Ok(())
+        }
+    }
+
+    // Convert the arguments into a stack-allocated string
+    let mut msg = [0u8, ..512];
+    let mut w = BufWriter { buf: &mut msg, pos: 0 };
+    let _ = write!(&mut w, "{}", args);
+    let msg = str::from_utf8(w.buf[mut ..w.pos]).unwrap_or("aborted");
+    let msg = if msg.is_empty() {"aborted"} else {msg};
+
+    // Give some context to the message
+    let hash = msg.bytes().fold(0, |accum, val| accum + (val as uint) );
+    let quote = match hash % 10 {
+        0 => "
+It was from the artists and poets that the pertinent answers came, and I
+know that panic would have broken loose had they been able to compare notes.
+As it was, lacking their original letters, I half suspected the compiler of
+having asked leading questions, or of having edited the correspondence in
+corroboration of what he had latently resolved to see.",
+        1 => "
+There are not many persons who know what wonders are opened to them in the
+stories and visions of their youth; for when as children we listen and dream,
+we think but half-formed thoughts, and when as men we try to remember, we are
+dulled and prosaic with the poison of life. But some of us awake in the night
+with strange phantasms of enchanted hills and gardens, of fountains that sing
+in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
+down to sleeping cities of bronze and stone, and of shadowy companies of heroes
+that ride caparisoned white horses along the edges of thick forests; and then
+we know that we have looked back through the ivory gates into that world of
+wonder which was ours before we were wise and unhappy.",
+        2 => "
+Instead of the poems I had hoped for, there came only a shuddering blackness
+and ineffable loneliness; and I saw at last a fearful truth which no one had
+ever dared to breathe before — the unwhisperable secret of secrets — The fact
+that this city of stone and stridor is not a sentient perpetuation of Old New
+York as London is of Old London and Paris of Old Paris, but that it is in fact
+quite dead, its sprawling body imperfectly embalmed and infested with queer
+animate things which have nothing to do with it as it was in life.",
+        3 => "
+The ocean ate the last of the land and poured into the smoking gulf, thereby
+giving up all it had ever conquered. From the new-flooded lands it flowed
+again, uncovering death and decay; and from its ancient and immemorial bed it
+trickled loathsomely, uncovering nighted secrets of the years when Time was
+young and the gods unborn. Above the waves rose weedy remembered spires. The
+moon laid pale lilies of light on dead London, and Paris stood up from its damp
+grave to be sanctified with star-dust. Then rose spires and monoliths that were
+weedy but not remembered; terrible spires and monoliths of lands that men never
+knew were lands...",
+        4 => "
+There was a night when winds from unknown spaces whirled us irresistibly into
+limitless vacuum beyond all thought and entity. Perceptions of the most
+maddeningly untransmissible sort thronged upon us; perceptions of infinity
+which at the time convulsed us with joy, yet which are now partly lost to my
+memory and partly incapable of presentation to others.",
+        _ => "You've met with a terrible fate, haven't you?"
+    };
+    rterrln!("{}", "");
+    rterrln!("{}", quote);
+    rterrln!("{}", "");
+    rterrln!("fatal runtime error: {}", msg);
+    unsafe { intrinsics::abort(); }
+}
+
+pub unsafe fn report_overflow() {
+    use rt::task::Task;
+    use rt::local::Local;
+
+    // This message is deliberately not emitted through the task's
+    // logger: the logger may not be initialized at this point, and
+    // initializing it would require an FFI call (out to libuv).
+    // That FFI call needs about 2MB of stack -- which we do not
+    // have, since we just overflowed the stack. So the message is
+    // written directly to stderr via rterrln! instead.
+
+    let task: Option<*mut Task> = Local::try_unsafe_borrow();
+
+    let name = task.and_then(|task| {
+        (*task).name.as_ref().map(|n| n.as_slice())
+    });
+
+    rterrln!("\ntask '{}' has overflowed its stack", name.unwrap_or("<unknown>"));
+}