about summary refs log tree commit diff
path: root/src/libstd/sys
diff options
context:
space:
mode:
authorAaron Turon <aturon@mozilla.com>2014-11-23 19:21:17 -0800
committerAaron Turon <aturon@mozilla.com>2014-12-18 23:31:34 -0800
commit2b3477d373603527d23cc578f3737857b7b253d7 (patch)
tree56022ebf11d5d27a6ef15f15d00d014a84a35837 /src/libstd/sys
parent840de072085df360733c48396224e9966e2dc72c (diff)
downloadrust-2b3477d373603527d23cc578f3737857b7b253d7.tar.gz
rust-2b3477d373603527d23cc578f3737857b7b253d7.zip
libs: merge librustrt into libstd
This commit merges the `rustrt` crate into `std`, undoing part of the
facade. This merger continues the paring down of the runtime system.

Code relying on the public API of `rustrt` will break; some of this API
is now available through `std::rt`, but is likely to change and/or be
removed very soon.

[breaking-change]
Diffstat (limited to 'src/libstd/sys')
-rw-r--r--src/libstd/sys/common/backtrace.rs131
-rw-r--r--src/libstd/sys/common/helper_thread.rs5
-rw-r--r--src/libstd/sys/common/mod.rs3
-rw-r--r--src/libstd/sys/common/stack.rs325
-rw-r--r--src/libstd/sys/common/thread.rs34
-rw-r--r--src/libstd/sys/common/thread_local.rs4
-rw-r--r--src/libstd/sys/unix/backtrace.rs493
-rw-r--r--src/libstd/sys/unix/mod.rs3
-rw-r--r--src/libstd/sys/unix/stack_overflow.rs291
-rw-r--r--src/libstd/sys/unix/thread.rs270
-rw-r--r--src/libstd/sys/windows/backtrace.rs371
-rw-r--r--src/libstd/sys/windows/mod.rs3
-rw-r--r--src/libstd/sys/windows/stack_overflow.rs120
-rw-r--r--src/libstd/sys/windows/thread.rs95
-rw-r--r--src/libstd/sys/windows/thread_local.rs6
15 files changed, 2146 insertions, 8 deletions
diff --git a/src/libstd/sys/common/backtrace.rs b/src/libstd/sys/common/backtrace.rs
new file mode 100644
index 00000000000..0c03060b314
--- /dev/null
+++ b/src/libstd/sys/common/backtrace.rs
@@ -0,0 +1,131 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use io::{IoResult, Writer};
+use iter::Iterator;
+use option::{Some, None};
+use result::{Ok, Err};
+use str::{StrPrelude, from_str};
+use unicode::char::UnicodeChar;
+
+#[cfg(target_word_size = "64")] pub const HEX_WIDTH: uint = 18;
+#[cfg(target_word_size = "32")] pub const HEX_WIDTH: uint = 10;
+
+// All rust symbols are in theory lists of "::"-separated identifiers. Some
+// assemblers, however, can't handle these characters in symbol names. To get
+// around this, we use C++-style mangling. The mangling method is:
+//
+// 1. Prefix the symbol with "_ZN"
+// 2. For each element of the path, emit the length plus the element
+// 3. End the path with "E"
+//
+// For example, "_ZN4testE" => "test" and "_ZN3foo3barE" => "foo::bar".
+//
+// We're the ones printing our backtraces, so we can't rely on anything else to
+// demangle our symbols. It's *much* nicer to look at demangled symbols, so
+// this function is implemented to give us nice pretty output.
+//
+// Note that this demangler isn't quite as fancy as it could be. We have lots
+// of other information in our symbols like hashes, version, type information,
+// etc. Additionally, this doesn't handle glue symbols at all.
+pub fn demangle(writer: &mut Writer, s: &str) -> IoResult<()> {
+    // First validate the symbol. If it doesn't look like anything we're
+    // expecting, we just print it literally. Note that we must handle non-rust
+    // symbols because we could have any function in the backtrace.
+    let mut valid = true;
+    if s.len() > 4 && s.starts_with("_ZN") && s.ends_with("E") {
+        let mut chars = s.slice(3, s.len() - 1).chars();
+        while valid {
+            let mut i = 0;
+            for c in chars {
+                if c.is_numeric() {
+                    i = i * 10 + c as uint - '0' as uint;
+                } else {
+                    break
+                }
+            }
+            if i == 0 {
+                valid = chars.next().is_none();
+                break
+            } else if chars.by_ref().take(i - 1).count() != i - 1 {
+                valid = false;
+            }
+        }
+    } else {
+        valid = false;
+    }
+
+    // Alright, let's do this.
+    if !valid {
+        try!(writer.write_str(s));
+    } else {
+        let mut s = s.slice_from(3);
+        let mut first = true;
+        while s.len() > 1 {
+            if !first {
+                try!(writer.write_str("::"));
+            } else {
+                first = false;
+            }
+            let mut rest = s;
+            while rest.char_at(0).is_numeric() {
+                rest = rest.slice_from(1);
+            }
+            let i: uint = from_str(s.slice_to(s.len() - rest.len())).unwrap();
+            s = rest.slice_from(i);
+            rest = rest.slice_to(i);
+            while rest.len() > 0 {
+                if rest.starts_with("$") {
+                    macro_rules! demangle(
+                        ($($pat:expr => $demangled:expr),*) => ({
+                            $(if rest.starts_with($pat) {
+                                try!(writer.write_str($demangled));
+                                rest = rest.slice_from($pat.len());
+                              } else)*
+                            {
+                                try!(writer.write_str(rest));
+                                break;
+                            }
+
+                        })
+                    )
+                    // see src/librustc/back/link.rs for these mappings
+                    demangle! (
+                        "$SP$" => "@",
+                        "$UP$" => "Box",
+                        "$RP$" => "*",
+                        "$BP$" => "&",
+                        "$LT$" => "<",
+                        "$GT$" => ">",
+                        "$LP$" => "(",
+                        "$RP$" => ")",
+                        "$C$"  => ",",
+
+                        // in theory we can demangle any Unicode code point, but
+                        // for simplicity we just catch the common ones.
+                        "$x20" => " ",
+                        "$x27" => "'",
+                        "$x5b" => "[",
+                        "$x5d" => "]"
+                    )
+                } else {
+                    let idx = match rest.find('$') {
+                        None => rest.len(),
+                        Some(i) => i,
+                    };
+                    try!(writer.write_str(rest.slice_to(idx)));
+                    rest = rest.slice_from(idx);
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs
index 96b4accd4bd..ffb053e852e 100644
--- a/src/libstd/sys/common/helper_thread.rs
+++ b/src/libstd/sys/common/helper_thread.rs
@@ -24,9 +24,8 @@ use prelude::*;
 
 use cell::UnsafeCell;
 use mem;
-use rustrt::bookkeeping;
-use rustrt;
 use sync::{StaticMutex, StaticCondvar};
+use rt::{mod, bookkeeping};
 use sys::helper_signal;
 
 use task;
@@ -91,7 +90,7 @@ impl<M: Send> Helper<M> {
                     self.cond.notify_one()
                 });
 
-                rustrt::at_exit(move|:| { self.shutdown() });
+                rt::at_exit(move|:| { self.shutdown() });
                 *self.initialized.get() = true;
             }
         }
diff --git a/src/libstd/sys/common/mod.rs b/src/libstd/sys/common/mod.rs
index 73e1c7bd9e5..aeee4cf01cd 100644
--- a/src/libstd/sys/common/mod.rs
+++ b/src/libstd/sys/common/mod.rs
@@ -19,11 +19,14 @@ use num::Int;
 use path::BytesContainer;
 use collections;
 
+pub mod backtrace;
 pub mod condvar;
 pub mod helper_thread;
 pub mod mutex;
 pub mod net;
 pub mod rwlock;
+pub mod stack;
+pub mod thread;
 pub mod thread_local;
 
 // common error constructors
diff --git a/src/libstd/sys/common/stack.rs b/src/libstd/sys/common/stack.rs
new file mode 100644
index 00000000000..2a88e20c8fa
--- /dev/null
+++ b/src/libstd/sys/common/stack.rs
@@ -0,0 +1,325 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rust stack-limit management
+//!
+//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
+//! overflow for rust tasks. In this scheme, the prologues of all functions are
+//! preceded with a check to see whether the current stack limits are being
+//! exceeded.
+//!
+//! This module provides the functionality necessary in order to manage these
+//! stack limits (which are stored in platform-specific locations). The
+//! functions here are used at the borders of the task lifetime in order to
+//! manage these limits.
+//!
+//! This is an unstable module because this scheme for stack overflow
+//! detection is not guaranteed to continue in the future. Usage of this module
+//! is discouraged unless absolutely necessary.
+
+// iOS related notes
+//
+// It is possible to implement it using idea from
+// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
+//
+// In short: _pthread_{get,set}_specific_direct allows extremely fast
+// access, exactly what is required for segmented stack
+// There is a pool of reserved slots for Apple internal use (0..119)
+// First dynamic allocated pthread key starts with 257 (on iOS7)
+// So using slot 149 should be pretty safe ASSUMING space is reserved
+// for every key < first dynamic key
+//
+// There is also an opportunity to steal keys reserved for Garbage Collection
+// ranges 80..89 and 110..119, especially considering the fact that Garbage
+// Collection was never supposed to work on iOS. But as everybody knows, there is a chance
+// that those slots will be re-used, like it happened with key 95 (moved from
+// JavaScriptCore to CoreText)
+//
+// Unfortunately Apple rejected patch to LLVM which generated
+// corresponding prolog, decision was taken to disable segmented
+// stack support on iOS.
+
+pub const RED_ZONE: uint = 20 * 1024;
+
+/// This function is invoked from rust's current __morestack function. Segmented
+/// stacks are currently not enabled as segmented stacks, but rather one giant
+/// stack segment. This means that whenever we run out of stack, we want to
+/// truly consider it to be stack overflow rather than allocating a new stack.
+#[cfg(not(test))] // in testing, use the original libstd's version
+#[lang = "stack_exhausted"]
+extern fn stack_exhausted() {
+    use intrinsics;
+
+    unsafe {
+        // We're calling this function because the stack just ran out. We need
+        // to call some other rust functions, but if we invoke the functions
+        // right now it'll just trigger this handler being called again. In
+        // order to alleviate this, we move the stack limit to be inside of the
+        // red zone that was allocated for exactly this reason.
+        let limit = get_sp_limit();
+        record_sp_limit(limit - RED_ZONE / 2);
+
+        // This probably isn't the best course of action. Ideally one would want
+        // to unwind the stack here instead of just aborting the entire process.
+        // This is a tricky problem, however. There's a few things which need to
+        // be considered:
+        //
+        //  1. We're here because of a stack overflow, yet unwinding will run
+        //     destructors and hence arbitrary code. What if that code overflows
+        //     the stack? One possibility is to use the above allocation of an
+        //     extra 10k to hope that we don't hit the limit, and if we do then
+        //     abort the whole program. Not the best, but kind of hard to deal
+        //     with unless we want to switch stacks.
+        //
+        //  2. LLVM will optimize functions based on whether they can unwind or
+        //     not. It will flag functions with 'nounwind' if it believes that
+        //     the function cannot trigger unwinding, but if we do unwind on
+        //     stack overflow then it means that we could unwind in any function
+        //     anywhere. We would have to make sure that LLVM only places the
+        //     nounwind flag on functions which don't call any other functions.
+        //
+        //  3. The function that overflowed may have owned arguments. These
+        //     arguments need to have their destructors run, but we haven't even
+        //     begun executing the function yet, so unwinding will not run
+        //     any landing pads for these functions. If this is ignored, then
+        //     the arguments will just be leaked.
+        //
+        // Exactly what to do here is a very delicate topic, and is possibly
+        // still up in the air for what exactly to do. Some relevant issues:
+        //
+        //  #3555 - out-of-stack failure leaks arguments
+        //  #3695 - should there be a stack limit?
+        //  #9855 - possible strategies which could be taken
+        //  #9854 - unwinding on windows through __morestack has never worked
+        //  #2361 - possible implementation of not using landing pads
+
+        ::rt::util::report_overflow();
+
+        intrinsics::abort();
+    }
+}
+
+// Windows maintains a record of upper and lower stack bounds in the Thread Information
+// Block (TIB), and some syscalls do check that addresses which are supposed to be in
+// the stack, indeed lie between these two values.
+// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
+//
+// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
+// For OS-managed stacks (libnative), we let the OS manage them for us.
+//
+// On all other platforms both variants behave identically.
+
+#[inline(always)]
+pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
+    record_sp_limit(stack_lo + RED_ZONE);
+}
+
+#[inline(always)]
+pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
+    // When the old runtime had segmented stacks, it used a calculation that was
+    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
+    // symbol resolution, llvm function calls, etc. In theory this red zone
+    // value is 0, but it matters far less when we have gigantic stacks because
+    // we don't need to be so exact about our stack budget. The "fudge factor"
+    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
+    // size. Again though, we have giant stacks, so we round all these
+    // calculations up to the nice round number of 20k.
+    record_sp_limit(stack_lo + RED_ZONE);
+
+    return target_record_stack_bounds(stack_lo, stack_hi);
+
+    #[cfg(not(windows))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
+
+    #[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+        // stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
+        asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
+        asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
+    }
+    #[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
+        asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
+        asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
+    }
+}
+
+/// Records the current limit of the stack as specified by `end`.
+///
+/// This is stored in an OS-dependent location, likely inside of the thread
+/// local storage. The location that the limit is stored is a pre-ordained
+/// location because it's where LLVM has emitted code to check.
+///
+/// Note that this cannot be called under normal circumstances. This function is
+/// changing the stack limit, so upon returning any further function calls will
+/// possibly be triggering the morestack logic if you're not careful.
+///
+/// Also note that this and all of the inside functions are all flagged as
+/// "inline(always)" because they're messing around with the stack limits.  This
+/// would be unfortunate for the functions themselves to trigger a morestack
+/// invocation (if they were an actual function call).
+#[inline(always)]
+pub unsafe fn record_sp_limit(limit: uint) {
+    return target_record_sp_limit(limit);
+
+    // x86-64
+    #[cfg(all(target_arch = "x86_64",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $$0x60+90*8, %rsi
+              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
+    }
+
+    // x86
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $$0x48+90*4, %eax
+              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
+    }
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "linux", target_os = "freebsd")))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(any(target_arch = "mips",
+              target_arch = "mipsel",
+              all(target_arch = "arm", not(target_os = "ios"))))]
+    #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        use libc::c_void;
+        return record_sp_limit(limit as *const c_void);
+        extern {
+            fn record_sp_limit(limit: *const c_void);
+        }
+    }
+
+    // iOS segmented stack is disabled for now, see related notes
+    #[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
+    unsafe fn target_record_sp_limit(_: uint) {
+    }
+}
+
+/// The counterpart of the function above, this function will fetch the current
+/// stack limit stored in TLS.
+///
+/// Note that all of these functions are meant to be exact counterparts of their
+/// brethren above, except that the operands are reversed.
+///
+/// As with the setter, this function does not have a __morestack header and can
+/// therefore be called in a "we're out of stack" situation.
+#[inline(always)]
+pub unsafe fn get_sp_limit() -> uint {
+    return target_get_sp_limit();
+
+    // x86-64
+    #[cfg(all(target_arch = "x86_64",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq $$0x60+90*8, %rsi
+              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        return 1024;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+
+
+    // x86
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "macos", target_os = "ios")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl $$0x48+90*4, %eax
+              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86",
+              any(target_os = "linux", target_os = "freebsd")))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        return 1024;
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(any(target_arch = "mips",
+              target_arch = "mipsel",
+              all(target_arch = "arm", not(target_os = "ios"))))]
+    #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        use libc::c_void;
+        return get_sp_limit() as uint;
+        extern {
+            fn get_sp_limit() -> *const c_void;
+        }
+    }
+
+    // iOS doesn't support segmented stacks yet. This function might
+    // be called by runtime though so it is unsafe to mark it as
+    // unreachable, let's return a fixed constant.
+    #[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        1024
+    }
+}
diff --git a/src/libstd/sys/common/thread.rs b/src/libstd/sys/common/thread.rs
new file mode 100644
index 00000000000..5e1adfb8714
--- /dev/null
+++ b/src/libstd/sys/common/thread.rs
@@ -0,0 +1,34 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use mem;
+use uint;
+use libc;
+use sys_common::stack;
+use sys::{thread, stack_overflow};
+
+// This is the starting point of rust os threads. The first thing we do
+// is make sure that we don't trigger __morestack (also why this has a
+// no_stack_check annotation), and then we extract the main function
+// and invoke it.
+#[no_stack_check]
+pub fn start_thread(main: *mut libc::c_void) -> thread::rust_thread_return {
+    unsafe {
+        stack::record_os_managed_stack_bounds(0, uint::MAX);
+        let handler = stack_overflow::Handler::new();
+        let f: Box<proc()> = mem::transmute(main);
+        (*f)();
+        drop(handler);
+        mem::transmute(0 as thread::rust_thread_return)
+    }
+}
diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs
index cf56a71d67a..a8bc6bf9d0d 100644
--- a/src/libstd/sys/common/thread_local.rs
+++ b/src/libstd/sys/common/thread_local.rs
@@ -58,7 +58,8 @@
 
 use prelude::*;
 
-use rustrt::exclusive::Exclusive;
+use rt::exclusive::Exclusive;
+use rt;
 use sync::atomic::{mod, AtomicUint};
 use sync::{Once, ONCE_INIT};
 
@@ -283,4 +284,3 @@ mod tests {
         }
     }
 }
-
diff --git a/src/libstd/sys/unix/backtrace.rs b/src/libstd/sys/unix/backtrace.rs
new file mode 100644
index 00000000000..c139dba2c46
--- /dev/null
+++ b/src/libstd/sys/unix/backtrace.rs
@@ -0,0 +1,493 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Backtrace support built on libgcc with some extra OS-specific support
+///
+/// Some methods of getting a backtrace:
+///
+/// * The backtrace() functions on unix. It turns out this doesn't work very
+///   well for green threads on OSX, and the address to symbol portion of it
+///   suffers problems that are described below.
+///
+/// * Using libunwind. This is more difficult than it sounds because libunwind
+///   isn't installed everywhere by default. It's also a bit of a hefty library,
+///   so possibly not the best option. When testing, libunwind was excellent at
+///   getting both accurate backtraces and accurate symbols across platforms.
+///   This route was not chosen in favor of the next option, however.
+///
+/// * We're already using libgcc_s for exceptions in rust (triggering task
+///   unwinding and running destructors on the stack), and it turns out that it
+///   conveniently comes with a function that also gives us a backtrace. All of
+///   these functions look like _Unwind_*, but it's not quite the full
+///   repertoire of the libunwind API. Due to it already being in use, this was
+///   the chosen route of getting a backtrace.
+///
+/// After choosing libgcc_s for backtraces, the sad part is that it will only
+/// give us a stack trace of instruction pointers. Thankfully these instruction
+/// pointers are accurate (they work for green and native threads), but it's
+/// then up to us again to figure out how to translate these addresses to
+/// symbols. As before, we have a few options. Before that, a little bit
+/// of an interlude about symbols. This is my very limited knowledge about
+/// symbol tables, and this information is likely slightly wrong, but the
+/// general idea should be correct.
+///
+/// When talking about symbols, it's helpful to know a few things about where
+/// symbols are located. Some symbols are located in the dynamic symbol table
+/// of the executable which in theory means that they're available for dynamic
+/// linking and lookup. Other symbols end up only in the local symbol table of
+/// the file. This loosely corresponds to pub and priv functions in Rust.
+///
+/// Armed with this knowledge, we know that our solution for address to symbol
+/// translation will need to consult both the local and dynamic symbol tables.
+/// With that in mind, here's our options of translating an address to
+/// a symbol.
+///
+/// * Use dladdr(). The original backtrace()-based idea actually uses dladdr()
+///   behind the scenes to translate, and this is why backtrace() was not used.
+///   Conveniently, this method works fantastically on OSX. It appears dladdr()
+///   uses magic to consult the local symbol table, or we're putting everything
+///   in the dynamic symbol table anyway. Regardless, for OSX, this is the
+///   method used for translation. It's provided by the system and easy to do.
+///
+///   Sadly, all other systems have a dladdr() implementation that does not
+///   consult the local symbol table. This means that most functions are blank
+///   because they don't have symbols. This means that we need another solution.
+///
+/// * Use unw_get_proc_name(). This is part of the libunwind api (not the
+///   libgcc_s version of the libunwind api), but involves taking a dependency
+///   to libunwind. We may pursue this route in the future if we bundle
+///   libunwind, but libunwind was unwieldy enough that it was not chosen at
+///   this time to provide this functionality.
+///
+/// * Shell out to a utility like `readelf`. Crazy though it may sound, it's a
+///   semi-reasonable solution. The stdlib already knows how to spawn processes,
+///   so in theory it could invoke readelf, parse the output, and consult the
+///   local/dynamic symbol tables from there. This ended up not getting chosen
+///   due to the craziness of the idea plus the advent of the next option.
+///
+/// * Use `libbacktrace`. It turns out that this is a small library bundled in
+///   the gcc repository which provides backtrace and symbol translation
+///   functionality. All we really need from it is the backtrace functionality,
+///   and we only really need this on everything that's not OSX, so this is the
+///   chosen route for now.
+///
+/// In summary, the current situation uses libgcc_s to get a trace of stack
+/// pointers, and we use dladdr() or libbacktrace to translate these addresses
+/// to symbols. This is a bit of a hokey implementation as-is, but it works for
+/// all unix platforms we support right now, so it at least gets the job done.
+
+use c_str::CString;
+use io::{IoResult, Writer};
+use libc;
+use mem;
+use option::{Some, None, Option};
+use result::{Ok, Err};
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+
+use sys_common::backtrace::*;
+
+/// As always - iOS on arm uses SjLj exceptions and
+/// _Unwind_Backtrace is even not available there. Still,
+/// backtraces could be extracted using a backtrace function,
+/// which, thankfully, is public
+///
+/// As mentioned in a huge comment block above, backtrace doesn't
+/// play well with green threads, so while it is extremely nice
+/// and simple to use it should be used only on iOS devices as the
+/// only viable option.
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+#[inline(never)]
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    use iter::{Iterator, range};
+    use result;
+    use slice::SliceExt;
+
+    extern {
+        fn backtrace(buf: *mut *mut libc::c_void,
+                     sz: libc::c_int) -> libc::c_int;
+    }
+
+    // While this doesn't require a lock to work, as everything is
+    // local, it still displays much nicer backtraces when a
+    // couple of tasks panic simultaneously.
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    try!(writeln!(w, "stack backtrace:"));
+    // 100 lines should be enough
+    const SIZE: uint = 100;
+    let mut buf: [*mut libc::c_void, ..SIZE] = unsafe {mem::zeroed()};
+    let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as uint};
+
+    // skipping the first one as it is write itself
+    let iter = range(1, cnt).map(|i| {
+        print(w, i as int, buf[i])
+    });
+    result::fold(iter, (), |_, _| ())
+}
+
+#[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+#[inline(never)] // if we know this is a function call, we can skip it when
+                 // tracing
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    use io::IoError;
+
+    struct Context<'a> {
+        idx: int,
+        writer: &'a mut Writer+'a,
+        last_error: Option<IoError>,
+    }
+
+    // When using libbacktrace, we use some necessary global state, so we
+    // need to prevent more than one thread from entering this block. This
+    // is semi-reasonable in terms of printing anyway, and we know that all
+    // I/O done here is blocking I/O, not green I/O, so we don't have to
+    // worry about this being a native vs green mutex.
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    try!(writeln!(w, "stack backtrace:"));
+
+    let mut cx = Context { writer: w, last_error: None, idx: 0 };
+    return match unsafe {
+        uw::_Unwind_Backtrace(trace_fn,
+                              &mut cx as *mut Context as *mut libc::c_void)
+    } {
+        uw::_URC_NO_REASON => {
+            match cx.last_error {
+                Some(err) => Err(err),
+                None => Ok(())
+            }
+        }
+        _ => Ok(()),
+    };
+
+    extern fn trace_fn(ctx: *mut uw::_Unwind_Context,
+                       arg: *mut libc::c_void) -> uw::_Unwind_Reason_Code {
+        let cx: &mut Context = unsafe { mem::transmute(arg) };
+        let ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut libc::c_void };
+        // dladdr() on osx gets whiny when we use FindEnclosingFunction, and
+        // it appears to work fine without it, so we only use
+        // FindEnclosingFunction on non-osx platforms. In doing so, we get a
+        // slightly more accurate stack trace in the process.
+        //
+        // This is often because panic involves the last instruction of a
+        // function being "call std::rt::begin_unwind", with no ret
+        // instructions after it. This means that the return instruction
+        // pointer points *outside* of the calling function, and by
+        // unwinding it we go back to the original function.
+        let ip = if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
+            ip
+        } else {
+            unsafe { uw::_Unwind_FindEnclosingFunction(ip) }
+        };
+
+        // Don't print out the first few frames (they're not user frames)
+        cx.idx += 1;
+        if cx.idx <= 0 { return uw::_URC_NO_REASON }
+        // Don't print ginormous backtraces
+        if cx.idx > 100 {
+            match write!(cx.writer, " ... <frames omitted>\n") {
+                Ok(()) => {}
+                Err(e) => { cx.last_error = Some(e); }
+            }
+            return uw::_URC_FAILURE
+        }
+
+        // Once we hit an error, stop trying to print more frames
+        if cx.last_error.is_some() { return uw::_URC_FAILURE }
+
+        match print(cx.writer, cx.idx, ip) {
+            Ok(()) => {}
+            Err(e) => { cx.last_error = Some(e); }
+        }
+
+        // keep going
+        return uw::_URC_NO_REASON
+    }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
+    use intrinsics;
+    #[repr(C)]
+    struct Dl_info {
+        dli_fname: *const libc::c_char,
+        dli_fbase: *mut libc::c_void,
+        dli_sname: *const libc::c_char,
+        dli_saddr: *mut libc::c_void,
+    }
+    extern {
+        fn dladdr(addr: *const libc::c_void,
+                  info: *mut Dl_info) -> libc::c_int;
+    }
+
+    let mut info: Dl_info = unsafe { intrinsics::init() };
+    if unsafe { dladdr(addr as *const libc::c_void, &mut info) == 0 } {
+        output(w, idx,addr, None)
+    } else {
+        output(w, idx, addr, Some(unsafe {
+            CString::new(info.dli_sname, false)
+        }))
+    }
+}
+
+#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+fn print(w: &mut Writer, idx: int, addr: *mut libc::c_void) -> IoResult<()> {
+    use iter::Iterator;
+    use os;
+    use path::GenericPath;
+    use ptr::RawPtr;
+    use ptr;
+    use slice::SliceExt;
+
+    ////////////////////////////////////////////////////////////////////////
+    // libbacktrace.h API
+    ////////////////////////////////////////////////////////////////////////
+    type backtrace_syminfo_callback =
+        extern "C" fn(data: *mut libc::c_void,
+                      pc: libc::uintptr_t,
+                      symname: *const libc::c_char,
+                      symval: libc::uintptr_t,
+                      symsize: libc::uintptr_t);
+    type backtrace_error_callback =
+        extern "C" fn(data: *mut libc::c_void,
+                      msg: *const libc::c_char,
+                      errnum: libc::c_int);
+    enum backtrace_state {}
+    #[link(name = "backtrace", kind = "static")]
+    #[cfg(not(test))]
+    extern {}
+
+    extern {
+        fn backtrace_create_state(filename: *const libc::c_char,
+                                  threaded: libc::c_int,
+                                  error: backtrace_error_callback,
+                                  data: *mut libc::c_void)
+                                        -> *mut backtrace_state;
+        fn backtrace_syminfo(state: *mut backtrace_state,
+                             addr: libc::uintptr_t,
+                             cb: backtrace_syminfo_callback,
+                             error: backtrace_error_callback,
+                             data: *mut libc::c_void) -> libc::c_int;
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    // helper callbacks
+    ////////////////////////////////////////////////////////////////////////
+
+    extern fn error_cb(_data: *mut libc::c_void, _msg: *const libc::c_char,
+                       _errnum: libc::c_int) {
+        // do nothing for now
+    }
+    extern fn syminfo_cb(data: *mut libc::c_void,
+                         _pc: libc::uintptr_t,
+                         symname: *const libc::c_char,
+                         _symval: libc::uintptr_t,
+                         _symsize: libc::uintptr_t) {
+        let slot = data as *mut *const libc::c_char;
+        unsafe { *slot = symname; }
+    }
+
+    // The libbacktrace API supports creating a state, but it does not
+    // support destroying a state. I personally take this to mean that a
+    // state is meant to be created and then live forever.
+    //
+    // I would love to register an at_exit() handler which cleans up this
+    // state, but libbacktrace provides no way to do so.
+    //
+    // With these constraints, this function has a statically cached state
+    // that is calculated the first time this is requested. Remember that
+    // backtracing all happens serially (one global lock).
+    //
+    // An additionally oddity in this function is that we initialize the
+    // filename via self_exe_name() to pass to libbacktrace. It turns out
+    // that on Linux libbacktrace seamlessly gets the filename of the
+    // current executable, but this fails on FreeBSD. By always providing
+    // it, we make sure that libbacktrace never has a reason to not look up
+    // the symbols. The libbacktrace API also states that the filename must
+    // be in "permanent memory", so we copy it to a static and then use the
+    // static as the pointer.
+    //
+    // FIXME: We also call self_exe_name() on DragonFly BSD. I haven't
+    //        tested if this is required or not.
+    unsafe fn init_state() -> *mut backtrace_state {
+        static mut STATE: *mut backtrace_state = 0 as *mut backtrace_state;
+        static mut LAST_FILENAME: [libc::c_char, ..256] = [0, ..256];
+        if !STATE.is_null() { return STATE }
+        let selfname = if cfg!(target_os = "freebsd") ||
+                          cfg!(target_os = "dragonfly") {
+            os::self_exe_name()
+        } else {
+            None
+        };
+        let filename = match selfname {
+            Some(path) => {
+                let bytes = path.as_vec();
+                if bytes.len() < LAST_FILENAME.len() {
+                    let i = bytes.iter();
+                    for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
+                        *slot = *val as libc::c_char;
+                    }
+                    LAST_FILENAME.as_ptr()
+                } else {
+                    ptr::null()
+                }
+            }
+            None => ptr::null(),
+        };
+        STATE = backtrace_create_state(filename, 0, error_cb,
+                                       ptr::null_mut());
+        return STATE
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    // translation
+    ////////////////////////////////////////////////////////////////////////
+
+    // backtrace errors are currently swept under the rug, only I/O
+    // errors are reported
+    let state = unsafe { init_state() };
+    if state.is_null() {
+        return output(w, idx, addr, None)
+    }
+    let mut data = 0 as *const libc::c_char;
+    let data_addr = &mut data as *mut *const libc::c_char;
+    let ret = unsafe {
+        backtrace_syminfo(state, addr as libc::uintptr_t,
+                          syminfo_cb, error_cb,
+                          data_addr as *mut libc::c_void)
+    };
+    if ret == 0 || data.is_null() {
+        output(w, idx, addr, None)
+    } else {
+        output(w, idx, addr, Some(unsafe { CString::new(data, false) }))
+    }
+}
+
+// Finally, after all that work above, we can emit a symbol.
+fn output(w: &mut Writer, idx: int, addr: *mut libc::c_void,
+          s: Option<CString>) -> IoResult<()> {
+    try!(write!(w, "  {:2}: {:2$} - ", idx, addr, HEX_WIDTH));
+    match s.as_ref().and_then(|c| c.as_str()) {
+        Some(string) => try!(demangle(w, string)),
+        None => try!(write!(w, "<unknown>")),
+    }
+    w.write(&['\n' as u8])
+}
+
+/// Unwind library interface used for backtraces
+///
+/// Note that dead code is allowed as these are just bindings;
+/// iOS doesn't use all of them, but adding more
+/// platform-specific configs pollutes the code too much.
+#[allow(non_camel_case_types)]
+#[allow(non_snake_case)]
+#[allow(dead_code)]
+mod uw {
+    pub use self::_Unwind_Reason_Code::*;
+
+    use libc;
+
+    #[repr(C)]
+    pub enum _Unwind_Reason_Code {
+        _URC_NO_REASON = 0,
+        _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+        _URC_FATAL_PHASE2_ERROR = 2,
+        _URC_FATAL_PHASE1_ERROR = 3,
+        _URC_NORMAL_STOP = 4,
+        _URC_END_OF_STACK = 5,
+        _URC_HANDLER_FOUND = 6,
+        _URC_INSTALL_CONTEXT = 7,
+        _URC_CONTINUE_UNWIND = 8,
+        _URC_FAILURE = 9, // used only by ARM EABI
+    }
+
+    pub enum _Unwind_Context {}
+
+    pub type _Unwind_Trace_Fn =
+            extern fn(ctx: *mut _Unwind_Context,
+                      arg: *mut libc::c_void) -> _Unwind_Reason_Code;
+
+    extern {
+        // No native _Unwind_Backtrace on iOS
+        #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+        pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
+                                 trace_argument: *mut libc::c_void)
+                    -> _Unwind_Reason_Code;
+
+        #[cfg(all(not(target_os = "android"),
+                  not(all(target_os = "linux", target_arch = "arm"))))]
+        pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
+
+        #[cfg(all(not(target_os = "android"),
+                  not(all(target_os = "linux", target_arch = "arm"))))]
+        pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+            -> *mut libc::c_void;
+    }
+
+    // On android, the function _Unwind_GetIP is a macro, and this is the
+    // expansion of the macro. This is all copy/pasted directly from the
+    // header file with the definition of _Unwind_GetIP.
+    #[cfg(any(target_os = "android",
+              all(target_os = "linux", target_arch = "arm")))]
+    pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
+        #[repr(C)]
+        enum _Unwind_VRS_Result {
+            _UVRSR_OK = 0,
+            _UVRSR_NOT_IMPLEMENTED = 1,
+            _UVRSR_FAILED = 2,
+        }
+        #[repr(C)]
+        enum _Unwind_VRS_RegClass {
+            _UVRSC_CORE = 0,
+            _UVRSC_VFP = 1,
+            _UVRSC_FPA = 2,
+            _UVRSC_WMMXD = 3,
+            _UVRSC_WMMXC = 4,
+        }
+        #[repr(C)]
+        enum _Unwind_VRS_DataRepresentation {
+            _UVRSD_UINT32 = 0,
+            _UVRSD_VFPX = 1,
+            _UVRSD_FPAX = 2,
+            _UVRSD_UINT64 = 3,
+            _UVRSD_FLOAT = 4,
+            _UVRSD_DOUBLE = 5,
+        }
+
+        type _Unwind_Word = libc::c_uint;
+        extern {
+            fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
+                               klass: _Unwind_VRS_RegClass,
+                               word: _Unwind_Word,
+                               repr: _Unwind_VRS_DataRepresentation,
+                               data: *mut libc::c_void)
+                -> _Unwind_VRS_Result;
+        }
+
+        let mut val: _Unwind_Word = 0;
+        let ptr = &mut val as *mut _Unwind_Word;
+        let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
+                                _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
+                                ptr as *mut libc::c_void);
+        (val & !1) as libc::uintptr_t
+    }
+
+    // This function also doesn't exist on Android or ARM/Linux, so make it
+    // a no-op
+    #[cfg(any(target_os = "android",
+              all(target_os = "linux", target_arch = "arm")))]
+    pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+        -> *mut libc::c_void
+    {
+        pc
+    }
+}
diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs
index acbf2096326..f3babca3287 100644
--- a/src/libstd/sys/unix/mod.rs
+++ b/src/libstd/sys/unix/mod.rs
@@ -34,6 +34,7 @@ macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
     };
 ) }
 
+pub mod backtrace;
 pub mod c;
 pub mod ext;
 pub mod condvar;
@@ -44,8 +45,10 @@ pub mod os;
 pub mod pipe;
 pub mod process;
 pub mod rwlock;
+pub mod stack_overflow;
 pub mod sync;
 pub mod tcp;
+pub mod thread;
 pub mod thread_local;
 pub mod timer;
 pub mod tty;
diff --git a/src/libstd/sys/unix/stack_overflow.rs b/src/libstd/sys/unix/stack_overflow.rs
new file mode 100644
index 00000000000..73b98f762b4
--- /dev/null
+++ b/src/libstd/sys/unix/stack_overflow.rs
@@ -0,0 +1,291 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc;
+use core::prelude::*;
+use self::imp::{make_handler, drop_handler};
+
+pub use self::imp::{init, cleanup};
+
+pub struct Handler {
+    _data: *mut libc::c_void
+}
+
+impl Handler {
+    pub unsafe fn new() -> Handler {
+        make_handler()
+    }
+}
+
+impl Drop for Handler {
+    fn drop(&mut self) {
+        unsafe {
+            drop_handler(self);
+        }
+    }
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+mod imp {
+    use core::prelude::*;
+    use sys_common::stack;
+
+    use super::Handler;
+    use rt::util::report_overflow;
+    use mem;
+    use ptr;
+    use intrinsics;
+    use self::signal::{siginfo, sigaction, SIGBUS, SIG_DFL,
+                       SA_SIGINFO, SA_ONSTACK, sigaltstack,
+                       SIGSTKSZ};
+    use rt::local::Local;
+    use rt::task::Task;
+    use libc;
+    use libc::funcs::posix88::mman::{mmap, munmap};
+    use libc::consts::os::posix88::{SIGSEGV,
+                                    PROT_READ,
+                                    PROT_WRITE,
+                                    MAP_PRIVATE,
+                                    MAP_ANON,
+                                    MAP_FAILED};
+
+
+    // This is initialized in init() and only read from after
+    static mut PAGE_SIZE: uint = 0;
+
+    // get_task_info is called from an exception / signal handler.
+    // It returns the guard page of the current task or 0 if that
+    // guard page doesn't exist. None is returned if there's currently
+    // no local task.
+    unsafe fn get_task_guard_page() -> Option<uint> {
+        let task: Option<*mut Task> = Local::try_unsafe_borrow();
+        task.map(|task| (&*task).stack_guard().unwrap_or(0))
+    }
+
+
+    #[no_stack_check]
+    unsafe extern fn signal_handler(signum: libc::c_int,
+                                     info: *mut siginfo,
+                                     _data: *mut libc::c_void) {
+
+        // We can not return from a SIGSEGV or SIGBUS signal.
+        // See: https://www.gnu.org/software/libc/manual/html_node/Handler-Returns.html
+
+        unsafe fn term(signum: libc::c_int) -> ! {
+            use core::mem::transmute;
+
+            signal(signum, transmute(SIG_DFL));
+            raise(signum);
+            intrinsics::abort();
+        }
+
+        // We're calling into functions with stack checks
+        stack::record_sp_limit(0);
+
+        match get_task_guard_page() {
+            Some(guard) => {
+                let addr = (*info).si_addr as uint;
+
+                if guard == 0 || addr < guard - PAGE_SIZE || addr >= guard {
+                    term(signum);
+                }
+
+                report_overflow();
+
+                intrinsics::abort()
+            }
+            None => term(signum)
+        }
+    }
+
+    static mut MAIN_ALTSTACK: *mut libc::c_void = 0 as *mut libc::c_void;
+
+    pub unsafe fn init() {
+        let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
+        if psize == -1 {
+            panic!("failed to get page size");
+        }
+
+        PAGE_SIZE = psize as uint;
+
+        let mut action: sigaction = mem::zeroed();
+        action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        action.sa_sigaction = signal_handler as sighandler_t;
+        sigaction(SIGSEGV, &action, ptr::null_mut());
+        sigaction(SIGBUS, &action, ptr::null_mut());
+
+        let handler = make_handler();
+        MAIN_ALTSTACK = handler._data;
+        mem::forget(handler);
+    }
+
+    pub unsafe fn cleanup() {
+        Handler { _data: MAIN_ALTSTACK };
+    }
+
+    pub unsafe fn make_handler() -> Handler {
+        let alt_stack = mmap(ptr::null_mut(),
+                             signal::SIGSTKSZ,
+                             PROT_READ | PROT_WRITE,
+                             MAP_PRIVATE | MAP_ANON,
+                             -1,
+                             0);
+        if alt_stack == MAP_FAILED {
+            panic!("failed to allocate an alternative stack");
+        }
+
+        let mut stack: sigaltstack = mem::zeroed();
+
+        stack.ss_sp = alt_stack;
+        stack.ss_flags = 0;
+        stack.ss_size = SIGSTKSZ;
+
+        sigaltstack(&stack, ptr::null_mut());
+
+        Handler { _data: alt_stack }
+    }
+
+    pub unsafe fn drop_handler(handler: &mut Handler) {
+        munmap(handler._data, SIGSTKSZ);
+    }
+
+    type sighandler_t = *mut libc::c_void;
+
+    #[cfg(any(all(target_os = "linux", target_arch = "x86"), // may not match
+              all(target_os = "linux", target_arch = "x86_64"),
+              all(target_os = "linux", target_arch = "arm"), // may not match
+              all(target_os = "linux", target_arch = "mips"), // may not match
+              all(target_os = "linux", target_arch = "mipsel"), // may not match
+              target_os = "android"))] // may not match
+    mod signal {
+        use libc;
+        use super::sighandler_t;
+
+        pub static SA_ONSTACK: libc::c_int = 0x08000000;
+        pub static SA_SIGINFO: libc::c_int = 0x00000004;
+        pub static SIGBUS: libc::c_int = 7;
+
+        pub static SIGSTKSZ: libc::size_t = 8192;
+
+        pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
+
+        // This definition is not as accurate as it could be, {si_addr} is
+        // actually a giant union. Currently we're only interested in that field,
+        // however.
+        #[repr(C)]
+        pub struct siginfo {
+            si_signo: libc::c_int,
+            si_errno: libc::c_int,
+            si_code: libc::c_int,
+            pub si_addr: *mut libc::c_void
+        }
+
+        #[repr(C)]
+        pub struct sigaction {
+            pub sa_sigaction: sighandler_t,
+            pub sa_mask: sigset_t,
+            pub sa_flags: libc::c_int,
+            sa_restorer: *mut libc::c_void,
+        }
+
+        #[cfg(target_word_size = "32")]
+        #[repr(C)]
+        pub struct sigset_t {
+            __val: [libc::c_ulong, ..32],
+        }
+        #[cfg(target_word_size = "64")]
+        #[repr(C)]
+        pub struct sigset_t {
+            __val: [libc::c_ulong, ..16],
+        }
+
+        #[repr(C)]
+        pub struct sigaltstack {
+            pub ss_sp: *mut libc::c_void,
+            pub ss_flags: libc::c_int,
+            pub ss_size: libc::size_t
+        }
+
+    }
+
+    #[cfg(target_os = "macos")]
+    mod signal {
+        use libc;
+        use super::sighandler_t;
+
+        pub const SA_ONSTACK: libc::c_int = 0x0001;
+        pub const SA_SIGINFO: libc::c_int = 0x0040;
+        pub const SIGBUS: libc::c_int = 10;
+
+        pub const SIGSTKSZ: libc::size_t = 131072;
+
+        pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
+
+        pub type sigset_t = u32;
+
+        // This structure has more fields, but we're not all that interested in
+        // them.
+        #[repr(C)]
+        pub struct siginfo {
+            pub si_signo: libc::c_int,
+            pub si_errno: libc::c_int,
+            pub si_code: libc::c_int,
+            pub pid: libc::pid_t,
+            pub uid: libc::uid_t,
+            pub status: libc::c_int,
+            pub si_addr: *mut libc::c_void
+        }
+
+        #[repr(C)]
+        pub struct sigaltstack {
+            pub ss_sp: *mut libc::c_void,
+            pub ss_size: libc::size_t,
+            pub ss_flags: libc::c_int
+        }
+
+        #[repr(C)]
+        pub struct sigaction {
+            pub sa_sigaction: sighandler_t,
+            pub sa_mask: sigset_t,
+            pub sa_flags: libc::c_int,
+        }
+    }
+
+    extern {
+        pub fn signal(signum: libc::c_int, handler: sighandler_t) -> sighandler_t;
+        pub fn raise(signum: libc::c_int) -> libc::c_int;
+
+        pub fn sigaction(signum: libc::c_int,
+                         act: *const sigaction,
+                         oldact: *mut sigaction) -> libc::c_int;
+
+        pub fn sigaltstack(ss: *const sigaltstack,
+                           oss: *mut sigaltstack) -> libc::c_int;
+    }
+}
+
+#[cfg(not(any(target_os = "linux",
+              target_os = "macos")))]
+mod imp {
+    use libc;
+
+    pub unsafe fn init() {
+    }
+
+    pub unsafe fn cleanup() {
+    }
+
+    pub unsafe fn make_handler() -> super::Handler {
+        super::Handler { _data: 0i as *mut libc::c_void }
+    }
+
+    pub unsafe fn drop_handler(_handler: &mut super::Handler) {
+    }
+}
diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs
new file mode 100644
index 00000000000..02da3a19818
--- /dev/null
+++ b/src/libstd/sys/unix/thread.rs
@@ -0,0 +1,270 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use cmp;
+use mem;
+use ptr;
+use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
+use libc;
+
+use sys_common::stack::RED_ZONE;
+use sys_common::thread::*;
+
+// Platform aliases: a native thread is a pthread handle, and thread entry
+// points return the pthread-style `void*`.
+pub type rust_thread = libc::pthread_t;
+pub type rust_thread_return = *mut u8;
+pub type StartFn = extern "C" fn(*mut libc::c_void) -> rust_thread_return;
+
+// Raw entry point handed to pthread_create; it simply forwards to the
+// shared start_thread glue.  #[no_stack_check] because this runs before
+// the runtime's stack-limit bookkeeping is set up for the new thread.
+#[no_stack_check]
+pub extern fn thread_start(main: *mut libc::c_void) -> rust_thread_return {
+    return start_thread(main);
+}
+
+// Stub guard-page queries for targets without an implementation: an
+// address of 0 means "no guard page known".
+#[cfg(all(not(target_os = "linux"), not(target_os = "macos")))]
+pub mod guard {
+    pub unsafe fn current() -> uint {
+        0
+    }
+
+    pub unsafe fn main() -> uint {
+        0
+    }
+
+    pub unsafe fn init() {
+    }
+}
+
+// Guard-page discovery for the main thread (init/main) and for arbitrary
+// threads (current), used by the stack-overflow handler to recognize
+// guard-page faults.
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+pub mod guard {
+    use super::*;
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    use mem;
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    use ptr;
+    use libc;
+    use libc::funcs::posix88::mman::{mmap};
+    use libc::consts::os::posix88::{PROT_NONE,
+                                    MAP_PRIVATE,
+                                    MAP_ANON,
+                                    MAP_FAILED,
+                                    MAP_FIXED};
+
+    // These are initialized in init() and only read from afterwards.
+    static mut PAGE_SIZE: uint = 0;
+    static mut GUARD_PAGE: uint = 0;
+
+    // On macOS the pthread API reports the stack bounds directly, so the
+    // lowest stack address is just current().
+    #[cfg(target_os = "macos")]
+    unsafe fn get_stack_start() -> *mut libc::c_void {
+        current() as *mut libc::c_void
+    }
+
+    // On Linux/Android, query the thread attributes for the lowest
+    // address of the current thread's stack.
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    unsafe fn get_stack_start() -> *mut libc::c_void {
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
+            panic!("failed to get thread attributes");
+        }
+        let mut stackaddr = ptr::null_mut();
+        let mut stacksize = 0;
+        if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
+            panic!("failed to get stack information");
+        }
+        if pthread_attr_destroy(&mut attr) != 0 {
+            panic!("failed to destroy thread attributes");
+        }
+        stackaddr
+    }
+
+    // Install a guard page at the bottom of the main thread's stack and
+    // remember its upper end in GUARD_PAGE.  Called once at startup.
+    pub unsafe fn init() {
+        let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
+        if psize == -1 {
+            panic!("failed to get page size");
+        }
+
+        PAGE_SIZE = psize as uint;
+
+        let stackaddr = get_stack_start();
+
+        // Remap the last page of the stack with PROT_NONE.
+        // This ensures SIGBUS will be raised on
+        // stack overflow.
+        let result = mmap(stackaddr,
+                          PAGE_SIZE as libc::size_t,
+                          PROT_NONE,
+                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                          -1,
+                          0);
+
+        if result != stackaddr || result == MAP_FAILED {
+            panic!("failed to allocate a guard page");
+        }
+
+        // NOTE(review): on Linux the guard boundary is placed two pages
+        // above the reported stack start, one page elsewhere — presumably
+        // because pthread_attr_getstack already includes a guard region
+        // on Linux; confirm against glibc behavior.
+        let offset = if cfg!(target_os = "linux") {
+            2
+        } else {
+            1
+        };
+
+        GUARD_PAGE = stackaddr as uint + offset * PAGE_SIZE;
+    }
+
+    // Guard-page address of the main thread, as recorded by init().
+    pub unsafe fn main() -> uint {
+        GUARD_PAGE
+    }
+
+    // macOS: stack top minus stack size gives the lowest stack address.
+    #[cfg(target_os = "macos")]
+    pub unsafe fn current() -> uint {
+        (pthread_get_stackaddr_np(pthread_self()) as libc::size_t -
+         pthread_get_stacksize_np(pthread_self())) as uint
+    }
+
+    // Linux/Android: derive the guard boundary from the pthread-reported
+    // stack start plus the configured guard size.
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    pub unsafe fn current() -> uint {
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
+            panic!("failed to get thread attributes");
+        }
+        let mut guardsize = 0;
+        if pthread_attr_getguardsize(&attr, &mut guardsize) != 0 {
+            panic!("failed to get stack guard page");
+        }
+        if guardsize == 0 {
+            panic!("there is no guard page");
+        }
+        let mut stackaddr = ptr::null_mut();
+        let mut stacksize = 0;
+        if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
+            panic!("failed to get stack information");
+        }
+        if pthread_attr_destroy(&mut attr) != 0 {
+            panic!("failed to destroy thread attributes");
+        }
+
+        stackaddr as uint + guardsize as uint
+    }
+}
+
+// Spawn a joinable native thread running closure `p` with at least `stack`
+// bytes of stack.  Ownership of the boxed closure is transferred to the new
+// thread through a raw pointer; if pthread_create fails the box is
+// reconstituted here so it is freed rather than leaked.
+pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+    let mut native: libc::pthread_t = mem::zeroed();
+    let mut attr: libc::pthread_attr_t = mem::zeroed();
+    assert_eq!(pthread_attr_init(&mut attr), 0);
+    assert_eq!(pthread_attr_setdetachstate(&mut attr,
+                                           PTHREAD_CREATE_JOINABLE), 0);
+
+    // Reserve room for the red zone, the runtime's stack of last resort.
+    let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as uint);
+    match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
+        0 => {
+        },
+        libc::EINVAL => {
+            // EINVAL means |stack_size| is either too small or not a
+            // multiple of the system page size.  Because it's definitely
+            // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+            // Round up to the nearest page and try again.
+            // (`-(page_size as int - 1) as uint - 1` is the two's-complement
+            // spelling of `!(page_size - 1)`, i.e. the round-down mask.)
+            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as uint;
+            let stack_size = (stack_size + page_size - 1) &
+                             (-(page_size as int - 1) as uint - 1);
+            assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
+        },
+        errno => {
+            // This cannot really happen.
+            panic!("pthread_attr_setstacksize() error: {}", errno);
+        },
+    };
+
+    // Hand the box to the new thread as an opaque pointer.
+    let arg: *mut libc::c_void = mem::transmute(p);
+    let ret = pthread_create(&mut native, &attr, thread_start, arg);
+    assert_eq!(pthread_attr_destroy(&mut attr), 0);
+
+    if ret != 0 {
+        // be sure to not leak the closure
+        let _p: Box<proc():Send> = mem::transmute(arg);
+        panic!("failed to spawn native thread: {}", ret);
+    }
+    native
+}
+
+// Block until the given thread exits; its return value is discarded.
+pub unsafe fn join(native: rust_thread) {
+    assert_eq!(pthread_join(native, ptr::null_mut()), 0);
+}
+
+// Release the thread handle; the thread reclaims its own resources on exit.
+pub unsafe fn detach(native: rust_thread) {
+    assert_eq!(pthread_detach(native), 0);
+}
+
+// Voluntarily cede the remainder of this thread's timeslice.
+pub unsafe fn yield_now() { assert_eq!(sched_yield(), 0); }
+// glibc >= 2.15 has a __pthread_get_minstack() function that returns
+// PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
+// storage.  We need that information to avoid blowing up when a small stack
+// is created in an application with big thread-local storage requirements.
+// See #6233 for rationale and details.
+//
+// Link weakly to the symbol for compatibility with older versions of glibc.
+// Assumes that we've been dynamically linked to libpthread but that is
+// currently always the case.  Note that you need to check that the symbol
+// is non-null before calling it!
+#[cfg(target_os = "linux")]
+fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
+    type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
+    extern {
+        #[linkage = "extern_weak"]
+        static __pthread_get_minstack: *const ();
+    }
+    // Null means the running glibc is older than 2.15 and lacks the symbol.
+    if __pthread_get_minstack.is_null() {
+        PTHREAD_STACK_MIN
+    } else {
+        unsafe { mem::transmute::<*const (), F>(__pthread_get_minstack)(attr) }
+    }
+}
+
+// __pthread_get_minstack() is marked as weak but extern_weak linkage is
+// not supported on OS X, hence this kludge...
+#[cfg(not(target_os = "linux"))]
+fn min_stack_size(_: *const libc::pthread_attr_t) -> libc::size_t {
+    PTHREAD_STACK_MIN
+}
+
+// Linux-only pthread introspection used by the guard module above.
+#[cfg(any(target_os = "linux"))]
+extern {
+    pub fn pthread_self() -> libc::pthread_t;
+    pub fn pthread_getattr_np(native: libc::pthread_t,
+                              attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    pub fn pthread_attr_getguardsize(attr: *const libc::pthread_attr_t,
+                                     guardsize: *mut libc::size_t) -> libc::c_int;
+    pub fn pthread_attr_getstack(attr: *const libc::pthread_attr_t,
+                                 stackaddr: *mut *mut libc::c_void,
+                                 stacksize: *mut libc::size_t) -> libc::c_int;
+}
+
+// macOS-only stack introspection (non-portable `_np` interfaces).
+#[cfg(target_os = "macos")]
+extern {
+    pub fn pthread_self() -> libc::pthread_t;
+    pub fn pthread_get_stackaddr_np(thread: libc::pthread_t) -> *mut libc::c_void;
+    pub fn pthread_get_stacksize_np(thread: libc::pthread_t) -> libc::size_t;
+}
+
+// Portable pthread entry points used by create/join/detach/yield above.
+extern {
+    fn pthread_create(native: *mut libc::pthread_t,
+                      attr: *const libc::pthread_attr_t,
+                      f: StartFn,
+                      value: *mut libc::c_void) -> libc::c_int;
+    fn pthread_join(native: libc::pthread_t,
+                    value: *mut *mut libc::c_void) -> libc::c_int;
+    fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    pub fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+    fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
+                                 stack_size: libc::size_t) -> libc::c_int;
+    fn pthread_attr_setdetachstate(attr: *mut libc::pthread_attr_t,
+                                   state: libc::c_int) -> libc::c_int;
+    fn pthread_detach(thread: libc::pthread_t) -> libc::c_int;
+    fn sched_yield() -> libc::c_int;
+}
diff --git a/src/libstd/sys/windows/backtrace.rs b/src/libstd/sys/windows/backtrace.rs
new file mode 100644
index 00000000000..833b69d6cbe
--- /dev/null
+++ b/src/libstd/sys/windows/backtrace.rs
@@ -0,0 +1,371 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+/// As always, windows has something very different than unix, we mainly want
+/// to avoid having to depend too much on libunwind for windows.
+///
+/// If you google around, you'll find a fair bit of references to built-in
+/// functions to get backtraces on windows. It turns out that most of these are
+/// in an external library called dbghelp. I was unable to find this library
+/// via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
+/// of it.
+///
+/// You'll also find that there's a function called CaptureStackBackTrace
+/// mentioned frequently (which is also easy to use), but sadly I didn't have a
+/// copy of that function in my mingw install (maybe it was broken?). Instead,
+/// this takes the route of using StackWalk64 in order to walk the stack.
+
+use c_str::CString;
+use intrinsics;
+use io::{IoResult, Writer};
+use libc;
+use mem;
+use ops::Drop;
+use option::{Some, None};
+use path::Path;
+use result::{Ok, Err};
+use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+use slice::SliceExt;
+use str::StrPrelude;
+use dynamic_lib::DynamicLibrary;
+
+use sys_common::backtrace::*;
+
+#[allow(non_snake_case)]
+extern "system" {
+    fn GetCurrentProcess() -> libc::HANDLE;
+    fn GetCurrentThread() -> libc::HANDLE;
+    fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
+}
+
+// Function-pointer types for the dbghelp.dll entry points we load at
+// runtime (see write() below) instead of linking against.
+type SymFromAddrFn =
+    extern "system" fn(libc::HANDLE, u64, *mut u64,
+                       *mut SYMBOL_INFO) -> libc::BOOL;
+type SymInitializeFn =
+    extern "system" fn(libc::HANDLE, *mut libc::c_void,
+                       libc::BOOL) -> libc::BOOL;
+type SymCleanupFn =
+    extern "system" fn(libc::HANDLE) -> libc::BOOL;
+
+type StackWalk64Fn =
+    extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
+                       *mut STACKFRAME64, *mut arch::CONTEXT,
+                       *mut libc::c_void, *mut libc::c_void,
+                       *mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
+
+const MAX_SYM_NAME: uint = 2000;
+const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
+const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
+const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
+
+#[repr(C)]
+struct SYMBOL_INFO {
+    SizeOfStruct: libc::c_ulong,
+    TypeIndex: libc::c_ulong,
+    Reserved: [u64, ..2],
+    Index: libc::c_ulong,
+    Size: libc::c_ulong,
+    ModBase: u64,
+    Flags: libc::c_ulong,
+    Value: u64,
+    Address: u64,
+    Register: libc::c_ulong,
+    Scope: libc::c_ulong,
+    Tag: libc::c_ulong,
+    NameLen: libc::c_ulong,
+    MaxNameLen: libc::c_ulong,
+    // note that windows has this as 1, but it basically just means that
+    // the name is inline at the end of the struct. For us, we just bump
+    // the struct size up to MAX_SYM_NAME.
+    Name: [libc::c_char, ..MAX_SYM_NAME],
+}
+
+
+#[repr(C)]
+enum ADDRESS_MODE {
+    AddrMode1616,
+    AddrMode1632,
+    AddrModeReal,
+    AddrModeFlat,
+}
+
+// The three structs below are passed by pointer to StackWalk64, so their
+// layout must match the C definitions exactly; they therefore need
+// #[repr(C)] just like SYMBOL_INFO above (the default Rust representation
+// leaves field order unspecified).
+#[repr(C)]
+struct ADDRESS64 {
+    Offset: u64,
+    Segment: u16,
+    Mode: ADDRESS_MODE,
+}
+
+#[repr(C)]
+struct STACKFRAME64 {
+    AddrPC: ADDRESS64,
+    AddrReturn: ADDRESS64,
+    AddrFrame: ADDRESS64,
+    AddrStack: ADDRESS64,
+    AddrBStore: ADDRESS64,
+    FuncTableEntry: *mut libc::c_void,
+    Params: [u64, ..4],
+    Far: libc::BOOL,
+    Virtual: libc::BOOL,
+    Reserved: [u64, ..3],
+    KdHelp: KDHELP64,
+}
+
+#[repr(C)]
+struct KDHELP64 {
+    Thread: u64,
+    ThCallbackStack: libc::DWORD,
+    ThCallbackBStore: libc::DWORD,
+    NextCallback: libc::DWORD,
+    FramePointer: libc::DWORD,
+    KiCallUserMode: u64,
+    KeUserCallbackDispatcher: u64,
+    SystemRangeStart: u64,
+    KiUserExceptionDispatcher: u64,
+    StackBase: u64,
+    StackLimit: u64,
+    Reserved: [u64, ..5],
+}
+
+// Per-architecture mirrors of the Win32 CONTEXT structure plus the glue
+// that seeds a STACKFRAME64 from a captured context.  Field order must
+// match winnt.h exactly — do not reorder.
+#[cfg(target_arch = "x86")]
+mod arch {
+    use libc;
+
+    const MAXIMUM_SUPPORTED_EXTENSION: uint = 512;
+
+    #[repr(C)]
+    pub struct CONTEXT {
+        ContextFlags: libc::DWORD,
+        Dr0: libc::DWORD,
+        Dr1: libc::DWORD,
+        Dr2: libc::DWORD,
+        Dr3: libc::DWORD,
+        Dr6: libc::DWORD,
+        Dr7: libc::DWORD,
+        FloatSave: FLOATING_SAVE_AREA,
+        SegGs: libc::DWORD,
+        SegFs: libc::DWORD,
+        SegEs: libc::DWORD,
+        SegDs: libc::DWORD,
+        Edi: libc::DWORD,
+        Esi: libc::DWORD,
+        Ebx: libc::DWORD,
+        Edx: libc::DWORD,
+        Ecx: libc::DWORD,
+        Eax: libc::DWORD,
+        Ebp: libc::DWORD,
+        Eip: libc::DWORD,
+        SegCs: libc::DWORD,
+        EFlags: libc::DWORD,
+        Esp: libc::DWORD,
+        SegSs: libc::DWORD,
+        ExtendedRegisters: [u8, ..MAXIMUM_SUPPORTED_EXTENSION],
+    }
+
+    #[repr(C)]
+    pub struct FLOATING_SAVE_AREA {
+        ControlWord: libc::DWORD,
+        StatusWord: libc::DWORD,
+        TagWord: libc::DWORD,
+        ErrorOffset: libc::DWORD,
+        ErrorSelector: libc::DWORD,
+        DataOffset: libc::DWORD,
+        DataSelector: libc::DWORD,
+        RegisterArea: [u8, ..80],
+        Cr0NpxState: libc::DWORD,
+    }
+
+    // Seed PC/stack/frame addresses from the x86 registers and report
+    // the machine type StackWalk64 expects.
+    pub fn init_frame(frame: &mut super::STACKFRAME64,
+                      ctx: &CONTEXT) -> libc::DWORD {
+        frame.AddrPC.Offset = ctx.Eip as u64;
+        frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrStack.Offset = ctx.Esp as u64;
+        frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrFrame.Offset = ctx.Ebp as u64;
+        frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        super::IMAGE_FILE_MACHINE_I386
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+mod arch {
+    use libc::{c_longlong, c_ulonglong};
+    use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
+    use simd;
+
+    #[repr(C)]
+    pub struct CONTEXT {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        P1Home: DWORDLONG,
+        P2Home: DWORDLONG,
+        P3Home: DWORDLONG,
+        P4Home: DWORDLONG,
+        P5Home: DWORDLONG,
+        P6Home: DWORDLONG,
+
+        ContextFlags: DWORD,
+        MxCsr: DWORD,
+
+        SegCs: WORD,
+        SegDs: WORD,
+        SegEs: WORD,
+        SegFs: WORD,
+        SegGs: WORD,
+        SegSs: WORD,
+        EFlags: DWORD,
+
+        Dr0: DWORDLONG,
+        Dr1: DWORDLONG,
+        Dr2: DWORDLONG,
+        Dr3: DWORDLONG,
+        Dr6: DWORDLONG,
+        Dr7: DWORDLONG,
+
+        Rax: DWORDLONG,
+        Rcx: DWORDLONG,
+        Rdx: DWORDLONG,
+        Rbx: DWORDLONG,
+        Rsp: DWORDLONG,
+        Rbp: DWORDLONG,
+        Rsi: DWORDLONG,
+        Rdi: DWORDLONG,
+        R8:  DWORDLONG,
+        R9:  DWORDLONG,
+        R10: DWORDLONG,
+        R11: DWORDLONG,
+        R12: DWORDLONG,
+        R13: DWORDLONG,
+        R14: DWORDLONG,
+        R15: DWORDLONG,
+
+        Rip: DWORDLONG,
+
+        FltSave: FLOATING_SAVE_AREA,
+
+        VectorRegister: [M128A, .. 26],
+        VectorControl: DWORDLONG,
+
+        DebugControl: DWORDLONG,
+        LastBranchToRip: DWORDLONG,
+        LastBranchFromRip: DWORDLONG,
+        LastExceptionToRip: DWORDLONG,
+        LastExceptionFromRip: DWORDLONG,
+    }
+
+    #[repr(C)]
+    pub struct M128A {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        Low:  c_ulonglong,
+        High: c_longlong
+    }
+
+    #[repr(C)]
+    pub struct FLOATING_SAVE_AREA {
+        _align_hack: [simd::u64x2, ..0], // FIXME align on 16-byte
+        _Dummy: [u8, ..512] // FIXME: Fill this out
+    }
+
+    // Seed PC/stack/frame addresses from the x86-64 registers and report
+    // the machine type StackWalk64 expects.
+    pub fn init_frame(frame: &mut super::STACKFRAME64,
+                      ctx: &CONTEXT) -> DWORD {
+        frame.AddrPC.Offset = ctx.Rip as u64;
+        frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrStack.Offset = ctx.Rsp as u64;
+        frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        frame.AddrFrame.Offset = ctx.Rbp as u64;
+        frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
+        super::IMAGE_FILE_MACHINE_AMD64
+    }
+}
+
+// RAII-style wrapper: calls SymCleanup for the process when dropped, so
+// every exit path out of write() below tears down the symbol handler.
+#[repr(C)]
+struct Cleanup {
+    handle: libc::HANDLE,
+    SymCleanup: SymCleanupFn,
+}
+
+impl Drop for Cleanup {
+    fn drop(&mut self) { (self.SymCleanup)(self.handle); }
+}
+
+// Write a symbolized backtrace of the current thread to `w`.  dbghelp is
+// loaded dynamically; if it (or any needed symbol) is unavailable the
+// function silently writes nothing and returns Ok(()).
+pub fn write(w: &mut Writer) -> IoResult<()> {
+    // According to windows documentation, all dbghelp functions are
+    // single-threaded.
+    static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    let _g = unsafe { LOCK.lock() };
+
+    // Open up dbghelp.dll, we don't link to it explicitly because it can't
+    // always be found. Additionally, it's nice having fewer dependencies.
+    let path = Path::new("dbghelp.dll");
+    let lib = match DynamicLibrary::open(Some(&path)) {
+        Ok(lib) => lib,
+        Err(..) => return Ok(()),
+    };
+
+    macro_rules! sym( ($e:expr, $t:ident) => (unsafe {
+        match lib.symbol($e) {
+            Ok(f) => mem::transmute::<*mut u8, $t>(f),
+            Err(..) => return Ok(())
+        }
+    }) )
+
+    // Fetch the symbols necessary from dbghelp.dll
+    let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
+    let SymInitialize = sym!("SymInitialize", SymInitializeFn);
+    let SymCleanup = sym!("SymCleanup", SymCleanupFn);
+    let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
+
+    // Allocate necessary structures for doing the stack walk
+    let process = unsafe { GetCurrentProcess() };
+    let thread = unsafe { GetCurrentThread() };
+    let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
+    unsafe { RtlCaptureContext(&mut context); }
+    let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
+    let image = arch::init_frame(&mut frame, &context);
+
+    // Initialize this process's symbols
+    let ret = SymInitialize(process, 0 as *mut libc::c_void, libc::TRUE);
+    if ret != libc::TRUE { return Ok(()) }
+    let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
+
+    // And now that we're done with all the setup, do the stack walking!
+    let mut i = 0i;
+    try!(write!(w, "stack backtrace:\n"));
+    while StackWalk64(image, process, thread, &mut frame, &mut context,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void,
+                      0 as *mut libc::c_void) == libc::TRUE{
+        let addr = frame.AddrPC.Offset;
+        // A zero or self-returning frame marks the end of the walk.
+        if addr == frame.AddrReturn.Offset || addr == 0 ||
+           frame.AddrReturn.Offset == 0 { break }
+
+        i += 1;
+        try!(write!(w, "  {:2}: {:#2$x}", i, addr, HEX_WIDTH));
+        let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
+        info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
+        // the struct size in C.  the value is different to
+        // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
+        // due to struct alignment.
+        info.SizeOfStruct = 88;
+
+        let mut displacement = 0u64;
+        let ret = SymFromAddr(process, addr as u64, &mut displacement,
+                              &mut info);
+
+        if ret == libc::TRUE {
+            try!(write!(w, " - "));
+            let cstr = unsafe { CString::new(info.Name.as_ptr(), false) };
+            let bytes = cstr.as_bytes();
+            // Demangle when the name is valid UTF-8; otherwise emit the
+            // raw bytes minus the trailing NUL.
+            match cstr.as_str() {
+                Some(s) => try!(demangle(w, s)),
+                None => try!(w.write(bytes[..bytes.len()-1])),
+            }
+        }
+        try!(w.write(&['\n' as u8]));
+    }
+
+    Ok(())
+}
diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs
index d22d4e0f534..6924687d8c4 100644
--- a/src/libstd/sys/windows/mod.rs
+++ b/src/libstd/sys/windows/mod.rs
@@ -35,6 +35,7 @@ macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
     };
 ) }
 
+pub mod backtrace;
 pub mod c;
 pub mod ext;
 pub mod condvar;
@@ -46,7 +47,9 @@ pub mod pipe;
 pub mod process;
 pub mod rwlock;
 pub mod sync;
+pub mod stack_overflow;
 pub mod tcp;
+pub mod thread;
 pub mod thread_local;
 pub mod timer;
 pub mod tty;
diff --git a/src/libstd/sys/windows/stack_overflow.rs b/src/libstd/sys/windows/stack_overflow.rs
new file mode 100644
index 00000000000..e3d96a054f4
--- /dev/null
+++ b/src/libstd/sys/windows/stack_overflow.rs
@@ -0,0 +1,120 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rt::local::Local;
+use rt::task::Task;
+use rt::util::report_overflow;
+use core::prelude::*;
+use ptr;
+use mem;
+use libc;
+use libc::types::os::arch::extra::{LPVOID, DWORD, LONG, BOOL};
+use sys_common::stack;
+
+// Opaque per-thread handler token; on Windows the real state lives in the
+// kernel (SetThreadStackGuarantee), so no data or cleanup is needed.
+pub struct Handler {
+    _data: *mut libc::c_void
+}
+
+impl Handler {
+    pub unsafe fn new() -> Handler {
+        make_handler()
+    }
+}
+
+impl Drop for Handler {
+    fn drop(&mut self) {}
+}
+
+// get_task_guard_page is called from an exception / signal handler.
+// It returns the guard page of the current task or 0 if that
+// guard page doesn't exist. None is returned if there's currently
+// no local task.
+unsafe fn get_task_guard_page() -> Option<uint> {
+    let task: Option<*mut Task> = Local::try_unsafe_borrow();
+    task.map(|task| (&*task).stack_guard().unwrap_or(0))
+}
+
+// This is initialized in init() and only read from afterwards.
+static mut PAGE_SIZE: uint = 0;
+
+// Vectored exception handler: on EXCEPTION_STACK_OVERFLOW, report the
+// overflow (when a task is present) and then continue the search so the
+// default handling still runs.  #[no_stack_check] because we are already
+// out of stack when this is invoked.
+#[no_stack_check]
+extern "system" fn vectored_handler(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG {
+    unsafe {
+        let rec = &(*(*ExceptionInfo).ExceptionRecord);
+        let code = rec.ExceptionCode;
+
+        if code != EXCEPTION_STACK_OVERFLOW {
+            return EXCEPTION_CONTINUE_SEARCH;
+        }
+
+        // We're calling into functions with stack checks,
+        // however stack checks by limit should be disabled on Windows
+        stack::record_sp_limit(0);
+
+        if get_task_guard_page().is_some() {
+           report_overflow();
+        }
+
+        EXCEPTION_CONTINUE_SEARCH
+    }
+}
+
+// Process-wide setup: record the page size, install the vectored handler
+// (FirstHandler == 0 appends it to the handler list), and reserve stack
+// guarantee space for the main thread.
+pub unsafe fn init() {
+    let mut info = mem::zeroed();
+    libc::GetSystemInfo(&mut info);
+    PAGE_SIZE = info.dwPageSize as uint;
+
+    if AddVectoredExceptionHandler(0, vectored_handler) == ptr::null_mut() {
+        panic!("failed to install exception handler");
+    }
+
+    // The handler token is intentionally leaked: the main thread's stack
+    // guarantee must stay in force for the life of the process.
+    mem::forget(make_handler());
+}
+
+pub unsafe fn cleanup() {
+}
+
+// Reserve 0x5000 bytes of stack for the current thread so the overflow
+// handler itself has room to run.
+pub unsafe fn make_handler() -> Handler {
+    if SetThreadStackGuarantee(&mut 0x5000) == 0 {
+        panic!("failed to reserve stack space for exception handling");
+    }
+
+    Handler { _data: 0i as *mut libc::c_void }
+}
+
+// Minimal FFI mirrors of the Win32 exception structures.  They are read
+// through raw pointers handed to us by the OS, so their layout must match
+// the C definitions exactly; #[repr(C)] pins the field order (the default
+// Rust representation leaves it unspecified).
+#[repr(C)]
+pub struct EXCEPTION_RECORD {
+    pub ExceptionCode: DWORD,
+    pub ExceptionFlags: DWORD,
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ExceptionAddress: LPVOID,
+    pub NumberParameters: DWORD,
+    pub ExceptionInformation: [LPVOID, ..EXCEPTION_MAXIMUM_PARAMETERS]
+}
+
+#[repr(C)]
+pub struct EXCEPTION_POINTERS {
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ContextRecord: LPVOID
+}
+
+pub type PVECTORED_EXCEPTION_HANDLER = extern "system"
+        fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
+
+pub type ULONG = libc::c_ulong;
+
+const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
+const EXCEPTION_MAXIMUM_PARAMETERS: uint = 15;
+const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
+
+extern "system" {
+    fn AddVectoredExceptionHandler(FirstHandler: ULONG,
+                                   VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
+                                  -> LPVOID;
+    fn SetThreadStackGuarantee(StackSizeInBytes: *mut ULONG) -> BOOL;
+}
diff --git a/src/libstd/sys/windows/thread.rs b/src/libstd/sys/windows/thread.rs
new file mode 100644
index 00000000000..00f1e9767f5
--- /dev/null
+++ b/src/libstd/sys/windows/thread.rs
@@ -0,0 +1,95 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::prelude::*;
+
+use boxed::Box;
+use cmp;
+use mem;
+use ptr;
+use libc;
+use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
+                                   LPVOID, DWORD, LPDWORD, HANDLE};
+use sys_common::stack::RED_ZONE;
+use sys_common::thread::*;
+
+// Platform aliases: a native thread is a Win32 HANDLE and thread entry
+// points return a DWORD exit code.
+pub type rust_thread = HANDLE;
+pub type rust_thread_return = DWORD;
+
+pub type StartFn = extern "system" fn(*mut libc::c_void) -> rust_thread_return;
+
+// Raw entry point handed to CreateThread; forwards to the shared
+// start_thread glue.  #[no_stack_check] because this runs before the
+// runtime's stack bookkeeping is set up for the new thread.
+#[no_stack_check]
+pub extern "system" fn thread_start(main: *mut libc::c_void) -> rust_thread_return {
+    return start_thread(main);
+}
+
+// Windows overflow detection uses vectored exceptions rather than a guard
+// page address, so all queries report 0 ("no guard page").
+pub mod guard {
+    pub unsafe fn main() -> uint {
+        0
+    }
+
+    pub unsafe fn current() -> uint {
+        0
+    }
+
+    pub unsafe fn init() {
+    }
+}
+
+// Spawn a native Windows thread running closure `p` with at least `stack`
+// bytes of stack.  Ownership of the boxed closure passes to the new thread
+// through a raw pointer; on failure the box is reconstituted so it is
+// freed rather than leaked.
+pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+    let arg: *mut libc::c_void = mem::transmute(p);
+    // FIXME On UNIX, we guard against stack sizes that are too small but
+    // that's because pthreads enforces that stacks are at least
+    // PTHREAD_STACK_MIN bytes big.  Windows has no such lower limit, it's
+    // just that below a certain threshold you can't do anything useful.
+    // That threshold is application and architecture-specific, however.
+    // For now, the only requirement is that it's big enough to hold the
+    // red zone.  Round up to the next 64 kB because that's what the NT
+    // kernel does, might as well make it explicit.  With the current
+    // 20 kB red zone, that makes for a 64 kB minimum stack.
+    //
+    // Bug fix: the previous mask `(-0xfffe - 1)` equals `!0xfffe`, whose
+    // low bits are ...0001 — it keeps bit 0 set, so odd requests produced
+    // a size one byte past a 64 kB boundary.  The standard round-up form
+    // adds 0xffff and clears the low 16 bits.
+    let stack_size = (cmp::max(stack, RED_ZONE) + 0xffff) & !0xffff;
+    let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
+                           thread_start, arg, 0, ptr::null_mut());
+
+    if ret as uint == 0 {
+        // be sure to not leak the closure
+        let _p: Box<proc():Send> = mem::transmute(arg);
+        panic!("failed to spawn native thread: {}", ret);
+    }
+    return ret;
+}
+
+// Block until the given thread exits.
+// NOTE(review): the WaitForSingleObject return value is ignored, so a
+// WAIT_FAILED result goes unnoticed — confirm this is intentional.
+pub unsafe fn join(native: rust_thread) {
+    use libc::consts::os::extra::INFINITE;
+    WaitForSingleObject(native, INFINITE);
+}
+
+// Drop our handle to the thread; the thread itself keeps running.
+pub unsafe fn detach(native: rust_thread) {
+    assert!(libc::CloseHandle(native) != 0);
+}
+
+pub unsafe fn yield_now() {
+    // This function will return 0 if there are no other threads to execute,
+    // but this also means that the yield was useless so this isn't really a
+    // case that needs to be worried about.
+    SwitchToThread();
+}
+
+// kernel32 entry points used above.
+#[allow(non_snake_case)]
+extern "system" {
+    fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
+                    dwStackSize: SIZE_T,
+                    lpStartAddress: StartFn,
+                    lpParameter: LPVOID,
+                    dwCreationFlags: DWORD,
+                    lpThreadId: LPDWORD) -> HANDLE;
+    fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
+    fn SwitchToThread() -> BOOL;
+}
diff --git a/src/libstd/sys/windows/thread_local.rs b/src/libstd/sys/windows/thread_local.rs
index 969b322af99..6c8d9639d5c 100644
--- a/src/libstd/sys/windows/thread_local.rs
+++ b/src/libstd/sys/windows/thread_local.rs
@@ -13,8 +13,8 @@ use prelude::*;
 use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
 
 use mem;
-use rustrt;
-use rustrt::exclusive::Exclusive;
+use rt;
+use rt::exclusive::Exclusive;
 use sync::{ONCE_INIT, Once};
 
 pub type Key = DWORD;
@@ -131,7 +131,7 @@ fn init_dtors() {
         DTORS = mem::transmute(dtors);
     }
 
-    rustrt::at_exit(move|| unsafe {
+    rt::at_exit(move|| unsafe {
         mem::transmute::<_, Box<Exclusive<Vec<(Key, Dtor)>>>>(DTORS);
         DTORS = 0 as *mut _;
     });